//===-- FixupStatepointCallerSaved.cpp - Fixup caller saved registers ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// The deopt section of a statepoint instruction contains values that are
/// meaningful to the runtime and must still be readable at the moment the
/// call returns; in other words, these values are "late read" by the runtime.
/// If we could express this notion to the register allocator it would produce
/// the right form for us, but we cannot describe such a late read, so the
/// allocator may place a value in a register that is clobbered by the call.
/// This pass forces such registers to be spilled and rewrites the
/// corresponding statepoint operands to refer to the added spill slots.
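///
/// A minimal illustrative sketch (not exact MIR syntax; register names are
/// only an example): a deopt value that the allocator assigned to a
/// caller-saved register such as $rcx on x86-64,
///
///   STATEPOINT ..., deopt($rcx), ...
///
/// becomes a spill before the call plus an indirect stack-slot reference in
/// the statepoint itself:
///
///   spill $rcx to %stack.0
///   STATEPOINT ..., deopt(indirect %stack.0), ...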
///
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "fixup-statepoint-caller-saved"
STATISTIC(NumSpilledRegisters, "Number of spilled registers");
STATISTIC(NumSpillSlotsAllocated, "Number of spill slots allocated");
STATISTIC(NumSpillSlotsExtended, "Number of spill slots extended");

static cl::opt<bool> FixupSCSExtendSlotSize(
    "fixup-scs-extend-slot-size", cl::Hidden, cl::init(false),
    cl::desc("Allow spill in spill slot of greater size than register size"));

static cl::opt<bool> PassGCPtrInCSR(
    "fixup-allow-gcptr-in-csr", cl::Hidden, cl::init(false),
    cl::desc("Allow passing GC Pointer arguments in callee saved registers"));

static cl::opt<bool> EnableCopyProp(
    "fixup-scs-enable-copy-propagation", cl::Hidden, cl::init(true),
    cl::desc("Enable simple copy propagation during register reloading"));

// This is a purely debugging option.
// It may be handy for investigating statepoint spilling issues.
static cl::opt<unsigned> MaxStatepointsWithRegs(
    "fixup-max-csr-statepoints", cl::Hidden,
    cl::desc("Max number of statepoints allowed to pass GC Ptrs in registers"));

namespace {

class FixupStatepointCallerSaved : public MachineFunctionPass {
public:
  static char ID;

  FixupStatepointCallerSaved() : MachineFunctionPass(ID) {
    initializeFixupStatepointCallerSavedPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return "Fixup Statepoint Caller Saved";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // End anonymous namespace.

char FixupStatepointCallerSaved::ID = 0;
char &llvm::FixupStatepointCallerSavedID = FixupStatepointCallerSaved::ID;

INITIALIZE_PASS_BEGIN(FixupStatepointCallerSaved, DEBUG_TYPE,
                      "Fixup Statepoint Caller Saved", false, false)
INITIALIZE_PASS_END(FixupStatepointCallerSaved, DEBUG_TYPE,
                    "Fixup Statepoint Caller Saved", false, false)

// Utility function to get size of the register.
static unsigned getRegisterSize(const TargetRegisterInfo &TRI, Register Reg) {
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
  return TRI.getSpillSize(*RC);
}

// Try to eliminate a redundant copy into the register we are going to
// spill, i.e. try to change:
//   X = COPY Y
//   SPILL X
// to
//   SPILL Y
// If there are no uses of X between the COPY and the STATEPOINT, the COPY
// may be eliminated.
// Reg    - the register we are about to spill.
// RI     - on entry, points to the statepoint; on successful copy
//          propagation it is set to the new spill point.
// IsKill - set to true if the COPY kills its source (there are no uses of Y).
// Returns the source register of the found COPY, or the original register.
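// Note that if the statepoint itself uses Reg as a non-meta operand (e.g. as
// a call argument), the register stays live into the call: no propagation is
// attempted and IsKill is cleared so the spill does not kill it.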
static Register performCopyPropagation(Register Reg,
                                       MachineBasicBlock::iterator &RI,
                                       bool &IsKill, const TargetInstrInfo &TII,
                                       const TargetRegisterInfo &TRI) {
  // First check if statepoint itself uses Reg in non-meta operands.
  int Idx = RI->findRegisterUseOperandIdx(Reg, false, &TRI);
  if (Idx >= 0 && (unsigned)Idx < StatepointOpers(&*RI).getNumDeoptArgsIdx()) {
    IsKill = false;
    return Reg;
  }

  if (!EnableCopyProp)
    return Reg;

  MachineBasicBlock *MBB = RI->getParent();
  MachineBasicBlock::reverse_iterator E = MBB->rend();
  MachineInstr *Def = nullptr, *Use = nullptr;
  for (auto It = ++(RI.getReverse()); It != E; ++It) {
    if (It->readsRegister(Reg, &TRI) && !Use)
      Use = &*It;
    if (It->modifiesRegister(Reg, &TRI)) {
      Def = &*It;
      break;
    }
  }

  if (!Def)
    return Reg;

  auto DestSrc = TII.isCopyInstr(*Def);
  if (!DestSrc || DestSrc->Destination->getReg() != Reg)
    return Reg;

  Register SrcReg = DestSrc->Source->getReg();

  if (getRegisterSize(TRI, Reg) != getRegisterSize(TRI, SrcReg))
    return Reg;

  LLVM_DEBUG(dbgs() << "spillRegisters: perform copy propagation "
                    << printReg(Reg, &TRI) << " -> " << printReg(SrcReg, &TRI)
                    << "\n");

  // Insert spill immediately after Def.
  RI = ++MachineBasicBlock::iterator(Def);
  IsKill = DestSrc->Source->isKill();

  if (!Use) {
    // There are no uses of original register between COPY and STATEPOINT.
    // There can't be any after STATEPOINT, so we can eliminate Def.
    LLVM_DEBUG(dbgs() << "spillRegisters: removing dead copy " << *Def);
    Def->eraseFromParent();
  } else if (IsKill) {
    // COPY will remain in place, spill will be inserted *after* it, so it is
    // not a kill of source anymore.
    const_cast<MachineOperand *>(DestSrc->Source)->setIsKill(false);
  }

  return SrcReg;
}

namespace {
// Pair {Register, FrameIndex}
using RegSlotPair = std::pair<Register, int>;

// Keeps track of what reloads were inserted in MBB.
class RegReloadCache {
  using ReloadSet = SmallSet<RegSlotPair, 8>;
  DenseMap<const MachineBasicBlock *, ReloadSet> Reloads;

public:
  RegReloadCache() = default;

  // Record reload of Reg from FI in block MBB.
  void recordReload(Register Reg, int FI, const MachineBasicBlock *MBB) {
    RegSlotPair RSP(Reg, FI);
    auto Res = Reloads[MBB].insert(RSP);
    (void)Res;
    assert(Res.second && "reload already exists");
  }

  // Does basic block MBB contain a reload of Reg from FI?
  bool hasReload(Register Reg, int FI, const MachineBasicBlock *MBB) {
    RegSlotPair RSP(Reg, FI);
    return Reloads.count(MBB) && Reloads[MBB].count(RSP);
  }
};

// Cache of frame indexes used while rewriting one statepoint so they can be
// reused when processing the next statepoint instruction.
// There are two strategies: either preserve the exact size of each spill
// slot, or extend slots so that they can be shared across sizes, which
// reduces the number of slots and thus the total frame size. With extended
// slots, however, a reload implicitly performs an "any extend" of the value.
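// Illustrative example (sizes are arbitrary): with slot extension enabled, an
// 8-byte spill from one statepoint and a 16-byte spill from the next can share
// a single slot that gets extended to 16 bytes, whereas the size-preserving
// strategy would keep separate 8-byte and 16-byte slots.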
class FrameIndexesCache {
private:
  struct FrameIndexesPerSize {
    // Frame indexes used while processing previous statepoints.
    SmallVector<int, 8> Slots;
    // Index of the first not-yet-used frame index in Slots.
    unsigned Index = 0;
  };
  MachineFrameInfo &MFI;
  const TargetRegisterInfo &TRI;
  // Map from size to the list of frame indexes of that size. In
  // FixupSCSExtendSlotSize mode the key 0 is used to keep all frame indexes.
  // If the required spill slot is larger than a cached one, the cached slot
  // is enlarged.
  DenseMap<unsigned, FrameIndexesPerSize> Cache;

  // Keeps track of slots reserved for the shared landing pad processing.
  // Initialized from GlobalIndices for the current EHPad.
  SmallSet<int, 8> ReservedSlots;

  // A landing pad can be the destination of several statepoints. Every
  // register defined by such statepoints must be spilled to the same stack
  // slot. This map keeps that information.
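  // For example, if two invoke statepoints unwind to the same EHPad and both
  // relocate a GC pointer living in the same physical register, the single
  // reload inserted at the EHPad reads from one fixed slot, so both
  // statepoints must spill that register to that same slot.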
  DenseMap<const MachineBasicBlock *, SmallVector<RegSlotPair, 8>>
      GlobalIndices;

  FrameIndexesPerSize &getCacheBucket(unsigned Size) {
    // In FixupSCSExtendSlotSize mode the bucket with 0 index is used
    // for all sizes.
    return Cache[FixupSCSExtendSlotSize ? 0 : Size];
  }

public:
  FrameIndexesCache(MachineFrameInfo &MFI, const TargetRegisterInfo &TRI)
      : MFI(MFI), TRI(TRI) {}
  // Reset the current state of used frame indexes. After invocation of
  // this function all frame indexes are available for allocation with
  // the exception of slots reserved for landing pad processing (if any).
  void reset(const MachineBasicBlock *EHPad) {
    for (auto &It : Cache)
      It.second.Index = 0;

    ReservedSlots.clear();
    if (EHPad && GlobalIndices.count(EHPad))
      for (auto &RSP : GlobalIndices[EHPad])
        ReservedSlots.insert(RSP.second);
  }

  // Get frame index to spill the register.
  int getFrameIndex(Register Reg, MachineBasicBlock *EHPad) {
    // Check if slot for Reg is already reserved at EHPad.
    auto It = GlobalIndices.find(EHPad);
    if (It != GlobalIndices.end()) {
      auto &Vec = It->second;
      auto Idx = llvm::find_if(
          Vec, [Reg](RegSlotPair &RSP) { return Reg == RSP.first; });
      if (Idx != Vec.end()) {
        int FI = Idx->second;
        LLVM_DEBUG(dbgs() << "Found global FI " << FI << " for register "
                          << printReg(Reg, &TRI) << " at "
                          << printMBBReference(*EHPad) << "\n");
        assert(ReservedSlots.count(FI) && "using unreserved slot");
        return FI;
      }
    }

    unsigned Size = getRegisterSize(TRI, Reg);
    FrameIndexesPerSize &Line = getCacheBucket(Size);
    while (Line.Index < Line.Slots.size()) {
      int FI = Line.Slots[Line.Index++];
      if (ReservedSlots.count(FI))
        continue;
      // If all sizes are kept together we probably need to extend the
      // spill slot size.
      if (MFI.getObjectSize(FI) < Size) {
        MFI.setObjectSize(FI, Size);
        MFI.setObjectAlignment(FI, Align(Size));
        NumSpillSlotsExtended++;
      }
      return FI;
    }
    int FI = MFI.CreateSpillStackObject(Size, Align(Size));
    NumSpillSlotsAllocated++;
    Line.Slots.push_back(FI);
    ++Line.Index;

    // Remember assignment {Reg, FI} for EHPad.
    if (EHPad) {
      GlobalIndices[EHPad].push_back(std::make_pair(Reg, FI));
      LLVM_DEBUG(dbgs() << "Reserved FI " << FI << " for spilling reg "
                        << printReg(Reg, &TRI) << " at landing pad "
                        << printMBBReference(*EHPad) << "\n");
    }

    return FI;
  }

  // Sort all registers to spill in descending order of size. In
  // FixupSCSExtendSlotSize mode this minimizes the total frame size, because
  // the largest slots are allocated first and smaller registers can then
  // reuse them without extension. In non-FixupSCSExtendSlotSize mode this
  // step can be skipped.
  void sortRegisters(SmallVectorImpl<Register> &Regs) {
    if (!FixupSCSExtendSlotSize)
      return;
    llvm::sort(Regs, [&](Register &A, Register &B) {
      return getRegisterSize(TRI, A) > getRegisterSize(TRI, B);
    });
  }
};

// Describes the state of the statepoint instruction currently being
// processed.
class StatepointState {
private:
  // The statepoint instruction.
  MachineInstr &MI;
  MachineFunction &MF;
  // If non-null then the statepoint is an invoke, and this points to the
  // landing pad.
  MachineBasicBlock *EHPad;
  const TargetRegisterInfo &TRI;
  const TargetInstrInfo &TII;
  MachineFrameInfo &MFI;
  // Register mask of callee saved registers for the statepoint's calling
  // convention.
  const uint32_t *Mask;
  // Cache of frame indexes used while processing previous statepoints.
  FrameIndexesCache &CacheFI;
  bool AllowGCPtrInCSR;
  // Operands with physical registers requiring spilling.
  SmallVector<unsigned, 8> OpsToSpill;
  // Set of registers to spill.
  SmallVector<Register, 8> RegsToSpill;
  // Set of registers to reload after statepoint.
  SmallVector<Register, 8> RegsToReload;
  // Map from register to frame slot index.
  DenseMap<Register, int> RegToSlotIdx;

public:
  StatepointState(MachineInstr &MI, const uint32_t *Mask,
                  FrameIndexesCache &CacheFI, bool AllowGCPtrInCSR)
      : MI(MI), MF(*MI.getMF()), TRI(*MF.getSubtarget().getRegisterInfo()),
        TII(*MF.getSubtarget().getInstrInfo()), MFI(MF.getFrameInfo()),
        Mask(Mask), CacheFI(CacheFI), AllowGCPtrInCSR(AllowGCPtrInCSR) {

    // Find statepoint's landing pad, if any.
    EHPad = nullptr;
    MachineBasicBlock *MBB = MI.getParent();
    // An invoke statepoint must be the last statepoint in its block.
    bool Last = std::none_of(++MI.getIterator(), MBB->end().getInstrIterator(),
                             [](MachineInstr &I) {
                               return I.getOpcode() == TargetOpcode::STATEPOINT;
                             });

    if (!Last)
      return;

    auto IsEHPad = [](MachineBasicBlock *B) { return B->isEHPad(); };

    assert(llvm::count_if(MBB->successors(), IsEHPad) < 2 && "multiple EHPads");

    auto It = llvm::find_if(MBB->successors(), IsEHPad);
    if (It != MBB->succ_end())
      EHPad = *It;
  }

  MachineBasicBlock *getEHPad() const { return EHPad; }

  // Return true if register is callee saved.
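  // The call-preserved regmask has a bit set for every register that is
  // preserved (callee saved) across a call with the statepoint's calling
  // convention, hence the bit test below.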
  bool isCalleeSaved(Register Reg) { return (Mask[Reg / 32] >> Reg % 32) & 1; }

  // Iterates over the statepoint meta args to find caller-saved registers.
  // Returns true if any caller-saved registers were found.
  bool findRegistersToSpill() {
    SmallSet<Register, 8> GCRegs;
    // All GC pointer operands assigned to registers produce a new value.
    // Since they're tied to their defs, it is enough to collect def registers.
    for (const auto &Def : MI.defs())
      GCRegs.insert(Def.getReg());

    SmallSet<Register, 8> VisitedRegs;
    for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
                  EndIdx = MI.getNumOperands();
         Idx < EndIdx; ++Idx) {
      MachineOperand &MO = MI.getOperand(Idx);
      // Leave `undef` operands as is, StackMaps will rewrite them
      // into a constant.
      if (!MO.isReg() || MO.isImplicit() || MO.isUndef())
        continue;
      Register Reg = MO.getReg();
      assert(Reg.isPhysical() && "Only physical regs are expected");

      if (isCalleeSaved(Reg) && (AllowGCPtrInCSR || !GCRegs.contains(Reg)))
        continue;

      LLVM_DEBUG(dbgs() << "Will spill " << printReg(Reg, &TRI) << " at index "
                        << Idx << "\n");

      if (VisitedRegs.insert(Reg).second)
        RegsToSpill.push_back(Reg);
      OpsToSpill.push_back(Idx);
    }
    CacheFI.sortRegisters(RegsToSpill);
    return !RegsToSpill.empty();
  }

  // Spill all caller saved registers right before statepoint instruction.
  // Remember frame index where register is spilled.
  void spillRegisters() {
    for (Register Reg : RegsToSpill) {
      int FI = CacheFI.getFrameIndex(Reg, EHPad);

      NumSpilledRegisters++;
      RegToSlotIdx[Reg] = FI;

      LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, &TRI) << " to FI " << FI
                        << "\n");

      // Perform trivial copy propagation
      bool IsKill = true;
      MachineBasicBlock::iterator InsertBefore(MI);
      Reg = performCopyPropagation(Reg, InsertBefore, IsKill, TII, TRI);
      const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);

      LLVM_DEBUG(dbgs() << "Insert spill before " << *InsertBefore);
      TII.storeRegToStackSlot(*MI.getParent(), InsertBefore, Reg, IsKill, FI,
                              RC, &TRI, Register());
    }
  }

  void insertReloadBefore(unsigned Reg, MachineBasicBlock::iterator It,
                          MachineBasicBlock *MBB) {
    const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
    int FI = RegToSlotIdx[Reg];
    if (It != MBB->end()) {
      TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, &TRI, Register());
      return;
    }

    // To insert a reload at the end of MBB, insert it before the last
    // instruction and then swap them.
    assert(!MBB->empty() && "Empty block");
    --It;
    TII.loadRegFromStackSlot(*MBB, It, Reg, FI, RC, &TRI, Register());
    MachineInstr *Reload = It->getPrevNode();
    int Dummy = 0;
    (void)Dummy;
    assert(TII.isLoadFromStackSlot(*Reload, Dummy) == Reg);
    assert(Dummy == FI);
    MBB->remove(Reload);
    MBB->insertAfter(It, Reload);
  }

  // Insert reloads of (relocated) registers spilled in statepoint.
  void insertReloads(MachineInstr *NewStatepoint, RegReloadCache &RC) {
    MachineBasicBlock *MBB = NewStatepoint->getParent();
    auto InsertPoint = std::next(NewStatepoint->getIterator());

    for (auto Reg : RegsToReload) {
      insertReloadBefore(Reg, InsertPoint, MBB);
      LLVM_DEBUG(dbgs() << "Reloading " << printReg(Reg, &TRI) << " from FI "
                        << RegToSlotIdx[Reg] << " after statepoint\n");

      if (EHPad && !RC.hasReload(Reg, RegToSlotIdx[Reg], EHPad)) {
        RC.recordReload(Reg, RegToSlotIdx[Reg], EHPad);
        auto EHPadInsertPoint =
            EHPad->SkipPHIsLabelsAndDebug(EHPad->begin(), Reg);
        insertReloadBefore(Reg, EHPadInsertPoint, EHPad);
        LLVM_DEBUG(dbgs() << "...also reload at EHPad "
                          << printMBBReference(*EHPad) << "\n");
      }
    }
  }

  // Rewrite the statepoint machine instruction to replace caller-saved
  // operands with indirect memory locations (frame indexes).
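  // Each spilled operand is emitted as the operand sequence
  // (StackMaps::IndirectMemRefOp, <size in bytes>, <frame index>, 0), which
  // StackMaps lowering records as an indirect [FI + 0] location. A rough
  // sketch (illustrative, not exact MIR syntax):
  //   STATEPOINT ..., $rcx, ...
  //     ==>  STATEPOINT ..., IndirectMemRefOp, 8, %stack.0, 0, ...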
  MachineInstr *rewriteStatepoint() {
    MachineInstr *NewMI =
        MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
    MachineInstrBuilder MIB(MF, NewMI);

    unsigned NumOps = MI.getNumOperands();

    // New indices for the remaining defs.
    SmallVector<unsigned, 8> NewIndices;
    unsigned NumDefs = MI.getNumDefs();
    for (unsigned I = 0; I < NumDefs; ++I) {
      MachineOperand &DefMO = MI.getOperand(I);
      assert(DefMO.isReg() && DefMO.isDef() && "Expected Reg Def operand");
      Register Reg = DefMO.getReg();
      assert(DefMO.isTied() && "Def is expected to be tied");
      // We skipped undef uses and did not spill them, so we should not
      // proceed with defs here.
      if (MI.getOperand(MI.findTiedOperandIdx(I)).isUndef()) {
        if (AllowGCPtrInCSR) {
          NewIndices.push_back(NewMI->getNumOperands());
          MIB.addReg(Reg, RegState::Define);
        }
        continue;
      }
      if (!AllowGCPtrInCSR) {
        assert(is_contained(RegsToSpill, Reg));
        RegsToReload.push_back(Reg);
      } else {
        if (isCalleeSaved(Reg)) {
          NewIndices.push_back(NewMI->getNumOperands());
          MIB.addReg(Reg, RegState::Define);
        } else {
          NewIndices.push_back(NumOps);
          RegsToReload.push_back(Reg);
        }
      }
    }

    // Add End marker.
    OpsToSpill.push_back(MI.getNumOperands());
    unsigned CurOpIdx = 0;

    for (unsigned I = NumDefs; I < MI.getNumOperands(); ++I) {
      MachineOperand &MO = MI.getOperand(I);
      if (I == OpsToSpill[CurOpIdx]) {
        int FI = RegToSlotIdx[MO.getReg()];
        MIB.addImm(StackMaps::IndirectMemRefOp);
        MIB.addImm(getRegisterSize(TRI, MO.getReg()));
        assert(MO.isReg() && "Should be register");
        assert(MO.getReg().isPhysical() && "Should be physical register");
        MIB.addFrameIndex(FI);
        MIB.addImm(0);
        ++CurOpIdx;
      } else {
        MIB.add(MO);
        unsigned OldDef;
        if (AllowGCPtrInCSR && MI.isRegTiedToDefOperand(I, &OldDef)) {
          assert(OldDef < NumDefs);
          assert(NewIndices[OldDef] < NumOps);
          MIB->tieOperands(NewIndices[OldDef], MIB->getNumOperands() - 1);
        }
      }
    }
    assert(CurOpIdx == (OpsToSpill.size() - 1) && "Not all operands processed");
    // Add mem operands.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (auto It : RegToSlotIdx) {
      Register R = It.first;
      int FrameIndex = It.second;
      auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
      if (is_contained(RegsToReload, R))
        Flags |= MachineMemOperand::MOStore;
      auto *MMO =
          MF.getMachineMemOperand(PtrInfo, Flags, getRegisterSize(TRI, R),
                                  MFI.getObjectAlign(FrameIndex));
      NewMI->addMemOperand(MF, MMO);
    }

    // Insert new statepoint and erase old one.
    MI.getParent()->insert(MI, NewMI);

    LLVM_DEBUG(dbgs() << "rewritten statepoint to : " << *NewMI << "\n");
    MI.eraseFromParent();
    return NewMI;
  }
};

class StatepointProcessor {
private:
  MachineFunction &MF;
  const TargetRegisterInfo &TRI;
  FrameIndexesCache CacheFI;
  RegReloadCache ReloadCache;

public:
  StatepointProcessor(MachineFunction &MF)
      : MF(MF), TRI(*MF.getSubtarget().getRegisterInfo()),
        CacheFI(MF.getFrameInfo(), TRI) {}

  bool process(MachineInstr &MI, bool AllowGCPtrInCSR) {
    StatepointOpers SO(&MI);
    uint64_t Flags = SO.getFlags();
    // Do nothing for LiveIn, it supports all registers.
    if (Flags & (uint64_t)StatepointFlags::DeoptLiveIn)
      return false;
    LLVM_DEBUG(dbgs() << "\nMBB " << MI.getParent()->getNumber() << " "
                      << MI.getParent()->getName() << " : process statepoint "
                      << MI);
    CallingConv::ID CC = SO.getCallingConv();
    const uint32_t *Mask = TRI.getCallPreservedMask(MF, CC);
    StatepointState SS(MI, Mask, CacheFI, AllowGCPtrInCSR);
    CacheFI.reset(SS.getEHPad());

    if (!SS.findRegistersToSpill())
      return false;

    SS.spillRegisters();
    auto *NewStatepoint = SS.rewriteStatepoint();
    SS.insertReloads(NewStatepoint, ReloadCache);
    return true;
  }
};
} // namespace

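// Driver: collect all STATEPOINT instructions in the function and rewrite each
// one. The fixup-max-csr-statepoints cutoff (a debugging aid) stops passing GC
// pointers in callee-saved registers once the statepoint count reaches the
// threshold.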
bool FixupStatepointCallerSaved::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const Function &F = MF.getFunction();
  if (!F.hasGC())
    return false;

  SmallVector<MachineInstr *, 16> Statepoints;
  for (MachineBasicBlock &BB : MF)
    for (MachineInstr &I : BB)
      if (I.getOpcode() == TargetOpcode::STATEPOINT)
        Statepoints.push_back(&I);

  if (Statepoints.empty())
    return false;

  bool Changed = false;
  StatepointProcessor SPP(MF);
  unsigned NumStatepoints = 0;
  bool AllowGCPtrInCSR = PassGCPtrInCSR;
  for (MachineInstr *I : Statepoints) {
    ++NumStatepoints;
    if (MaxStatepointsWithRegs.getNumOccurrences() &&
        NumStatepoints >= MaxStatepointsWithRegs)
      AllowGCPtrInCSR = false;
    Changed |= SPP.process(*I, AllowGCPtrInCSR);
  }
  return Changed;
}
