1//===- RegisterScavenging.cpp - Machine register scavenging ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file implements the machine register scavenger. It can provide
11/// information, such as unused registers, at any point in a machine basic
12/// block. It also provides a mechanism to make registers available by evicting
13/// them to spill slots.
14//
15//===----------------------------------------------------------------------===//
16
17#include "llvm/CodeGen/RegisterScavenging.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/Statistic.h"
22#include "llvm/CodeGen/LiveRegUnits.h"
23#include "llvm/CodeGen/MachineBasicBlock.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineFunction.h"
26#include "llvm/CodeGen/MachineFunctionPass.h"
27#include "llvm/CodeGen/MachineInstr.h"
28#include "llvm/CodeGen/MachineOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/TargetFrameLowering.h"
31#include "llvm/CodeGen/TargetInstrInfo.h"
32#include "llvm/CodeGen/TargetRegisterInfo.h"
33#include "llvm/CodeGen/TargetSubtargetInfo.h"
34#include "llvm/InitializePasses.h"
35#include "llvm/MC/MCRegisterInfo.h"
36#include "llvm/Pass.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/ErrorHandling.h"
39#include "llvm/Support/raw_ostream.h"
40#include <cassert>
41#include <iterator>
42#include <limits>
43#include <utility>
44
45using namespace llvm;
46
47#define DEBUG_TYPE "reg-scavenging"
48
49STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
50
51void RegScavenger::setRegUsed(Register Reg, LaneBitmask LaneMask) {
52 LiveUnits.addRegMasked(Reg, Mask: LaneMask);
53}
54
55void RegScavenger::init(MachineBasicBlock &MBB) {
56 MachineFunction &MF = *MBB.getParent();
57 TII = MF.getSubtarget().getInstrInfo();
58 TRI = MF.getSubtarget().getRegisterInfo();
59 MRI = &MF.getRegInfo();
60 LiveUnits.init(TRI: *TRI);
61
62 this->MBB = &MBB;
63
64 for (ScavengedInfo &SI : Scavenged) {
65 SI.Reg = 0;
66 SI.Restore = nullptr;
67 }
68}
69
/// Start scavenging at the top of \p MBB: seed liveness from the block's
/// live-in set and position the iterator at the first instruction.
void RegScavenger::enterBasicBlock(MachineBasicBlock &MBB) {
  init(MBB);
  LiveUnits.addLiveIns(MBB);
  MBBI = MBB.begin();
}
75
/// Start scavenging at the bottom of \p MBB: seed liveness from the block's
/// live-out set and position the iterator at the end, ready for backward()
/// stepping.
void RegScavenger::enterBasicBlockEnd(MachineBasicBlock &MBB) {
  init(MBB);
  LiveUnits.addLiveOuts(MBB);
  MBBI = MBB.end();
}
81
82void RegScavenger::backward() {
83 const MachineInstr &MI = *--MBBI;
84 LiveUnits.stepBackward(MI);
85
86 // Expire scavenge spill frameindex uses.
87 for (ScavengedInfo &I : Scavenged) {
88 if (I.Restore == &MI) {
89 I.Reg = 0;
90 I.Restore = nullptr;
91 }
92 }
93}
94
95bool RegScavenger::isRegUsed(Register Reg, bool includeReserved) const {
96 if (isReserved(Reg))
97 return includeReserved;
98 return !LiveUnits.available(Reg);
99}
100
101Register RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
102 for (Register Reg : *RC) {
103 if (!isRegUsed(Reg)) {
104 LLVM_DEBUG(dbgs() << "Scavenger found unused reg: " << printReg(Reg, TRI)
105 << "\n");
106 return Reg;
107 }
108 }
109 return 0;
110}
111
112BitVector RegScavenger::getRegsAvailable(const TargetRegisterClass *RC) {
113 BitVector Mask(TRI->getNumRegs());
114 for (Register Reg : *RC)
115 if (!isRegUsed(Reg))
116 Mask.set(Reg);
117 return Mask;
118}
119
120/// Given the bitvector \p Available of free register units at position
121/// \p From. Search backwards to find a register that is part of \p
122/// Candidates and not used/clobbered until the point \p To. If there is
123/// multiple candidates continue searching and pick the one that is not used/
124/// clobbered for the longest time.
125/// Returns the register and the earliest position we know it to be free or
126/// the position MBB.end() if no register is available.
127static std::pair<MCPhysReg, MachineBasicBlock::iterator>
128findSurvivorBackwards(const MachineRegisterInfo &MRI,
129 MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
130 const LiveRegUnits &LiveOut, ArrayRef<MCPhysReg> AllocationOrder,
131 bool RestoreAfter) {
132 bool FoundTo = false;
133 MCPhysReg Survivor = 0;
134 MachineBasicBlock::iterator Pos;
135 MachineBasicBlock &MBB = *From->getParent();
136 unsigned InstrLimit = 25;
137 unsigned InstrCountDown = InstrLimit;
138 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
139 LiveRegUnits Used(TRI);
140
141 assert(From->getParent() == To->getParent() &&
142 "Target instruction is in other than current basic block, use "
143 "enterBasicBlockEnd first");
144
145 for (MachineBasicBlock::iterator I = From;; --I) {
146 const MachineInstr &MI = *I;
147
148 Used.accumulate(MI);
149
150 if (I == To) {
151 // See if one of the registers in RC wasn't used so far.
152 for (MCPhysReg Reg : AllocationOrder) {
153 if (!MRI.isReserved(PhysReg: Reg) && Used.available(Reg) &&
154 LiveOut.available(Reg))
155 return std::make_pair(x&: Reg, y: MBB.end());
156 }
157 // Otherwise we will continue up to InstrLimit instructions to find
158 // the register which is not defined/used for the longest time.
159 FoundTo = true;
160 Pos = To;
161 // Note: It was fine so far to start our search at From, however now that
162 // we have to spill, and can only place the restore after From then
163 // add the regs used/defed by std::next(From) to the set.
164 if (RestoreAfter)
165 Used.accumulate(MI: *std::next(x: From));
166 }
167 if (FoundTo) {
168 // Don't search to FrameSetup instructions if we were searching from
169 // Non-FrameSetup instructions. Otherwise, the spill position may point
170 // before FrameSetup instructions.
171 if (!From->getFlag(Flag: MachineInstr::FrameSetup) &&
172 MI.getFlag(Flag: MachineInstr::FrameSetup))
173 break;
174
175 if (Survivor == 0 || !Used.available(Reg: Survivor)) {
176 MCPhysReg AvilableReg = 0;
177 for (MCPhysReg Reg : AllocationOrder) {
178 if (!MRI.isReserved(PhysReg: Reg) && Used.available(Reg)) {
179 AvilableReg = Reg;
180 break;
181 }
182 }
183 if (AvilableReg == 0)
184 break;
185 Survivor = AvilableReg;
186 }
187 if (--InstrCountDown == 0)
188 break;
189
190 // Keep searching when we find a vreg since the spilled register will
191 // be usefull for this other vreg as well later.
192 bool FoundVReg = false;
193 for (const MachineOperand &MO : MI.operands()) {
194 if (MO.isReg() && MO.getReg().isVirtual()) {
195 FoundVReg = true;
196 break;
197 }
198 }
199 if (FoundVReg) {
200 InstrCountDown = InstrLimit;
201 Pos = I;
202 }
203 if (I == MBB.begin())
204 break;
205 }
206 assert(I != MBB.begin() && "Did not find target instruction while "
207 "iterating backwards");
208 }
209
210 return std::make_pair(x&: Survivor, y&: Pos);
211}
212
213static unsigned getFrameIndexOperandNum(MachineInstr &MI) {
214 unsigned i = 0;
215 while (!MI.getOperand(i).isFI()) {
216 ++i;
217 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
218 }
219 return i;
220}
221
222RegScavenger::ScavengedInfo &
223RegScavenger::spill(Register Reg, const TargetRegisterClass &RC, int SPAdj,
224 MachineBasicBlock::iterator Before,
225 MachineBasicBlock::iterator &UseMI) {
226 // Find an available scavenging slot with size and alignment matching
227 // the requirements of the class RC.
228 const MachineFunction &MF = *Before->getMF();
229 const MachineFrameInfo &MFI = MF.getFrameInfo();
230 unsigned NeedSize = TRI->getSpillSize(RC);
231 Align NeedAlign = TRI->getSpillAlign(RC);
232
233 unsigned SI = Scavenged.size(), Diff = std::numeric_limits<unsigned>::max();
234 int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();
235 for (unsigned I = 0; I < Scavenged.size(); ++I) {
236 if (Scavenged[I].Reg != 0)
237 continue;
238 // Verify that this slot is valid for this register.
239 int FI = Scavenged[I].FrameIndex;
240 if (FI < FIB || FI >= FIE)
241 continue;
242 unsigned S = MFI.getObjectSize(ObjectIdx: FI);
243 Align A = MFI.getObjectAlign(ObjectIdx: FI);
244 if (NeedSize > S || NeedAlign > A)
245 continue;
246 // Avoid wasting slots with large size and/or large alignment. Pick one
247 // that is the best fit for this register class (in street metric).
248 // Picking a larger slot than necessary could happen if a slot for a
249 // larger register is reserved before a slot for a smaller one. When
250 // trying to spill a smaller register, the large slot would be found
251 // first, thus making it impossible to spill the larger register later.
252 unsigned D = (S - NeedSize) + (A.value() - NeedAlign.value());
253 if (D < Diff) {
254 SI = I;
255 Diff = D;
256 }
257 }
258
259 if (SI == Scavenged.size()) {
260 // We need to scavenge a register but have no spill slot, the target
261 // must know how to do it (if not, we'll assert below).
262 Scavenged.push_back(Elt: ScavengedInfo(FIE));
263 }
264
265 // Avoid infinite regress
266 Scavenged[SI].Reg = Reg;
267
268 // If the target knows how to save/restore the register, let it do so;
269 // otherwise, use the emergency stack spill slot.
270 if (!TRI->saveScavengerRegister(MBB&: *MBB, I: Before, UseMI, RC: &RC, Reg)) {
271 // Spill the scavenged register before \p Before.
272 int FI = Scavenged[SI].FrameIndex;
273 if (FI < FIB || FI >= FIE) {
274 report_fatal_error(reason: Twine("Error while trying to spill ") +
275 TRI->getName(RegNo: Reg) + " from class " +
276 TRI->getRegClassName(Class: &RC) +
277 ": Cannot scavenge register without an emergency "
278 "spill slot!");
279 }
280 TII->storeRegToStackSlot(MBB&: *MBB, MI: Before, SrcReg: Reg, isKill: true, FrameIndex: FI, RC: &RC, TRI, VReg: Register());
281 MachineBasicBlock::iterator II = std::prev(x: Before);
282
283 unsigned FIOperandNum = getFrameIndexOperandNum(MI&: *II);
284 TRI->eliminateFrameIndex(MI: II, SPAdj, FIOperandNum, RS: this);
285
286 // Restore the scavenged register before its use (or first terminator).
287 TII->loadRegFromStackSlot(MBB&: *MBB, MI: UseMI, DestReg: Reg, FrameIndex: FI, RC: &RC, TRI, VReg: Register());
288 II = std::prev(x: UseMI);
289
290 FIOperandNum = getFrameIndexOperandNum(MI&: *II);
291 TRI->eliminateFrameIndex(MI: II, SPAdj, FIOperandNum, RS: this);
292 }
293 return Scavenged[SI];
294}
295
296Register RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC,
297 MachineBasicBlock::iterator To,
298 bool RestoreAfter, int SPAdj,
299 bool AllowSpill) {
300 const MachineBasicBlock &MBB = *To->getParent();
301 const MachineFunction &MF = *MBB.getParent();
302
303 // Find the register whose use is furthest away.
304 MachineBasicBlock::iterator UseMI;
305 ArrayRef<MCPhysReg> AllocationOrder = RC.getRawAllocationOrder(MF);
306 std::pair<MCPhysReg, MachineBasicBlock::iterator> P = findSurvivorBackwards(
307 MRI: *MRI, From: std::prev(x: MBBI), To, LiveOut: LiveUnits, AllocationOrder, RestoreAfter);
308 MCPhysReg Reg = P.first;
309 MachineBasicBlock::iterator SpillBefore = P.second;
310 // Found an available register?
311 if (Reg != 0 && SpillBefore == MBB.end()) {
312 LLVM_DEBUG(dbgs() << "Scavenged free register: " << printReg(Reg, TRI)
313 << '\n');
314 return Reg;
315 }
316
317 if (!AllowSpill)
318 return 0;
319
320 assert(Reg != 0 && "No register left to scavenge!");
321
322 MachineBasicBlock::iterator ReloadBefore =
323 RestoreAfter ? std::next(x: MBBI) : MBBI;
324 if (ReloadBefore != MBB.end())
325 LLVM_DEBUG(dbgs() << "Reload before: " << *ReloadBefore << '\n');
326 ScavengedInfo &Scavenged = spill(Reg, RC, SPAdj, Before: SpillBefore, UseMI&: ReloadBefore);
327 Scavenged.Restore = &*std::prev(x: SpillBefore);
328 LiveUnits.removeReg(Reg);
329 LLVM_DEBUG(dbgs() << "Scavenged register with spill: " << printReg(Reg, TRI)
330 << " until " << *SpillBefore);
331 return Reg;
332}
333
334/// Allocate a register for the virtual register \p VReg. The last use of
335/// \p VReg is around the current position of the register scavenger \p RS.
336/// \p ReserveAfter controls whether the scavenged register needs to be reserved
337/// after the current instruction, otherwise it will only be reserved before the
338/// current instruction.
339static Register scavengeVReg(MachineRegisterInfo &MRI, RegScavenger &RS,
340 Register VReg, bool ReserveAfter) {
341 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
342#ifndef NDEBUG
343 // Verify that all definitions and uses are in the same basic block.
344 const MachineBasicBlock *CommonMBB = nullptr;
345 // Real definition for the reg, re-definitions are not considered.
346 const MachineInstr *RealDef = nullptr;
347 for (MachineOperand &MO : MRI.reg_nodbg_operands(Reg: VReg)) {
348 MachineBasicBlock *MBB = MO.getParent()->getParent();
349 if (CommonMBB == nullptr)
350 CommonMBB = MBB;
351 assert(MBB == CommonMBB && "All defs+uses must be in the same basic block");
352 if (MO.isDef()) {
353 const MachineInstr &MI = *MO.getParent();
354 if (!MI.readsRegister(Reg: VReg, TRI: &TRI)) {
355 assert((!RealDef || RealDef == &MI) &&
356 "Can have at most one definition which is not a redefinition");
357 RealDef = &MI;
358 }
359 }
360 }
361 assert(RealDef != nullptr && "Must have at least 1 Def");
362#endif
363
364 // We should only have one definition of the register. However to accommodate
365 // the requirements of two address code we also allow definitions in
366 // subsequent instructions provided they also read the register. That way
367 // we get a single contiguous lifetime.
368 //
369 // Definitions in MRI.def_begin() are unordered, search for the first.
370 MachineRegisterInfo::def_iterator FirstDef = llvm::find_if(
371 Range: MRI.def_operands(Reg: VReg), P: [VReg, &TRI](const MachineOperand &MO) {
372 return !MO.getParent()->readsRegister(Reg: VReg, TRI: &TRI);
373 });
374 assert(FirstDef != MRI.def_end() &&
375 "Must have one definition that does not redefine vreg");
376 MachineInstr &DefMI = *FirstDef->getParent();
377
378 // The register scavenger will report a free register inserting an emergency
379 // spill/reload if necessary.
380 int SPAdj = 0;
381 const TargetRegisterClass &RC = *MRI.getRegClass(Reg: VReg);
382 Register SReg = RS.scavengeRegisterBackwards(RC, To: DefMI.getIterator(),
383 RestoreAfter: ReserveAfter, SPAdj);
384 MRI.replaceRegWith(FromReg: VReg, ToReg: SReg);
385 ++NumScavengedRegs;
386 return SReg;
387}
388
389/// Allocate (scavenge) vregs inside a single basic block.
390/// Returns true if the target spill callback created new vregs and a 2nd pass
391/// is necessary.
392static bool scavengeFrameVirtualRegsInBlock(MachineRegisterInfo &MRI,
393 RegScavenger &RS,
394 MachineBasicBlock &MBB) {
395 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
396 RS.enterBasicBlockEnd(MBB);
397
398 unsigned InitialNumVirtRegs = MRI.getNumVirtRegs();
399 bool NextInstructionReadsVReg = false;
400 for (MachineBasicBlock::iterator I = MBB.end(); I != MBB.begin(); ) {
401 // Move RegScavenger to the position between *std::prev(I) and *I.
402 RS.backward(I);
403 --I;
404
405 // Look for unassigned vregs in the uses of *std::next(I).
406 if (NextInstructionReadsVReg) {
407 MachineBasicBlock::iterator N = std::next(x: I);
408 const MachineInstr &NMI = *N;
409 for (const MachineOperand &MO : NMI.operands()) {
410 if (!MO.isReg())
411 continue;
412 Register Reg = MO.getReg();
413 // We only care about virtual registers and ignore virtual registers
414 // created by the target callbacks in the process (those will be handled
415 // in a scavenging round).
416 if (!Reg.isVirtual() ||
417 Register::virtReg2Index(Reg) >= InitialNumVirtRegs)
418 continue;
419 if (!MO.readsReg())
420 continue;
421
422 Register SReg = scavengeVReg(MRI, RS, VReg: Reg, ReserveAfter: true);
423 N->addRegisterKilled(IncomingReg: SReg, RegInfo: &TRI, AddIfNotFound: false);
424 RS.setRegUsed(Reg: SReg);
425 }
426 }
427
428 // Look for unassigned vregs in the defs of *I.
429 NextInstructionReadsVReg = false;
430 const MachineInstr &MI = *I;
431 for (const MachineOperand &MO : MI.operands()) {
432 if (!MO.isReg())
433 continue;
434 Register Reg = MO.getReg();
435 // Only vregs, no newly created vregs (see above).
436 if (!Reg.isVirtual() ||
437 Register::virtReg2Index(Reg) >= InitialNumVirtRegs)
438 continue;
439 // We have to look at all operands anyway so we can precalculate here
440 // whether there is a reading operand. This allows use to skip the use
441 // step in the next iteration if there was none.
442 assert(!MO.isInternalRead() && "Cannot assign inside bundles");
443 assert((!MO.isUndef() || MO.isDef()) && "Cannot handle undef uses");
444 if (MO.readsReg()) {
445 NextInstructionReadsVReg = true;
446 }
447 if (MO.isDef()) {
448 Register SReg = scavengeVReg(MRI, RS, VReg: Reg, ReserveAfter: false);
449 I->addRegisterDead(Reg: SReg, RegInfo: &TRI, AddIfNotFound: false);
450 }
451 }
452 }
453#ifndef NDEBUG
454 for (const MachineOperand &MO : MBB.front().operands()) {
455 if (!MO.isReg() || !MO.getReg().isVirtual())
456 continue;
457 assert(!MO.isInternalRead() && "Cannot assign inside bundles");
458 assert((!MO.isUndef() || MO.isDef()) && "Cannot handle undef uses");
459 assert(!MO.readsReg() && "Vreg use in first instruction not allowed");
460 }
461#endif
462
463 return MRI.getNumVirtRegs() != InitialNumVirtRegs;
464}
465
466void llvm::scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS) {
467 // FIXME: Iterating over the instruction stream is unnecessary. We can simply
468 // iterate over the vreg use list, which at this point only contains machine
469 // operands for which eliminateFrameIndex need a new scratch reg.
470 MachineRegisterInfo &MRI = MF.getRegInfo();
471 // Shortcut.
472 if (MRI.getNumVirtRegs() == 0) {
473 MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
474 return;
475 }
476
477 // Run through the instructions and find any virtual registers.
478 for (MachineBasicBlock &MBB : MF) {
479 if (MBB.empty())
480 continue;
481
482 bool Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
483 if (Again) {
484 LLVM_DEBUG(dbgs() << "Warning: Required two scavenging passes for block "
485 << MBB.getName() << '\n');
486 Again = scavengeFrameVirtualRegsInBlock(MRI, RS, MBB);
487 // The target required a 2nd run (because it created new vregs while
488 // spilling). Refuse to do another pass to keep compiletime in check.
489 if (Again)
490 report_fatal_error(reason: "Incomplete scavenging after 2nd pass");
491 }
492 }
493
494 MRI.clearVirtRegs();
495 MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
496}
497
498namespace {
499
500/// This class runs register scavenging independ of the PrologEpilogInserter.
501/// This is used in for testing.
502class ScavengerTest : public MachineFunctionPass {
503public:
504 static char ID;
505
506 ScavengerTest() : MachineFunctionPass(ID) {}
507
508 bool runOnMachineFunction(MachineFunction &MF) override {
509 const TargetSubtargetInfo &STI = MF.getSubtarget();
510 const TargetFrameLowering &TFL = *STI.getFrameLowering();
511
512 RegScavenger RS;
513 // Let's hope that calling those outside of PrologEpilogueInserter works
514 // well enough to initialize the scavenger with some emergency spillslots
515 // for the target.
516 BitVector SavedRegs;
517 TFL.determineCalleeSaves(MF, SavedRegs, RS: &RS);
518 TFL.processFunctionBeforeFrameFinalized(MF, RS: &RS);
519
520 // Let's scavenge the current function
521 scavengeFrameVirtualRegs(MF, RS);
522 return true;
523 }
524};
525
526} // end anonymous namespace
527
528char ScavengerTest::ID;
529
530INITIALIZE_PASS(ScavengerTest, "scavenger-test",
531 "Scavenge virtual registers inside basic blocks", false, false)
532

source code of llvm/lib/CodeGen/RegisterScavenging.cpp