//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "target-reg-info"

using namespace llvm;

static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));
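// As a cl::opt, the threshold can be overridden on the command line, e.g.
// "llc -huge-size-for-split=10000"; cl::Hidden only hides it from -help.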

TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
                                       regclass_iterator RCB,
                                       regclass_iterator RCE,
                                       const char *const *SRINames,
                                       const LaneBitmask *SRILaneMasks,
                                       LaneBitmask SRICoveringLanes,
                                       const RegClassInfo *const RCIs,
                                       const MVT::SimpleValueType *const RCVTLists,
                                       unsigned Mode)
    : InfoDesc(ID), SubRegIndexNames(SRINames),
      SubRegIndexLaneMasks(SRILaneMasks),
      RegClassBegin(RCB), RegClassEnd(RCE),
      CoveringLanes(SRICoveringLanes),
      RCInfos(RCIs), RCVTLists(RCVTLists), HwMode(Mode) {
}

TargetRegisterInfo::~TargetRegisterInfo() = default;

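/// Region splitting a huge live range is expensive, and it is wasted effort
/// when the range is defined by a trivially rematerializable instruction:
/// the value can simply be rematerialized at each use instead of being
/// split. Steer the greedy allocator away from global splitting in that case.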
bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
    const MachineFunction &MF, const LiveInterval &VirtReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg());
  if (MI && TII->isTriviallyReMaterializable(*MI) &&
      VirtReg.size() > HugeSizeForSplit)
    return false;
  return true;
}

void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
                                       MCRegister Reg) const {
  for (MCPhysReg SR : superregs_inclusive(Reg))
    RegisterSet.set(SR);
}
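// Typical use is in a target's getReservedRegs() implementation, for example
// (illustrative only; "StackPtrReg" stands for the target's stack pointer):
//   BitVector Reserved(getNumRegs());
//   markSuperRegs(Reserved, StackPtrReg); // reserve SP and all its super-regs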

bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
                                                 ArrayRef<MCPhysReg> Exceptions) const {
  // Check that all super registers of reserved regs are reserved as well.
  BitVector Checked(getNumRegs());
  for (unsigned Reg : RegisterSet.set_bits()) {
    if (Checked[Reg])
      continue;
    for (MCPhysReg SR : superregs(Reg)) {
      if (!RegisterSet[SR] && !is_contained(Exceptions, Reg)) {
        dbgs() << "Error: Super register " << printReg(SR, this)
               << " of reserved register " << printReg(Reg, this)
               << " is not reserved.\n";
        return false;
      }

      // Super registers are checked transitively, so remember which ones have
      // already been visited to avoid compile-time explosion in deep register
      // hierarchies.
      Checked.set(SR);
    }
  }
  return true;
}

namespace llvm {

Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
                   unsigned SubIdx, const MachineRegisterInfo *MRI) {
  return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
    if (!Reg)
      OS << "$noreg";
    else if (Register::isStackSlot(Reg))
      OS << "SS#" << Register::stackSlot2Index(Reg);
    else if (Reg.isVirtual()) {
      StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
      if (Name != "") {
        OS << '%' << Name;
      } else {
        OS << '%' << Register::virtReg2Index(Reg);
      }
    } else if (!TRI)
      OS << '$' << "physreg" << Reg;
    else if (Reg < TRI->getNumRegs()) {
      OS << '$';
      printLowerCase(TRI->getName(Reg), OS);
    } else
      llvm_unreachable("Register kind is unsupported.");

    if (SubIdx) {
      if (TRI)
        OS << ':' << TRI->getSubRegIndexName(SubIdx);
      else
        OS << ":sub(" << SubIdx << ')';
    }
  });
}
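// Sample outputs: "$noreg", "SS#3" (stack slot), "%42" or "%name" (unnamed or
// named virtual register), "$eax" (physical register, lower-cased target
// name), optionally followed by a sub-register index such as ":sub_32bit".
// The register and index names shown are illustrative and target-defined.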

Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    // Generic printout when TRI is missing.
    if (!TRI) {
      OS << "Unit~" << Unit;
      return;
    }

    // Check for invalid register units.
    if (Unit >= TRI->getNumRegUnits()) {
      OS << "BadUnit~" << Unit;
      return;
    }

    // Normal units have at least one root.
    MCRegUnitRootIterator Roots(Unit, TRI);
    assert(Roots.isValid() && "Unit has no roots.");
    OS << TRI->getName(*Roots);
    for (++Roots; Roots.isValid(); ++Roots)
      OS << '~' << TRI->getName(*Roots);
  });
}

Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    if (Register::isVirtualRegister(Unit)) {
      OS << '%' << Register::virtReg2Index(Unit);
    } else {
      OS << printRegUnit(Unit, TRI);
    }
  });
}

Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI) {
  return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
    if (RegInfo.getRegClassOrNull(Reg))
      OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
    else if (RegInfo.getRegBankOrNull(Reg))
      OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
    else {
      OS << "_";
      assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
             "Generic registers must have a valid type");
    }
  });
}

} // end namespace llvm

/// getAllocatableClass - Return the maximal subclass of the given register
/// class that is allocatable, or NULL.
const TargetRegisterClass *
TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
  if (!RC || RC->isAllocatable())
    return RC;

  for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
       ++It) {
    const TargetRegisterClass *SubRC = getRegClass(It.getID());
    if (SubRC->isAllocatable())
      return SubRC;
  }
  return nullptr;
}

/// getMinimalPhysRegClass - Returns the register class of a physical
/// register of the given type, picking the most specific sub register class
/// of the right type that contains this physreg.
const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg, MVT VT) const {
  assert(Register::isPhysicalRegister(reg) &&
         "reg must be a physical register");

  // Pick the most specific register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : regclasses()) {
    if ((VT == MVT::Other || isTypeLegalForClass(*RC, VT)) &&
        RC->contains(reg) && (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  assert(BestRC && "Couldn't find the register class");
  return BestRC;
}

const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg, LLT Ty) const {
  assert(Register::isPhysicalRegister(reg) &&
         "reg must be a physical register");

  // Pick the most specific register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : regclasses()) {
    if ((!Ty.isValid() || isTypeLegalForClass(*RC, Ty)) && RC->contains(reg) &&
        (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  return BestRC;
}

/// getAllocatableSetForRC - Toggle the bits that represent allocatable
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
                                   const TargetRegisterClass *RC,
                                   BitVector &R) {
  assert(RC->isAllocatable() && "invalid for nonallocatable sets");
  ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
  for (MCPhysReg PR : Order)
    R.set(PR);
}

BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
                                                const TargetRegisterClass *RC) const {
  BitVector Allocatable(getNumRegs());
  if (RC) {
    // A register class with no allocatable subclass returns an empty set.
    const TargetRegisterClass *SubClass = getAllocatableClass(RC);
    if (SubClass)
      getAllocatableSetForRC(MF, SubClass, Allocatable);
  } else {
    for (const TargetRegisterClass *C : regclasses())
      if (C->isAllocatable())
        getAllocatableSetForRC(MF, C, Allocatable);
  }

  // Mask out the reserved registers.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const BitVector &Reserved = MRI.getReservedRegs();
  Allocatable.reset(Reserved);

  return Allocatable;
}

static inline
const TargetRegisterClass *firstCommonClass(const uint32_t *A,
                                            const uint32_t *B,
                                            const TargetRegisterInfo *TRI) {
  for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
    if (unsigned Common = *A++ & *B++)
      return TRI->getRegClass(I + llvm::countr_zero(Common));
  return nullptr;
}
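// The class bit masks are arrays of 32-bit words in which bit (ID % 32) of
// word (ID / 32) is set when class ID is a member. Intersecting the two masks
// word by word and taking countr_zero of the first non-zero intersection
// therefore yields the common class with the smallest ID. Since TableGen
// numbers classes topologically, that is also the largest common sub-class.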

const TargetRegisterClass *
TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
                                      const TargetRegisterClass *B) const {
  // First take care of the trivial cases.
  if (A == B)
    return A;
  if (!A || !B)
    return nullptr;

  // Register classes are ordered topologically, so the largest common
  // sub-class is the common sub-class with the smallest ID.
  return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
}
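// For example, on X86 the largest common sub-class of GR32 and GR32_ABCD is
// GR32_ABCD itself, because GR32_ABCD is already a sub-class of GR32 (target
// class names used for illustration only).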

const TargetRegisterClass *
TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                             const TargetRegisterClass *B,
                                             unsigned Idx) const {
  assert(A && B && "Missing register class");
  assert(Idx && "Bad sub-register index");

  // Find Idx in the list of super-register indices.
  for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
    if (RCI.getSubReg() == Idx)
      // The bit mask contains all register classes that are projected into B
      // by Idx. Find a class that is also a sub-class of A.
      return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
  return nullptr;
}

const TargetRegisterClass *TargetRegisterInfo::
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                       const TargetRegisterClass *RCB, unsigned SubB,
                       unsigned &PreA, unsigned &PreB) const {
  assert(RCA && SubA && RCB && SubB && "Invalid arguments");

  // Search all pairs of sub-register indices that project into RCA and RCB
  // respectively. This is quadratic, but usually the sets are very small. On
  // most targets like X86, there will only be a single sub-register index
  // (e.g., sub_16bit projecting into GR16).
  //
  // The worst case is a register class like DPR on ARM.
  // We have indices dsub_0..dsub_7 projecting into that class.
  //
  // It is very common that one register class is a sub-class of the other.
  // Arrange for RCA to be the larger register class so the answer will be
  // found in the first iteration. This makes the search linear for the most
  // common case.
  const TargetRegisterClass *BestRC = nullptr;
  unsigned *BestPreA = &PreA;
  unsigned *BestPreB = &PreB;
  if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
    std::swap(RCA, RCB);
    std::swap(SubA, SubB);
    std::swap(BestPreA, BestPreB);
  }

  // Also terminate the search once we have found a register class as small as
  // RCA.
  unsigned MinSize = getRegSizeInBits(*RCA);

  for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
    unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
    for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
      // Check if a common super-register class exists for this index pair.
      const TargetRegisterClass *RC =
          firstCommonClass(IA.getMask(), IB.getMask(), this);
      if (!RC || getRegSizeInBits(*RC) < MinSize)
        continue;

      // The indexes must compose identically: PreA+SubA == PreB+SubB.
      unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
      if (FinalA != FinalB)
        continue;

      // Is RC a better candidate than BestRC?
      if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
        continue;

      // Yes, RC is the smallest super-register class seen so far.
      BestRC = RC;
      *BestPreA = IA.getSubReg();
      *BestPreB = IB.getSubReg();

      // Bail early if we reached MinSize. We won't find a better candidate.
      if (getRegSizeInBits(*BestRC) == MinSize)
        return BestRC;
    }
  }
  return BestRC;
}

/// Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg) {
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != nullptr;
  }

  // At most one of the registers is a sub register; make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the registers is a sub register; check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;

  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}

bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                              unsigned DefSubReg,
                                              const TargetRegisterClass *SrcRC,
                                              unsigned SrcSubReg) const {
  // If this source does not incur a cross register bank copy, use it.
  return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg);
}

// Compute target-independent register allocator hints to help eliminate
// copies.
bool TargetRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const std::pair<unsigned, SmallVector<Register, 4>> &Hints_MRI =
      MRI.getRegAllocationHints(VirtReg);

  SmallSet<Register, 32> HintedRegs;
  // If the hint type (Hints_MRI.first) is non-zero, the first hint is a
  // target-specific one that this generic implementation does not understand,
  // so skip it here and leave it to the target's override.
  bool Skip = (Hints_MRI.first != 0);
  for (auto Reg : Hints_MRI.second) {
    if (Skip) {
      Skip = false;
      continue;
    }

    // Target-independent hints are either a physical or a virtual register.
    Register Phys = Reg;
    if (VRM && Phys.isVirtual())
      Phys = VRM->getPhys(Phys);

    // Don't add the same reg twice (Hints_MRI may contain multiple virtual
    // registers allocated to the same physreg).
    if (!HintedRegs.insert(Phys).second)
      continue;
    // Check that Phys is a valid hint in VirtReg's register class.
    if (!Phys.isPhysical())
      continue;
    if (MRI.isReserved(Phys))
      continue;
    // Check that Phys is in the allocation order. We shouldn't heed hints
    // from VirtReg's register class if they aren't in the allocation order.
    // The target probably has a reason for removing the register.
    if (!is_contained(Order, Phys))
      continue;

    // All clear, tell the register allocator to prefer this register.
    Hints.push_back(Phys);
  }
  return false;
}

bool TargetRegisterInfo::isCalleeSavedPhysReg(
    MCRegister PhysReg, const MachineFunction &MF) const {
  if (PhysReg == 0)
    return false;
  const uint32_t *callerPreservedRegs =
      getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (callerPreservedRegs) {
    assert(Register::isPhysicalRegister(PhysReg) &&
           "Expected physical register");
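    // The mask packs one bit per physical register, 32 registers per 32-bit
    // word; a set bit means the register is preserved across calls using this
    // calling convention.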
    return (callerPreservedRegs[PhysReg / 32] >> PhysReg % 32) & 1;
  }
  return false;
}

bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  return !MF.getFunction().hasFnAttribute("no-realign-stack");
}

bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const Function &F = MF.getFunction();
  return F.hasFnAttribute("stackrealign") ||
         (MFI.getMaxAlign() > TFI->getStackAlign()) ||
         F.hasFnAttribute(Attribute::StackAlignment);
}

bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
                                            const uint32_t *mask1) const {
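  // mask0 is a subset of mask1 when every register preserved by mask0 is also
  // preserved by mask1, i.e. when intersecting each 32-register word with
  // mask1 leaves mask0's word unchanged.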
  unsigned N = (getNumRegs() + 31) / 32;
  for (unsigned I = 0; I < N; ++I)
    if ((mask0[I] & mask1[I]) != mask0[I])
      return false;
  return true;
}

TypeSize
TargetRegisterInfo::getRegSizeInBits(Register Reg,
                                     const MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *RC{};
  if (Reg.isPhysical()) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = getMinimalPhysRegClass(Reg);
    assert(RC && "Unable to deduce the register class");
    return getRegSizeInBits(*RC);
  }
  LLT Ty = MRI.getType(Reg);
  if (Ty.isValid())
    return Ty.getSizeInBits();

  // Since Reg is not a generic register, it may have a register class.
  RC = MRI.getRegClass(Reg);
  assert(RC && "Unable to deduce the register class");
  return getRegSizeInBits(*RC);
}

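/// Greedily select sub-register indices whose lane masks exactly cover
/// \p LaneMask: first the compatible index covering the most lanes, then,
/// among the remaining candidates, whichever covers the most still-uncovered
/// lanes, until no lanes are left. Returns false when no exact cover exists.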
bool TargetRegisterInfo::getCoveringSubRegIndexes(
    const MachineRegisterInfo &MRI, const TargetRegisterClass *RC,
    LaneBitmask LaneMask, SmallVectorImpl<unsigned> &NeededIndexes) const {
  SmallVector<unsigned, 8> PossibleIndexes;
  unsigned BestIdx = 0;
  unsigned BestCover = 0;

  for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
    // Is this index even compatible with the given class?
    if (getSubClassWithSubReg(RC, Idx) != RC)
      continue;
    LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
    // Early exit if we found a perfect match.
    if (SubRegMask == LaneMask) {
      BestIdx = Idx;
      break;
    }

    // The index must not cover any lanes outside \p LaneMask.
    if ((SubRegMask & ~LaneMask).any())
      continue;

    unsigned PopCount = SubRegMask.getNumLanes();
    PossibleIndexes.push_back(Idx);
    if (PopCount > BestCover) {
      BestCover = PopCount;
      BestIdx = Idx;
    }
  }

  // Abort if we cannot possibly implement the COPY with the given indexes.
  if (BestIdx == 0)
    return false;

  NeededIndexes.push_back(BestIdx);

  // Greedy heuristic: keep iterating, each time picking the index that covers
  // the most of the remaining lanes.
  LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(BestIdx);
  while (LanesLeft.any()) {
    unsigned BestIdx = 0;
    int BestCover = std::numeric_limits<int>::min();
    for (unsigned Idx : PossibleIndexes) {
      LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
      // Early exit if we found a perfect match.
      if (SubRegMask == LanesLeft) {
        BestIdx = Idx;
        break;
      }

      // Do not cover already-covered lanes to avoid creating cycles
      // in copy bundles (i.e. bundles containing copies that write to the
      // same registers).
      if ((SubRegMask & ~LanesLeft).any())
        continue;

      // Try to cover as many of the remaining lanes as possible.
      const int Cover = (SubRegMask & LanesLeft).getNumLanes();
      if (Cover > BestCover) {
        BestCover = Cover;
        BestIdx = Idx;
      }
    }

    if (BestIdx == 0)
      return false; // Impossible to handle

    NeededIndexes.push_back(BestIdx);

    LanesLeft &= ~getSubRegIndexLaneMask(BestIdx);
  }

  // All requested lanes have been covered.
  return true;
}

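/// Walk a chain of copy-like instructions (COPY or SUBREG_TO_REG) starting at
/// \p SrcReg and return the first source register that is either physical or
/// not itself defined by another copy-like instruction.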
Register
TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
                                     const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    if (!MI->isCopyLike())
      return SrcReg;

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    if (!CopySrcReg.isVirtual())
      return CopySrcReg;

    SrcReg = CopySrcReg;
  }
}

Register TargetRegisterInfo::lookThruSingleUseCopyChain(
    Register SrcReg, const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    // Found the real definition, return it if it has a single use.
    if (!MI->isCopyLike())
      return MRI->hasOneNonDBGUse(SrcReg) ? SrcReg : Register();

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    // Continue only if the next definition in the chain is for a virtual
    // register that has a single use.
    if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(CopySrcReg))
      return Register();

    SrcReg = CopySrcReg;
  }
}

void TargetRegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  assert(!Offset.getScalable() && "Scalable offsets are not handled");
  DIExpression::appendOffset(Ops, Offset.getFixed());
}

DIExpression *
TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
                                            unsigned PrependFlags,
                                            const StackOffset &Offset) const {
  assert((PrependFlags &
          ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
            DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
         "Unsupported prepend flag");
  SmallVector<uint64_t, 16> OffsetExpr;
  if (PrependFlags & DIExpression::DerefBefore)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  getOffsetOpcodes(Offset, OffsetExpr);
  if (PrependFlags & DIExpression::DerefAfter)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  return DIExpression::prependOpcodes(Expr, OffsetExpr,
                                      PrependFlags & DIExpression::StackValue,
                                      PrependFlags & DIExpression::EntryValue);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
                                 const TargetRegisterInfo *TRI) {
  dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
}
#endif