1//==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes an abstract interface used to get information about a
10// target machine's register file. This information is used for a variety of
11// purposes, especially register allocation.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
16#define LLVM_CODEGEN_TARGETREGISTERINFO_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/iterator_range.h"
22#include "llvm/CodeGen/MachineBasicBlock.h"
23#include "llvm/CodeGen/RegisterBank.h"
24#include "llvm/IR/CallingConv.h"
25#include "llvm/MC/LaneBitmask.h"
26#include "llvm/MC/MCRegisterInfo.h"
27#include "llvm/Support/ErrorHandling.h"
28#include "llvm/Support/MathExtras.h"
29#include "llvm/Support/Printable.h"
30#include <cassert>
31#include <cstdint>
32
33namespace llvm {
34
35class BitVector;
36class DIExpression;
37class LiveRegMatrix;
38class MachineFunction;
39class MachineInstr;
40class RegScavenger;
41class VirtRegMap;
42class LiveIntervals;
43class LiveInterval;
44
45class TargetRegisterClass {
46public:
47 using iterator = const MCPhysReg *;
48 using const_iterator = const MCPhysReg *;
49 using sc_iterator = const TargetRegisterClass* const *;
50
51 // Instance variables filled by tablegen, do not use!
52 const MCRegisterClass *MC;
53 const uint32_t *SubClassMask;
54 const uint16_t *SuperRegIndices;
55 const LaneBitmask LaneMask;
56 /// Classes with a higher priority value are assigned first by register
57 /// allocators using a greedy heuristic. The value is in the range [0,31].
58 const uint8_t AllocationPriority;
59
60 // Change allocation priority heuristic used by greedy.
61 const bool GlobalPriority;
62
63 /// Configurable target specific flags.
64 const uint8_t TSFlags;
65 /// Whether the class supports two (or more) disjunct subregister indices.
66 const bool HasDisjunctSubRegs;
67 /// Whether a combination of subregisters can cover every register in the
68 /// class. See also the CoveredBySubRegs description in Target.td.
69 const bool CoveredBySubRegs;
70 const sc_iterator SuperClasses;
71 ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
72
73 /// Return the register class ID number.
74 unsigned getID() const { return MC->getID(); }
75
76 /// begin/end - Return all of the registers in this class.
77 ///
78 iterator begin() const { return MC->begin(); }
79 iterator end() const { return MC->end(); }
80
81 /// Return the number of registers in this class.
82 unsigned getNumRegs() const { return MC->getNumRegs(); }
83
84 ArrayRef<MCPhysReg> getRegisters() const {
85 return ArrayRef(begin(), getNumRegs());
86 }
87
88 /// Return the specified register in the class.
89 MCRegister getRegister(unsigned i) const {
90 return MC->getRegister(i);
91 }
92
93 /// Return true if the specified register is included in this register class.
94 /// This does not include virtual registers.
95 bool contains(Register Reg) const {
96 /// FIXME: Historically this function has returned false when given vregs,
97 /// but it should probably only receive physical registers.
98 if (!Reg.isPhysical())
99 return false;
100 return MC->contains(Reg.asMCReg());
101 }
102
103 /// Return true if both registers are in this class.
104 bool contains(Register Reg1, Register Reg2) const {
105 /// FIXME: Historically this function has returned false when given vregs,
106 /// but it should probably only receive physical registers.
107 if (!Reg1.isPhysical() || !Reg2.isPhysical())
108 return false;
109 return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
110 }
111
112 /// Return the cost of copying a value between two registers in this class.
113 /// A negative number means the register class is very expensive
114 /// to copy e.g. status flag register classes.
115 int getCopyCost() const { return MC->getCopyCost(); }
116
117 /// Return true if this register class may be used to create virtual
118 /// registers.
119 bool isAllocatable() const { return MC->isAllocatable(); }
120
121 /// Return true if this register class has a defined BaseClassOrder.
122 bool isBaseClass() const { return MC->isBaseClass(); }
123
124 /// Return true if the specified TargetRegisterClass
125 /// is a proper sub-class of this TargetRegisterClass.
126 bool hasSubClass(const TargetRegisterClass *RC) const {
127 return RC != this && hasSubClassEq(RC);
128 }
129
130 /// Returns true if RC is a sub-class of or equal to this class.
131 bool hasSubClassEq(const TargetRegisterClass *RC) const {
132 unsigned ID = RC->getID();
133 return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
134 }
135
136 /// Return true if the specified TargetRegisterClass is a
137 /// proper super-class of this TargetRegisterClass.
138 bool hasSuperClass(const TargetRegisterClass *RC) const {
139 return RC->hasSubClass(this);
140 }
141
142 /// Returns true if RC is a super-class of or equal to this class.
143 bool hasSuperClassEq(const TargetRegisterClass *RC) const {
144 return RC->hasSubClassEq(this);
145 }
146
147 /// Returns a bit vector of subclasses, including this one.
148 /// The vector is indexed by class IDs.
149 ///
150 /// To use it, consider the returned array as a chunk of memory that
151 /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
152 /// contains a bitset of the ID of the subclasses in big-endian style.
153
154 /// I.e., the representation of the memory from left to right at the
155 /// bit level looks like:
156 /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
157 /// [ XXX NumRegClasses NumRegClasses - 1 ... ]
158 /// Where the number represents the class ID and XXX bits that
159 /// should be ignored.
160 ///
161 /// See the implementation of hasSubClassEq for an example of how it
162 /// can be used.
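  ///
  /// As an illustrative sketch (RC and OtherRC are hypothetical
  /// TargetRegisterClass pointers), a caller can test a single class ID
  /// against the mask the same way hasSubClassEq does:
  /// \code
  ///   const uint32_t *Mask = RC->getSubClassMask();
  ///   unsigned ID = OtherRC->getID();
  ///   bool IsSubClassOrSelf = (Mask[ID / 32] >> (ID % 32)) & 1;
  /// \endcode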
163 const uint32_t *getSubClassMask() const {
164 return SubClassMask;
165 }
166
167 /// Returns a 0-terminated list of sub-register indices that project some
168 /// super-register class into this register class. The list has an entry for
169 /// each Idx such that:
170 ///
171 /// There exists SuperRC where:
172 /// For all Reg in SuperRC:
173 /// this->contains(Reg:Idx)
174 const uint16_t *getSuperRegIndices() const {
175 return SuperRegIndices;
176 }
177
178 /// Returns a NULL-terminated list of super-classes. The
179 /// classes are ordered by ID which is also a topological ordering from large
180 /// to small classes. The list does NOT include the current class.
181 sc_iterator getSuperClasses() const {
182 return SuperClasses;
183 }
184
185 /// Return true if this TargetRegisterClass is a subset
186 /// class of at least one other TargetRegisterClass.
187 bool isASubClass() const {
188 return SuperClasses[0] != nullptr;
189 }
190
191 /// Returns the preferred order for allocating registers from this register
192 /// class in MF. The raw order comes directly from the .td file and may
193 /// include reserved registers that are not allocatable.
194 /// Register allocators should also make sure to allocate
195 /// callee-saved registers only after all the volatiles are used. The
196 /// RegisterClassInfo class provides filtered allocation orders with
197 /// callee-saved registers moved to the end.
198 ///
199 /// The MachineFunction argument can be used to tune the allocatable
200 /// registers based on the characteristics of the function, subtarget, or
201 /// other criteria.
202 ///
203 /// By default, this method returns all registers in the class.
204 ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
205 return OrderFunc ? OrderFunc(MF) : getRegisters();
206 }
207
208 /// Returns the combination of all lane masks of the registers in this class.
209 /// The lane masks of the registers are the combination of all lane masks
210 /// of their subregisters. Returns 1 if there are no subregisters.
211 LaneBitmask getLaneMask() const {
212 return LaneMask;
213 }
214};
215
216/// Extra information, not in MCRegisterDesc, about registers.
217/// These are used by codegen, not by MC.
218struct TargetRegisterInfoDesc {
219 const uint8_t *CostPerUse; // Extra cost of instructions using register.
220 unsigned NumCosts; // Number of cost values associated with each register.
221 const bool
222 *InAllocatableClass; // Register belongs to an allocatable regclass.
223};
224
225/// Each TargetRegisterClass has a per register weight, and weight
226/// limit which must be less than the limits of its pressure sets.
227struct RegClassWeight {
228 unsigned RegWeight;
229 unsigned WeightLimit;
230};
231
232/// TargetRegisterInfo base class - We assume that the target defines a static
233/// array of TargetRegisterDesc objects that represent all of the machine
234/// registers that the target has. As such, we simply have to track a pointer
235/// to this array so that we can turn register number into a register
236/// descriptor.
237///
238class TargetRegisterInfo : public MCRegisterInfo {
239public:
240 using regclass_iterator = const TargetRegisterClass * const *;
241 using vt_iterator = const MVT::SimpleValueType *;
242 struct RegClassInfo {
243 unsigned RegSize, SpillSize, SpillAlignment;
244 unsigned VTListOffset;
245 };
246
247 /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg
248 /// index; a value of -1 in either field marks the range as invalid.
249 struct SubRegCoveredBits {
250 uint16_t Offset;
251 uint16_t Size;
252 };
253
254private:
255 const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
256 const char *const *SubRegIndexNames; // Names of subreg indexes.
257 const SubRegCoveredBits *SubRegIdxRanges; // Pointer to the subreg covered
258 // bit ranges array.
259
260 // Pointer to array of lane masks, one per sub-reg index.
261 const LaneBitmask *SubRegIndexLaneMasks;
262
263 regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
264 LaneBitmask CoveringLanes;
265 const RegClassInfo *const RCInfos;
266 const MVT::SimpleValueType *const RCVTLists;
267 unsigned HwMode;
268
269protected:
270 TargetRegisterInfo(const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
271 regclass_iterator RCE, const char *const *SRINames,
272 const SubRegCoveredBits *SubIdxRanges,
273 const LaneBitmask *SRILaneMasks, LaneBitmask CoveringLanes,
274 const RegClassInfo *const RCIs,
275 const MVT::SimpleValueType *const RCVTLists,
276 unsigned Mode = 0);
277 virtual ~TargetRegisterInfo();
278
279public:
280 /// Return the number of registers for the function. (may overestimate)
281 virtual unsigned getNumSupportedRegs(const MachineFunction &) const {
282 return getNumRegs();
283 }
284
285 // Register numbers can represent physical registers, virtual registers, and
286 // sometimes stack slots. The unsigned values are divided into these ranges:
287 //
288 // 0 Not a register, can be used as a sentinel.
289 // [1;2^30) Physical registers assigned by TableGen.
290 // [2^30;2^31) Stack slots. (Rarely used.)
291 // [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
292 //
293 // Further sentinels can be allocated from the small negative integers.
294 // DenseMapInfo<unsigned> uses -1u and -2u.
295
296 /// Return the size in bits of a register from class RC.
297 TypeSize getRegSizeInBits(const TargetRegisterClass &RC) const {
298 return TypeSize::getFixed(getRegClassInfo(RC).RegSize);
299 }
300
301 /// Return the size in bytes of the stack slot allocated to hold a spilled
302 /// copy of a register from class RC.
303 unsigned getSpillSize(const TargetRegisterClass &RC) const {
304 return getRegClassInfo(RC).SpillSize / 8;
305 }
306
307 /// Return the minimum required alignment in bytes for a spill slot for
308 /// a register of this class.
309 Align getSpillAlign(const TargetRegisterClass &RC) const {
310 return Align(getRegClassInfo(RC).SpillAlignment / 8);
311 }
312
313 /// Return true if the given TargetRegisterClass has the ValueType T.
314 bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
315 for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
316 if (MVT(*I) == T)
317 return true;
318 return false;
319 }
320
321 /// Return true if the given TargetRegisterClass is compatible with LLT T.
322 bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const {
323 for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
324 MVT VT(*I);
325 if (VT == MVT::Untyped)
326 return true;
327
328 if (LLT(VT) == T)
329 return true;
330 }
331 return false;
332 }
333
334 /// Loop over all of the value types that can be represented by values
335 /// in the given register class.
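  ///
  /// A minimal usage sketch (TRI and RC are assumed to be in scope):
  /// \code
  ///   for (auto I = TRI->legalclasstypes_begin(*RC); *I != MVT::Other; ++I) {
  ///     MVT VT(*I); // One legal value type for registers in RC.
  ///   }
  /// \endcode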
336 vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
337 return &RCVTLists[getRegClassInfo(RC).VTListOffset];
338 }
339
340 vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
341 vt_iterator I = legalclasstypes_begin(RC);
342 while (*I != MVT::Other)
343 ++I;
344 return I;
345 }
346
347 /// Returns the Register Class of a physical register of the given type,
348 /// picking the smallest sub register class of the right type that contains
349 /// this physreg.
350 const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg,
351 MVT VT = MVT::Other) const;
352
353 /// Returns the Register Class of a physical register of the given type,
354 /// picking the smallest sub register class of the right type that contains
355 /// this physreg. If there is no register class compatible with the given type,
356 /// returns nullptr.
357 const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg,
358 LLT Ty = LLT()) const;
359
360 /// Return the maximal subclass of the given register class that is
361 /// allocatable or NULL.
362 const TargetRegisterClass *
363 getAllocatableClass(const TargetRegisterClass *RC) const;
364
365 /// Returns a bitset indexed by register number indicating if a register is
366 /// allocatable or not. If a register class is specified, returns the subset
367 /// for the class.
368 BitVector getAllocatableSet(const MachineFunction &MF,
369 const TargetRegisterClass *RC = nullptr) const;
370
371 /// Get a list of cost values for all registers that correspond to the index
372 /// returned by RegisterCostTableIndex.
373 ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const {
374 unsigned Idx = getRegisterCostTableIndex(MF);
375 unsigned NumRegs = getNumRegs();
376 assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds");
377
378 return ArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs);
379 }
380
381 /// Return true if the register is in the allocation of any register class.
382 bool isInAllocatableClass(MCRegister RegNo) const {
383 return InfoDesc->InAllocatableClass[RegNo];
384 }
385
386 /// Return the human-readable symbolic target-specific
387 /// name for the specified SubRegIndex.
388 const char *getSubRegIndexName(unsigned SubIdx) const {
389 assert(SubIdx && SubIdx < getNumSubRegIndices() &&
390 "This is not a subregister index");
391 return SubRegIndexNames[SubIdx-1];
392 }
393
394 /// Get the size of the bit range covered by a sub-register index.
395 /// If the index isn't continuous, return the sum of the sizes of its parts.
396 /// If the index is used to access subregisters of different sizes, return -1.
397 unsigned getSubRegIdxSize(unsigned Idx) const;
398
399 /// Get the offset of the bit range covered by a sub-register index.
400 /// If an Offset doesn't make sense (the index isn't continuous, or is used to
401 /// access sub-registers at different offsets), return -1.
402 unsigned getSubRegIdxOffset(unsigned Idx) const;
403
404 /// Return a bitmask representing the parts of a register that are covered by
405 /// SubIdx \see LaneBitmask.
406 ///
407 /// SubIdx == 0 is allowed, it has the lane mask ~0u.
408 LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
409 assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
410 return SubRegIndexLaneMasks[SubIdx];
411 }
412
413 /// Try to find one or more subregister indexes to cover \p LaneMask.
414 ///
415 /// If this is possible, returns true and appends the best matching set of
416 /// indexes to \p Indexes. If this is not possible, returns false.
417 bool getCoveringSubRegIndexes(const MachineRegisterInfo &MRI,
418 const TargetRegisterClass *RC,
419 LaneBitmask LaneMask,
420 SmallVectorImpl<unsigned> &Indexes) const;
421
422 /// The lane masks returned by getSubRegIndexLaneMask() above can only be
423 /// used to determine if sub-registers overlap - they can't be used to
424 /// determine if a set of sub-registers completely cover another
425 /// sub-register.
426 ///
427 /// The X86 general purpose registers have two lanes corresponding to the
428 /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
429 /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
430 /// sub_32bit sub-register.
431 ///
432 /// On the other hand, the ARM NEON lanes fully cover their registers: The
433 /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
434 /// This is related to the CoveredBySubRegs property on register definitions.
435 ///
436 /// This function returns a bit mask of lanes that completely cover their
437 /// sub-registers. More precisely, given:
438 ///
439 /// Covering = getCoveringLanes();
440 /// MaskA = getSubRegIndexLaneMask(SubA);
441 /// MaskB = getSubRegIndexLaneMask(SubB);
442 ///
443 /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
444 /// SubB.
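  ///
  /// For illustration only, a sketch of that test (SubA and SubB are assumed
  /// to be sub-register indices of the same register):
  /// \code
  ///   LaneBitmask Covering = TRI->getCoveringLanes();
  ///   LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(SubA);
  ///   LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(SubB);
  ///   bool SubACoveredBySubB = (MaskA & ~(MaskB & Covering)).none();
  /// \endcode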
445 LaneBitmask getCoveringLanes() const { return CoveringLanes; }
446
447 /// Returns true if the two registers are equal or alias each other.
448 /// The registers may be virtual registers.
449 bool regsOverlap(Register RegA, Register RegB) const {
450 if (RegA == RegB)
451 return true;
452 if (RegA.isPhysical() && RegB.isPhysical())
453 return MCRegisterInfo::regsOverlap(RegA.asMCReg(), RegB.asMCReg());
454 return false;
455 }
456
457 /// Returns true if Reg contains RegUnit.
458 bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
459 for (MCRegUnit Unit : regunits(Reg))
460 if (Register(Unit) == RegUnit)
461 return true;
462 return false;
463 }
464
465 /// Returns the original SrcReg unless it is the target of a copy-like
466 /// operation, in which case we chain backwards through all such operations
467 /// to the ultimate source register. If a physical register is encountered,
468 /// we stop the search.
469 virtual Register lookThruCopyLike(Register SrcReg,
470 const MachineRegisterInfo *MRI) const;
471
472 /// Find the original SrcReg unless it is the target of a copy-like operation,
473 /// in which case we chain backwards through all such operations to the
474 /// ultimate source register. If a physical register is encountered, we stop
475 /// the search.
476 /// Return the original SrcReg if all the definitions in the chain only have
477 /// one user and not a physical register.
478 virtual Register
479 lookThruSingleUseCopyChain(Register SrcReg,
480 const MachineRegisterInfo *MRI) const;
481
482 /// Return a null-terminated list of all of the callee-saved registers on
483 /// this target. The registers should be in the order of desired callee-save
484 /// stack frame offset. The first register is closest to the incoming stack
485 /// pointer if stack grows down, and vice versa.
486 /// Notice: This function does not take into account disabled CSRs.
487 /// In most cases you will want to use the getCalleeSavedRegs function
488 /// that is implemented in MachineRegisterInfo instead.
489 virtual const MCPhysReg*
490 getCalleeSavedRegs(const MachineFunction *MF) const = 0;
491
492 /// Return a mask of call-preserved registers for the given calling convention
493 /// on the current function. The mask should include all call-preserved
494 /// aliases. This is used by the register allocator to determine which
495 /// registers can be live across a call.
496 ///
497 /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
498 /// A set bit indicates that all bits of the corresponding register are
499 /// preserved across the function call. The bit mask is expected to be
500 /// sub-register complete, i.e. if A is preserved, so are all its
501 /// sub-registers.
502 ///
503 /// Bits are numbered from the LSB, so the bit for physical register Reg can
504 /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
505 ///
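  /// As an illustrative sketch, a non-null mask can be tested for a physical
  /// register Reg like so:
  /// \code
  ///   bool Preserved = (Mask[Reg / 32] >> (Reg % 32)) & 1;
  /// \endcode
  ///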
506 /// A NULL pointer means that no register mask will be used, and call
507 /// instructions should use implicit-def operands to indicate call clobbered
508 /// registers.
509 ///
510 virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
511 CallingConv::ID) const {
512 // The default mask clobbers everything. All targets should override.
513 return nullptr;
514 }
515
516 /// Return a register mask for the registers preserved by the unwinder,
517 /// or nullptr if no custom mask is needed.
518 virtual const uint32_t *
519 getCustomEHPadPreservedMask(const MachineFunction &MF) const {
520 return nullptr;
521 }
522
523 /// Return a register mask that clobbers everything.
524 virtual const uint32_t *getNoPreservedMask() const {
525 llvm_unreachable("target does not provide no preserved mask");
526 }
527
528 /// Return a list of all of the registers which are clobbered "inside" a call
529 /// to the given function. For example, these might be needed for PLT
530 /// sequences or long-branch veneers.
531 virtual ArrayRef<MCPhysReg>
532 getIntraCallClobberedRegs(const MachineFunction *MF) const {
533 return {};
534 }
535
536 /// Return true if all bits that are set in mask \p mask0 are also set in
537 /// \p mask1.
538 bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;
539
540 /// Return all the call-preserved register masks defined for this target.
541 virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
542 virtual ArrayRef<const char *> getRegMaskNames() const = 0;
543
544 /// Returns a bitset indexed by physical register number indicating if a
545 /// register is a special register that has particular uses and should be
546 /// considered unavailable at all times, e.g. stack pointer, return address.
547 /// A reserved register:
548 /// - is not allocatable
549 /// - is considered always live
550 /// - is ignored by liveness tracking
551 /// It is often necessary to reserve the super registers of a reserved
552 /// register as well, to avoid them getting allocated indirectly. You may use
553 /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
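  ///
  /// A minimal sketch (XYZ is a hypothetical target) of an override that
  /// reserves the stack pointer together with its super-registers:
  /// \code
  ///   BitVector XYZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  ///     BitVector Reserved(getNumRegs());
  ///     markSuperRegs(Reserved, XYZ::SP);
  ///     assert(checkAllSuperRegsMarked(Reserved));
  ///     return Reserved;
  ///   }
  /// \endcode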
554 virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
555
556 /// Returns either a string explaining why the given register is reserved for
557 /// this function, or an empty optional if no explanation has been written.
558 /// The absence of an explanation does not mean that the register is not
559 /// reserved (meaning, you should check that PhysReg is in fact reserved
560 /// before calling this).
561 virtual std::optional<std::string>
562 explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const {
563 return {};
564 }
565
566 /// Returns false if we can't guarantee that Physreg, specified as an IR asm
567 /// clobber constraint, will be preserved across the statement.
568 virtual bool isAsmClobberable(const MachineFunction &MF,
569 MCRegister PhysReg) const {
570 return true;
571 }
572
573 /// Returns true if PhysReg cannot be written to in inline asm statements.
574 virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF,
575 unsigned PhysReg) const {
576 return false;
577 }
578
579 /// Returns true if PhysReg is unallocatable and constant throughout the
580 /// function. Used by MachineRegisterInfo::isConstantPhysReg().
581 virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; }
582
583 /// Returns true if the register class is considered divergent.
584 virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const {
585 return false;
586 }
587
588 /// Returns true if the register is considered uniform.
589 virtual bool isUniformReg(const MachineRegisterInfo &MRI,
590 const RegisterBankInfo &RBI, Register Reg) const {
591 return false;
592 }
593
594 /// Returns true if MachineLoopInfo should analyze the given physreg
595 /// for loop invariance.
596 virtual bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const {
597 return false;
598 }
599
600 /// Physical registers that may be modified within a function but are
601 /// guaranteed to be restored before any uses. This is useful for targets that
602 /// have call sequences where a GOT register may be updated by the caller
603 /// prior to a call and is guaranteed to be restored (also by the caller)
604 /// after the call.
605 virtual bool isCallerPreservedPhysReg(MCRegister PhysReg,
606 const MachineFunction &MF) const {
607 return false;
608 }
609
610 /// This is a wrapper around getCallPreservedMask().
611 /// Return true if the register is preserved after the call.
612 virtual bool isCalleeSavedPhysReg(MCRegister PhysReg,
613 const MachineFunction &MF) const;
614
615 /// Returns true if PhysReg can be used as an argument to a function.
616 virtual bool isArgumentRegister(const MachineFunction &MF,
617 MCRegister PhysReg) const {
618 return false;
619 }
620
621 /// Returns true if PhysReg is a fixed register.
622 virtual bool isFixedRegister(const MachineFunction &MF,
623 MCRegister PhysReg) const {
624 return false;
625 }
626
627 /// Returns true if PhysReg is a general purpose register.
628 virtual bool isGeneralPurposeRegister(const MachineFunction &MF,
629 MCRegister PhysReg) const {
630 return false;
631 }
632
633 /// Prior to adding the live-out mask to a stackmap or patchpoint
634 /// instruction, provide the target the opportunity to adjust it (mainly to
635 /// remove pseudo-registers that should be ignored).
636 virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}
637
638 /// Return a super-register of the specified register
639 /// Reg so its sub-register of index SubIdx is Reg.
640 MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
641 const TargetRegisterClass *RC) const {
642 return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
643 }
644
645 /// Return a subclass of the specified register
646 /// class A so that each register in it has a sub-register of the
647 /// specified sub-register index which is in the specified register class B.
648 ///
649 /// TableGen will synthesize missing A sub-classes.
650 virtual const TargetRegisterClass *
651 getMatchingSuperRegClass(const TargetRegisterClass *A,
652 const TargetRegisterClass *B, unsigned Idx) const;
653
654 // For a copy-like instruction that defines a register of class DefRC with
655 // subreg index DefSubReg, reading from another source with class SrcRC and
656 // subregister SrcSubReg, return true if this is a preferable copy
657 // instruction or an earlier use should be used.
658 virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
659 unsigned DefSubReg,
660 const TargetRegisterClass *SrcRC,
661 unsigned SrcSubReg) const;
662
663 /// Returns the largest legal sub-class of RC that
664 /// supports the sub-register index Idx.
665 /// If no such sub-class exists, return NULL.
666 /// If all registers in RC already have an Idx sub-register, return RC.
667 ///
668 /// TableGen generates a version of this function that is good enough in most
669 /// cases. Targets can override if they have constraints that TableGen
670 /// doesn't understand. For example, the x86 sub_8bit sub-register index is
671 /// supported by the full GR32 register class in 64-bit mode, but only by the
672 /// GR32_ABCD register class in 32-bit mode.
673 ///
674 /// TableGen will synthesize missing RC sub-classes.
675 virtual const TargetRegisterClass *
676 getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
677 assert(Idx == 0 && "Target has no sub-registers");
678 return RC;
679 }
680
681 /// Return a register class that can be used for a subregister copy from/into
682 /// \p SuperRC at \p SubRegIdx.
683 virtual const TargetRegisterClass *
684 getSubRegisterClass(const TargetRegisterClass *SuperRC,
685 unsigned SubRegIdx) const {
686 return nullptr;
687 }
688
689 /// Return the subregister index you get from composing
690 /// two subregister indices.
691 ///
692 /// The special null sub-register index composes as the identity.
693 ///
694 /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
695 /// returns c. Note that composeSubRegIndices does not tell you about illegal
696 /// compositions. If R does not have a subreg a, or R:a does not have a subreg
697 /// b, composeSubRegIndices doesn't tell you.
698 ///
699 /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
700 /// ssub_0:S0 - ssub_3:S3 subregs.
701 /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
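  ///
  /// For illustration, using the ARM example above:
  /// \code
  ///   unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
  ///   // Idx == ARM::ssub_2
  /// \endcode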
702 unsigned composeSubRegIndices(unsigned a, unsigned b) const {
703 if (!a) return b;
704 if (!b) return a;
705 return composeSubRegIndicesImpl(a, b);
706 }
707
708 /// Transforms a LaneMask computed for one subregister to the lanemask that
709 /// would have been computed when composing the subsubregisters with IdxA
710 /// first. @sa composeSubRegIndices()
711 LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
712 LaneBitmask Mask) const {
713 if (!IdxA)
714 return Mask;
715 return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
716 }
717
718 /// Transform a lanemask given for a virtual register to the corresponding
719 /// lanemask before using subregister with index \p IdxA.
720 /// This is the reverse of composeSubRegIndexLaneMask(), assuming Mask is a
721 /// valid lane mask (no invalid bits set), the following holds:
722 /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
723 /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
724 /// => X1 == Mask
725 LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
726 LaneBitmask LaneMask) const {
727 if (!IdxA)
728 return LaneMask;
729 return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
730 }
731
732 /// Debugging helper: dump register in human readable form to dbgs() stream.
733 static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
734 const TargetRegisterInfo *TRI = nullptr);
735
736 /// Return target defined base register class for a physical register.
737 /// This is the register class with the lowest BaseClassOrder containing the
738 /// register.
739 /// Will be nullptr if the register is not in any base register class.
740 virtual const TargetRegisterClass *getPhysRegBaseClass(MCRegister Reg) const {
741 return nullptr;
742 }
743
744protected:
745 /// Overridden by TableGen in targets that have sub-registers.
746 virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
747 llvm_unreachable("Target has no sub-registers");
748 }
749
750 /// Overridden by TableGen in targets that have sub-registers.
751 virtual LaneBitmask
752 composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
753 llvm_unreachable("Target has no sub-registers");
754 }
755
756 virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
757 LaneBitmask) const {
758 llvm_unreachable("Target has no sub-registers");
759 }
760
761 /// Return the register cost table index. This implementation is sufficient
762 /// for most architectures and can be overridden by targets in case there are
763 /// multiple cost values associated with each register.
764 virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
765 return 0;
766 }
767
768public:
769 /// Find a common super-register class if it exists.
770 ///
771 /// Find a register class, SuperRC and two sub-register indices, PreA and
772 /// PreB, such that:
773 ///
774 /// 1. PreA + SubA == PreB + SubB (using composeSubRegIndices()), and
775 ///
776 /// 2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
777 ///
778 /// 3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
779 ///
780 /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
781 /// requirements, and there is no register class with a smaller spill size
782 /// that satisfies the requirements.
783 ///
784 /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
785 ///
786 /// Either of the PreA and PreB sub-register indices may be returned as 0. In
787 /// that case, the returned register class will be a sub-class of the
788 /// corresponding argument register class.
789 ///
790 /// The function returns NULL if no register class can be found.
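  ///
  /// A usage sketch (RCA, SubA, RCB, and SubB are assumed to be in scope):
  /// \code
  ///   unsigned PreA, PreB;
  ///   if (const TargetRegisterClass *SuperRC =
  ///           TRI->getCommonSuperRegClass(RCA, SubA, RCB, SubB, PreA, PreB)) {
  ///     // For every Reg in SuperRC, Reg:PreA is in RCA and Reg:PreB is in RCB.
  ///   }
  /// \endcode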
791 const TargetRegisterClass*
792 getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
793 const TargetRegisterClass *RCB, unsigned SubB,
794 unsigned &PreA, unsigned &PreB) const;
795
796 //===--------------------------------------------------------------------===//
797 // Register Class Information
798 //
799protected:
800 const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
801 return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
802 }
803
804public:
805 /// Register class iterators
806 regclass_iterator regclass_begin() const { return RegClassBegin; }
807 regclass_iterator regclass_end() const { return RegClassEnd; }
808 iterator_range<regclass_iterator> regclasses() const {
809 return make_range(regclass_begin(), regclass_end());
810 }
811
812 unsigned getNumRegClasses() const {
813 return (unsigned)(regclass_end()-regclass_begin());
814 }
815
816 /// Returns the register class associated with the enumeration value.
817 /// See class MCOperandInfo.
818 const TargetRegisterClass *getRegClass(unsigned i) const {
819 assert(i < getNumRegClasses() && "Register Class ID out of range");
820 return RegClassBegin[i];
821 }
822
823 /// Returns the name of the register class.
824 const char *getRegClassName(const TargetRegisterClass *Class) const {
825 return MCRegisterInfo::getRegClassName(Class->MC);
826 }
827
828 /// Find the largest common subclass of A and B.
829 /// Return NULL if there is no common subclass.
830 const TargetRegisterClass *
831 getCommonSubClass(const TargetRegisterClass *A,
832 const TargetRegisterClass *B) const;
833
834 /// Returns a TargetRegisterClass used for pointer values.
835 /// If a target supports multiple different pointer register classes,
836 /// Kind specifies which one is indicated.
837 virtual const TargetRegisterClass *
838 getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
839 llvm_unreachable("Target didn't implement getPointerRegClass!");
840 }
841
842 /// Returns a legal register class to copy a register in the specified class
843 /// to or from. If it is possible to copy the register directly without using
844 /// a cross register class copy, return the specified RC. Returns NULL if it
845 /// is not possible to copy between two registers of the specified class.
846 virtual const TargetRegisterClass *
847 getCrossCopyRegClass(const TargetRegisterClass *RC) const {
848 return RC;
849 }
850
851 /// Returns the largest super class of RC that is legal to use in the current
852 /// sub-target and has the same spill size.
853 /// The returned register class can be used to create virtual registers which
854 /// means that all its registers can be copied and spilled.
855 virtual const TargetRegisterClass *
856 getLargestLegalSuperClass(const TargetRegisterClass *RC,
857 const MachineFunction &) const {
858 /// The default implementation is very conservative and doesn't allow the
859 /// register allocator to inflate register classes.
860 return RC;
861 }
862
863 /// Return the register pressure "high water mark" for the specific register
864 /// class. The scheduler is in high register pressure mode (for the specific
865 /// register class) if it goes over the limit.
866 ///
867 /// Note: this is the old register pressure model that relies on a manually
868 /// specified representative register class per value type.
869 virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
870 MachineFunction &MF) const {
871 return 0;
872 }
873
874 /// Return a heuristic for the machine scheduler to compare the profitability
875 /// of increasing one register pressure set versus another. The scheduler
876 /// will prefer increasing the register pressure of the set which returns
877 /// the largest value for this function.
878 virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
879 unsigned PSetID) const {
880 return PSetID;
881 }
882
883 /// Get the weight in units of pressure for this register class.
884 virtual const RegClassWeight &getRegClassWeight(
885 const TargetRegisterClass *RC) const = 0;
886
887 /// Returns size in bits of a phys/virtual/generic register.
888 TypeSize getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;
889
890 /// Get the weight in units of pressure for this register unit.
891 virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
892
893 /// Get the number of dimensions of register pressure.
894 virtual unsigned getNumRegPressureSets() const = 0;
895
896 /// Get the name of this register unit pressure set.
897 virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
898
899 /// Get the register unit pressure limit for this dimension.
900 /// This limit must be adjusted dynamically for reserved registers.
901 virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
902 unsigned Idx) const = 0;
903
904 /// Get the dimensions of register pressure impacted by this register class.
905 /// Returns a -1 terminated array of pressure set IDs.
906 virtual const int *getRegClassPressureSets(
907 const TargetRegisterClass *RC) const = 0;
908
909 /// Get the dimensions of register pressure impacted by this register unit.
910 /// Returns a -1 terminated array of pressure set IDs.
911 virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
912
913 /// Get a list of 'hint' registers that the register allocator should try
914 /// first when allocating a physical register for the virtual register
915 /// VirtReg. These registers are effectively moved to the front of the
916 /// allocation order. If true is returned, regalloc will try to only use
917 /// hints to the greatest extent possible even if it means spilling.
918 ///
919 /// The Order argument is the allocation order for VirtReg's register class
920 /// as returned from RegisterClassInfo::getOrder(). The hint registers must
921 /// come from Order, and they must not be reserved.
922 ///
923 /// The default implementation of this function will only add target
924 /// independent register allocation hints. Targets that override this
925 /// function should typically call this default implementation as well and
926 /// expect to see generic copy hints added.
927 virtual bool
928 getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
929 SmallVectorImpl<MCPhysReg> &Hints,
930 const MachineFunction &MF,
931 const VirtRegMap *VRM = nullptr,
932 const LiveRegMatrix *Matrix = nullptr) const;
933
934 /// A callback to allow target a chance to update register allocation hints
935 /// when a register is "changed" (e.g. coalesced) to another register.
936 /// e.g. On ARM, some virtual registers should target register pairs,
937 /// if one of pair is coalesced to another register, the allocation hint of
938 /// the other half of the pair should be changed to point to the new register.
939 virtual void updateRegAllocHint(Register Reg, Register NewReg,
940 MachineFunction &MF) const {
941 // Do nothing.
942 }
943
944 /// Allow the target to reverse allocation order of local live ranges. This
945 /// will generally allocate shorter local live ranges first. For targets with
946 /// many registers, this could reduce regalloc compile time by a large
947 /// factor. It is disabled by default for three reasons:
948 /// (1) Top-down allocation is simpler and easier to debug for targets that
949 /// don't benefit from reversing the order.
950 /// (2) Bottom-up allocation could result in poor eviction decisions on some
951 /// targets affecting the performance of compiled code.
952 /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
953 virtual bool reverseLocalAssignment() const { return false; }
954
955 /// Allow the target to override the cost of using a callee-saved register for
956 /// the first time. Default value of 0 means we will use a callee-saved
957 /// register if it is available.
958 virtual unsigned getCSRFirstUseCost() const { return 0; }
959
960 /// Returns true if the target requires (and can make use of) the register
961 /// scavenger.
962 virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
963 return false;
964 }
965
966 /// Returns true if the target wants to use frame pointer based accesses to
967 /// spill to the scavenger emergency spill slot.
968 virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
969 return true;
970 }
971
972 /// Returns true if the target requires post PEI scavenging of registers for
973 /// materializing frame index constants.
974 virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
975 return false;
976 }
977
978 /// Returns true if the target requires using the RegScavenger directly for
979 /// frame elimination despite using requiresFrameIndexScavenging.
980 virtual bool requiresFrameIndexReplacementScavenging(
981 const MachineFunction &MF) const {
982 return false;
983 }
984
985 /// Returns true if the target wants the LocalStackAllocation pass to be run
986 /// and virtual base registers used for more efficient stack access.
987 virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
988 return false;
989 }
990
991 /// Return true if target has reserved a spill slot in the stack frame of
992 /// the given function for the specified register. e.g. On x86, if the frame
993 /// register is required, the first fixed stack object is reserved as its
994 /// spill slot. This tells PEI not to create a new stack frame
995 /// object for the given register. It should be called only after
996 /// determineCalleeSaves().
997 virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
998 int &FrameIdx) const {
999 return false;
1000 }
1001
1002 /// Returns true if the live-ins should be tracked after register allocation.
1003 virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
1004 return true;
1005 }
1006
1007 /// True if the stack can be realigned for the target.
1008 virtual bool canRealignStack(const MachineFunction &MF) const;
1009
1010 /// True if storage within the function requires the stack pointer to be
1011 /// aligned more than the normal calling convention calls for.
1012 virtual bool shouldRealignStack(const MachineFunction &MF) const;
1013
1014 /// True if stack realignment is required and still possible.
1015 bool hasStackRealignment(const MachineFunction &MF) const {
1016 return shouldRealignStack(MF) && canRealignStack(MF);
1017 }
1018
1019 /// Get the offset from the referenced frame index in the instruction,
1020 /// if there is one.
1021 virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
1022 int Idx) const {
1023 return 0;
1024 }
1025
1026 /// Returns true if the instruction's frame index reference would be better
1027 /// served by a base register other than FP or SP.
1028 /// Used by LocalStackFrameAllocation to determine which frame index
1029 /// references it should create new base registers for.
1030 virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
1031 return false;
1032 }
1033
1034 /// Insert defining instruction(s) for a pointer to FrameIdx before
1035 /// insertion point I. Return materialized frame pointer.
1036 virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
1037 int FrameIdx,
1038 int64_t Offset) const {
1039 llvm_unreachable("materializeFrameBaseRegister does not exist on this "
1040 "target");
1041 }
1042
1043 /// Resolve a frame index operand of an instruction
1044 /// to reference the indicated base register plus offset instead.
1045 virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
1046 int64_t Offset) const {
1047 llvm_unreachable("resolveFrameIndex does not exist on this target");
1048 }
1049
1050 /// Determine whether a given base register plus offset immediate is
1051 /// encodable to resolve a frame index.
1052 virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
1053 int64_t Offset) const {
1054 llvm_unreachable("isFrameOffsetLegal does not exist on this target");
1055 }
1056
1057 /// Gets the DWARF expression opcodes for \p Offset.
1058 virtual void getOffsetOpcodes(const StackOffset &Offset,
1059 SmallVectorImpl<uint64_t> &Ops) const;
1060
1061 /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
1062 DIExpression *
1063 prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
1064 const StackOffset &Offset) const;
1065
1066 /// Spill the register so it can be used by the register scavenger.
1067 /// Return true if the register was spilled, false otherwise.
1068 /// If this function does not spill the register, the scavenger
1069 /// will instead spill it to the emergency spill slot.
1070 virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
1071 MachineBasicBlock::iterator I,
1072 MachineBasicBlock::iterator &UseMI,
1073 const TargetRegisterClass *RC,
1074 Register Reg) const {
1075 return false;
1076 }
1077
1078 /// Process frame indices in reverse block order. This changes the behavior of
1079 /// the RegScavenger passed to eliminateFrameIndex. If this is true, targets
1080 /// should call scavengeRegisterBackwards in eliminateFrameIndex. New targets
1081 /// should prefer reverse scavenging behavior.
1082 /// TODO: Remove this when all targets return true.
1083 virtual bool eliminateFrameIndicesBackwards() const { return true; }
1084
1085 /// This method must be overridden to eliminate abstract frame indices from
1086 /// instructions which may use them. The instruction referenced by the
1087 /// iterator contains an MO_FrameIndex operand which must be eliminated by
1088 /// this method. This method may modify or replace the specified instruction,
1089 /// as long as it keeps the iterator pointing at the finished product.
1090 /// SPAdj is the SP adjustment due to call frame setup instruction.
1091 /// FIOperandNum is the FI operand number.
1092 /// Returns true if the current instruction was removed and the iterator
1093 /// is no longer valid.
1094 virtual bool eliminateFrameIndex(MachineBasicBlock::iterator MI,
1095 int SPAdj, unsigned FIOperandNum,
1096 RegScavenger *RS = nullptr) const = 0;
1097
1098 /// Return the assembly name for \p Reg.
1099 virtual StringRef getRegAsmName(MCRegister Reg) const {
1100 // FIXME: We are assuming that the assembly name is equal to the TableGen
1101 // name converted to lower case
1102 //
1103 // The TableGen name is the name of the definition for this register in the
1104 // target's tablegen files. For example, the TableGen name of
1105 // def EAX : Register <...>; is "EAX"
1106 return StringRef(getName(Reg));
1107 }
1108
1109 //===--------------------------------------------------------------------===//
1110 /// Subtarget Hooks
1111
1112 /// SrcRC and DstRC will be morphed into NewRC if this returns true.
1113 virtual bool shouldCoalesce(MachineInstr *MI,
1114 const TargetRegisterClass *SrcRC,
1115 unsigned SubReg,
1116 const TargetRegisterClass *DstRC,
1117 unsigned DstSubReg,
1118 const TargetRegisterClass *NewRC,
1119 LiveIntervals &LIS) const
1120 { return true; }
1121
1122 /// Region splitting has a high compile time cost, especially for large live ranges.
1123 /// This method is used to decide whether or not \p VirtReg should
1124 /// go through this expensive splitting heuristic.
1125 virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
1126 const LiveInterval &VirtReg) const;
1127
1128 /// Last chance recoloring has a high compile time cost especially for
1129 /// targets with a lot of registers.
1130 /// This method is used to decide whether or not \p VirtReg should
1131 /// go through this expensive heuristic.
1132 /// If this target hook returns false, there is a high
1133 /// chance that the register allocation will fail altogether (usually with
1134 /// "ran out of registers").
1135 /// That said, this error usually points to another problem in the
1136 /// optimization pipeline.
1137 virtual bool
1138 shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
1139 const LiveInterval &VirtReg) const {
1140 return true;
1141 }
1142
1143 /// Deferred spilling delays the spill insertion of a virtual register
1144 /// after every other allocation. By deferring the spilling, it is
1145 /// sometimes possible to eliminate that spilling altogether because
1146 /// something else could have been eliminated, thus leaving some space
1147 /// for the virtual register.
1148 /// However, this comes with a compile time impact because it adds one
1149 /// more stage to the greedy register allocator.
1150 /// This method is used to decide whether \p VirtReg should use the deferred
1151 /// spilling stage instead of being spilled right away.
1152 virtual bool
1153 shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
1154 const LiveInterval &VirtReg) const {
1155 return false;
1156 }
1157
1158 /// When prioritizing live ranges in register allocation, if this hook returns
1159 /// true then the AllocationPriority of the register class will be treated as
1160 /// more important than whether the range is local to a basic block or global.
1161 virtual bool
1162 regClassPriorityTrumpsGlobalness(const MachineFunction &MF) const {
1163 return false;
1164 }
1165
1166 //===--------------------------------------------------------------------===//
1167 /// Debug information queries.
1168
1169 /// getFrameRegister - This method should return the register used as a base
1170 /// for values allocated in the current stack frame.
1171 virtual Register getFrameRegister(const MachineFunction &MF) const = 0;
1172
1173 /// Mark a register and all its aliases as reserved in the given set.
1174 void markSuperRegs(BitVector &RegisterSet, MCRegister Reg) const;
1175
1176 /// Returns true if for every register in the set all super registers are part
1177 /// of the set as well.
1178 bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
1179 ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
1180
1181 virtual const TargetRegisterClass *
1182 getConstrainedRegClassForOperand(const MachineOperand &MO,
1183 const MachineRegisterInfo &MRI) const {
1184 return nullptr;
1185 }
1186
1187 /// Returns the physical register number of sub-register "Index"
1188 /// for physical register RegNo. Return zero if the sub-register does not
1189 /// exist.
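  ///
  /// For example (X86 names used purely for illustration):
  /// \code
  ///   MCRegister AL = TRI->getSubReg(X86::EAX, X86::sub_8bit);
  /// \endcode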
1190 inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
1191 return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
1192 }
1193
1194 /// Some targets have non-allocatable registers that aren't technically part
1195 /// of the explicit callee saved register list, but should be handled as such
1196 /// in certain cases.
1197 virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
1198 return false;
1199 }
1200
1201 /// Returns the Largest Super Class that is being initialized. There
1202 /// should be a Pseudo Instruction implemented for the super class
1203 /// that is being returned to ensure that Init Undef can apply the
1204 /// initialization correctly.
1205 virtual const TargetRegisterClass *
1206 getLargestSuperClass(const TargetRegisterClass *RC) const {
1207 llvm_unreachable("Unexpected target register class.");
1208 }
1209
1210 /// Returns true if the architecture being targeted has the required Pseudo
1211 /// Instructions for initializing the register. By default this returns false,
1212 /// but where it is overridden for an architecture, the behaviour will be
1213 /// different. This can either be a check to ensure the Register Class is
1214 /// present, or to return true as an indication the architecture supports the
1215 /// pass. If using the method that does not check for the Register Class, it
1216 /// is imperative to ensure all required Pseudo Instructions are implemented,
1217 /// otherwise compilation may fail with an `Unexpected register class` error.
1218 virtual bool
1219 doesRegClassHavePseudoInitUndef(const TargetRegisterClass *RC) const {
1220 return false;
1221 }
1222};
1223
1224//===----------------------------------------------------------------------===//
1225// SuperRegClassIterator
1226//===----------------------------------------------------------------------===//
1227//
1228// Iterate over the possible super-registers for a given register class. The
1229// iterator will visit a list of pairs (Idx, Mask) corresponding to the
1230// possible classes of super-registers.
1231//
1232// Each bit mask will have at least one set bit, and each set bit in Mask
1233// corresponds to a SuperRC such that:
1234//
1235// For all Reg in SuperRC: Reg:Idx is in RC.
1236//
1237// The iterator can include (0, RC->getSubClassMask()) as the first entry which
1238// also satisfies the above requirement, assuming Reg:0 == Reg.
1239//
1240class SuperRegClassIterator {
1241 const unsigned RCMaskWords;
1242 unsigned SubReg = 0;
1243 const uint16_t *Idx;
1244 const uint32_t *Mask;
1245
1246public:
1247 /// Create a SuperRegClassIterator that visits all the super-register classes
1248 /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
1249 SuperRegClassIterator(const TargetRegisterClass *RC,
1250 const TargetRegisterInfo *TRI,
1251 bool IncludeSelf = false)
1252 : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
1253 Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
1254 if (!IncludeSelf)
1255 ++*this;
1256 }
1257
1258 /// Returns true if this iterator is still pointing at a valid entry.
1259 bool isValid() const { return Idx; }
1260
1261 /// Returns the current sub-register index.
1262 unsigned getSubReg() const { return SubReg; }
1263
1264 /// Returns the bit mask of register classes that getSubReg() projects into
1265 /// RC.
1266 /// See TargetRegisterClass::getSubClassMask() for how to use it.
1267 const uint32_t *getMask() const { return Mask; }
1268
1269 /// Advance iterator to the next entry.
1270 void operator++() {
1271 assert(isValid() && "Cannot move iterator past end.");
1272 Mask += RCMaskWords;
1273 SubReg = *Idx++;
1274 if (!SubReg)
1275 Idx = nullptr;
1276 }
1277};
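
// A usage sketch (RC and TRI are assumed to be in scope):
//
//   for (SuperRegClassIterator SRI(RC, TRI); SRI.isValid(); ++SRI) {
//     unsigned SubIdx = SRI.getSubReg();
//     const uint32_t *Mask = SRI.getMask();
//     // Each set bit in Mask names a class SuperRC such that for every
//     // Reg in SuperRC, Reg:SubIdx is in RC.
//   }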
1278
1279//===----------------------------------------------------------------------===//
1280// BitMaskClassIterator
1281//===----------------------------------------------------------------------===//
1282/// This class encapsulates the logic to iterate over the bitmasks returned by
1283/// the various RegClass related APIs.
1284/// E.g., this class can be used to iterate over the subclasses provided by
1285/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
1286class BitMaskClassIterator {
1287 /// Total number of register classes.
1288 const unsigned NumRegClasses;
1289 /// Base index of CurrentChunk.
1290 /// In other words, the number of bits we read to get to the
1291 /// beginning of that chunk.
1292 unsigned Base = 0;
1293 /// Adjusted base index of CurrentChunk.
1294 /// Base index + how many bits we have read within CurrentChunk.
1295 unsigned Idx = 0;
1296 /// Current register class ID.
1297 unsigned ID = 0;
1298 /// Mask we are iterating over.
1299 const uint32_t *Mask;
1300 /// Current chunk of the Mask we are traversing.
1301 uint32_t CurrentChunk;
1302
1303 /// Move ID to the next set bit.
1304 void moveToNextID() {
1305 // If the current chunk of memory is empty, move to the next one,
1306 // while making sure we do not go past the number of register
1307 // classes.
1308 while (!CurrentChunk) {
1309 // Move to the next chunk.
1310 Base += 32;
1311 if (Base >= NumRegClasses) {
1312 ID = NumRegClasses;
1313 return;
1314 }
1315 CurrentChunk = *++Mask;
1316 Idx = Base;
1317 }
1318 // Otherwise look for the first bit set from the right
1319 // (representation of the class ID is big endian).
1320 // See getSubClassMask for more details on the representation.
1321 unsigned Offset = llvm::countr_zero(CurrentChunk);
1322 // Add the Offset to the adjusted base number of this chunk: Idx.
1323 // This is the ID of the register class.
1324 ID = Idx + Offset;
1325
1326 // Consume the zeros, if any, and the bit we just read
1327 // so that we are at the right spot for the next call.
1328 // Do not do Offset + 1 because Offset may be 31, and a shift by 32
1329 // would be UB. In that case we could instead have made the
1330 // chunk equal to 0, but that would have introduced an
1331 // if statement.
1332 moveNBits(Offset);
1333 moveNBits(1);
1334 }
1335
1336 /// Move \p NumBits Bits forward in CurrentChunk.
1337 void moveNBits(unsigned NumBits) {
1338 assert(NumBits < 32 && "Undefined behavior spotted!");
1339 // Consume the bit we read for the next call.
1340 CurrentChunk >>= NumBits;
1341 // Adjust the base for the chunk.
1342 Idx += NumBits;
1343 }
1344
1345public:
1346 /// Create a BitMaskClassIterator that visits all the register classes
1347 /// represented by \p Mask.
1348 ///
1349 /// \pre \p Mask != nullptr
1350 BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
1351 : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
1352 // Move to the first ID.
1353 moveToNextID();
1354 }
1355
1356 /// Returns true if this iterator is still pointing at a valid entry.
1357 bool isValid() const { return getID() != NumRegClasses; }
1358
1359 /// Returns the current register class ID.
1360 unsigned getID() const { return ID; }
1361
1362 /// Advance iterator to the next entry.
1363 void operator++() {
1364 assert(isValid() && "Cannot move iterator past end.");
1365 moveToNextID();
1366 }
1367};
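
// A usage sketch (RC and TRI are assumed to be in scope) that visits every
// sub-class of RC, including RC itself:
//
//   for (BitMaskClassIterator It(RC->getSubClassMask(), *TRI); It.isValid();
//        ++It) {
//     const TargetRegisterClass *SubRC = TRI->getRegClass(It.getID());
//   }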
1368
1369// This is useful when building IndexedMaps keyed on virtual registers
1370struct VirtReg2IndexFunctor {
1371 using argument_type = Register;
1372 unsigned operator()(Register Reg) const {
1373 return Register::virtReg2Index(Reg);
1374 }
1375};
1376
1377/// Prints virtual and physical registers with or without a TRI instance.
1378///
1379/// The format is:
1380/// %noreg - NoRegister
1381/// %5 - a virtual register.
1382/// %5:sub_8bit - a virtual register with sub-register index (with TRI).
1383/// %eax - a physical register
1384/// %physreg17 - a physical register when no TRI instance given.
1385///
1386/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
1387Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
1388 unsigned SubIdx = 0,
1389 const MachineRegisterInfo *MRI = nullptr);
1390
1391/// Create Printable object to print register units on a \ref raw_ostream.
1392///
1393/// Register units are named after their root registers:
1394///
1395/// al - Single root.
1396/// fp0~st7 - Dual roots.
1397///
1398/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
1399Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
1400
1401/// Create Printable object to print virtual registers and physical
1402/// registers on a \ref raw_ostream.
1403Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
1404
1405/// Create Printable object to print register classes or register banks
1406/// on a \ref raw_ostream.
1407Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
1408 const TargetRegisterInfo *TRI);
1409
1410} // end namespace llvm
1411
1412#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H
1413
