//==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes an abstract interface used to get information about a
// target machine's register file. This information is used for a variety of
// purposes, especially register allocation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
#define LLVM_CODEGEN_TARGETREGISTERINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include <cassert>
#include <cstdint>
#include <functional>

namespace llvm {

class BitVector;
class DIExpression;
class LiveRegMatrix;
class MachineFunction;
class MachineInstr;
class RegScavenger;
class VirtRegMap;
class LiveIntervals;
class LiveInterval;

class TargetRegisterClass {
public:
  using iterator = const MCPhysReg *;
  using const_iterator = const MCPhysReg *;
  using sc_iterator = const TargetRegisterClass* const *;

  // Instance variables filled by tablegen, do not use!
  const MCRegisterClass *MC;
  const uint32_t *SubClassMask;
  const uint16_t *SuperRegIndices;
  const LaneBitmask LaneMask;
  /// Classes with a higher priority value are assigned first by register
  /// allocators using a greedy heuristic. The value is in the range [0,63].
  const uint8_t AllocationPriority;
  /// Whether the class supports two (or more) disjunct subregister indices.
  const bool HasDisjunctSubRegs;
  /// Whether a combination of subregisters can cover every register in the
  /// class. See also the CoveredBySubRegs description in Target.td.
  const bool CoveredBySubRegs;
  const sc_iterator SuperClasses;
  ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);

  /// Return the register class ID number.
  unsigned getID() const { return MC->getID(); }

  /// begin/end - Return all of the registers in this class.
  ///
  iterator begin() const { return MC->begin(); }
  iterator end() const { return MC->end(); }

  /// Return the number of registers in this class.
  unsigned getNumRegs() const { return MC->getNumRegs(); }

  iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
  getRegisters() const {
    return make_range(MC->begin(), MC->end());
  }

  /// Return the specified register in the class.
  MCRegister getRegister(unsigned i) const {
    return MC->getRegister(i);
  }

  /// Return true if the specified register is included in this register class.
  /// This does not include virtual registers.
  bool contains(Register Reg) const {
    /// FIXME: Historically this function has returned false when given vregs
    /// but it should probably only receive physical registers
    if (!Reg.isPhysical())
      return false;
    return MC->contains(Reg.asMCReg());
  }

  /// Return true if both registers are in this class.
  bool contains(Register Reg1, Register Reg2) const {
    /// FIXME: Historically this function has returned false when given vregs
    /// but it should probably only receive physical registers
    if (!Reg1.isPhysical() || !Reg2.isPhysical())
      return false;
    return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
  }

  /// Return the cost of copying a value between two registers in this class.
  /// A negative number means the register class is very expensive
  /// to copy e.g. status flag register classes.
  int getCopyCost() const { return MC->getCopyCost(); }

  /// Return true if this register class may be used to create virtual
  /// registers.
  bool isAllocatable() const { return MC->isAllocatable(); }

  /// Return true if the specified TargetRegisterClass
  /// is a proper sub-class of this TargetRegisterClass.
  bool hasSubClass(const TargetRegisterClass *RC) const {
    return RC != this && hasSubClassEq(RC);
  }

  /// Returns true if RC is a sub-class of or equal to this class.
  bool hasSubClassEq(const TargetRegisterClass *RC) const {
    unsigned ID = RC->getID();
    return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
  }

  /// Return true if the specified TargetRegisterClass is a
  /// proper super-class of this TargetRegisterClass.
  bool hasSuperClass(const TargetRegisterClass *RC) const {
    return RC->hasSubClass(this);
  }

  /// Returns true if RC is a super-class of or equal to this class.
  bool hasSuperClassEq(const TargetRegisterClass *RC) const {
    return RC->hasSubClassEq(this);
  }

  /// Returns a bit vector of subclasses, including this one.
  /// The vector is indexed by class IDs.
  ///
  /// To use it, consider the returned array as a chunk of memory that
  /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
  /// contains a bitset of the ID of the subclasses in big-endian style.

  /// I.e., the representation of the memory from left to right at the
  /// bit level looks like:
  /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
  /// [ XXX NumRegClasses NumRegClasses - 1 ... ]
  /// Where the number represents the class ID and XXX bits that
  /// should be ignored.
  ///
  /// See the implementation of hasSubClassEq for an example of how it
  /// can be used.
  const uint32_t *getSubClassMask() const {
    return SubClassMask;
  }
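
  // A minimal illustrative sketch (not part of the original header): testing
  // whether the class with ID "OtherID" is a sub-class of "RC" by probing the
  // sub-class mask directly, mirroring what hasSubClassEq() does above. "RC"
  // and "OtherID" are assumed to be supplied by the caller.
  //
  //   const uint32_t *SubMask = RC->getSubClassMask();
  //   bool IsSubClass = (SubMask[OtherID / 32] >> (OtherID % 32)) & 1;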

  /// Returns a 0-terminated list of sub-register indices that project some
  /// super-register class into this register class. The list has an entry for
  /// each Idx such that:
  ///
  ///   There exists SuperRC where:
  ///     For all Reg in SuperRC:
  ///       this->contains(Reg:Idx)
  const uint16_t *getSuperRegIndices() const {
    return SuperRegIndices;
  }

  /// Returns a NULL-terminated list of super-classes. The
  /// classes are ordered by ID which is also a topological ordering from large
  /// to small classes. The list does NOT include the current class.
  sc_iterator getSuperClasses() const {
    return SuperClasses;
  }

  /// Return true if this TargetRegisterClass is a subset
  /// class of at least one other TargetRegisterClass.
  bool isASubClass() const {
    return SuperClasses[0] != nullptr;
  }

  /// Returns the preferred order for allocating registers from this register
  /// class in MF. The raw order comes directly from the .td file and may
  /// include reserved registers that are not allocatable.
  /// Register allocators should also make sure to allocate
  /// callee-saved registers only after all the volatiles are used. The
  /// RegisterClassInfo class provides filtered allocation orders with
  /// callee-saved registers moved to the end.
  ///
  /// The MachineFunction argument can be used to tune the allocatable
  /// registers based on the characteristics of the function, subtarget, or
  /// other criteria.
  ///
  /// By default, this method returns all registers in the class.
  ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
    return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
  }
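
  // Illustrative sketch (assumption, not from the original header): walking
  // the raw allocation order of this class for a given MachineFunction "MF".
  // Reserved registers may still appear here; RegisterClassInfo::getOrder()
  // provides the filtered order.
  //
  //   for (MCPhysReg PhysReg : RC->getRawAllocationOrder(MF)) {
  //     // Consider PhysReg as an allocation candidate...
  //   }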

  /// Returns the combination of all lane masks of registers in this class.
  /// The lane masks of the registers are the combination of all lane masks
  /// of their subregisters. Returns 1 if there are no subregisters.
  LaneBitmask getLaneMask() const {
    return LaneMask;
  }
};

/// Extra information, not in MCRegisterDesc, about registers.
/// These are used by codegen, not by MC.
struct TargetRegisterInfoDesc {
  const uint8_t *CostPerUse; // Extra cost of instructions using register.
  unsigned NumCosts; // Number of cost values associated with each register.
  const bool
      *InAllocatableClass; // Register belongs to an allocatable regclass.
};

/// Each TargetRegisterClass has a per register weight, and weight
/// limit which must be less than the limits of its pressure sets.
struct RegClassWeight {
  unsigned RegWeight;
  unsigned WeightLimit;
};

/// TargetRegisterInfo base class - We assume that the target defines a static
/// array of TargetRegisterDesc objects that represent all of the machine
/// registers that the target has. As such, we simply have to track a pointer
/// to this array so that we can turn register number into a register
/// descriptor.
///
class TargetRegisterInfo : public MCRegisterInfo {
public:
  using regclass_iterator = const TargetRegisterClass * const *;
  using vt_iterator = const MVT::SimpleValueType *;
  struct RegClassInfo {
    unsigned RegSize, SpillSize, SpillAlignment;
    vt_iterator VTList;
  };
private:
  const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
  const char *const *SubRegIndexNames;    // Names of subreg indexes.
  // Pointer to array of lane masks, one per sub-reg index.
  const LaneBitmask *SubRegIndexLaneMasks;

  regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
  LaneBitmask CoveringLanes;
  const RegClassInfo *const RCInfos;
  unsigned HwMode;

protected:
  TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
                     regclass_iterator RCB,
                     regclass_iterator RCE,
                     const char *const *SRINames,
                     const LaneBitmask *SRILaneMasks,
                     LaneBitmask CoveringLanes,
                     const RegClassInfo *const RCIs,
                     unsigned Mode = 0);
  virtual ~TargetRegisterInfo();

public:
  // Register numbers can represent physical registers, virtual registers, and
  // sometimes stack slots. The unsigned values are divided into these ranges:
  //
  //   0           Not a register, can be used as a sentinel.
  //   [1;2^30)    Physical registers assigned by TableGen.
  //   [2^30;2^31) Stack slots. (Rarely used.)
  //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
  //
  // Further sentinels can be allocated from the small negative integers.
  // DenseMapInfo<unsigned> uses -1u and -2u.

  /// Return the size in bits of a register from class RC.
  unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).RegSize;
  }

  /// Return the size in bytes of the stack slot allocated to hold a spilled
  /// copy of a register from class RC.
  unsigned getSpillSize(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).SpillSize / 8;
  }

  /// Return the minimum required alignment in bytes for a spill slot for
  /// a register of this class.
  unsigned getSpillAlignment(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).SpillAlignment / 8;
  }

  /// Return the minimum required alignment in bytes for a spill slot for
  /// a register of this class.
  Align getSpillAlign(const TargetRegisterClass &RC) const {
    return Align(getRegClassInfo(RC).SpillAlignment / 8);
  }

  /// Return true if the given TargetRegisterClass has the ValueType T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
      if (MVT(*I) == T)
        return true;
    return false;
  }

  /// Return true if the given TargetRegisterClass is compatible with LLT T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const {
    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
      MVT VT(*I);
      if (VT == MVT::Untyped)
        return true;

      if (LLT(VT) == T)
        return true;
    }
    return false;
  }

  /// Loop over all of the value types that can be represented by values
  /// in the given register class.
  vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).VTList;
  }

  vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
    vt_iterator I = legalclasstypes_begin(RC);
    while (*I != MVT::Other)
      ++I;
    return I;
  }
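
  // Illustrative sketch (assumption): enumerating every value type that is
  // legal for a register class, relying on the MVT::Other sentinel that
  // terminates the list. "TRI" (a TargetRegisterInfo reference) and "RC" (a
  // TargetRegisterClass reference) are assumed to come from the caller.
  //
  //   for (TargetRegisterInfo::vt_iterator I = TRI.legalclasstypes_begin(RC);
  //        *I != MVT::Other; ++I) {
  //     MVT VT(*I);
  //     // ... inspect VT ...
  //   }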

  /// Returns the Register Class of a physical register of the given type,
  /// picking the most specific (smallest) register class of the right type
  /// that contains this physreg.
  const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg,
                                                    MVT VT = MVT::Other) const;

  /// Returns the Register Class of a physical register of the given type,
  /// picking the most specific (smallest) register class of the right type
  /// that contains this physreg. If there is no register class compatible
  /// with the given type, returns nullptr.
  const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg,
                                                       LLT Ty = LLT()) const;

  /// Return the maximal subclass of the given register class that is
  /// allocatable or NULL.
  const TargetRegisterClass *
  getAllocatableClass(const TargetRegisterClass *RC) const;

  /// Returns a bitset indexed by register number indicating if a register is
  /// allocatable or not. If a register class is specified, returns the subset
  /// for the class.
  BitVector getAllocatableSet(const MachineFunction &MF,
                              const TargetRegisterClass *RC = nullptr) const;

  /// Get a list of cost values for all registers that correspond to the index
  /// returned by RegisterCostTableIndex.
  ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const {
    unsigned Idx = getRegisterCostTableIndex(MF);
    unsigned NumRegs = getNumRegs();
    assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds");

    return makeArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs);
  }

  /// Return true if the register is in the allocation of any register class.
  bool isInAllocatableClass(MCRegister RegNo) const {
    return InfoDesc->InAllocatableClass[RegNo];
  }

  /// Return the human-readable symbolic target-specific
  /// name for the specified SubRegIndex.
  const char *getSubRegIndexName(unsigned SubIdx) const {
    assert(SubIdx && SubIdx < getNumSubRegIndices() &&
           "This is not a subregister index");
    return SubRegIndexNames[SubIdx-1];
  }

  /// Return a bitmask representing the parts of a register that are covered by
  /// SubIdx \see LaneBitmask.
  ///
  /// SubIdx == 0 is allowed, it has the lane mask ~0u.
  LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
    assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
    return SubRegIndexLaneMasks[SubIdx];
  }

  /// Try to find one or more subregister indexes to cover \p LaneMask.
  ///
  /// If this is possible, returns true and appends the best matching set of
  /// indexes to \p Indexes. If this is not possible, returns false.
  bool getCoveringSubRegIndexes(const MachineRegisterInfo &MRI,
                                const TargetRegisterClass *RC,
                                LaneBitmask LaneMask,
                                SmallVectorImpl<unsigned> &Indexes) const;

  /// The lane masks returned by getSubRegIndexLaneMask() above can only be
  /// used to determine if sub-registers overlap - they can't be used to
  /// determine if a set of sub-registers completely cover another
  /// sub-register.
  ///
  /// The X86 general purpose registers have two lanes corresponding to the
  /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
  /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
  /// sub_32bit sub-register.
  ///
  /// On the other hand, the ARM NEON lanes fully cover their registers: The
  /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
  /// This is related to the CoveredBySubRegs property on register definitions.
  ///
  /// This function returns a bit mask of lanes that completely cover their
  /// sub-registers. More precisely, given:
  ///
  ///   Covering = getCoveringLanes();
  ///   MaskA = getSubRegIndexLaneMask(SubA);
  ///   MaskB = getSubRegIndexLaneMask(SubB);
  ///
  /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
  /// SubB.
  LaneBitmask getCoveringLanes() const { return CoveringLanes; }
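
  // Illustrative sketch (assumption): the coverage test documented above,
  // written out as code. "SubA" and "SubB" are hypothetical sub-register
  // indices supplied by the caller.
  //
  //   LaneBitmask Covering = getCoveringLanes();
  //   LaneBitmask MaskA = getSubRegIndexLaneMask(SubA);
  //   LaneBitmask MaskB = getSubRegIndexLaneMask(SubB);
  //   bool ACoveredByB = (MaskA & ~(MaskB & Covering)).none();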

  /// Returns true if the two registers are equal or alias each other.
  /// The registers may be virtual registers.
  bool regsOverlap(Register regA, Register regB) const {
    if (regA == regB) return true;
    if (!regA.isPhysical() || !regB.isPhysical())
      return false;

    // Regunits are numerically ordered. Find a common unit.
    MCRegUnitIterator RUA(regA.asMCReg(), this);
    MCRegUnitIterator RUB(regB.asMCReg(), this);
    do {
      if (*RUA == *RUB) return true;
      if (*RUA < *RUB) ++RUA;
      else ++RUB;
    } while (RUA.isValid() && RUB.isValid());
    return false;
  }

  /// Returns true if Reg contains RegUnit.
  bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
      if (Register(*Units) == RegUnit)
        return true;
    return false;
  }

  /// Returns the original SrcReg unless it is the target of a copy-like
  /// operation, in which case we chain backwards through all such operations
  /// to the ultimate source register. If a physical register is encountered,
  /// we stop the search.
  virtual Register lookThruCopyLike(Register SrcReg,
                                    const MachineRegisterInfo *MRI) const;

  /// Find the original SrcReg unless it is the target of a copy-like operation,
  /// in which case we chain backwards through all such operations to the
  /// ultimate source register. If a physical register is encountered, we stop
  /// the search.
  /// Return the original SrcReg if all the definitions in the chain only have
  /// one user and not a physical register.
  virtual Register
  lookThruSingleUseCopyChain(Register SrcReg,
                             const MachineRegisterInfo *MRI) const;

  /// Return a null-terminated list of all of the callee-saved registers on
  /// this target. The registers should be in the order of desired callee-save
  /// stack frame offset. The first register is closest to the incoming stack
  /// pointer if stack grows down, and vice versa.
  /// Notice: This function does not take into account disabled CSRs.
  ///         In most cases you will want to use instead the function
  ///         getCalleeSavedRegs that is implemented in MachineRegisterInfo.
  virtual const MCPhysReg*
  getCalleeSavedRegs(const MachineFunction *MF) const = 0;

  /// Return a mask of call-preserved registers for the given calling convention
  /// on the current function. The mask should include all call-preserved
  /// aliases. This is used by the register allocator to determine which
  /// registers can be live across a call.
  ///
  /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
  /// A set bit indicates that all bits of the corresponding register are
  /// preserved across the function call. The bit mask is expected to be
  /// sub-register complete, i.e. if A is preserved, so are all its
  /// sub-registers.
  ///
  /// Bits are numbered from the LSB, so the bit for physical register Reg can
  /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
  ///
  /// A NULL pointer means that no register mask will be used, and call
  /// instructions should use implicit-def operands to indicate call clobbered
  /// registers.
  ///
  virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                               CallingConv::ID) const {
    // The default mask clobbers everything. All targets should override.
    return nullptr;
  }
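
  // Illustrative sketch (assumption): testing whether a physical register is
  // preserved across a call, using the bit layout documented above. "Mask" is
  // a regmask obtained from getCallPreservedMask() and assumed non-null, and
  // "PhysReg" is a physical register number from the caller.
  //
  //   bool IsPreserved = (Mask[PhysReg / 32] >> (PhysReg % 32)) & 1;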

  /// Return a register mask for the registers preserved by the unwinder,
  /// or nullptr if no custom mask is needed.
  virtual const uint32_t *
  getCustomEHPadPreservedMask(const MachineFunction &MF) const {
    return nullptr;
  }

  /// Return a register mask that clobbers everything.
  virtual const uint32_t *getNoPreservedMask() const {
    llvm_unreachable("target does not provide no preserved mask");
  }

  /// Return a list of all of the registers which are clobbered "inside" a call
  /// to the given function. For example, these might be needed for PLT
  /// sequences of long-branch veneers.
  virtual ArrayRef<MCPhysReg>
  getIntraCallClobberedRegs(const MachineFunction *MF) const {
    return {};
  }

  /// Return true if all bits that are set in mask \p mask0 are also set in
  /// \p mask1.
  bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;

  /// Return all the call-preserved register masks defined for this target.
  virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
  virtual ArrayRef<const char *> getRegMaskNames() const = 0;

  /// Returns a bitset indexed by physical register number indicating if a
  /// register is a special register that has particular uses and should be
  /// considered unavailable at all times, e.g. stack pointer, return address.
  /// A reserved register:
  /// - is not allocatable
  /// - is considered always live
  /// - is ignored by liveness tracking
  /// It is often necessary to reserve the super registers of a reserved
  /// register as well, to avoid them getting allocated indirectly. You may use
  /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
  virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;

  /// Returns false if we can't guarantee that Physreg, specified as an IR asm
  /// clobber constraint, will be preserved across the statement.
  virtual bool isAsmClobberable(const MachineFunction &MF,
                                MCRegister PhysReg) const {
    return true;
  }

  /// Returns true if PhysReg cannot be written to in inline asm statements.
  virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                      unsigned PhysReg) const {
    return false;
  }

  /// Returns true if PhysReg is unallocatable and constant throughout the
  /// function. Used by MachineRegisterInfo::isConstantPhysReg().
  virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; }

  /// Returns true if the register class is considered divergent.
  virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const {
    return false;
  }

  /// Physical registers that may be modified within a function but are
  /// guaranteed to be restored before any uses. This is useful for targets that
  /// have call sequences where a GOT register may be updated by the caller
  /// prior to a call and is guaranteed to be restored (also by the caller)
  /// after the call.
  virtual bool isCallerPreservedPhysReg(MCRegister PhysReg,
                                        const MachineFunction &MF) const {
    return false;
  }

  /// This is a wrapper around getCallPreservedMask().
  /// Return true if the register is preserved after the call.
  virtual bool isCalleeSavedPhysReg(MCRegister PhysReg,
                                    const MachineFunction &MF) const;

  /// Prior to adding the live-out mask to a stackmap or patchpoint
  /// instruction, provide the target the opportunity to adjust it (mainly to
  /// remove pseudo-registers that should be ignored).
  virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}

  /// Return a super-register of the specified register
  /// Reg so its sub-register of index SubIdx is Reg.
  MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
                                 const TargetRegisterClass *RC) const {
    return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
  }

  /// Return a subclass of the specified register
  /// class A so that each register in it has a sub-register of the
  /// specified sub-register index which is in the specified register class B.
  ///
  /// TableGen will synthesize missing A sub-classes.
  virtual const TargetRegisterClass *
  getMatchingSuperRegClass(const TargetRegisterClass *A,
                           const TargetRegisterClass *B, unsigned Idx) const;

  // For a copy-like instruction that defines a register of class DefRC with
  // subreg index DefSubReg, reading from another source with class SrcRC and
  // subregister SrcSubReg, return true if this is a preferable copy
  // instruction or an earlier use should be used.
  virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                    unsigned DefSubReg,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SrcSubReg) const;

  /// Returns the largest legal sub-class of RC that
  /// supports the sub-register index Idx.
  /// If no such sub-class exists, return NULL.
  /// If all registers in RC already have an Idx sub-register, return RC.
  ///
  /// TableGen generates a version of this function that is good enough in most
  /// cases. Targets can override if they have constraints that TableGen
  /// doesn't understand. For example, the x86 sub_8bit sub-register index is
  /// supported by the full GR32 register class in 64-bit mode, but only by the
  /// GR32_ABCD register class in 32-bit mode.
  ///
  /// TableGen will synthesize missing RC sub-classes.
  virtual const TargetRegisterClass *
  getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
    assert(Idx == 0 && "Target has no sub-registers");
    return RC;
  }

  /// Return the subregister index you get from composing
  /// two subregister indices.
  ///
  /// The special null sub-register index composes as the identity.
  ///
  /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
  /// returns c. Note that composeSubRegIndices does not tell you about illegal
  /// compositions. If R does not have a subreg a, or R:a does not have a subreg
  /// b, composeSubRegIndices doesn't tell you.
  ///
  /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
  /// ssub_0:S0 - ssub_3:S3 subregs.
  /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
  unsigned composeSubRegIndices(unsigned a, unsigned b) const {
    if (!a) return b;
    if (!b) return a;
    return composeSubRegIndicesImpl(a, b);
  }
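
  // Illustrative sketch (assumption): the ARM example from the comment above,
  // expressed as code. ARM::dsub_1, ARM::ssub_0 and ARM::ssub_2 are the
  // target-generated sub-register index enumerators; the result of this
  // composition would be ARM::ssub_2.
  //
  //   unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);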

  /// Transforms a LaneMask computed for one subregister to the lanemask that
  /// would have been computed when composing the subsubregisters with IdxA
  /// first. @sa composeSubRegIndices()
  LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
                                         LaneBitmask Mask) const {
    if (!IdxA)
      return Mask;
    return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
  }

  /// Transform a lanemask given for a virtual register to the corresponding
  /// lanemask before using subregister with index \p IdxA.
  /// This is the reverse of composeSubRegIndexLaneMask(), assuming Mask is a
  /// valid lane mask (no invalid bits set), the following holds:
  /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
  /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
  /// => X1 == Mask
  LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
                                                LaneBitmask LaneMask) const {
    if (!IdxA)
      return LaneMask;
    return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
  }
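
  // Illustrative sketch (assumption): the round-trip property stated above,
  // written as an assertion for some sub-register index "Idx" and a valid
  // lane mask "Mask" supplied by the caller.
  //
  //   LaneBitmask X0 = composeSubRegIndexLaneMask(Idx, Mask);
  //   LaneBitmask X1 = reverseComposeSubRegIndexLaneMask(Idx, X0);
  //   assert(X1 == Mask && "compose/reverse-compose must round-trip");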

  /// Debugging helper: dump register in human readable form to dbgs() stream.
  static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
                      const TargetRegisterInfo *TRI = nullptr);

protected:
  /// Overridden by TableGen in targets that have sub-registers.
  virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Overridden by TableGen in targets that have sub-registers.
  virtual LaneBitmask
  composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
                                                            LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Return the register cost table index. This implementation is sufficient
  /// for most architectures and can be overridden by targets in case there are
  /// multiple cost values associated with each register.
  virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
    return 0;
  }

public:
  /// Find a common super-register class if it exists.
  ///
  /// Find a register class, SuperRC and two sub-register indices, PreA and
  /// PreB, such that:
  ///
  ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
  ///
  ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
  ///
  ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
  ///
  /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
  /// requirements, and there is no register class with a smaller spill size
  /// that satisfies the requirements.
  ///
  /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
  ///
  /// Either of the PreA and PreB sub-register indices may be returned as 0. In
  /// that case, the returned register class will be a sub-class of the
  /// corresponding argument register class.
  ///
  /// The function returns NULL if no register class can be found.
  const TargetRegisterClass*
  getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                         const TargetRegisterClass *RCB, unsigned SubB,
                         unsigned &PreA, unsigned &PreB) const;

  //===--------------------------------------------------------------------===//
  // Register Class Information
  //
protected:
  const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
    return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
  }

public:
  /// Register class iterators
  regclass_iterator regclass_begin() const { return RegClassBegin; }
  regclass_iterator regclass_end() const { return RegClassEnd; }
  iterator_range<regclass_iterator> regclasses() const {
    return make_range(regclass_begin(), regclass_end());
  }

  unsigned getNumRegClasses() const {
    return (unsigned)(regclass_end()-regclass_begin());
  }

  /// Returns the register class associated with the enumeration value.
  /// See class MCOperandInfo.
  const TargetRegisterClass *getRegClass(unsigned i) const {
    assert(i < getNumRegClasses() && "Register Class ID out of range");
    return RegClassBegin[i];
  }
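
  // Illustrative sketch (assumption): iterating over every register class
  // known to the target, either through the regclasses() range or by class ID.
  // "TRI" is assumed to be a TargetRegisterInfo reference from the caller.
  //
  //   for (const TargetRegisterClass *RC : TRI.regclasses())
  //     (void)TRI.getRegClassName(RC);
  //
  //   for (unsigned ID = 0, E = TRI.getNumRegClasses(); ID != E; ++ID)
  //     (void)TRI.getRegClass(ID);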

  /// Returns the name of the register class.
  const char *getRegClassName(const TargetRegisterClass *Class) const {
    return MCRegisterInfo::getRegClassName(Class->MC);
  }

  /// Find the largest common subclass of A and B.
  /// Return NULL if there is no common subclass.
  const TargetRegisterClass *
  getCommonSubClass(const TargetRegisterClass *A,
                    const TargetRegisterClass *B) const;

  /// Returns a TargetRegisterClass used for pointer values.
  /// If a target supports multiple different pointer register classes,
  /// kind specifies which one is indicated.
  virtual const TargetRegisterClass *
  getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
    llvm_unreachable("Target didn't implement getPointerRegClass!");
  }

  /// Returns a legal register class to copy a register in the specified class
  /// to or from. If it is possible to copy the register directly without using
  /// a cross register class copy, return the specified RC. Returns NULL if it
  /// is not possible to copy between two registers of the specified class.
  virtual const TargetRegisterClass *
  getCrossCopyRegClass(const TargetRegisterClass *RC) const {
    return RC;
  }

  /// Returns the largest super class of RC that is legal to use in the current
  /// sub-target and has the same spill size.
  /// The returned register class can be used to create virtual registers which
  /// means that all its registers can be copied and spilled.
  virtual const TargetRegisterClass *
  getLargestLegalSuperClass(const TargetRegisterClass *RC,
                            const MachineFunction &) const {
    /// The default implementation is very conservative and doesn't allow the
    /// register allocator to inflate register classes.
    return RC;
  }

  /// Return the register pressure "high water mark" for the specific register
  /// class. The scheduler is in high register pressure mode (for the specific
  /// register class) if it goes over the limit.
  ///
  /// Note: this is the old register pressure model that relies on a manually
  /// specified representative register class per value type.
  virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
    return 0;
  }

  /// Return a heuristic for the machine scheduler to compare the profitability
  /// of increasing one register pressure set versus another. The scheduler
  /// will prefer increasing the register pressure of the set which returns
  /// the largest value for this function.
  virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
                                          unsigned PSetID) const {
    return PSetID;
  }

  /// Get the weight in units of pressure for this register class.
  virtual const RegClassWeight &getRegClassWeight(
    const TargetRegisterClass *RC) const = 0;

  /// Returns size in bits of a phys/virtual/generic register.
  unsigned getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;

  /// Get the weight in units of pressure for this register unit.
  virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;

  /// Get the number of dimensions of register pressure.
  virtual unsigned getNumRegPressureSets() const = 0;

  /// Get the name of this register unit pressure set.
  virtual const char *getRegPressureSetName(unsigned Idx) const = 0;

  /// Get the register unit pressure limit for this dimension.
  /// This limit must be adjusted dynamically for reserved registers.
  virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
                                          unsigned Idx) const = 0;

  /// Get the dimensions of register pressure impacted by this register class.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegClassPressureSets(
    const TargetRegisterClass *RC) const = 0;

  /// Get the dimensions of register pressure impacted by this register unit.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
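
  // Illustrative sketch (assumption): walking the -1 terminated pressure set
  // list of a register class and looking up each set's name and limit. "TRI",
  // "MF" and "RC" are assumed to be provided by the caller.
  //
  //   for (const int *PSet = TRI.getRegClassPressureSets(RC);
  //        *PSet != -1; ++PSet) {
  //     const char *Name = TRI.getRegPressureSetName(*PSet);
  //     unsigned Limit = TRI.getRegPressureSetLimit(MF, *PSet);
  //     // ... use Name and Limit ...
  //   }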

  /// Get a list of 'hint' registers that the register allocator should try
  /// first when allocating a physical register for the virtual register
  /// VirtReg. These registers are effectively moved to the front of the
  /// allocation order. If true is returned, regalloc will try to only use
  /// hints to the greatest extent possible even if it means spilling.
  ///
  /// The Order argument is the allocation order for VirtReg's register class
  /// as returned from RegisterClassInfo::getOrder(). The hint registers must
  /// come from Order, and they must not be reserved.
  ///
  /// The default implementation of this function will only add target
  /// independent register allocation hints. Targets that override this
  /// function should typically call this default implementation as well and
  /// expect to see generic copy hints added.
  virtual bool
  getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
                        SmallVectorImpl<MCPhysReg> &Hints,
                        const MachineFunction &MF,
                        const VirtRegMap *VRM = nullptr,
                        const LiveRegMatrix *Matrix = nullptr) const;

  /// A callback to give the target a chance to update register allocation hints
  /// when a register is "changed" (e.g. coalesced) to another register.
  /// e.g. On ARM, some virtual registers should target register pairs,
  /// if one of pair is coalesced to another register, the allocation hint of
  /// the other half of the pair should be changed to point to the new register.
  virtual void updateRegAllocHint(Register Reg, Register NewReg,
                                  MachineFunction &MF) const {
    // Do nothing.
  }

  /// Allow the target to reverse allocation order of local live ranges. This
  /// will generally allocate shorter local live ranges first. For targets with
  /// many registers, this could reduce regalloc compile time by a large
  /// factor. It is disabled by default for three reasons:
  /// (1) Top-down allocation is simpler and easier to debug for targets that
  ///     don't benefit from reversing the order.
  /// (2) Bottom-up allocation could result in poor eviction decisions on some
  ///     targets affecting the performance of compiled code.
  /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
  virtual bool reverseLocalAssignment() const { return false; }

  /// Allow the target to override the cost of using a callee-saved register for
  /// the first time. Default value of 0 means we will use a callee-saved
  /// register if it is available.
  virtual unsigned getCSRFirstUseCost() const { return 0; }

  /// Returns true if the target requires (and can make use of) the register
  /// scavenger.
  virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants to use frame pointer based accesses to
  /// spill to the scavenger emergency spill slot.
  virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
    return true;
  }

  /// Returns true if the target requires post PEI scavenging of registers for
  /// materializing frame index constants.
  virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target requires using the RegScavenger directly for
  /// frame elimination despite using requiresFrameIndexScavenging.
  virtual bool requiresFrameIndexReplacementScavenging(
      const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants the LocalStackAllocation pass to be run
  /// and virtual base registers used for more efficient stack access.
  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
    return false;
  }

  /// Return true if target has reserved a spill slot in the stack frame of
  /// the given function for the specified register. e.g. On x86, if the frame
  /// register is required, the first fixed stack object is reserved as its
  /// spill slot. This tells PEI not to create a new stack frame
  /// object for the given register. It should be called only after
  /// determineCalleeSaves().
  virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
                                    int &FrameIdx) const {
    return false;
  }

  /// Returns true if the live-ins should be tracked after register allocation.
  virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
    return true;
  }

  /// True if the stack can be realigned for the target.
  virtual bool canRealignStack(const MachineFunction &MF) const;

  /// True if storage within the function requires the stack pointer to be
  /// aligned more than the normal calling convention calls for.
  virtual bool shouldRealignStack(const MachineFunction &MF) const;

  /// True if stack realignment is required and still possible.
  bool hasStackRealignment(const MachineFunction &MF) const {
    return shouldRealignStack(MF) && canRealignStack(MF);
  }

  /// Get the offset from the referenced frame index in the instruction,
  /// if there is one.
  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                           int Idx) const {
    return 0;
  }

  /// Returns true if the instruction's frame index reference would be better
  /// served by a base register other than FP or SP.
  /// Used by LocalStackFrameAllocation to determine which frame index
  /// references it should create new base registers for.
  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
    return false;
  }

  /// Insert defining instruction(s) for a pointer to FrameIdx before
  /// insertion point I. Return materialized frame pointer.
  virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                int FrameIdx,
                                                int64_t Offset) const {
    llvm_unreachable("materializeFrameBaseRegister does not exist on this "
                     "target");
  }

  /// Resolve a frame index operand of an instruction
  /// to reference the indicated base register plus offset instead.
  virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                 int64_t Offset) const {
    llvm_unreachable("resolveFrameIndex does not exist on this target");
  }

  /// Determine whether a given base register plus offset immediate is
  /// encodable to resolve a frame index.
  virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
                                  int64_t Offset) const {
    llvm_unreachable("isFrameOffsetLegal does not exist on this target");
  }

  /// Gets the DWARF expression opcodes for \p Offset.
  virtual void getOffsetOpcodes(const StackOffset &Offset,
                                SmallVectorImpl<uint64_t> &Ops) const;

  /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
  DIExpression *
  prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
                          const StackOffset &Offset) const;

  /// Spill the register so it can be used by the register scavenger.
  /// Return true if the register was spilled, false otherwise.
  /// If this function does not spill the register, the scavenger
  /// will instead spill it to the emergency spill slot.
  virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator &UseMI,
                                     const TargetRegisterClass *RC,
                                     Register Reg) const {
    return false;
  }

  /// This method must be overridden to eliminate abstract frame indices from
  /// instructions which may use them. The instruction referenced by the
  /// iterator contains an MO_FrameIndex operand which must be eliminated by
  /// this method. This method may modify or replace the specified instruction,
  /// as long as it keeps the iterator pointing at the finished product.
  /// SPAdj is the SP adjustment due to call frame setup instruction.
  /// FIOperandNum is the FI operand number.
  virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                   int SPAdj, unsigned FIOperandNum,
                                   RegScavenger *RS = nullptr) const = 0;

  /// Return the assembly name for \p Reg.
  virtual StringRef getRegAsmName(MCRegister Reg) const {
    // FIXME: We are assuming that the assembly name is equal to the TableGen
    // name converted to lower case
    //
    // The TableGen name is the name of the definition for this register in the
    // target's tablegen files. For example, the TableGen name of
    // def EAX : Register <...>; is "EAX"
    return StringRef(getName(Reg));
  }

  //===--------------------------------------------------------------------===//
  /// Subtarget Hooks

  /// SrcRC and DstRC will be morphed into NewRC if this returns true.
  virtual bool shouldCoalesce(MachineInstr *MI,
                              const TargetRegisterClass *SrcRC,
                              unsigned SubReg,
                              const TargetRegisterClass *DstRC,
                              unsigned DstSubReg,
                              const TargetRegisterClass *NewRC,
                              LiveIntervals &LIS) const
  { return true; }

  /// Region split has a high compile time cost especially for large live range.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive splitting heuristic.
  virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
                                           const LiveInterval &VirtReg) const;

  /// Last chance recoloring has a high compile time cost especially for
  /// targets with a lot of registers.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive heuristic.
  /// If this hook returns false, there is a high chance that the
  /// register allocation will fail altogether (usually with
  /// "ran out of registers").
  /// That said, this error usually points to another problem in the
  /// optimization pipeline.
  virtual bool
  shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
                                          const LiveInterval &VirtReg) const {
    return true;
  }

  /// Deferred spilling delays the spill insertion of a virtual register
  /// after every other allocation. By deferring the spilling, it is
  /// sometimes possible to eliminate that spilling altogether because
  /// something else could have been eliminated, thus leaving some space
  /// for the virtual register.
  /// However, this comes with a compile time impact because it adds one
  /// more stage to the greedy register allocator.
  /// This method is used to decide whether \p VirtReg should use the deferred
  /// spilling stage instead of being spilled right away.
  virtual bool
  shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
                                      const LiveInterval &VirtReg) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// Debug information queries.

  /// getFrameRegister - This method should return the register used as a base
  /// for values allocated in the current stack frame.
  virtual Register getFrameRegister(const MachineFunction &MF) const = 0;

  /// Mark a register and all its aliases as reserved in the given set.
  void markSuperRegs(BitVector &RegisterSet, MCRegister Reg) const;

  /// Returns true if for every register in the set all super registers are part
  /// of the set as well.
  bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
      ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;

  virtual const TargetRegisterClass *
  getConstrainedRegClassForOperand(const MachineOperand &MO,
                                   const MachineRegisterInfo &MRI) const {
    return nullptr;
  }

  /// Returns the physical register number of sub-register "Index"
  /// for physical register RegNo. Return zero if the sub-register does not
  /// exist.
  inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
    return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
  }
};

//===----------------------------------------------------------------------===//
//                           SuperRegClassIterator
//===----------------------------------------------------------------------===//
//
// Iterate over the possible super-registers for a given register class. The
// iterator will visit a list of pairs (Idx, Mask) corresponding to the
// possible classes of super-registers.
//
// Each bit mask will have at least one set bit, and each set bit in Mask
// corresponds to a SuperRC such that:
//
//   For all Reg in SuperRC: Reg:Idx is in RC.
//
1116// The iterator can include (O, RC->getSubClassMask()) as the first entry which
1117// also satisfies the above requirement, assuming Reg:0 == Reg.
1118//
1119class SuperRegClassIterator {
1120 const unsigned RCMaskWords;
1121 unsigned SubReg = 0;
1122 const uint16_t *Idx;
1123 const uint32_t *Mask;
1124
1125public:
1126 /// Create a SuperRegClassIterator that visits all the super-register classes
1127 /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
1128 SuperRegClassIterator(const TargetRegisterClass *RC,
1129 const TargetRegisterInfo *TRI,
1130 bool IncludeSelf = false)
1131 : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
1132 Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
1133 if (!IncludeSelf)
1134 ++*this;
1135 }
1136
1137 /// Returns true if this iterator is still pointing at a valid entry.
1138 bool isValid() const { return Idx; }
1139
1140 /// Returns the current sub-register index.
1141 unsigned getSubReg() const { return SubReg; }
1142
1143 /// Returns the bit mask of register classes that getSubReg() projects into
1144 /// RC.
1145 /// See TargetRegisterClass::getSubClassMask() for how to use it.
1146 const uint32_t *getMask() const { return Mask; }
1147
1148 /// Advance iterator to the next entry.
1149 void operator++() {
1150 assert(isValid() && "Cannot move iterator past end.");
1151 Mask += RCMaskWords;
1152 SubReg = *Idx++;
1153 if (!SubReg)
1154 Idx = nullptr;
1155 }
1156};

//===----------------------------------------------------------------------===//
//                           BitMaskClassIterator
//===----------------------------------------------------------------------===//
1161/// This class encapuslates the logic to iterate over bitmask returned by
1162/// the various RegClass related APIs.
1163/// E.g., this class can be used to iterate over the subclasses provided by
1164/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
1165class BitMaskClassIterator {
1166 /// Total number of register classes.
1167 const unsigned NumRegClasses;
1168 /// Base index of CurrentChunk.
  /// In other words, the number of bits we read to get to the
  /// beginning of that chunk.
  unsigned Base = 0;
  /// Adjust base index of CurrentChunk.
  /// Base index + how many bits we read within CurrentChunk.
  unsigned Idx = 0;
  /// Current register class ID.
  unsigned ID = 0;
  /// Mask we are iterating over.
  const uint32_t *Mask;
  /// Current chunk of the Mask we are traversing.
  uint32_t CurrentChunk;

  /// Move ID to the next set bit.
  void moveToNextID() {
    // If the current chunk of memory is empty, move to the next one,
    // while making sure we do not go past the number of register
    // classes.
    while (!CurrentChunk) {
      // Move to the next chunk.
      Base += 32;
      if (Base >= NumRegClasses) {
        ID = NumRegClasses;
        return;
      }
      CurrentChunk = *++Mask;
      Idx = Base;
    }
    // Otherwise look for the first bit set from the right
    // (representation of the class ID is big endian).
    // See getSubClassMask for more details on the representation.
    unsigned Offset = countTrailingZeros(CurrentChunk);
    // Add the Offset to the adjusted base number of this chunk: Idx.
    // This is the ID of the register class.
    ID = Idx + Offset;

    // Consume the zeros, if any, and the bit we just read
    // so that we are at the right spot for the next call.
    // Do not do Offset + 1 because Offset may be 31 and a shift by 32
    // would be UB, though in that case we could have made the chunk
    // equal to 0, but that would have introduced an if statement.
    moveNBits(Offset);
    moveNBits(1);
  }

  /// Move \p NumBits Bits forward in CurrentChunk.
  void moveNBits(unsigned NumBits) {
    assert(NumBits < 32 && "Undefined behavior spotted!");
    // Consume the bit we read for the next call.
    CurrentChunk >>= NumBits;
    // Adjust the base for the chunk.
    Idx += NumBits;
  }

public:
  /// Create a BitMaskClassIterator that visits all the register classes
  /// represented by \p Mask.
  ///
  /// \pre \p Mask != nullptr
  BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
      : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
    // Move to the first ID.
    moveToNextID();
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return getID() != NumRegClasses; }

  /// Returns the current register class ID.
  unsigned getID() const { return ID; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    moveToNextID();
  }
};

// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor {
  using argument_type = Register;
  unsigned operator()(Register Reg) const {
    return Register::virtReg2Index(Reg);
  }
};

/// Prints virtual and physical registers with or without a TRI instance.
///
/// The format is:
///   %noreg          - NoRegister
///   %5              - a virtual register.
///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
///   %eax            - a physical register
///   %physreg17      - a physical register when no TRI instance given.
///
/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
                   unsigned SubIdx = 0,
                   const MachineRegisterInfo *MRI = nullptr);

/// Create Printable object to print register units on a \ref raw_ostream.
///
/// Register units are named after their root registers:
///
///   al      - Single root.
///   fp0~st7 - Dual roots.
///
/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);

/// Create Printable object to print virtual registers and physical
/// registers on a \ref raw_ostream.
Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);

/// Create Printable object to print register classes or register banks
/// on a \ref raw_ostream.
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H