1 //==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file describes an abstract interface used to get information about a
10 // target machine's register file.  This information is used for a variety of
11 // purposes, especially register allocation.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
16 #define LLVM_CODEGEN_TARGETREGISTERINFO_H
17 
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/ADT/iterator_range.h"
22 #include "llvm/CodeGen/MachineBasicBlock.h"
23 #include "llvm/CodeGen/RegisterBank.h"
24 #include "llvm/IR/CallingConv.h"
25 #include "llvm/MC/LaneBitmask.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/Support/ErrorHandling.h"
28 #include "llvm/Support/MathExtras.h"
29 #include "llvm/Support/Printable.h"
30 #include <cassert>
31 #include <cstdint>
32 
33 namespace llvm {
34 
35 class BitVector;
36 class DIExpression;
37 class LiveRegMatrix;
38 class MachineFunction;
39 class MachineInstr;
40 class RegScavenger;
41 class VirtRegMap;
42 class LiveIntervals;
43 class LiveInterval;
44 
45 class TargetRegisterClass {
46 public:
47   using iterator = const MCPhysReg *;
48   using const_iterator = const MCPhysReg *;
49   using sc_iterator = const TargetRegisterClass* const *;
50 
51   // Instance variables filled by tablegen, do not use!
52   const MCRegisterClass *MC;
53   const uint32_t *SubClassMask;
54   const uint16_t *SuperRegIndices;
55   const LaneBitmask LaneMask;
56   /// Classes with a higher priority value are assigned first by register
57   /// allocators using a greedy heuristic. The value is in the range [0,31].
58   const uint8_t AllocationPriority;
59 
60   // Change allocation priority heuristic used by greedy.
61   const bool GlobalPriority;
62 
63   /// Configurable target specific flags.
64   const uint8_t TSFlags;
65   /// Whether the class supports two (or more) disjunct subregister indices.
66   const bool HasDisjunctSubRegs;
67   /// Whether a combination of subregisters can cover every register in the
68   /// class. See also the CoveredBySubRegs description in Target.td.
69   const bool CoveredBySubRegs;
70   const sc_iterator SuperClasses;
71   ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
72 
73   /// Return the register class ID number.
74   unsigned getID() const { return MC->getID(); }
75 
76   /// begin/end - Return all of the registers in this class.
77   ///
78   iterator       begin() const { return MC->begin(); }
79   iterator         end() const { return MC->end(); }
80 
81   /// Return the number of registers in this class.
82   unsigned getNumRegs() const { return MC->getNumRegs(); }
83 
84   ArrayRef<MCPhysReg> getRegisters() const {
85     return ArrayRef(begin(), getNumRegs());
86   }
87 
88   /// Return the specified register in the class.
89   MCRegister getRegister(unsigned i) const {
90     return MC->getRegister(i);
91   }
92 
93   /// Return true if the specified register is included in this register class.
94   /// This does not include virtual registers.
95   bool contains(Register Reg) const {
96     /// FIXME: Historically this function has returned false when given vregs
97     ///        but it should probably only receive physical registers
98     if (!Reg.isPhysical())
99       return false;
100     return MC->contains(Reg.asMCReg());
101   }
102 
103   /// Return true if both registers are in this class.
104   bool contains(Register Reg1, Register Reg2) const {
105     /// FIXME: Historically this function has returned false when given vregs
106     ///        but it should probably only receive physical registers
107     if (!Reg1.isPhysical() || !Reg2.isPhysical())
108       return false;
109     return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
110   }
111 
112   /// Return the cost of copying a value between two registers in this class.
113   /// A negative number means the register class is very expensive
114   /// to copy e.g. status flag register classes.
115   int getCopyCost() const { return MC->getCopyCost(); }
116 
117   /// Return true if this register class may be used to create virtual
118   /// registers.
119   bool isAllocatable() const { return MC->isAllocatable(); }
120 
121   /// Return true if this register class has a defined BaseClassOrder.
122   bool isBaseClass() const { return MC->isBaseClass(); }
123 
124   /// Return true if the specified TargetRegisterClass
125   /// is a proper sub-class of this TargetRegisterClass.
126   bool hasSubClass(const TargetRegisterClass *RC) const {
127     return RC != this && hasSubClassEq(RC);
128   }
129 
130   /// Returns true if RC is a sub-class of or equal to this class.
131   bool hasSubClassEq(const TargetRegisterClass *RC) const {
132     unsigned ID = RC->getID();
133     return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
134   }
135 
136   /// Return true if the specified TargetRegisterClass is a
137   /// proper super-class of this TargetRegisterClass.
138   bool hasSuperClass(const TargetRegisterClass *RC) const {
139     return RC->hasSubClass(this);
140   }
141 
142   /// Returns true if RC is a super-class of or equal to this class.
143   bool hasSuperClassEq(const TargetRegisterClass *RC) const {
144     return RC->hasSubClassEq(this);
145   }
146 
147   /// Returns a bit vector of subclasses, including this one.
148   /// The vector is indexed by class IDs.
149   ///
150   /// To use it, consider the returned array as a chunk of memory that
151   /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
152   /// contains a bitset of the ID of the subclasses in big-endian style.
153 
154   /// I.e., the representation of the memory from left to right at the
155   /// bit level looks like:
156   /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
157   ///                     [ XXX NumRegClasses NumRegClasses - 1 ... ]
158   /// Where the number represents the class ID and XXX bits that
159   /// should be ignored.
160   ///
161   /// See the implementation of hasSubClassEq for an example of how it
162   /// can be used.
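  ///
  /// A minimal illustrative sketch (mirroring hasSubClassEq() above) that
  /// tests whether the class with ID 'ID' is a sub-class of 'RC':
  /// \code
  ///   const uint32_t *Mask = RC->getSubClassMask();
  ///   bool IsSubClassEq = (Mask[ID / 32] >> (ID % 32)) & 1;
  /// \endcode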
163   const uint32_t *getSubClassMask() const {
164     return SubClassMask;
165   }
166 
167   /// Returns a 0-terminated list of sub-register indices that project some
168   /// super-register class into this register class. The list has an entry for
169   /// each Idx such that:
170   ///
171   ///   There exists SuperRC where:
172   ///     For all Reg in SuperRC:
173   ///       this->contains(Reg:Idx)
174   const uint16_t *getSuperRegIndices() const {
175     return SuperRegIndices;
176   }
177 
178   /// Returns a NULL-terminated list of super-classes.  The
179   /// classes are ordered by ID which is also a topological ordering from large
180   /// to small classes.  The list does NOT include the current class.
181   sc_iterator getSuperClasses() const {
182     return SuperClasses;
183   }
184 
185   /// Return true if this TargetRegisterClass is a subset
186   /// class of at least one other TargetRegisterClass.
187   bool isASubClass() const {
188     return SuperClasses[0] != nullptr;
189   }
190 
191   /// Returns the preferred order for allocating registers from this register
192   /// class in MF. The raw order comes directly from the .td file and may
193   /// include reserved registers that are not allocatable.
194   /// Register allocators should also make sure to allocate
195   /// callee-saved registers only after all the volatiles are used. The
196   /// RegisterClassInfo class provides filtered allocation orders with
197   /// callee-saved registers moved to the end.
198   ///
199   /// The MachineFunction argument can be used to tune the allocatable
200   /// registers based on the characteristics of the function, subtarget, or
201   /// other criteria.
202   ///
203   /// By default, this method returns all registers in the class.
204   ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
205     return OrderFunc ? OrderFunc(MF) : getRegisters();
206   }
207 
208   /// Returns the combination of all lane masks of registers in this class.
209   /// The lane masks of the registers are the combination of all lane masks
210   /// of their subregisters. Returns 1 if there are no subregisters.
211   LaneBitmask getLaneMask() const {
212     return LaneMask;
213   }
214 };
215 
216 /// Extra information, not in MCRegisterDesc, about registers.
217 /// These are used by codegen, not by MC.
218 struct TargetRegisterInfoDesc {
219   const uint8_t *CostPerUse; // Extra cost of instructions using register.
220   unsigned NumCosts; // Number of cost values associated with each register.
221   const bool
222       *InAllocatableClass; // Register belongs to an allocatable regclass.
223 };
224 
225 /// Each TargetRegisterClass has a per register weight, and weight
226 /// limit which must be less than the limits of its pressure sets.
227 struct RegClassWeight {
228   unsigned RegWeight;
229   unsigned WeightLimit;
230 };
231 
232 /// TargetRegisterInfo base class - We assume that the target defines a static
233 /// array of TargetRegisterDesc objects that represent all of the machine
234 /// registers that the target has.  As such, we simply have to track a pointer
235 /// to this array so that we can turn register number into a register
236 /// descriptor.
237 ///
238 class TargetRegisterInfo : public MCRegisterInfo {
239 public:
240   using regclass_iterator = const TargetRegisterClass * const *;
241   using vt_iterator = const MVT::SimpleValueType *;
242   struct RegClassInfo {
243     unsigned RegSize, SpillSize, SpillAlignment;
244     unsigned VTListOffset;
245   };
246 
247   /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg
248   /// index; -1 in either field marks the range as invalid.
249   struct SubRegCoveredBits {
250     uint16_t Offset;
251     uint16_t Size;
252   };
253 
254 private:
255   const TargetRegisterInfoDesc *InfoDesc;     // Extra desc array for codegen
256   const char *const *SubRegIndexNames;        // Names of subreg indexes.
257   const SubRegCoveredBits *SubRegIdxRanges;   // Pointer to the subreg covered
258                                               // bit ranges array.
259 
260   // Pointer to array of lane masks, one per sub-reg index.
261   const LaneBitmask *SubRegIndexLaneMasks;
262 
263   regclass_iterator RegClassBegin, RegClassEnd;   // List of regclasses
264   LaneBitmask CoveringLanes;
265   const RegClassInfo *const RCInfos;
266   const MVT::SimpleValueType *const RCVTLists;
267   unsigned HwMode;
268 
269 protected:
270   TargetRegisterInfo(const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
271                      regclass_iterator RCE, const char *const *SRINames,
272                      const SubRegCoveredBits *SubIdxRanges,
273                      const LaneBitmask *SRILaneMasks, LaneBitmask CoveringLanes,
274                      const RegClassInfo *const RCIs,
275                      const MVT::SimpleValueType *const RCVTLists,
276                      unsigned Mode = 0);
277   virtual ~TargetRegisterInfo();
278 
279 public:
280   /// Return the number of registers for the function. (may overestimate)
281   virtual unsigned getNumSupportedRegs(const MachineFunction &) const {
282     return getNumRegs();
283   }
284 
285   // Register numbers can represent physical registers, virtual registers, and
286   // sometimes stack slots. The unsigned values are divided into these ranges:
287   //
288   //   0           Not a register, can be used as a sentinel.
289   //   [1;2^30)    Physical registers assigned by TableGen.
290   //   [2^30;2^31) Stack slots. (Rarely used.)
291   //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
292   //
293   // Further sentinels can be allocated from the small negative integers.
294   // DenseMapInfo<unsigned> uses -1u and -2u.
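  //
  // A short sketch (names illustrative) of how these ranges are normally
  // distinguished through the Register wrapper rather than raw comparisons:
  //
  //   Register R = MO.getReg();
  //   if (R.isVirtual()) {         // [2^31;2^32), from MachineRegisterInfo
  //     ...
  //   } else if (R.isPhysical()) { // [1;2^30), TableGen-assigned
  //     ...
  //   } else if (R.isStack()) {    // [2^30;2^31), stack slots
  //     ...
  //   }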
295 
296   /// Return the size in bits of a register from class RC.
297   TypeSize getRegSizeInBits(const TargetRegisterClass &RC) const {
298     return TypeSize::getFixed(getRegClassInfo(RC).RegSize);
299   }
300 
301   /// Return the size in bytes of the stack slot allocated to hold a spilled
302   /// copy of a register from class RC.
303   unsigned getSpillSize(const TargetRegisterClass &RC) const {
304     return getRegClassInfo(RC).SpillSize / 8;
305   }
306 
307   /// Return the minimum required alignment in bytes for a spill slot for
308   /// a register of this class.
309   Align getSpillAlign(const TargetRegisterClass &RC) const {
310     return Align(getRegClassInfo(RC).SpillAlignment / 8);
311   }
312 
313   /// Return true if the given TargetRegisterClass has the ValueType T.
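  ///
  /// For instance (illustrative), i32 is among the value types of X86's GR32
  /// class, so:
  /// \code
  ///   TRI.isTypeLegalForClass(X86::GR32RegClass, MVT::i32); // true
  ///   TRI.isTypeLegalForClass(X86::GR32RegClass, MVT::f64); // false
  /// \endcode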
314   bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
315     for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
316       if (MVT(*I) == T)
317         return true;
318     return false;
319   }
320 
321   /// Return true if the given TargetRegisterClass is compatible with LLT T.
322   bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const {
323     for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
324       MVT VT(*I);
325       if (VT == MVT::Untyped)
326         return true;
327 
328       if (LLT(VT) == T)
329         return true;
330     }
331     return false;
332   }
333 
334   /// Loop over all of the value types that can be represented by values
335   /// in the given register class.
336   vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
337     return &RCVTLists[getRegClassInfo(RC).VTListOffset];
338   }
339 
340   vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
341     vt_iterator I = legalclasstypes_begin(RC);
342     while (*I != MVT::Other)
343       ++I;
344     return I;
345   }
346 
347   /// Returns the Register Class of a physical register of the given type,
348   /// picking the most specific sub register class of the right type that contains
349   /// this physreg.
350   const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg,
351                                                     MVT VT = MVT::Other) const;
352 
353   /// Returns the Register Class of a physical register of the given type,
354   /// picking the most specific sub register class of the right type that contains
355   /// this physreg. If there is no register class compatible with the given type,
356   /// returns nullptr.
357   const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg,
358                                                        LLT Ty = LLT()) const;
359 
360   /// Return the maximal subclass of the given register class that is
361   /// allocatable or NULL.
362   const TargetRegisterClass *
363     getAllocatableClass(const TargetRegisterClass *RC) const;
364 
365   /// Returns a bitset indexed by register number indicating if a register is
366   /// allocatable or not. If a register class is specified, returns the subset
367   /// for the class.
368   BitVector getAllocatableSet(const MachineFunction &MF,
369                               const TargetRegisterClass *RC = nullptr) const;
370 
371   /// Get a list of cost values for all registers that correspond to the index
372   /// returned by RegisterCostTableIndex.
373   ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const {
374     unsigned Idx = getRegisterCostTableIndex(MF);
375     unsigned NumRegs = getNumRegs();
376     assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds");
377 
378     return ArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs);
379   }
380 
381   /// Return true if the register is in the allocation of any register class.
382   bool isInAllocatableClass(MCRegister RegNo) const {
383     return InfoDesc->InAllocatableClass[RegNo];
384   }
385 
386   /// Return the human-readable symbolic target-specific
387   /// name for the specified SubRegIndex.
388   const char *getSubRegIndexName(unsigned SubIdx) const {
389     assert(SubIdx && SubIdx < getNumSubRegIndices() &&
390            "This is not a subregister index");
391     return SubRegIndexNames[SubIdx-1];
392   }
393 
394   /// Get the size of the bit range covered by a sub-register index.
395   /// If the index isn't continuous, return the sum of the sizes of its parts.
396   /// If the index is used to access subregisters of different sizes, return -1.
397   unsigned getSubRegIdxSize(unsigned Idx) const;
398 
399   /// Get the offset of the bit range covered by a sub-register index.
400   /// If an Offset doesn't make sense (the index isn't continuous, or is used to
401   /// access sub-registers at different offsets), return -1.
402   unsigned getSubRegIdxOffset(unsigned Idx) const;
403 
404   /// Return a bitmask representing the parts of a register that are covered by
405   /// SubIdx \see LaneBitmask.
406   ///
407   /// SubIdx == 0 is allowed, it has the lane mask ~0u.
408   LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
409     assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
410     return SubRegIndexLaneMasks[SubIdx];
411   }
412 
413   /// Try to find one or more subregister indexes to cover \p LaneMask.
414   ///
415   /// If this is possible, returns true and appends the best matching set of
416   /// indexes to \p Indexes. If this is not possible, returns false.
417   bool getCoveringSubRegIndexes(const MachineRegisterInfo &MRI,
418                                 const TargetRegisterClass *RC,
419                                 LaneBitmask LaneMask,
420                                 SmallVectorImpl<unsigned> &Indexes) const;
421 
422   /// The lane masks returned by getSubRegIndexLaneMask() above can only be
423   /// used to determine if sub-registers overlap - they can't be used to
424   /// determine if a set of sub-registers completely cover another
425   /// sub-register.
426   ///
427   /// The X86 general purpose registers have two lanes corresponding to the
428   /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
429   /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
430   /// sub_32bit sub-register.
431   ///
432   /// On the other hand, the ARM NEON lanes fully cover their registers: The
433   /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
434   /// This is related to the CoveredBySubRegs property on register definitions.
435   ///
436   /// This function returns a bit mask of lanes that completely cover their
437   /// sub-registers. More precisely, given:
438   ///
439   ///   Covering = getCoveringLanes();
440   ///   MaskA = getSubRegIndexLaneMask(SubA);
441   ///   MaskB = getSubRegIndexLaneMask(SubB);
442   ///
443   /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
444   /// SubB.
445   LaneBitmask getCoveringLanes() const { return CoveringLanes; }
446 
447   /// Returns true if the two registers are equal or alias each other.
448   /// The registers may be virtual registers.
449   bool regsOverlap(Register RegA, Register RegB) const {
450     if (RegA == RegB)
451       return true;
452     if (RegA.isPhysical() && RegB.isPhysical())
453       return MCRegisterInfo::regsOverlap(RegA.asMCReg(), RegB.asMCReg());
454     return false;
455   }
456 
457   /// Returns true if Reg contains RegUnit.
458   bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
459     for (MCRegUnit Unit : regunits(Reg))
460       if (Register(Unit) == RegUnit)
461         return true;
462     return false;
463   }
464 
465   /// Returns the original SrcReg unless it is the target of a copy-like
466   /// operation, in which case we chain backwards through all such operations
467   /// to the ultimate source register.  If a physical register is encountered,
468   /// we stop the search.
469   virtual Register lookThruCopyLike(Register SrcReg,
470                                     const MachineRegisterInfo *MRI) const;
471 
472   /// Find the original SrcReg unless it is the target of a copy-like operation,
473   /// in which case we chain backwards through all such operations to the
474   /// ultimate source register. If a physical register is encountered, we stop
475   /// the search.
476   /// Return the original SrcReg if all the definitions in the chain only have
477   /// one user and not a physical register.
478   virtual Register
479   lookThruSingleUseCopyChain(Register SrcReg,
480                              const MachineRegisterInfo *MRI) const;
481 
482   /// Return a null-terminated list of all of the callee-saved registers on
483   /// this target. The register should be in the order of desired callee-save
484   /// stack frame offset. The first register is closest to the incoming stack
485   /// pointer if stack grows down, and vice versa.
486   /// Notice: This function does not take into account disabled CSRs.
487   ///         In most cases you will want to use instead the function
488   ///         getCalleeSavedRegs that is implemented in MachineRegisterInfo.
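  /// A typical traversal of the returned null-terminated list looks roughly
  /// like this (sketch; SavedRegs is an illustrative BitVector sized to
  /// getNumRegs()):
  /// \code
  ///   for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I)
  ///     SavedRegs.set(*I);
  /// \endcode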
489   virtual const MCPhysReg*
490   getCalleeSavedRegs(const MachineFunction *MF) const = 0;
491 
492   /// Return a mask of call-preserved registers for the given calling convention
493   /// on the current function. The mask should include all call-preserved
494   /// aliases. This is used by the register allocator to determine which
495   /// registers can be live across a call.
496   ///
497   /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
498   /// A set bit indicates that all bits of the corresponding register are
499   /// preserved across the function call.  The bit mask is expected to be
500   /// sub-register complete, i.e. if A is preserved, so are all its
501   /// sub-registers.
502   ///
503   /// Bits are numbered from the LSB, so the bit for physical register Reg can
504   /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
505   ///
506   /// A NULL pointer means that no register mask will be used, and call
507   /// instructions should use implicit-def operands to indicate call clobbered
508   /// registers.
509   ///
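  /// For example (sketch), testing whether physical register Reg survives a
  /// call made under calling convention CC:
  /// \code
  ///   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CC);
  ///   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);
  /// \endcode
  ///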
510   virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
511                                                CallingConv::ID) const {
512     // The default mask clobbers everything.  All targets should override.
513     return nullptr;
514   }
515 
516   /// Return a register mask for the registers preserved by the unwinder,
517   /// or nullptr if no custom mask is needed.
518   virtual const uint32_t *
519   getCustomEHPadPreservedMask(const MachineFunction &MF) const {
520     return nullptr;
521   }
522 
523   /// Return a register mask that clobbers everything.
524   virtual const uint32_t *getNoPreservedMask() const {
525     llvm_unreachable("target does not provide no preserved mask");
526   }
527 
528   /// Return a list of all of the registers which are clobbered "inside" a call
529   /// to the given function. For example, these might be needed for PLT
530   /// sequences of long-branch veneers.
531   virtual ArrayRef<MCPhysReg>
532   getIntraCallClobberedRegs(const MachineFunction *MF) const {
533     return {};
534   }
535 
536   /// Return true if all bits that are set in mask \p mask0 are also set in
537   /// \p mask1.
538   bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;
539 
540   /// Return all the call-preserved register masks defined for this target.
541   virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
542   virtual ArrayRef<const char *> getRegMaskNames() const = 0;
543 
544   /// Returns a bitset indexed by physical register number indicating if a
545   /// register is a special register that has particular uses and should be
546   /// considered unavailable at all times, e.g. stack pointer, return address.
547   /// A reserved register:
548   /// - is not allocatable
549   /// - is considered always live
550   /// - is ignored by liveness tracking
551   /// It is often necessary to reserve the super registers of a reserved
552   /// register as well, to avoid them getting allocated indirectly. You may use
553   /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
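  /// A typical target implementation looks roughly like this (sketch;
  /// XYZ::SP and XYZ::FP are placeholder register names):
  /// \code
  ///   BitVector Reserved(getNumRegs());
  ///   markSuperRegs(Reserved, XYZ::SP);
  ///   markSuperRegs(Reserved, XYZ::FP);
  ///   assert(checkAllSuperRegsMarked(Reserved));
  ///   return Reserved;
  /// \endcode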
554   virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
555 
556   /// Returns either a string explaining why the given register is reserved for
557   /// this function, or an empty optional if no explanation has been written.
558   /// The absence of an explanation does not mean that the register is not
559   /// reserved (meaning, you should check that PhysReg is in fact reserved
560   /// before calling this).
561   virtual std::optional<std::string>
562   explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const {
563     return {};
564   }
565 
566   /// Returns false if we can't guarantee that Physreg, specified as an IR asm
567   /// clobber constraint, will be preserved across the statement.
568   virtual bool isAsmClobberable(const MachineFunction &MF,
569                                 MCRegister PhysReg) const {
570     return true;
571   }
572 
573   /// Returns true if PhysReg cannot be written to in inline asm statements.
574   virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF,
575                                       unsigned PhysReg) const {
576     return false;
577   }
578 
579   /// Returns true if PhysReg is unallocatable and constant throughout the
580   /// function.  Used by MachineRegisterInfo::isConstantPhysReg().
581   virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; }
582 
583   /// Returns true if the register class is considered divergent.
584   virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const {
585     return false;
586   }
587 
588   /// Returns true if the register is considered uniform.
589   virtual bool isUniformReg(const MachineRegisterInfo &MRI,
590                             const RegisterBankInfo &RBI, Register Reg) const {
591     return false;
592   }
593 
594   /// Returns true if MachineLoopInfo should analyze the given physreg
595   /// for loop invariance.
596   virtual bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const {
597     return false;
598   }
599 
600   /// Physical registers that may be modified within a function but are
601   /// guaranteed to be restored before any uses. This is useful for targets that
602   /// have call sequences where a GOT register may be updated by the caller
603   /// prior to a call and is guaranteed to be restored (also by the caller)
604   /// after the call.
605   virtual bool isCallerPreservedPhysReg(MCRegister PhysReg,
606                                         const MachineFunction &MF) const {
607     return false;
608   }
609 
610   /// This is a wrapper around getCallPreservedMask().
611   /// Return true if the register is preserved after the call.
612   virtual bool isCalleeSavedPhysReg(MCRegister PhysReg,
613                                     const MachineFunction &MF) const;
614 
615   /// Returns true if PhysReg can be used as an argument to a function.
616   virtual bool isArgumentRegister(const MachineFunction &MF,
617                                   MCRegister PhysReg) const {
618     return false;
619   }
620 
621   /// Returns true if PhysReg is a fixed register.
622   virtual bool isFixedRegister(const MachineFunction &MF,
623                                MCRegister PhysReg) const {
624     return false;
625   }
626 
627   /// Returns true if PhysReg is a general purpose register.
628   virtual bool isGeneralPurposeRegister(const MachineFunction &MF,
629                                         MCRegister PhysReg) const {
630     return false;
631   }
632 
633   /// Returns true if RC is a class/subclass of general purpose register.
634   virtual bool
635   isGeneralPurposeRegisterClass(const TargetRegisterClass *RC) const {
636     return false;
637   }
638 
639   /// Prior to adding the live-out mask to a stackmap or patchpoint
640   /// instruction, provide the target the opportunity to adjust it (mainly to
641   /// remove pseudo-registers that should be ignored).
642   virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}
643 
644   /// Return a super-register of the specified register
645   /// Reg so its sub-register of index SubIdx is Reg.
646   MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
647                                  const TargetRegisterClass *RC) const {
648     return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
649   }
650 
651   /// Return a subclass of the specified register
652   /// class A so that each register in it has a sub-register of the
653   /// specified sub-register index which is in the specified register class B.
654   ///
655   /// TableGen will synthesize missing A sub-classes.
656   virtual const TargetRegisterClass *
657   getMatchingSuperRegClass(const TargetRegisterClass *A,
658                            const TargetRegisterClass *B, unsigned Idx) const;
659 
660   // For a copy-like instruction that defines a register of class DefRC with
661   // subreg index DefSubReg, reading from another source with class SrcRC and
662   // subregister SrcSubReg return true if this is a preferable copy
663   // instruction or an earlier use should be used.
664   virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
665                                     unsigned DefSubReg,
666                                     const TargetRegisterClass *SrcRC,
667                                     unsigned SrcSubReg) const;
668 
669   /// Returns the largest legal sub-class of RC that
670   /// supports the sub-register index Idx.
671   /// If no such sub-class exists, return NULL.
672   /// If all registers in RC already have an Idx sub-register, return RC.
673   ///
674   /// TableGen generates a version of this function that is good enough in most
675   /// cases.  Targets can override if they have constraints that TableGen
676   /// doesn't understand.  For example, the x86 sub_8bit sub-register index is
677   /// supported by the full GR32 register class in 64-bit mode, but only by the
678   /// GR32_ABCD register class in 32-bit mode.
679   ///
680   /// TableGen will synthesize missing RC sub-classes.
681   virtual const TargetRegisterClass *
682   getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
683     assert(Idx == 0 && "Target has no sub-registers");
684     return RC;
685   }
686 
687   /// Return a register class that can be used for a subregister copy from/into
688   /// \p SuperRC at \p SubRegIdx.
689   virtual const TargetRegisterClass *
690   getSubRegisterClass(const TargetRegisterClass *SuperRC,
691                       unsigned SubRegIdx) const {
692     return nullptr;
693   }
694 
695   /// Return the subregister index you get from composing
696   /// two subregister indices.
697   ///
698   /// The special null sub-register index composes as the identity.
699   ///
700   /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
701   /// returns c. Note that composeSubRegIndices does not tell you about illegal
702   /// compositions. If R does not have a subreg a, or R:a does not have a subreg
703   /// b, composeSubRegIndices doesn't tell you.
704   ///
705   /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
706   /// ssub_0:S0 - ssub_3:S3 subregs.
707   /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
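  /// \code
  ///   // Illustrative, using the ARM indices from the example above:
  ///   unsigned Idx = TRI.composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
  ///   // Idx == ARM::ssub_2
  /// \endcode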
708   unsigned composeSubRegIndices(unsigned a, unsigned b) const {
709     if (!a) return b;
710     if (!b) return a;
711     return composeSubRegIndicesImpl(a, b);
712   }
713 
714   /// Transforms a LaneMask computed for one subregister to the lanemask that
715   /// would have been computed when composing the sub-subregisters with IdxA
716   /// first. @sa composeSubRegIndices()
717   LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
718                                          LaneBitmask Mask) const {
719     if (!IdxA)
720       return Mask;
721     return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
722   }
723 
724   /// Transform a lanemask given for a virtual register to the corresponding
725   /// lanemask before using subregister with index \p IdxA.
726   /// This is the reverse of composeSubRegIndexLaneMask(), assuming Mask is a
727   /// valid lane mask (no invalid bits set), the following holds:
728   /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
729   /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
730   /// => X1 == Mask
731   LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
732                                                 LaneBitmask LaneMask) const {
733     if (!IdxA)
734       return LaneMask;
735     return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
736   }
737 
738   /// Debugging helper: dump register in human readable form to dbgs() stream.
739   static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
740                       const TargetRegisterInfo *TRI = nullptr);
741 
742   /// Return target defined base register class for a physical register.
743   /// This is the register class with the lowest BaseClassOrder containing the
744   /// register.
745   /// Will be nullptr if the register is not in any base register class.
746   virtual const TargetRegisterClass *getPhysRegBaseClass(MCRegister Reg) const {
747     return nullptr;
748   }
749 
750 protected:
751   /// Overridden by TableGen in targets that have sub-registers.
752   virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
753     llvm_unreachable("Target has no sub-registers");
754   }
755 
756   /// Overridden by TableGen in targets that have sub-registers.
757   virtual LaneBitmask
758   composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
759     llvm_unreachable("Target has no sub-registers");
760   }
761 
762   virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
763                                                             LaneBitmask) const {
764     llvm_unreachable("Target has no sub-registers");
765   }
766 
767   /// Return the register cost table index. This implementation is sufficient
768   /// for most architectures and can be overridden by targets in case there are
769   /// multiple cost values associated with each register.
770   virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
771     return 0;
772   }
773 
774 public:
775   /// Find a common super-register class if it exists.
776   ///
777   /// Find a register class, SuperRC and two sub-register indices, PreA and
778   /// PreB, such that:
779   ///
780   ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
781   ///
782   ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
783   ///
784   ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
785   ///
786   /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
787   /// requirements, and there is no register class with a smaller spill size
788   /// that satisfies the requirements.
789   ///
790   /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
791   ///
792   /// Either of the PreA and PreB sub-register indices may be returned as 0. In
793   /// that case, the returned register class will be a sub-class of the
794   /// corresponding argument register class.
795   ///
796   /// The function returns NULL if no register class can be found.
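  ///
  /// The typical call shape is (sketch):
  /// \code
  ///   unsigned PreA, PreB;
  ///   if (const TargetRegisterClass *SuperRC =
  ///           TRI->getCommonSuperRegClass(RCA, SubA, RCB, SubB, PreA, PreB)) {
  ///     // Every Reg in SuperRC has Reg:PreA in RCA and Reg:PreB in RCB.
  ///   }
  /// \endcode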
797   const TargetRegisterClass*
798   getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
799                          const TargetRegisterClass *RCB, unsigned SubB,
800                          unsigned &PreA, unsigned &PreB) const;
801 
802   //===--------------------------------------------------------------------===//
803   // Register Class Information
804   //
805 protected:
806   const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
807     return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
808   }
809 
810 public:
811   /// Register class iterators
812   regclass_iterator regclass_begin() const { return RegClassBegin; }
813   regclass_iterator regclass_end() const { return RegClassEnd; }
814   iterator_range<regclass_iterator> regclasses() const {
815     return make_range(regclass_begin(), regclass_end());
816   }
817 
818   unsigned getNumRegClasses() const {
819     return (unsigned)(regclass_end()-regclass_begin());
820   }
821 
822   /// Returns the register class associated with the enumeration value.
823   /// See class MCOperandInfo.
824   const TargetRegisterClass *getRegClass(unsigned i) const {
825     assert(i < getNumRegClasses() && "Register Class ID out of range");
826     return RegClassBegin[i];
827   }
828 
829   /// Returns the name of the register class.
830   const char *getRegClassName(const TargetRegisterClass *Class) const {
831     return MCRegisterInfo::getRegClassName(Class->MC);
832   }
833 
834   /// Find the largest common subclass of A and B.
835   /// Return NULL if there is no common subclass.
836   const TargetRegisterClass *
837   getCommonSubClass(const TargetRegisterClass *A,
838                     const TargetRegisterClass *B) const;
839 
840   /// Returns a TargetRegisterClass used for pointer values.
841   /// If a target supports multiple different pointer register classes,
842   /// kind specifies which one is indicated.
843   virtual const TargetRegisterClass *
844   getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
845     llvm_unreachable("Target didn't implement getPointerRegClass!");
846   }
847 
848   /// Returns a legal register class to copy a register in the specified class
849   /// to or from. If it is possible to copy the register directly without using
850   /// a cross register class copy, return the specified RC. Returns NULL if it
851   /// is not possible to copy between two registers of the specified class.
852   virtual const TargetRegisterClass *
853   getCrossCopyRegClass(const TargetRegisterClass *RC) const {
854     return RC;
855   }
856 
857   /// Returns the largest super class of RC that is legal to use in the current
858   /// sub-target and has the same spill size.
859   /// The returned register class can be used to create virtual registers which
860   /// means that all its registers can be copied and spilled.
861   virtual const TargetRegisterClass *
862   getLargestLegalSuperClass(const TargetRegisterClass *RC,
863                             const MachineFunction &) const {
864     /// The default implementation is very conservative and doesn't allow the
865     /// register allocator to inflate register classes.
866     return RC;
867   }
868 
869   /// Return the register pressure "high water mark" for the specific register
870   /// class. The scheduler is in high register pressure mode (for the specific
871   /// register class) if it goes over the limit.
872   ///
873   /// Note: this is the old register pressure model that relies on a manually
874   /// specified representative register class per value type.
875   virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
876                                        MachineFunction &MF) const {
877     return 0;
878   }
879 
880   /// Return a heuristic for the machine scheduler to compare the profitability
881   /// of increasing one register pressure set versus another.  The scheduler
882   /// will prefer increasing the register pressure of the set which returns
883   /// the largest value for this function.
884   virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
885                                           unsigned PSetID) const {
886     return PSetID;
887   }
888 
889   /// Get the weight in units of pressure for this register class.
890   virtual const RegClassWeight &getRegClassWeight(
891     const TargetRegisterClass *RC) const = 0;
892 
893   /// Returns size in bits of a phys/virtual/generic register.
894   TypeSize getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;
895 
896   /// Get the weight in units of pressure for this register unit.
897   virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
898 
899   /// Get the number of dimensions of register pressure.
900   virtual unsigned getNumRegPressureSets() const = 0;
901 
902   /// Get the name of this register unit pressure set.
903   virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
904 
905   /// Get the register unit pressure limit for this dimension.
906   /// This limit must be adjusted dynamically for reserved registers.
907   virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
908                                           unsigned Idx) const = 0;
909 
910   /// Get the dimensions of register pressure impacted by this register class.
911   /// Returns a -1 terminated array of pressure set IDs.
912   virtual const int *getRegClassPressureSets(
913     const TargetRegisterClass *RC) const = 0;
914 
915   /// Get the dimensions of register pressure impacted by this register unit.
916   /// Returns a -1 terminated array of pressure set IDs.
917   virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
918 
919   /// Get a list of 'hint' registers that the register allocator should try
920   /// first when allocating a physical register for the virtual register
921   /// VirtReg. These registers are effectively moved to the front of the
922   /// allocation order. If true is returned, regalloc will try to only use
923   /// hints to the greatest extent possible even if it means spilling.
924   ///
925   /// The Order argument is the allocation order for VirtReg's register class
926   /// as returned from RegisterClassInfo::getOrder(). The hint registers must
927   /// come from Order, and they must not be reserved.
928   ///
929   /// The default implementation of this function will only add target
930   /// independent register allocation hints. Targets that override this
931   /// function should typically call this default implementation as well and
932   /// expect to see generic copy hints added.
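  ///
  /// A target override usually delegates to this default implementation first
  /// (sketch; MyTargetRegisterInfo is a placeholder class name):
  /// \code
  ///   bool MyTargetRegisterInfo::getRegAllocationHints(
  ///       Register VirtReg, ArrayRef<MCPhysReg> Order,
  ///       SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
  ///       const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  ///     bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
  ///         VirtReg, Order, Hints, MF, VRM, Matrix);
  ///     // ... append target-specific hints taken from Order ...
  ///     return BaseImplRetVal;
  ///   }
  /// \endcode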
933   virtual bool
934   getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
935                         SmallVectorImpl<MCPhysReg> &Hints,
936                         const MachineFunction &MF,
937                         const VirtRegMap *VRM = nullptr,
938                         const LiveRegMatrix *Matrix = nullptr) const;
939 
940   /// A callback to allow target a chance to update register allocation hints
941   /// when a register is "changed" (e.g. coalesced) to another register.
942   /// e.g. on ARM, some virtual registers should target register pairs;
943   /// if one of the pair is coalesced to another register, the allocation hint of
944   /// the other half of the pair should be changed to point to the new register.
945   virtual void updateRegAllocHint(Register Reg, Register NewReg,
946                                   MachineFunction &MF) const {
947     // Do nothing.
948   }
949 
950   /// Allow the target to reverse allocation order of local live ranges. This
951   /// will generally allocate shorter local live ranges first. For targets with
952   /// many registers, this could reduce regalloc compile time by a large
953   /// factor. It is disabled by default for three reasons:
954   /// (1) Top-down allocation is simpler and easier to debug for targets that
955   /// don't benefit from reversing the order.
956   /// (2) Bottom-up allocation could result in poor eviction decisions on some
957   /// targets affecting the performance of compiled code.
958   /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
959   virtual bool reverseLocalAssignment() const { return false; }
960 
961   /// Allow the target to override the cost of using a callee-saved register for
962   /// the first time. Default value of 0 means we will use a callee-saved
963   /// register if it is available.
964   virtual unsigned getCSRFirstUseCost() const { return 0; }
965 
966   /// Returns true if the target requires (and can make use of) the register
967   /// scavenger.
968   virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
969     return false;
970   }
971 
972   /// Returns true if the target wants to use frame pointer based accesses to
973   /// spill to the scavenger emergency spill slot.
974   virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
975     return true;
976   }
977 
978   /// Returns true if the target requires post PEI scavenging of registers for
979   /// materializing frame index constants.
980   virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
981     return false;
982   }
983 
984   /// Returns true if the target requires using the RegScavenger directly for
985   /// frame elimination despite using requiresFrameIndexScavenging.
986   virtual bool requiresFrameIndexReplacementScavenging(
987       const MachineFunction &MF) const {
988     return false;
989   }
990 
991   /// Returns true if the target wants the LocalStackAllocation pass to be run
992   /// and virtual base registers used for more efficient stack access.
993   virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
994     return false;
995   }
996 
997   /// Return true if target has reserved a spill slot in the stack frame of
998   /// the given function for the specified register. e.g. On x86, if the frame
999   /// register is required, the first fixed stack object is reserved as its
1000   /// spill slot. This tells PEI not to create a new stack frame
1001   /// object for the given register. It should be called only after
1002   /// determineCalleeSaves().
1003   virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
1004                                     int &FrameIdx) const {
1005     return false;
1006   }
1007 
1008   /// Returns true if the live-ins should be tracked after register allocation.
1009   virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
1010     return true;
1011   }
1012 
1013   /// True if the stack can be realigned for the target.
1014   virtual bool canRealignStack(const MachineFunction &MF) const;
1015 
1016   /// True if storage within the function requires the stack pointer to be
1017   /// aligned more than the normal calling convention calls for.
1018   virtual bool shouldRealignStack(const MachineFunction &MF) const;
1019 
1020   /// True if stack realignment is required and still possible.
1021   bool hasStackRealignment(const MachineFunction &MF) const {
1022     return shouldRealignStack(MF) && canRealignStack(MF);
1023   }
1024 
1025   /// Get the offset from the referenced frame index in the instruction,
1026   /// if there is one.
1027   virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
1028                                            int Idx) const {
1029     return 0;
1030   }
1031 
1032   /// Returns true if the instruction's frame index reference would be better
1033   /// served by a base register other than FP or SP.
1034   /// Used by LocalStackFrameAllocation to determine which frame index
1035   /// references it should create new base registers for.
1036   virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
1037     return false;
1038   }
1039 
1040   /// Insert defining instruction(s) for a pointer to FrameIdx before
1041   /// insertion point I. Return materialized frame pointer.
1042   virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
1043                                                 int FrameIdx,
1044                                                 int64_t Offset) const {
1045     llvm_unreachable("materializeFrameBaseRegister does not exist on this "
1046                      "target");
1047   }
1048 
1049   /// Resolve a frame index operand of an instruction
1050   /// to reference the indicated base register plus offset instead.
1051   virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
1052                                  int64_t Offset) const {
1053     llvm_unreachable("resolveFrameIndex does not exist on this target");
1054   }
1055 
1056   /// Determine whether a given base register plus offset immediate is
1057   /// encodable to resolve a frame index.
1058   virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
1059                                   int64_t Offset) const {
1060     llvm_unreachable("isFrameOffsetLegal does not exist on this target");
1061   }
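  // Roughly, the LocalStackSlotAllocation pass combines the hooks above as
  // follows (sketch; variable names illustrative):
  //
  //   if (needsFrameBaseReg(MI, Offset)) {
  //     Register BaseReg =
  //         materializeFrameBaseRegister(MBB, FrameIdx, BaseOffset);
  //     if (isFrameOffsetLegal(MI, BaseReg, Offset - BaseOffset))
  //       resolveFrameIndex(*MI, BaseReg, Offset - BaseOffset);
  //   }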
1062 
1063   /// Gets the DWARF expression opcodes for \p Offset.
1064   virtual void getOffsetOpcodes(const StackOffset &Offset,
1065                                 SmallVectorImpl<uint64_t> &Ops) const;
1066 
1067   /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
1068   DIExpression *
1069   prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
1070                           const StackOffset &Offset) const;
1071 
1072   /// Spill the register so it can be used by the register scavenger.
1073   /// Return true if the register was spilled, false otherwise.
1074   /// If this function does not spill the register, the scavenger
1075   /// will instead spill it to the emergency spill slot.
1076   virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
1077                                      MachineBasicBlock::iterator I,
1078                                      MachineBasicBlock::iterator &UseMI,
1079                                      const TargetRegisterClass *RC,
1080                                      Register Reg) const {
1081     return false;
1082   }
1083 
1084   /// Process frame indices in reverse block order. This changes the behavior of
1085   /// the RegScavenger passed to eliminateFrameIndex. If this is true targets
1086   /// should call scavengeRegisterBackwards() in eliminateFrameIndex. New targets
1087   /// should prefer reverse scavenging behavior.
1088   /// TODO: Remove this when all targets return true.
1089   virtual bool eliminateFrameIndicesBackwards() const { return true; }
1090 
1091   /// This method must be overriden to eliminate abstract frame indices from
1092   /// instructions which may use them. The instruction referenced by the
1093   /// iterator contains an MO_FrameIndex operand which must be eliminated by
1094   /// this method. This method may modify or replace the specified instruction,
1095   /// as long as it keeps the iterator pointing at the finished product.
1096   /// SPAdj is the SP adjustment due to call frame setup instruction.
1097   /// FIOperandNum is the FI operand number.
1098   /// Returns true if the current instruction was removed and the iterator
1099   /// is no longer valid.
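  ///
  /// A skeletal override often rewrites the frame index operand in place
  /// (sketch; whether the offset lives in operand FIOperandNum + 1 and how
  /// FrameReg/Offset are computed is target-specific):
  /// \code
  ///   MachineInstr &Inst = *MI;
  ///   // ... compute FrameReg and Offset for the referenced frame index ...
  ///   Inst.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*isDef=*/false);
  ///   Inst.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
  ///   return false; // the instruction was not erased
  /// \endcode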
1100   virtual bool eliminateFrameIndex(MachineBasicBlock::iterator MI,
1101                                    int SPAdj, unsigned FIOperandNum,
1102                                    RegScavenger *RS = nullptr) const = 0;
1103 
1104   /// Return the assembly name for \p Reg.
1105   virtual StringRef getRegAsmName(MCRegister Reg) const {
1106     // FIXME: We are assuming that the assembly name is equal to the TableGen
1107     // name converted to lower case
1108     //
1109     // The TableGen name is the name of the definition for this register in the
1110     // target's tablegen files.  For example, the TableGen name of
1111     // def EAX : Register <...>; is "EAX"
1112     return StringRef(getName(Reg));
1113   }
1114 
1115   //===--------------------------------------------------------------------===//
1116   /// Subtarget Hooks
1117 
1118   /// SrcRC and DstRC will be morphed into NewRC if this returns true.
1119   virtual bool shouldCoalesce(MachineInstr *MI,
1120                               const TargetRegisterClass *SrcRC,
1121                               unsigned SubReg,
1122                               const TargetRegisterClass *DstRC,
1123                               unsigned DstSubReg,
1124                               const TargetRegisterClass *NewRC,
1125                               LiveIntervals &LIS) const
1126   { return true; }
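  // For illustration, a target might veto coalescing that would constrain the
  // result to an overly small register class. A hypothetical sketch (the
  // "Foo" name and the threshold of 8 registers are assumptions, not guidance
  // from this interface):
  //
  //   bool FooRegisterInfo::shouldCoalesce(MachineInstr *MI,
  //                                        const TargetRegisterClass *SrcRC,
  //                                        unsigned SubReg,
  //                                        const TargetRegisterClass *DstRC,
  //                                        unsigned DstSubReg,
  //                                        const TargetRegisterClass *NewRC,
  //                                        LiveIntervals &LIS) const {
  //     // Avoid constraining the result to a class with very few registers.
  //     return NewRC->getNumRegs() >= 8;
  //   }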

  /// Region split has a high compile time cost especially for large live
  /// ranges. This method is used to decide whether or not \p VirtReg should
  /// go through this expensive splitting heuristic.
  virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
                                           const LiveInterval &VirtReg) const;

  /// Last chance recoloring has a high compile time cost especially for
  /// targets with a lot of registers.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive heuristic.
  /// Note that returning false from this hook means there is a high
  /// chance that register allocation will fail altogether (usually with
  /// "ran out of registers").
  /// That said, this error usually points to another problem in the
  /// optimization pipeline.
  virtual bool
  shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
                                          const LiveInterval &VirtReg) const {
    return true;
  }

  /// Deferred spilling delays the spill insertion of a virtual register
  /// until after every other allocation. By deferring the spilling, it is
  /// sometimes possible to eliminate that spilling altogether because
  /// something else could have been eliminated, thus leaving some space
  /// for the virtual register.
  /// However, this comes with a compile time impact because it adds one
  /// more stage to the greedy register allocator.
  /// This method is used to decide whether \p VirtReg should use the deferred
  /// spilling stage instead of being spilled right away.
  virtual bool
  shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
                                      const LiveInterval &VirtReg) const {
    return false;
  }

  /// When prioritizing live ranges in register allocation, if this hook returns
  /// true then the AllocationPriority of the register class will be treated as
  /// more important than whether the range is local to a basic block or global.
  virtual bool
  regClassPriorityTrumpsGlobalness(const MachineFunction &MF) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// Debug information queries.

  /// getFrameRegister - This method should return the register used as a base
  /// for values allocated in the current stack frame.
  virtual Register getFrameRegister(const MachineFunction &MF) const = 0;

  /// Mark a register and all its aliases as reserved in the given set.
  void markSuperRegs(BitVector &RegisterSet, MCRegister Reg) const;

  /// Returns true if for every register in the set all super registers are part
  /// of the set as well.
  bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
      ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
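  // These two helpers are typically used together when a target builds its
  // reserved register set. A sketch for a hypothetical "Foo" target (the
  // register names are illustrative):
  //
  //   BitVector FooRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  //     BitVector Reserved(getNumRegs());
  //     // Reserve the stack and frame pointers together with all registers
  //     // that alias them.
  //     markSuperRegs(Reserved, Foo::SP);
  //     markSuperRegs(Reserved, Foo::FP);
  //     assert(checkAllSuperRegsMarked(Reserved) &&
  //            "Reserved set must be closed under super-registers");
  //     return Reserved;
  //   }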

  virtual const TargetRegisterClass *
  getConstrainedRegClassForOperand(const MachineOperand &MO,
                                   const MachineRegisterInfo &MRI) const {
    return nullptr;
  }

  /// Returns the physical register number of sub-register \p Idx for physical
  /// register \p Reg. Returns zero if the sub-register does not exist.
  inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
    return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
  }
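  // For example, on a target like X86 (sub-register index names are generated
  // per target by TableGen):
  //
  //   MCRegister Lo8 = TRI->getSubReg(X86::EAX, X86::sub_8bit); // X86::AL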

  /// Some targets have non-allocatable registers that aren't technically part
  /// of the explicit callee saved register list, but should be handled as such
  /// in certain cases.
  virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
    return false;
  }

  /// Returns the largest super class that is being initialized. There
  /// should be a pseudo instruction implemented for the super class
  /// that is being returned to ensure that InitUndef can apply the
  /// initialization correctly.
  virtual const TargetRegisterClass *
  getLargestSuperClass(const TargetRegisterClass *RC) const {
    llvm_unreachable("Unexpected target register class.");
  }

  /// Returns true if the architecture being targeted has the required pseudo
  /// instructions for initializing the register. By default this returns
  /// false, but where it is overridden for an architecture the behaviour will
  /// be different. The override can either check that the register class is
  /// present, or simply return true as an indication that the architecture
  /// supports the pass. If using the variant that does not check for the
  /// register class, it is imperative to ensure all required pseudo
  /// instructions are implemented, otherwise compilation may fail with an
  /// `Unexpected register class` error.
  virtual bool
  doesRegClassHavePseudoInitUndef(const TargetRegisterClass *RC) const {
    return false;
  }
};

//===----------------------------------------------------------------------===//
//                           SuperRegClassIterator
//===----------------------------------------------------------------------===//
//
// Iterate over the possible super-registers for a given register class. The
// iterator will visit a list of pairs (Idx, Mask) corresponding to the
// possible classes of super-registers.
//
// Each bit mask will have at least one set bit, and each set bit in Mask
// corresponds to a SuperRC such that:
//
//   For all Reg in SuperRC: Reg:Idx is in RC.
//
// The iterator can include (0, RC->getSubClassMask()) as the first entry,
// which also satisfies the above requirement, assuming Reg:0 == Reg.
//
class SuperRegClassIterator {
  const unsigned RCMaskWords;
  unsigned SubReg = 0;
  const uint16_t *Idx;
  const uint32_t *Mask;

public:
  /// Create a SuperRegClassIterator that visits all the super-register classes
  /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
  SuperRegClassIterator(const TargetRegisterClass *RC,
                        const TargetRegisterInfo *TRI,
                        bool IncludeSelf = false)
    : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
      Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
    if (!IncludeSelf)
      ++*this;
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return Idx; }

  /// Returns the current sub-register index.
  unsigned getSubReg() const { return SubReg; }

  /// Returns the bit mask of register classes that getSubReg() projects into
  /// RC.
  /// See TargetRegisterClass::getSubClassMask() for how to use it.
  const uint32_t *getMask() const { return Mask; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    Mask += RCMaskWords;
    SubReg = *Idx++;
    if (!SubReg)
      Idx = nullptr;
  }
};
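
// A sketch of typical usage (RC and TRI are assumed to be in scope), walking
// every (Idx, Mask) pair the iterator produces:
//
//   for (SuperRegClassIterator SRI(RC, TRI); SRI.isValid(); ++SRI) {
//     unsigned SubIdx = SRI.getSubReg();
//     const uint32_t *Mask = SRI.getMask();
//     // Each set bit in Mask identifies a class whose registers contain an
//     // RC register in sub-register index SubIdx.
//   }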

//===----------------------------------------------------------------------===//
//                           BitMaskClassIterator
//===----------------------------------------------------------------------===//
/// This class encapsulates the logic to iterate over the bitmasks returned by
/// the various RegClass related APIs.
/// E.g., this class can be used to iterate over the subclasses provided by
/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
class BitMaskClassIterator {
  /// Total number of register classes.
  const unsigned NumRegClasses;
  /// Base index of CurrentChunk.
  /// In other words, the number of bits we read to get to the
  /// beginning of that chunk.
  unsigned Base = 0;
  /// Adjusted base index within CurrentChunk.
  /// Base index + how many bits we have read within CurrentChunk.
  unsigned Idx = 0;
  /// Current register class ID.
  unsigned ID = 0;
  /// Mask we are iterating over.
  const uint32_t *Mask;
  /// Current chunk of the Mask we are traversing.
  uint32_t CurrentChunk;

  /// Move ID to the next set bit.
  void moveToNextID() {
    // If the current chunk of memory is empty, move to the next one,
    // while making sure we do not go past the number of register
    // classes.
    while (!CurrentChunk) {
      // Move to the next chunk.
      Base += 32;
      if (Base >= NumRegClasses) {
        ID = NumRegClasses;
        return;
      }
      CurrentChunk = *++Mask;
      Idx = Base;
    }
    // Otherwise look for the first bit set from the right
    // (representation of the class ID is big endian).
    // See getSubClassMask for more details on the representation.
    unsigned Offset = llvm::countr_zero(CurrentChunk);
    // Add the Offset to the adjusted base number of this chunk: Idx.
    // This is the ID of the register class.
    ID = Idx + Offset;

    // Consume the zeros, if any, and the bit we just read
    // so that we are at the right spot for the next call.
    // Do not do Offset + 1 because Offset may be 31 and a shift by 32
    // would be UB. In that case we could have made the chunk equal to 0
    // instead, but that would have introduced an extra if statement.
    moveNBits(Offset);
    moveNBits(1);
  }

  /// Move \p NumBits bits forward in CurrentChunk.
  void moveNBits(unsigned NumBits) {
    assert(NumBits < 32 && "Undefined behavior spotted!");
    // Consume the bits we read for the next call.
    CurrentChunk >>= NumBits;
    // Adjust the base for the chunk.
    Idx += NumBits;
  }

public:
  /// Create a BitMaskClassIterator that visits all the register classes
  /// represented by \p Mask.
  ///
  /// \pre \p Mask != nullptr
  BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
      : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
    // Move to the first ID.
    moveToNextID();
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return getID() != NumRegClasses; }

  /// Returns the current register class ID.
  unsigned getID() const { return ID; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    moveToNextID();
  }
};
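
// A sketch of typical usage, decoding every class ID named by a mask. Mask
// could come from TargetRegisterClass::getSubClassMask() or
// SuperRegClassIterator::getMask(); TRI is a TargetRegisterInfo reference
// assumed to be in scope:
//
//   for (BitMaskClassIterator It(Mask, TRI); It.isValid(); ++It) {
//     const TargetRegisterClass *SubRC = TRI.getRegClass(It.getID());
//     // ... inspect SubRC ...
//   }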

// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor {
  using argument_type = Register;
  unsigned operator()(Register Reg) const {
    return Register::virtReg2Index(Reg);
  }
};
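
// A sketch of typical usage with llvm::IndexedMap (from llvm/ADT/IndexedMap.h),
// keying per-virtual-register data directly on Register values; MRI is a
// MachineRegisterInfo reference assumed to be in scope:
//
//   IndexedMap<unsigned, VirtReg2IndexFunctor> NumUses;
//   NumUses.resize(MRI.getNumVirtRegs());
//   for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I)
//     NumUses[Register::index2VirtReg(I)] = 0;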

/// Prints virtual and physical registers with or without a TRI instance.
///
/// The format is:
///   %noreg          - NoRegister
///   %5              - a virtual register.
///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
///   %eax            - a physical register
///   %physreg17      - a physical register when no TRI instance given.
///
/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
                   unsigned SubIdx = 0,
                   const MachineRegisterInfo *MRI = nullptr);

/// Create Printable object to print register units on a \ref raw_ostream.
///
/// Register units are named after their root registers:
///
///   al      - Single root.
///   fp0~st7 - Dual roots.
///
/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);

/// Create Printable object to print virtual registers and physical
/// registers on a \ref raw_ostream.
Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);

/// Create Printable object to print register classes or register banks
/// on a \ref raw_ostream.
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H