//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code.  This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class BranchProbability;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class IntrinsicInst;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetRegisterInfo;
class Value;

namespace Sched {

  enum Preference {
    None,             // No preference
    Source,           // Follow source order.
    RegPressure,      // Scheduling for lowest register pressure.
    Hybrid,           // Scheduling for both latency and register pressure.
    ILP,              // Scheduling for ILP in low register pressure mode.
    VLIW              // Scheduling for VLIW targets.
  };

} // end namespace Sched

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    LibCall,    // Don't try to expand this to other ops, always use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };
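
  // A minimal sketch of how a backend typically assigns these actions in its
  // TargetLowering constructor, via the setOperationAction/addRegisterClass
  // hooks declared later in this class. "MyTargetLowering" and
  // "MyTarget::GPRRegClass" are hypothetical names, not part of this header:
  //
  // \code
  //   MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
  //       : TargetLowering(TM) {
  //     addRegisterClass(MVT::i32, &MyTarget::GPRRegClass); // i32 is Legal.
  //     setOperationAction(ISD::SDIV,   MVT::i32, Expand);  // No divider.
  //     setOperationAction(ISD::CTPOP,  MVT::i32, LibCall); // Always a libcall.
  //     setOperationAction(ISD::SELECT, MVT::i32, Custom);  // LowerOperation.
  //   }
  // \endcode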

  /// This enum indicates whether types are legal for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same-size integer type,
                         // if an operation is not supported in target HW.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat     // Replace this float with a larger one.
  };
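
  // Illustrative defaults on a typical 32-bit target (exact choices are
  // target-dependent): i64 yields TypeExpandInteger (two i32 halves), i8
  // yields TypePromoteInteger (widened to a legal integer type), v1i32
  // yields TypeScalarizeVector, and f64 on a target with no FP registers
  // yields TypeSoftenFloat (operations carried out on i64 bit patterns).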

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal,  // The target supports selects with a scalar condition
                          // and vector values (ex: cmov).
    VectorMaskSelect      // The target supports vector selects with a vector
                          // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load or AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,    // Don't expand the instruction.
    LLSC,    // Expand the instruction into load-linked/store-conditional; used
             // by ARM/AArch64.
    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
             // greater atomic guarantees than a normal load.
    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsInAlloca : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftError : 1;
    uint16_t Alignment = 0;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
          IsSwiftSelf(false), IsSwiftError(false) {}

    void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
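
  // For example, a DAG combine that widens a SETCC result can pick the
  // extension opcode matching the target's boolean encoding. A minimal
  // sketch, assuming TLI, DAG, DL, VT, WideVT, and Cond are in scope:
  //
  // \code
  //   ISD::NodeType ExtOp =
  //       TargetLoweringBase::getExtendForContent(TLI.getBooleanContents(VT));
  //   SDValue Wide = DAG.getNode(ExtOp, DL, WideVT, Cond);
  // \endcode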

  /// NOTE: The TargetMachine owns TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// EVT is not used in-tree, but is used by out-of-tree targets.
  /// A documentation for this function would be nice...
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const {
    // The default action for one-element vectors is to scalarize.
    if (VT.getVectorNumElements() == 1)
      return TypeScalarizeVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if the target has indicated at least one type should be
  /// bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns the map of slow types for division or remainder with the
  /// corresponding fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if flow control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  virtual BranchProbability getPredictableBranchThreshold() const;

  /// Return true if the following transform is beneficial:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations in
  /// the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT,
                                       EVT BitcastVT) const {
    // Don't do it if we could do an indexed load on the original type, but not
    // on the new one.
    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
      return true;

    MVT LoadMVT = LoadVT.getSimpleVT();

    // Don't bother doing this if it's just going to be promoted again later, as
    // doing so might interfere with other combines.
    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
      return false;

    return true;
  }

  /// Return true if the following transform is beneficial:
  /// (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT);
  }

  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// Allow store merging after legalization in addition to before legalization.
  /// This may catch stores that do not exist earlier (e.g., stores created from
  /// intrinsics).
  virtual bool mergeStoresAfterLegalization() const { return true; }

  /// Return true if it is reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const SelectionDAG &DAG) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return true if it is safe to transform an integer-domain bitwise operation
  /// into the equivalent floating-point operation. This should be set to true
  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
  /// type.
  virtual bool hasBitPreservingFPLogic(EVT VT) const {
    return false;
  }

  /// Return true if it is cheaper to split the store of a merged integer value
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  /// (X & Y) == Y ---> (~X & Y) == 0
  /// (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  /// (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  /// X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// There are two ways to clear extreme bits (either low or high):
  /// Mask:    x &  (-1 << y)  (the instcombine canonical form)
  /// Shifts:  x >> y << y
  /// Return true if the variant with 2 shifts is preferred.
  /// Return false if there is no preference.
  virtual bool preferShiftsToClearExtremeBits(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Should we transform the IR-optimal check for whether the given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }
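
  // Worked example: checking whether truncating an i32 %x to i8
  // (KeptBits == 8) would change its value. The IR-canonical form tests
  //   (add %x, (1 << 7)) u< (1 << 8)
  // while the shift form, with C = 32 - 8 = 24, tests
  //   ((%x << 24) a>> 24) == %x
  // Both are true exactly when %x is in [-128, 127].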

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if the target supports floating point exceptions.
  bool hasFloatingPointExceptions() const {
    return HasFloatingPointExceptions;
  }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const {
    return false;
  }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.  Some CPUs
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds.  For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some CPUs also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type.  For example, on
  /// i386 the rep register class for i8, i16, and i32 is GR32; on x86_64 the
  /// rep register class is GR64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type: either it is already
  /// legal (return 'Legal'), we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to.  For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
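
  // For example, on a target whose widest legal integer type is i64,
  // expanding i256 walks i256 -> i128 -> i64: each TypeExpandInteger step
  // halves the width, so getTypeToExpandTo returns i64.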

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
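
  // A minimal usage sketch (TLI and Context assumed in scope); the results
  // shown follow the v8f32-with-SSE1 case from the comment above:
  //
  // \code
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = TLI.getVectorTypeBreakdown(
  //       Context, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // With SSE1: NumRegs == 2, NumIntermediates == 2, and
  //   // IntermediateVT == RegisterVT == v4f32.
  // \endcode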

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }

  struct IntrinsicInfo {
    unsigned     opc = 0;          // target opcode
    EVT          memVT;            // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    int          offset = 0;       // offset off of ptrVal
    unsigned     size = 0;         // the size of the memory location
                                   // (taken from memVT if zero)
    unsigned     align = 1;        // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
      default: llvm_unreachable("Unexpected FP pseudo-opcode");
      case ISD::STRICT_FADD: EqOpc = ISD::FADD; break;
      case ISD::STRICT_FSUB: EqOpc = ISD::FSUB; break;
      case ISD::STRICT_FMUL: EqOpc = ISD::FMUL; break;
      case ISD::STRICT_FDIV: EqOpc = ISD::FDIV; break;
      case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break;
      case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break;
      case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break;
      case ISD::STRICT_FMA: EqOpc = ISD::FMA; break;
      case ISD::STRICT_FSIN: EqOpc = ISD::FSIN; break;
      case ISD::STRICT_FCOS: EqOpc = ISD::FCOS; break;
      case ISD::STRICT_FEXP: EqOpc = ISD::FEXP; break;
      case ISD::STRICT_FEXP2: EqOpc = ISD::FEXP2; break;
      case ISD::STRICT_FLOG: EqOpc = ISD::FLOG; break;
      case ISD::STRICT_FLOG10: EqOpc = ISD::FLOG10; break;
      case ISD::STRICT_FLOG2: EqOpc = ISD::FLOG2; break;
      case ISD::STRICT_FRINT: EqOpc = ISD::FRINT; break;
      case ISD::STRICT_FNEARBYINT: EqOpc = ISD::FNEARBYINT; break;
    }

    auto Action = getOperationAction(EqOpc, VT);

    // We don't currently handle Custom or Promote for strict FP pseudo-ops.
    // For now, we just expand for those cases.
    if (Action != Legal)
      Action = Expand;

    return Action;
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases and a range of \p Range
  /// values.
  /// FIXME: This function checks the maximum table size and density, but the
  /// minimum size is not checked. It would be nice if the minimum size were
  /// also checked within this function. Currently, the minimum size check is
  /// performed in findJumpTable() in SelectionDAGBuilder and
  /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range) const {
    const bool OptForSize = SI->getParent()->getParent()->optForSize();
    const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
    const unsigned MaxJumpTableSize =
        OptForSize || getMaximumJumpTableSize() == 0
            ? UINT_MAX
            : getMaximumJumpTableSize();
    // Check whether a range of clusters is dense enough for a jump table.
    if (Range <= MaxJumpTableSize &&
        (NumCases * 100 >= Range * MinDensity)) {
      return true;
    }
    return false;
  }
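
  // Worked example: suppose MinDensity is 40 (a typical optimize-for-size
  // density) and a cluster covers Range == 100 values, NumCases == 50 of
  // which are populated. Then 50 * 100 = 5000 >= 100 * 40 = 4000, so a jump
  // table is used; with only 30 populated cases, 3000 < 4000 and it is not.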

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
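
  // Worked example: a switch with NumDests == 2 destinations, NumCmps == 5
  // case-value comparisons, Low == 0 and High == 31. The range spans 32
  // values, which fits in a 64-bit machine word, and
  // (NumDests == 2 && NumCmps >= 5) holds, so bit tests are used.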

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }
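
  // Each LoadExtActions[ValI][MemI] entry packs one 4-bit LegalizeAction per
  // extension type, so the 4 * ExtType shift above selects the nibble for
  // the queried extension. For example, to ask whether a sign-extending i32
  // load from i8 memory is natively supported (TLI assumed in scope):
  //
  // \code
  //   bool OK = TLI.getLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8) ==
  //             TargetLoweringBase::Legal;
  // \endcode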

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation is legal or custom
  /// on this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// Return true if the specified indexed load is legal or custom on this
  /// target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// Return true if the specified indexed store is legal or custom on this
  /// target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }
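
  // The packing: each CondCodeActions[CC] word holds 4-bit actions for eight
  // value types. Worked example for VT.SimpleTy == 13: the action lives in
  // word 13 >> 3 == 1, at nibble 13 & 0x7 == 5, i.e. Shift == 20.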

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// Return the EVT corresponding to this LLVM type.  This is fixed by the LLVM
  /// operations except for the pointer size.  If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }

      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                       VTy->getNumElements());
    }
    return EVT::getEVT(Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area.  This is the actual alignment, not
  /// its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces.  For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type.  For an i140
  /// on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
1172 
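  // Worked example (illustrative): for i140 on a 32-bit target the register
  // type is i32, so the integer path above computes
  //
  //   (140 + 32 - 1) / 32 == 5   // five i32 registers
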
  /// Certain combinations of ABIs, targets and features require that types
  /// be legal for some operations but not for others.
  /// For MIPS, all vector types must be passed through the integer register
  /// set.
  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                            CallingConv::ID CC, EVT VT) const {
    return getRegisterType(Context, VT);
  }

  /// Certain targets require unusual breakdowns of certain types. For MIPS,
  /// this occurs when a vector type is used, as vectors are passed through
  /// the integer register set.
  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                                 CallingConv::ID CC,
                                                 EVT VT) const {
    return getNumRegisters(Context, VT);
  }

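  // Override sketch (illustrative, loosely modeled on the MIPS behavior
  // described above); a hypothetical derived lowering could pass vectors
  // as i32 pieces:
  //
  //   unsigned getNumRegistersForCallingConv(LLVMContext &Ctx,
  //                                          CallingConv::ID CC,
  //                                          EVT VT) const override {
  //     if (VT.isVector())
  //       return (VT.getSizeInBits() + 31) / 32; // one GPR per 32 bits
  //     return TargetLoweringBase::getNumRegistersForCallingConv(Ctx, CC, VT);
  //   }
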
  /// Certain targets have context-sensitive alignment requirements, where one
  /// type has the alignment requirement of another type.
  virtual unsigned getABIAlignmentForCallingConv(Type *ArgTy,
                                                 DataLayout DL) const {
    return DL.getABITypeAlignment(ArgTy);
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  // Return true if it is profitable to reduce the given load node to a smaller
  // type.
  //
  // e.g. (i16 (trunc (i32 (load x)))) -> (i16 load x) should be performed
  virtual bool shouldReduceLoadWidth(SDNode *Load,
                                     ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    return true;
  }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first?  This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }

  /// Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

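  // Configuration sketch (illustrative; targets assign these protected
  // members in their constructors):
  //
  //   MaxStoresPerMemset = 8;        // allow up to 8 stores normally
  //   MaxStoresPerMemsetOptSize = 4; // be stingier when optimizing for size
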
  /// Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations to be glued together
  ///
  /// This function returns the maximum number of store operations permitted
  /// to glue together during lowering of llvm.memcpy. The value is set by
  /// the target at the performance threshold for such a replacement.
  virtual unsigned getMaxGluedStoresPerMemcpy() const {
    return MaxGluedStoresPerMemcpy;
  }

  /// Get maximum # of load operations permitted for memcmp
  ///
  /// This function returns the maximum number of load operations permitted
  /// to replace a call to memcmp. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
  }

  /// For memcmp expansion when the memcmp result is only compared equal or
  /// not-equal to 0, allow up to this number of load pairs per block. As an
  /// example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
  ///   a0 = load2bytes &a[0]
  ///   b0 = load2bytes &b[0]
  ///   a2 = load1byte  &a[2]
  ///   b2 = load1byte  &b[2]
  ///   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
  virtual unsigned getMemcmpEqZeroLoadsPerBlock() const {
    return 1;
  }

  /// Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also
  /// returns whether the unaligned memory access is "fast" in the last
  /// argument by reference. This is used, for example, in situations where an
  /// array copy/move/set is converted to a sequence of store operations. Its
  /// use helps to ensure that such replacements don't generate code that
  /// causes an alignment error (trap) on the target machine.
  virtual bool allowsMisalignedMemoryAccesses(EVT,
                                              unsigned AddrSpace = 0,
                                              unsigned Align = 1,
                                              bool * /*Fast*/ = nullptr) const {
    return false;
  }

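  // Override sketch (illustrative only): a target with fast unaligned 32-bit
  // accesses in the default address space might implement the hook above as
  //
  //   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
  //                                       bool *Fast) const override {
  //     if (VT == MVT::i32 && AS == 0) {
  //       if (Fast)
  //         *Fast = true;
  //       return true;
  //     }
  //     return false;
  //   }
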
  /// Return true if the target supports a memory access of this type for the
  /// given address space and alignment. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          unsigned AddrSpace = 0, unsigned Alignment = 1,
                          bool *Fast = nullptr) const;

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, the destination alignment can satisfy any
  /// constraint. Similarly, if SrcAlign is zero there is no need to check it
  /// against an alignment requirement, probably because the source does not
  /// need to be loaded. If 'IsMemset' is true, that means it's expanding a
  /// memset. If 'ZeroMemset' is true, that means it's a memset of zero.
  /// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
  /// does not need to be loaded.  It returns EVT::Other if the type should be
  /// determined using generic target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsMemset*/,
                                  bool /*ZeroMemset*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return lower limit for number of blocks in a jump table.
  virtual unsigned getMinimumJumpTableEntries() const;

  /// Return lower limit of the density in a jump table.
  unsigned getMinimumJumpTableDensity(bool OptForSize) const;

  /// Return upper limit for number of entries in a jump table.
  /// Zero if no limit.
  unsigned getMaximumJumpTableSize() const;

  virtual bool isJumpTableRelative() const {
    return TM.isPositionIndependent();
  }

  /// If a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  virtual unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    // 0 is guaranteed to be the NoRegister value on all targets
    return 0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  virtual unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    // 0 is guaranteed to be the NoRegister value on all targets
    return 0;
  }

  virtual bool needsFixedCatchObjects() const {
    report_fatal_error("Funclet EH is not implemented for this target");
  }

  /// Returns the target's jmp_buf size in bytes (if never set, the default is
  /// 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// Returns the target's jmp_buf alignment in bytes (if never set, the
  /// default is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// Return the minimum stack alignment of an argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// Return the preferred function alignment.
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// Return the preferred loop alignment.
  virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
    return PrefLoopAlignment;
  }

  /// If the target has a standard location for the stack protector guard,
  /// returns the address of that location. Otherwise, returns nullptr.
  /// DEPRECATED: please override useLoadStackGuardNode and customize
  ///             LOAD_STACK_GUARD, or customize \@llvm.stackguard().
  virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;

  /// Inserts necessary declarations for SSP (stack protection) purpose.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual void insertSSPDeclarations(Module &M) const;

  /// Return the variable that's previously inserted by insertSSPDeclarations,
  /// if any, otherwise return nullptr. Should be used only when
  /// getIRStackGuard returns nullptr.
  virtual Value *getSDagStackGuard(const Module &M) const;

  /// If this function returns true, stack protection checks should XOR the
  /// frame pointer (or whichever pointer is used to address locals) into the
  /// stack guard value before checking it. getIRStackGuard must return nullptr
  /// if this returns true.
  virtual bool useStackGuardXorFP() const { return false; }

  /// If the target has a standard stack protection check function that
  /// performs validation and error handling, returns the function. Otherwise,
  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual Value *getSSPStackGuardCheck(const Module &M) const;

protected:
  Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                            bool UseTLS) const;

public:
  /// Returns the target-specific address of the unsafe stack pointer.
  virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;

  /// Returns the name of the symbol used to emit stack probes or the empty
  /// string if not applicable.
  virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
    return "";
  }

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return false;
  }

  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
  /// are happy to sink it into basic blocks.
  virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return isNoopAddrSpaceCast(SrcAS, DestAS);
  }

  /// Return true if the pointer arguments to CI should be aligned by aligning
  /// the object whose address is being passed. If so then MinSize is set to
  /// the minimum size the object must be to be aligned and PrefAlign is set
  /// to the preferred alignment.
  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      unsigned & /*PrefAlign*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
                                              Type *Ty) const;

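  // Worked example (illustrative): on a target whose widest legal vector
  // type is v2i64, legalizing v8i64 splits the value into four pieces, so
  //
  //   auto LT = TLI.getTypeLegalizationCost(DL, V8I64Ty);
  //   // LT.first == 4 (split cost), LT.second == MVT::v2i64
  //
  // where TLI, DL and V8I64Ty are assumed to be in scope.
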
  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// Returns the maximum atomic operation size (in bits) supported by
  /// the backend. Atomic operations greater than this size (as well
  /// as ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  unsigned getMaxAtomicSizeInBitsSupported() const {
    return MaxAtomicSizeInBitsSupported;
  }

  /// Returns the size of the smallest cmpxchg or ll/sc instruction
  /// the backend supports.  Any smaller operations are widened in
  /// AtomicExpandPass.
  ///
  /// Note that *unlike* operations above the maximum size, atomic ops
  /// are still natively supported below the minimum; they just
  /// require a more complex expansion.
  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }

  /// Whether the target supports unaligned atomic operations.
  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }

  /// Whether AtomicExpandPass should automatically insert fences and reduce
  /// ordering for this atomic. This should be true for most architectures with
  /// weak memory ordering. Defaults to false.
  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
    return false;
  }

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
  ///   if shouldInsertFencesForAtomic returns true.
  ///
  /// Inst is the original atomic instruction, prior to other expansions that
  /// may be performed.
  ///
  /// This function should either return a nullptr, or a pointer to an IR-level
  ///   Instruction*. Even complex fence sequences can be represented by a
  ///   single Instruction* through an intrinsic to be lowered later.
  /// Backends should override this method to produce target-specific intrinsic
  ///   for their fences.
  /// FIXME: Please note that the default implementation here in terms of
  ///   IR-level fences exists for historical/compatibility reasons and is
  ///   *unsound* ! Fences cannot, in general, be used to restore sequential
  ///   consistency. For example, consider the following:
  /// atomic<int> x = y = 0;
  /// int r1, r2, r3, r4;
  /// Thread 0:
  ///   x.store(1);
  /// Thread 1:
  ///   y.store(1);
  /// Thread 2:
  ///   r1 = x.load();
  ///   r2 = y.load();
  /// Thread 3:
  ///   r3 = y.load();
  ///   r4 = x.load();
  ///  r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are
  ///  all seq_cst. But if they are lowered to monotonic accesses, no amount of
  ///  IR-level fences can prevent it.
  /// @{
  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                        AtomicOrdering Ord) const {
    if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }

  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
                                         Instruction *Inst,
                                         AtomicOrdering Ord) const {
    if (isAcquireOrStronger(Ord))
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }
  /// @}

  // Emits code that executes when the comparison result in the ll/sc
  // expansion of a cmpxchg instruction is such that the store-conditional will
  // not execute.  This makes it possible to balance out the load-linked with
  // a dedicated instruction, if desired.
  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}

  /// Returns true if the given (atomic) store should be expanded by the
  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return false;
  }

  /// Returns true if arguments should be sign-extended in lib calls.
  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
    return IsSigned;
  }

  /// Returns how the given (atomic) load should be expanded by the
  /// IR-level AtomicExpand pass.
  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns true if the given atomic cmpxchg should be expanded by the
  /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
  /// (through emitLoadLinked() and emitStoreConditional()).
  virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
    return false;
  }

  /// Returns how the IR-level AtomicExpand pass should expand the given
  /// AtomicRMW, if at all. Default is to never expand.
  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
    return AtomicExpansionKind::None;
  }

  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as fetch_add of 0) can be turned into a fence followed by an
  /// atomic load. This may sound useless, but it makes it possible for the
  /// processor to keep the cacheline shared, dramatically improving
  /// performance. And such idempotent RMWs are useful for implementing some
  /// kinds of locks, see for example (justification + benchmarks):
  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
  /// This method tries doing that transformation, returning the atomic load if
  /// it succeeds, and nullptr otherwise.
  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
  /// another round of expansion.
  virtual LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }

  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
  /// SIGN_EXTEND, or ANY_EXTEND).
  virtual ISD::NodeType getExtendForAtomicOps() const {
    return ISD::ZERO_EXTEND;
  }

  /// @}

  /// Returns true if we should normalize
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
  /// that it saves us from materializing N0 and N1 in an integer register.
  /// Targets that are able to perform and/or on flags should return false here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
      Action != TypeSplitVector;
  }

  /// Return true if a select of constants (select Cond, C1, C2) should be
  /// transformed into simple math ops with the condition value. For example:
  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //
protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type.  See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the version without _.  Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the version without _.  Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// Indicate the minimum number of blocks to generate jump tables.
  void setMinimumJumpTableEntries(unsigned Val);

  /// Indicate the maximum number of entries in jump tables.
  /// Set to zero to generate unlimited jump tables.
  void setMaximumJumpTableSize(unsigned);

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand logic operations on comparison
  /// predicates into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true);

  /// Tells the code generator that this target supports floating point
  /// exceptions and cares about preserving floating point exception behavior.
  void setHasFloatingPointExceptions(bool FPExceptions = true) {
    HasFloatingPointExceptions = FPExceptions;
  }

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }

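  // Typical use in a target constructor (sketch; mirrors the common pattern
  // of guarding a slow 64-bit divider with a 32-bit one):
  //
  //   addBypassSlowDiv(64, 32); // try an i32 divide before the i64 one
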
  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    RegClassForVT[VT.SimpleTy] = RC;
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties(const TargetRegisterInfo *TRI);

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it. Note that VT may refer to either
  /// the type of a result or that of an operand of Op.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
  }

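  // Constructor sketch (illustrative; the class name, opcodes and types are
  // examples, not requirements of any particular target):
  //
  //   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass); // hypothetical
  //   computeRegisterProperties(Subtarget.getRegisterInfo());
  //   setOperationAction(ISD::SDIV,   MVT::i32, Expand); // no divide unit
  //   setOperationAction(ISD::SELECT, MVT::i32, Custom); // lowered by hand
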
  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
           MemVT.isValid() && "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    unsigned Shift = 4 * ExtType;
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
  }

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
  }

  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) << 4;
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    /// The lower 3 bits of SimpleTy select the Nth 4-bit slot within a 32-bit
    /// value, and the remaining upper bits index into the second dimension of
    /// the array to select which 32-bit value to use.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
    CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
  }

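  // Worked example (illustrative): for VT.SimpleTy == 13,
  // Shift == 4 * (13 & 0x7) == 20, so the action occupies bits [23:20]
  // of CondCodeActions[CC][13 >> 3], i.e. of word index 1.
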
  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works. If that
  /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }

  /// Convenience method to set an operation to Promote and specify the type
  /// in a single call.
  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    setOperationAction(Opc, OrigVT, Promote);
    AddPromotedToType(Opc, OrigVT, DestVT);
  }

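  // For instance (sketch; a pattern some vector targets use to run byte
  // vector logic ops on wider lanes):
  //
  //   setOperationPromotedToType(ISD::AND, MVT::v16i8, MVT::v2i64);
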
  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }

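  // Registration sketch (illustrative): a target that wants a crack at shift
  // nodes would call, in its constructor,
  //
  //   setTargetDAGCombine(ISD::SHL);
  //   setTargetDAGCombine(ISD::SRL);
  //
  // and then handle those opcodes in its PerformDAGCombine override.
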
  /// Set the target's required jmp_buf buffer size (in bytes); default is 200
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
  /// 0
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// Set the target's minimum function alignment (in log2(bytes))
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }

  /// Set the target's preferred function alignment.  This should be set if
  /// there is a performance benefit to higher-than-minimum alignment (in
  /// log2(bytes))
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// Set the target's preferred loop alignment. The default alignment is
  /// zero, which means the target does not care about loop alignment.  The
  /// alignment is specified in log2(bytes). The target may also override
  /// getPrefLoopAlignment to provide per-loop values.
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// Set the minimum stack alignment of an argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }

  /// Set the maximum atomic operation size supported by the
  /// backend. Atomic operations greater than this size (as well as
  /// ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
    MaxAtomicSizeInBitsSupported = SizeInBits;
  }

  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
    MinCmpXchgSizeInBits = SizeInBits;
  }

  /// Sets whether unaligned atomic operations are supported.
  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
    SupportsUnalignedAtomics = UnalignedSupported;
  }

public:
  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
  /// instructions reading the address. This allows as much computation as
  /// possible to be done in the address mode for that operand. This hook lets
  /// targets also pass back when this should be done on intrinsics which
  /// load/store.
  virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value*> &/*Ops*/,
                                    Type *&/*AccessTy*/) const {
    return false;
  }

  /// This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null, there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
  /// no scale.
  struct AddrMode {
    GlobalValue *BaseGV = nullptr;
    int64_t      BaseOffs = 0;
    bool         HasBaseReg = false;
    int64_t      Scale = 0;
    AddrMode() = default;
  };

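  // Example (illustrative): the x86-style address GV + 24 + Base + 4*Index
  // would be described as
  //
  //   AddrMode AM;
  //   AM.BaseGV = GV;      // some GlobalValue*
  //   AM.BaseOffs = 24;
  //   AM.HasBaseReg = true;
  //   AM.Scale = 4;        // one register scaled by 4
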
  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.  TODO: Handle
  /// pre/postinc as well.
  ///
  /// If the address space cannot be determined, it will be -1.
  ///
  /// TODO: Remove default argument
  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                                     Type *Ty, unsigned AddrSpace,
                                     Instruction *I = nullptr) const;

  /// Return the cost of the scaling factor used in the addressing mode
  /// represented by AM for this target, for a load/store of the specified type.
  ///
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  /// TODO: Remove default argument
  virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
                                   Type *Ty, unsigned AS = 0) const {
    // Default: assume that any scaling factor used in a legal AM is free.
    if (isLegalAddressingMode(DL, AM, Ty, AS))
      return 0;
    return -1;
  }

  /// Return true if the specified immediate is a legal icmp immediate, that
  /// is, the target has icmp instructions which can compare a register against
  /// the immediate without having to materialize the immediate into a
  /// register.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is a legal add immediate, that is,
  /// the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// Return true if it's significantly cheaper to shift a vector by a uniform
  /// scalar than by an amount which will vary across each lane. On x86, for
  /// example, there is a "psllw" instruction for the former case, but no simple
  /// instruction for a general "a << b" operation on vectors.
  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
    return false;
  }

  /// Returns true if the opcode is a commutative binary operation.
  virtual bool isCommutativeBinOp(unsigned Opcode) const {
    // FIXME: This should get its info from the td file.
    switch (Opcode) {
    case ISD::ADD:
    case ISD::SMIN:
    case ISD::SMAX:
    case ISD::UMIN:
    case ISD::UMAX:
    case ISD::MUL:
    case ISD::MULHU:
    case ISD::MULHS:
    case ISD::SMUL_LOHI:
    case ISD::UMUL_LOHI:
    case ISD::FADD:
    case ISD::FMUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SADDO:
    case ISD::UADDO:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::FMINNUM:
    case ISD::FMAXNUM:
    case ISD::FMINNAN:
    case ISD::FMAXNAN:
      return true;
    default: return false;
    }
  }

  /// Return true if it's free to truncate a value of type FromTy to type
  /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  /// Targets must return false when FromTy <= ToTy.
  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
  /// whether a call is in tail position. Typically this means that both results
  /// would be assigned to the same register or stack slot, but it could mean
  /// the target performs adequate checks of its own before proceeding with the
  /// tail call.  Targets must return false when FromTy <= ToTy.
  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
    return false;
  }

  virtual bool isProfitableToHoist(Instruction *I) const { return true; }

  /// Return true if the extension represented by \p I is free.
  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
  /// this method can use the context provided by \p I to decide
  /// whether or not \p I is free.
  /// This method extends the behavior of the is[Z|FP]ExtFree family.
  /// In other words, if is[Z|FP]ExtFree returns true, then this method
  /// returns true as well. The converse is not true.
  /// The target can perform the adequate checks by overriding isExtFreeImpl.
  /// \pre \p I must be a sign, zero, or fp extension.
  bool isExtFree(const Instruction *I) const {
    switch (I->getOpcode()) {
    case Instruction::FPExt:
      if (isFPExtFree(EVT::getEVT(I->getType()),
                      EVT::getEVT(I->getOperand(0)->getType())))
        return true;
      break;
    case Instruction::ZExt:
      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
        return true;
      break;
    case Instruction::SExt:
      break;
    default:
      llvm_unreachable("Instruction is not an extension");
    }
    return isExtFreeImpl(I);
  }

  /// Return true if \p Load and \p Ext can form an ExtLoad.
  /// For example, in AArch64
  ///   %L = load i8, i8* %ptr
  ///   %E = zext i8 %L to i32
  /// can be lowered into one load instruction
  ///   ldrb w0, [x0]
  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
                 const DataLayout &DL) const {
    EVT VT = getValueType(DL, Ext->getType());
    EVT LoadVT = getValueType(DL, Load->getType());

    // If the load has other users and the truncate is not free, the ext
    // probably isn't free.
    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
        !isTruncateFree(Ext->getType(), Load->getType()))
      return false;

    // Check whether the target supports casts folded into loads.
    unsigned LType;
    if (isa<ZExtInst>(Ext))
      LType = ISD::ZEXTLOAD;
    else {
      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
      LType = ISD::SEXTLOAD;
    }

    return isLoadExtLegal(LType, VT, LoadVT);
  }

  /// Return true if any actual instruction that defines a value of type FromTy
  /// implicitly zero-extends the value to ToTy in the result register.
  ///
  /// The function should return true when it is likely that the truncate can
  /// be freely folded with an instruction defining a value of FromTy. If
  /// the defining instruction is unknown (because you're looking at a
  /// function argument, PHI, etc.) then the target may require an
  /// explicit truncate, which is not necessarily free, but this function
  /// does not deal with those cases.
  /// Targets must return false when FromTy >= ToTy.
  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
    return false;
  }

  /// Return true if the target supplies and combines to a paired load
  /// two loaded values of type LoadedType next to each other in memory.
  /// RequiredAlignment gives the minimal alignment constraints that must be met
  /// to be able to select this paired load.
  ///
  /// This information is *not* used to generate actual paired loads, but it is
  /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
  /// a = load i64* addr
  /// b = trunc i64 a to i32
  /// c = lshr i64 a, 32
  /// d = trunc i64 c to i32
  /// will be optimized into:
  /// b = load i32* addr1
  /// d = load i32* addr2
  /// Where addr1 = addr2 +/- sizeof(i32).
  ///
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }

  /// Return true if the target has a vector blend instruction.
  virtual bool hasVectorBlend() const { return false; }

  /// Get the maximum supported factor for interleaved memory accesses.
  /// Default to be the minimum interleave factor: 2.
  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }

  /// Lower an interleaved load to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p LI is the vector load instruction.
  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
  /// \p Indices is the corresponding indices for each shufflevector.
  /// \p Factor is the interleave factor.
  virtual bool lowerInterleavedLoad(LoadInst *LI,
                                    ArrayRef<ShuffleVectorInst *> Shuffles,
                                    ArrayRef<unsigned> Indices,
                                    unsigned Factor) const {
    return false;
  }

  /// Lower an interleaved store to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p SI is the vector store instruction.
  /// \p SVI is the shufflevector to RE-interleave the stored vector.
  /// \p Factor is the interleave factor.
  virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                                     unsigned Factor) const {
    return false;
  }

  /// Return true if zero-extending the specific node Val to type VT2 is free
  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
  /// because it's folded such as X86 zero-extending loads).
  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
    return isZExtFree(Val.getValueType(), VT2);
  }

  /// Return true if an fpext operation is free (for instance, because
  /// single-precision floating-point numbers are implicitly extended to
  /// double-precision).
  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
           "invalid fpext types");
    return false;
  }

  /// Return true if an fpext operation input to an \p Opcode operation is free
  /// (for instance, because half-precision floating-point numbers are
  /// implicitly extended to single-precision) for an FMA instruction.
  virtual bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const {
    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
           "invalid fpext types");
    return isFPExtFree(DestVT, SrcVT);
  }

  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
  /// extend node) is profitable.
  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }

  /// Return true if an fneg operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

2250   /// Return true if an fabs operation is free to the point where it is never
2251   /// worthwhile to replace it with a bitwise operation.
isFAbsFree(EVT VT)2252   virtual bool isFAbsFree(EVT VT) const {
2253     assert(VT.isFloatingPoint());
2254     return false;
2255   }
2256 
2257   /// Return true if an FMA operation is faster than a pair of fmul and fadd
2258   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2259   /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2260   ///
2261   /// NOTE: This may be called before legalization on types for which FMAs are
2262   /// not legal, but should return true if those types will eventually legalize
2263   /// to types that support FMAs. After legalization, it will only be called on
2264   /// types that support FMAs (via Legal or Custom actions)
isFMAFasterThanFMulAndFAdd(EVT)2265   virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
2266     return false;
2267   }
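
  // A minimal sketch of an override for a hypothetical target
  // ("MyTargetLowering" is not part of this header) that has fused
  // multiply-add only for f32 and f64:
  //
  //   bool MyTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  //     if (!VT.isSimple())
  //       return false;
  //     switch (VT.getSimpleVT().SimpleTy) {
  //     case MVT::f32:
  //     case MVT::f64:
  //       return true;  // Assumed: FMA beats fmul + fadd at these types.
  //     default:
  //       return false;
  //     }
  //   }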
2268 
2269   /// Return true if it's profitable to narrow operations of type VT1 to
2270   /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
2271   /// i32 to i16.
2272   virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
2273     return false;
2274   }
2275 
2276   /// Return true if it is beneficial to convert a load of a constant to
2277   /// just the constant itself.
2278   /// On some targets it might be more efficient to use a combination of
2279   /// arithmetic instructions to materialize the constant instead of loading it
2280   /// from a constant pool.
2281   virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
2282                                                  Type *Ty) const {
2283     return false;
2284   }
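
  // A sketch of an override, assuming a hypothetical target on which any
  // immediate fitting in 32 bits is cheaper to rematerialize than to load:
  //
  //   bool MyTargetLowering::shouldConvertConstantLoadToIntImm(
  //       const APInt &Imm, Type *Ty) const {
  //     // Assumed cost model: one move instruction beats a constant-pool load.
  //     return Imm.getMinSignedBits() <= 32;
  //   }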
2285 
2286   /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2287   /// from this source type with this index. This is needed because
2288   /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
2289   /// the first element, and only the target knows which lowering is cheap.
2290   virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2291                                        unsigned Index) const {
2292     return false;
2293   }
2294 
2295   // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
2296   // even if the vector itself has multiple uses.
2297   virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
2298     return false;
2299   }
2300 
2301   // Return true if CodeGenPrepare should consider splitting large offset of a
2302   // GEP to make the GEP fit into the addressing mode and can be sunk into the
2303   // same blocks of its users.
2304   virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
2305 
2306   //===--------------------------------------------------------------------===//
2307   // Runtime Library hooks
2308   //
2309 
2310   /// Rename the default libcall routine name for the specified libcall.
2311   void setLibcallName(RTLIB::Libcall Call, const char *Name) {
2312     LibcallRoutineNames[Call] = Name;
2313   }
2314 
2315   /// Get the libcall routine name for the specified libcall.
2316   const char *getLibcallName(RTLIB::Libcall Call) const {
2317     return LibcallRoutineNames[Call];
2318   }
2319 
2320   /// Override the default CondCode to be used to test the result of the
2321   /// comparison libcall against zero.
2322   void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
2323     CmpLibcallCCs[Call] = CC;
2324   }
2325 
2326   /// Get the CondCode that's to be used to test the result of the comparison
2327   /// libcall against zero.
2328   ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
2329     return CmpLibcallCCs[Call];
2330   }
2331 
2332   /// Set the CallingConv that should be used for the specified libcall.
2333   void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
2334     LibcallCallingConvs[Call] = CC;
2335   }
2336 
2337   /// Get the CallingConv that should be used for the specified libcall.
2338   CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
2339     return LibcallCallingConvs[Call];
2340   }
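
  // Illustrative use from a hypothetical target's TargetLowering constructor
  // (the routine name "__mytarget_sdiv32" is invented for this sketch):
  //
  //   setLibcallName(RTLIB::SDIV_I32, "__mytarget_sdiv32");
  //   setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::Fast);
  //   // Test the f32 unordered-not-equal comparison libcall result with SETNE.
  //   setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);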
2341 
2342   /// Execute target specific actions to finalize target lowering.
2343   /// This is used to set extra flags in MachineFrameInformation and to freeze
2344   /// the set of reserved registers.
2345   /// The default implementation just freezes the set of reserved registers.
2346   virtual void finalizeLowering(MachineFunction &MF) const;
2347 
2348 private:
2349   const TargetMachine &TM;
2350 
2351   /// Tells the code generator that the target has multiple (allocatable)
2352   /// condition registers that can be used to store the results of comparisons
2353   /// for use by selects and conditional branches. With multiple condition
2354   /// registers, the code generator will not aggressively sink comparisons into
2355   /// the blocks of their users.
2356   bool HasMultipleConditionRegisters;
2357 
2358   /// Tells the code generator that the target has BitExtract instructions.
2359   /// The code generator will aggressively sink "shift"s into the blocks of
2360   /// their users if the users will generate "and" instructions which can be
2361   /// combined with "shift" to BitExtract instructions.
2362   bool HasExtractBitsInsn;
2363 
2364   /// Tells the code generator to bypass slow divide or remainder
2365   /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
2366   /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
2367   /// div/rem when the operands are positive and less than 256.
2368   DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
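
  // Targets populate this map through the protected addBypassSlowDiv()
  // setter. For example, an x86 subtarget with a slow 32-bit divider requests:
  //
  //   addBypassSlowDiv(32, 8);  // Try 8-bit div/rem when the operands allow it.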
2369 
2370   /// Tells the code generator that it shouldn't generate extra flow control
2371   /// instructions and should attempt to combine flow control instructions via
2372   /// predication.
2373   bool JumpIsExpensive;
2374 
2375   /// Whether the target supports or cares about preserving floating point
2376   /// exception behavior.
2377   bool HasFloatingPointExceptions;
2378 
2379   /// This target prefers to use _setjmp to implement llvm.setjmp.
2380   ///
2381   /// Defaults to false.
2382   bool UseUnderscoreSetJmp;
2383 
2384   /// This target prefers to use _longjmp to implement llvm.longjmp.
2385   ///
2386   /// Defaults to false.
2387   bool UseUnderscoreLongJmp;
2388 
2389   /// Information about the contents of the high-bits in boolean values held in
2390   /// a type wider than i1. See getBooleanContents.
2391   BooleanContent BooleanContents;
2392 
2393   /// Information about the contents of the high-bits in boolean values held in
2394   /// a type wider than i1. See getBooleanContents.
2395   BooleanContent BooleanFloatContents;
2396 
2397   /// Information about the contents of the high-bits in boolean vector values
2398   /// when the element type is wider than i1. See getBooleanContents.
2399   BooleanContent BooleanVectorContents;
2400 
2401   /// The target scheduling preference: shortest possible total cycles or lowest
2402   /// register usage.
2403   Sched::Preference SchedPreferenceInfo;
2404 
2405   /// The size, in bytes, of the target's jmp_buf buffers.
2406   unsigned JumpBufSize;
2407 
2408   /// The alignment, in bytes, of the target's jmp_buf buffers.
2409   unsigned JumpBufAlignment;
2410 
2411   /// The minimum alignment that any argument on the stack needs to have.
2412   unsigned MinStackArgumentAlignment;
2413 
2414   /// The minimum function alignment (used when optimizing for size, and to
2415   /// prevent explicitly provided alignment from leading to incorrect code).
2416   unsigned MinFunctionAlignment;
2417 
2418   /// The preferred function alignment (used when alignment unspecified and
2419   /// optimizing for speed).
2420   unsigned PrefFunctionAlignment;
2421 
2422   /// The preferred loop alignment.
2423   unsigned PrefLoopAlignment;
2424 
2425   /// Size in bits of the maximum atomics size the backend supports.
2426   /// Accesses larger than this will be expanded by AtomicExpandPass.
2427   unsigned MaxAtomicSizeInBitsSupported;
2428 
2429   /// Size in bits of the minimum cmpxchg or ll/sc operation the
2430   /// backend supports.
2431   unsigned MinCmpXchgSizeInBits;
2432 
2433   /// This indicates if the target supports unaligned atomic operations.
2434   bool SupportsUnalignedAtomics;
2435 
2436   /// If set to a physical register, this specifies the register that
2437   /// llvm.stacksave/llvm.stackrestore should save and restore.
2438   unsigned StackPointerRegisterToSaveRestore;
2439 
2440   /// This indicates the default register class to use for each ValueType the
2441   /// target supports natively.
2442   const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2443   unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
2444   MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2445 
2446   /// This indicates the "representative" register class to use for each
2447   /// ValueType the target supports natively. This information is used by the
2448   /// scheduler to track register pressure. By default, the representative
2449   /// register class is the largest legal super-reg register class of the
2450   /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2451   /// representative class would be GR32.
2452   const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
2453 
2454   /// This indicates the "cost" of the "representative" register class for each
2455   /// ValueType. The cost is used by the scheduler to approximate register
2456   /// pressure.
2457   uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
2458 
2459   /// For any value types we are promoting or expanding, this contains the value
2460   /// type that we are changing to.  For Expanded types, this contains one step
2461   /// of the expansion (e.g. i64 -> i32), even if there are multiple steps required
2462   /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
2463   /// the same type (e.g. i32 -> i32).
2464   MVT TransformToType[MVT::LAST_VALUETYPE];
2465 
2466   /// For each operation and each value type, keep a LegalizeAction that
2467   /// indicates how instruction selection should deal with the operation.  Most
2468   /// operations are Legal (aka, supported natively by the target), but
2469   /// operations that are not should be described.  Note that operations on
2470   /// non-legal value types are not described here.
2471   LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
2472 
2473   /// For each load extension type and each value type, keep a LegalizeAction
2474   /// that indicates how instruction selection should deal with a load of a
2475   /// specific value type and extension type. Uses 4-bits to store the action
2476   /// for each of the 4 load ext types.
2477   uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2478 
2479   /// For each value type pair keep a LegalizeAction that indicates whether a
2480   /// truncating store of a specific value type and truncating type is legal.
2481   LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2482 
2483   /// For each indexed mode and each value type, keep a pair of LegalizeAction
2484   /// that indicates how instruction selection should deal with the load /
2485   /// store.
2486   ///
2487   /// The first dimension is the value_type for the reference. The second
2488   /// dimension represents the various modes for load / store.
2489   uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
2490 
2491   /// For each condition code (ISD::CondCode) keep a LegalizeAction that
2492   /// indicates how instruction selection should deal with the condition code.
2493   ///
2494   /// Because each CC action takes up 4 bits, we need to have the array size be
2495   /// large enough to fit all of the value types. This can be done by rounding
2496   /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
2497   uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
2498 
2499 protected:
2500   ValueTypeActionImpl ValueTypeActions;
2501 
2502 private:
2503   LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
2504 
2505   /// Targets can specify ISD nodes that they would like PerformDAGCombine
2506   /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
2507   /// array.
2508   unsigned char
2509   TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2510 
2511   /// For operations that must be promoted to a specific type, this holds the
2512   /// destination type.  This map should be sparse, so don't hold it as an
2513   /// array.
2514   ///
2515   /// Targets add entries to this map with AddPromotedToType(..), clients access
2516   /// this with getTypeToPromoteTo(..).
2517   std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2518     PromoteToType;
2519 
2520   /// Stores the name of each libcall.
2521   const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
2522 
2523   /// The ISD::CondCode that should be used to test the result of each of the
2524   /// comparison libcall against zero.
2525   ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2526 
2527   /// Stores the CallingConv that should be used for each libcall.
2528   CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2529 
2530   /// Set default libcall names and calling conventions.
2531   void InitLibcalls(const Triple &TT);
2532 
2533 protected:
2534   /// Return true if the extension represented by \p I is free.
2535   /// \pre \p I is a sign, zero, or fp extension and
2536   ///      is[Z|FP]ExtFree of the related types is not true.
2537   virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
2538 
2539   /// Depth that GatherAllAliases should continue looking for chain
2540   /// dependencies when trying to find a more preferable chain. As an
2541   /// approximation, this should be more than the number of consecutive stores
2542   /// expected to be merged.
2543   unsigned GatherAllAliasesMaxDepth;
2544 
2545   /// Specify maximum number of store instructions per memset call.
2546   ///
2547   /// When lowering \@llvm.memset this field specifies the maximum number of
2548   /// store operations that may be substituted for the call to memset. Targets
2549   /// must set this value based on the cost threshold for that target. Targets
2550   /// should assume that the memset will be done using as many of the largest
2551   /// store operations first, followed by smaller ones, if necessary, per
2552   /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2553   /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2554   /// store.  This only applies to setting a constant array of a constant size.
2555   unsigned MaxStoresPerMemset;
2556 
2557   /// Maximum number of store operations that may be substituted for the call
2558   /// to memset, used for functions with OptSize attribute.
2559   unsigned MaxStoresPerMemsetOptSize;
2560 
2561   /// Specify maximum number of store instructions per memcpy call.
2562   ///
2563   /// When lowering \@llvm.memcpy this field specifies the maximum number of
2564   /// store operations that may be substituted for a call to memcpy. Targets
2565   /// must set this value based on the cost threshold for that target. Targets
2566   /// should assume that the memcpy will be done using as many of the largest
2567   /// store operations first, followed by smaller ones, if necessary, per
2568   /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
2569   /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
2570   /// and one 1-byte store. This only applies to copying a constant array of
2571   /// constant size.
2572   unsigned MaxStoresPerMemcpy;
2573 
2574 
2575   /// \brief Specify max number of store instructions to glue in inlined memcpy.
2576   ///
2577   /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
2578   /// of store instructions to keep together. This helps in pairing and
2579   /// vectorization later on.
2580   unsigned MaxGluedStoresPerMemcpy = 0;
2581 
2582   /// Maximum number of store operations that may be substituted for a call to
2583   /// memcpy, used for functions with OptSize attribute.
2584   unsigned MaxStoresPerMemcpyOptSize;
2585   unsigned MaxLoadsPerMemcmp;
2586   unsigned MaxLoadsPerMemcmpOptSize;
2587 
2588   /// Specify maximum number of store instructions per memmove call.
2589   ///
2590   /// When lowering \@llvm.memmove this field specifies the maximum number of
2591   /// store instructions that may be substituted for a call to memmove. Targets
2592   /// must set this value based on the cost threshold for that target. Targets
2593   /// should assume that the memmove will be done using as many of the largest
2594   /// store operations first, followed by smaller ones, if necessary, per
2595   /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2596   /// with 8-bit alignment would result in nine 1-byte stores.  This only
2597   /// applies to copying a constant array of constant size.
2598   unsigned MaxStoresPerMemmove;
2599 
2600   /// Maximum number of store instructions that may be substituted for a call to
2601   /// memmove, used for functions with OptSize attribute.
2602   unsigned MaxStoresPerMemmoveOptSize;
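
  // A hedged example of a target constructor tuning this family of thresholds
  // (the values are illustrative only, not a recommendation):
  //
  //   MaxStoresPerMemset  = 16; MaxStoresPerMemsetOptSize  = 4;
  //   MaxStoresPerMemcpy  = 8;  MaxStoresPerMemcpyOptSize  = 4;
  //   MaxStoresPerMemmove = 8;  MaxStoresPerMemmoveOptSize = 4;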
2603 
2604   /// Tells the code generator that select is more expensive than a branch if
2605   /// the branch is usually predicted right.
2606   bool PredictableSelectIsExpensive;
2607 
2608   /// \see enableExtLdPromotion.
2609   bool EnableExtLdPromotion;
2610 
2611   /// Return true if the value types that can be represented by the specified
2612   /// register class are all legal.
2613   bool isLegalRC(const TargetRegisterInfo &TRI,
2614                  const TargetRegisterClass &RC) const;
2615 
2616   /// Replace/modify any TargetFrameIndex operands with a target-dependent
2617   /// sequence of memory operands that is recognized by PrologEpilogInserter.
2618   MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
2619                                     MachineBasicBlock *MBB) const;
2620 
2621   /// Replace/modify the XRay custom event operands with target-dependent
2622   /// details.
2623   MachineBasicBlock *emitXRayCustomEvent(MachineInstr &MI,
2624                                          MachineBasicBlock *MBB) const;
2625 
2626   /// Replace/modify the XRay typed event operands with target-dependent
2627   /// details.
2628   MachineBasicBlock *emitXRayTypedEvent(MachineInstr &MI,
2629                                         MachineBasicBlock *MBB) const;
2630 };
2631 
2632 /// This class defines information used to lower LLVM code to legal SelectionDAG
2633 /// operators that the target instruction selector can accept natively.
2634 ///
2635 /// This class also defines callbacks that targets must implement to lower
2636 /// target-specific constructs to SelectionDAG operators.
2637 class TargetLowering : public TargetLoweringBase {
2638 public:
2639   struct DAGCombinerInfo;
2640 
2641   TargetLowering(const TargetLowering &) = delete;
2642   TargetLowering &operator=(const TargetLowering &) = delete;
2643 
2644   /// NOTE: The TargetMachine owns TLOF.
2645   explicit TargetLowering(const TargetMachine &TM);
2646 
2647   bool isPositionIndependent() const;
2648 
2649   virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
2650                                           FunctionLoweringInfo *FLI,
2651                                           DivergenceAnalysis *DA) const {
2652     return false;
2653   }
2654 
2655   virtual bool isSDNodeAlwaysUniform(const SDNode *N) const {
2656     return false;
2657   }
2658 
2659   /// Returns true, and sets the base pointer, the offset, and the addressing
2660   /// mode by reference, if the node's address can be legally represented as a
2661   /// pre-indexed load / store address.
2662   virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
2663                                          SDValue &/*Offset*/,
2664                                          ISD::MemIndexedMode &/*AM*/,
2665                                          SelectionDAG &/*DAG*/) const {
2666     return false;
2667   }
2668 
2669   /// Returns true, and sets the base pointer, the offset, and the addressing
2670   /// mode by reference, if this node can be combined with a load / store to
2671   /// form a post-indexed load / store.
2672   virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
2673                                           SDValue &/*Base*/,
2674                                           SDValue &/*Offset*/,
2675                                           ISD::MemIndexedMode &/*AM*/,
2676                                           SelectionDAG &/*DAG*/) const {
2677     return false;
2678   }
2679 
2680   /// Return the entry encoding for a jump table in the current function.  The
2681   /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
2682   virtual unsigned getJumpTableEncoding() const;
2683 
2684   virtual const MCExpr *
2685   LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
2686                             const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
2687                             MCContext &/*Ctx*/) const {
2688     llvm_unreachable("Need to implement this hook if target has custom JTIs");
2689   }
2690 
2691   /// Returns relocation base for the given PIC jumptable.
2692   virtual SDValue getPICJumpTableRelocBase(SDValue Table,
2693                                            SelectionDAG &DAG) const;
2694 
2695   /// This returns the relocation base for the given PIC jumptable, the same as
2696   /// getPICJumpTableRelocBase, but as an MCExpr.
2697   virtual const MCExpr *
2698   getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2699                                unsigned JTI, MCContext &Ctx) const;
2700 
2701   /// Return true if folding a constant offset with the given GlobalAddress is
2702   /// legal.  It is frequently not legal in PIC relocation models.
2703   virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
2704 
2705   bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
2706                             SDValue &Chain) const;
2707 
2708   void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
2709                            SDValue &NewRHS, ISD::CondCode &CCCode,
2710                            const SDLoc &DL) const;
2711 
2712   /// Returns a pair of (return value, chain).
2713   /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
2714   std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
2715                                           EVT RetVT, ArrayRef<SDValue> Ops,
2716                                           bool isSigned, const SDLoc &dl,
2717                                           bool doesNotReturn = false,
2718                                           bool isReturnValueUsed = true) const;
2719 
2720   /// Check whether parameters to a call that are passed in callee saved
2721   /// registers are the same as from the calling function.  This needs to be
2722   /// checked for tail call eligibility.
2723   bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
2724       const uint32_t *CallerPreservedMask,
2725       const SmallVectorImpl<CCValAssign> &ArgLocs,
2726       const SmallVectorImpl<SDValue> &OutVals) const;
2727 
2728   //===--------------------------------------------------------------------===//
2729   // TargetLowering Optimization Methods
2730   //
2731 
2732   /// A convenience struct that encapsulates a DAG, and two SDValues for
2733   /// returning information from TargetLowering to its clients that want to
2734   /// combine.
2735   struct TargetLoweringOpt {
2736     SelectionDAG &DAG;
2737     bool LegalTys;
2738     bool LegalOps;
2739     SDValue Old;
2740     SDValue New;
2741 
2742     explicit TargetLoweringOpt(SelectionDAG &InDAG,
2743                                bool LT, bool LO) :
2744       DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
2745 
2746     bool LegalTypes() const { return LegalTys; }
2747     bool LegalOperations() const { return LegalOps; }
2748 
2749     bool CombineTo(SDValue O, SDValue N) {
2750       Old = O;
2751       New = N;
2752       return true;
2753     }
2754   };
2755 
2756   /// Check to see if the specified operand of the specified instruction is a
2757   /// constant integer.  If so, check to see if there are any bits set in the
2758   /// constant that are not demanded.  If so, shrink the constant and return
2759   /// true.
2760   bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
2761                               TargetLoweringOpt &TLO) const;
2762 
2763   // Target hook to do target-specific constant optimization, which is called by
2764   // ShrinkDemandedConstant. This function should return true if the target
2765   // doesn't want ShrinkDemandedConstant to further optimize the constant.
2766   virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
2767                                             TargetLoweringOpt &TLO) const {
2768     return false;
2769   }
2770 
2771   /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.  This
2772   /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
2773   /// generalized for targets with other types of implicit widening casts.
2774   bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
2775                         TargetLoweringOpt &TLO) const;
2776 
2777   /// Helper for SimplifyDemandedBits that can simplify an operation with
2778   /// multiple uses.  This function simplifies operand \p OpIdx of \p User and
2779   /// then updates \p User with the simplified version. No other uses of
2780   /// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
2781   /// function behaves exactly like function SimplifyDemandedBits declared
2782   /// below except that it also updates the DAG by calling
2783   /// DCI.CommitTargetLoweringOpt.
2784   bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
2785                             DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;
2786 
2787   /// Look at Op.  At this point, we know that only the DemandedMask bits of the
2788   /// result of Op are ever used downstream.  If we can use this information to
2789   /// simplify Op, create a new simplified DAG node and return true, returning
2790   /// the original and new nodes in Old and New.  Otherwise, analyze the
2791   /// expression and return a mask of KnownOne and KnownZero bits for the
2792   /// expression (used to simplify the caller).  The KnownZero/One bits may only
2793   /// be accurate for those bits in the DemandedMask.
2794   /// \p AssumeSingleUse When this parameter is true, this function will
2795   ///    attempt to simplify \p Op even if there are multiple uses.
2796   ///    Callers are responsible for correctly updating the DAG based on the
2797   ///    results of this function, because simply replacing TLO.Old
2798   ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
2799   ///    has multiple uses.
2800   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2801                             KnownBits &Known,
2802                             TargetLoweringOpt &TLO,
2803                             unsigned Depth = 0,
2804                             bool AssumeSingleUse = false) const;
2805 
2806   /// Helper wrapper around SimplifyDemandedBits
2807   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2808                             DAGCombinerInfo &DCI) const;
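
  // Typical use inside a target's PerformDAGCombine override (sketch;
  // MyTargetISD::FOO is a made-up node that only reads the low 16 bits of
  // its first operand):
  //
  //   SDValue Op0 = N->getOperand(0);
  //   APInt DemandedMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 16);
  //   if (SimplifyDemandedBits(Op0, DemandedMask, DCI))
  //     return SDValue(N, 0);  // Operand was simplified; N itself is reused.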
2809 
2810   /// Look at Vector Op. At this point, we know that only the DemandedElts
2811   /// elements of the result of Op are ever used downstream.  If we can use
2812   /// this information to simplify Op, create a new simplified DAG node and
2813   /// return true, storing the original and new nodes in TLO.
2814   /// Otherwise, analyze the expression and return a mask of KnownUndef and
2815   /// KnownZero elements for the expression (used to simplify the caller).
2816   /// The KnownUndef/Zero elements may only be accurate for those bits
2817   /// in the DemandedMask.
2818   /// \p AssumeSingleUse When this parameter is true, this function will
2819   ///    attempt to simplify \p Op even if there are multiple uses.
2820   ///    Callers are responsible for correctly updating the DAG based on the
2821   ///    results of this function, because simply replacing TLO.Old
2822   ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
2823   ///    has multiple uses.
2824   bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
2825                                   APInt &KnownUndef, APInt &KnownZero,
2826                                   TargetLoweringOpt &TLO, unsigned Depth = 0,
2827                                   bool AssumeSingleUse = false) const;
2828 
2829   /// Helper wrapper around SimplifyDemandedVectorElts
2830   bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
2831                                   APInt &KnownUndef, APInt &KnownZero,
2832                                   DAGCombinerInfo &DCI) const;
2833 
2834   /// Determine which of the bits specified in Mask are known to be either zero
2835   /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
2836   /// argument allows us to only collect the known bits that are shared by the
2837   /// requested vector elements.
2838   virtual void computeKnownBitsForTargetNode(const SDValue Op,
2839                                              KnownBits &Known,
2840                                              const APInt &DemandedElts,
2841                                              const SelectionDAG &DAG,
2842                                              unsigned Depth = 0) const;
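
  // A minimal sketch of an override, assuming a hypothetical target node
  // MyTargetISD::HI16CLEAR whose result always has its top 16 bits clear:
  //
  //   void MyTargetLowering::computeKnownBitsForTargetNode(
  //       const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
  //       const SelectionDAG &DAG, unsigned Depth) const {
  //     Known.resetAll();
  //     if (Op.getOpcode() == MyTargetISD::HI16CLEAR)
  //       Known.Zero.setHighBits(16);
  //   }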
2843 
2844   /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
2845   /// Default implementation computes low bits based on alignment
2846   /// information. This should preserve known bits passed into it.
2847   virtual void computeKnownBitsForFrameIndex(const SDValue FIOp,
2848                                              KnownBits &Known,
2849                                              const APInt &DemandedElts,
2850                                              const SelectionDAG &DAG,
2851                                              unsigned Depth = 0) const;
2852 
2853   /// This method can be implemented by targets that want to expose additional
2854   /// information about sign bits to the DAG Combiner. The DemandedElts
2855   /// argument allows us to only collect the minimum sign bits that are shared
2856   /// by the requested vector elements.
2857   virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
2858                                                    const APInt &DemandedElts,
2859                                                    const SelectionDAG &DAG,
2860                                                    unsigned Depth = 0) const;
2861 
2862   /// Attempt to simplify any target nodes based on the demanded vector
2863   /// elements, returning true on success. Otherwise, analyze the expression and
2864   /// return a mask of KnownUndef and KnownZero elements for the expression
2865   /// (used to simplify the caller). The KnownUndef/Zero elements may only be
2866   /// accurate for those bits in the DemandedMask.
2867   virtual bool SimplifyDemandedVectorEltsForTargetNode(
2868       SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
2869       APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
2870 
2871   struct DAGCombinerInfo {
2872     void *DC;  // The DAG Combiner object.
2873     CombineLevel Level;
2874     bool CalledByLegalizer;
2875 
2876   public:
2877     SelectionDAG &DAG;
2878 
2879     DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2880       : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2881 
2882     bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2883     bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
2884     bool isAfterLegalizeDAG() const {
2885       return Level == AfterLegalizeDAG;
2886     }
2887     CombineLevel getDAGCombineLevel() { return Level; }
2888     bool isCalledByLegalizer() const { return CalledByLegalizer; }
2889 
2890     void AddToWorklist(SDNode *N);
2891     SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
2892     SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2893     SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2894 
2895     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2896   };
2897 
2898   /// Return true if N is a constant or constant vector equal to the true value
2899   /// from getBooleanContents().
2900   bool isConstTrueVal(const SDNode *N) const;
2901 
2902   /// Return true if N is a constant or constant vector equal to the false value
2903   /// from getBooleanContents().
2904   bool isConstFalseVal(const SDNode *N) const;
2905 
2906   /// Return if \p N is a True value when extended to \p VT.
2907   bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
2908 
2909   /// Try to simplify a setcc built with the specified operands and cc. If it is
2910   /// unable to simplify it, return a null SDValue.
2911   SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
2912                         bool foldBooleans, DAGCombinerInfo &DCI,
2913                         const SDLoc &dl) const;
2914 
2915   // For targets which wrap address, unwrap for analysis.
2916   virtual SDValue unwrapAddress(SDValue N) const { return N; }
2917 
2918   /// Returns true (and the GlobalValue and the offset) if the node is a
2919   /// GlobalAddress + offset.
2920   virtual bool
2921   isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2922 
2923   /// This method will be invoked for all target nodes and for any
2924   /// target-independent nodes that the target has registered with invoke it
2925   /// for.
2926   ///
2927   /// The semantics are as follows:
2928   /// Return Value:
2929   ///   SDValue.Val == 0   - No change was made
2930   ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
2931   ///   otherwise          - N should be replaced by the returned Operand.
2932   ///
2933   /// In addition, methods provided by DAGCombinerInfo may be used to perform
2934   /// more complex transformations.
2935   ///
2936   virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
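
  // A sketch honoring the contract above (MyTargetISD::DOUBLE is a made-up
  // node meaning x + x):
  //
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     if (N->getOpcode() == MyTargetISD::DOUBLE) {
  //       SDValue X = N->getOperand(0);
  //       // N should be replaced by the returned operand.
  //       return DCI.DAG.getNode(ISD::ADD, SDLoc(N), N->getValueType(0), X, X);
  //     }
  //     return SDValue();  // No change was made.
  //   }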
2937 
2938   /// Return true if it is profitable to move a following shift through this
2939   /// node, adjusting any immediate operands as necessary to preserve semantics.
2940   /// This transformation may not be desirable if it disrupts a particularly
2941   /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
2942   /// By default, it returns true.
2943   virtual bool isDesirableToCommuteWithShift(const SDNode *N) const {
2944     return true;
2945   }
2946 
2947   // Return true if it is profitable to combine a BUILD_VECTOR with a stride-pattern
2948   // to a shuffle and a truncate.
2949   // Example of such a combine:
2950   // v4i32 build_vector((extract_elt V, 1),
2951   //                    (extract_elt V, 3),
2952   //                    (extract_elt V, 5),
2953   //                    (extract_elt V, 7))
2954   //  -->
2955   // v4i32 truncate (bitcast (shuffle<1,u,3,u,5,u,7,u> V, u) to v4i64)
2956   virtual bool isDesirableToCombineBuildVectorToShuffleTruncate(
2957       ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
2958     return false;
2959   }
2960 
2961   /// Return true if the target has native support for the specified value type
2962   /// and it is 'desirable' to use the type for the given node type. e.g. On x86
2963   /// i16 is legal, but undesirable since i16 instruction encodings are longer
2964   /// and some i16 instructions are slow.
2965   virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
2966     // By default, assume all legal types are desirable.
2967     return isTypeLegal(VT);
2968   }
2969 
2970   /// Return true if it is profitable for dag combiner to transform a floating
2971   /// point op of the specified opcode to an equivalent op of an integer
2972   /// type. e.g. f32 load -> i32 load can be profitable on ARM.
2973   virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
2974                                                  EVT /*VT*/) const {
2975     return false;
2976   }
2977 
2978   /// This method queries the target whether it is beneficial for dag combiner to
2979   /// promote the specified node. If true, it should return the desired
2980   /// promotion type by reference.
2981   virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
2982     return false;
2983   }
2984 
2985   /// Return true if the target supports swifterror attribute. It optimizes
2986   /// loads and stores to reading and writing a specific register.
2987   virtual bool supportSwiftError() const {
2988     return false;
2989   }
2990 
2991   /// Return true if the target supports that a subset of CSRs for the given
2992   /// machine function is handled explicitly via copies.
2993   virtual bool supportSplitCSR(MachineFunction *MF) const {
2994     return false;
2995   }
2996 
2997   /// Perform necessary initialization to handle a subset of CSRs explicitly
2998   /// via copies. This function is called at the beginning of instruction
2999   /// selection.
3000   virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
3001     llvm_unreachable("Not Implemented");
3002   }
3003 
3004   /// Insert explicit copies in entry and exit blocks. We copy a subset of
3005   /// CSRs to virtual registers in the entry block, and copy them back to
3006   /// physical registers in the exit blocks. This function is called at the end
3007   /// of instruction selection.
3008   virtual void insertCopiesSplitCSR(
3009       MachineBasicBlock *Entry,
3010       const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
3011     llvm_unreachable("Not Implemented");
3012   }
3013 
3014   //===--------------------------------------------------------------------===//
3015   // Lowering methods - These methods must be implemented by targets so that
3016   // the SelectionDAGBuilder code knows how to lower these.
3017   //
3018 
3019   /// This hook must be implemented to lower the incoming (formal) arguments,
3020   /// described by the Ins array, into the specified DAG. The implementation
3021   /// should fill in the InVals array with legal-type argument values, and
3022   /// return the resulting token chain value.
3023   virtual SDValue LowerFormalArguments(
3024       SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
3025       const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
3026       SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
3027     llvm_unreachable("Not Implemented");
3028   }
3029 
3030   /// This structure contains all information that is necessary for lowering
3031   /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
3032   /// needs to lower a call, and targets will see this struct in their LowerCall
3033   /// implementation.
3034   struct CallLoweringInfo {
3035     SDValue Chain;
3036     Type *RetTy = nullptr;
3037     bool RetSExt           : 1;
3038     bool RetZExt           : 1;
3039     bool IsVarArg          : 1;
3040     bool IsInReg           : 1;
3041     bool DoesNotReturn     : 1;
3042     bool IsReturnValueUsed : 1;
3043     bool IsConvergent      : 1;
3044     bool IsPatchPoint      : 1;
3045 
3046     // IsTailCall should be modified by implementations of
3047     // TargetLowering::LowerCall that perform tail call conversions.
3048     bool IsTailCall = false;
3049 
3050     // Is Call lowering done post SelectionDAG type legalization.
3051     bool IsPostTypeLegalization = false;
3052 
3053     unsigned NumFixedArgs = -1;
3054     CallingConv::ID CallConv = CallingConv::C;
3055     SDValue Callee;
3056     ArgListTy Args;
3057     SelectionDAG &DAG;
3058     SDLoc DL;
3059     ImmutableCallSite CS;
3060     SmallVector<ISD::OutputArg, 32> Outs;
3061     SmallVector<SDValue, 32> OutVals;
3062     SmallVector<ISD::InputArg, 32> Ins;
3063     SmallVector<SDValue, 4> InVals;
3064 
3065     CallLoweringInfo(SelectionDAG &DAG)
3066         : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
3067           DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
3068           IsPatchPoint(false), DAG(DAG) {}
3069 
3070     CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
3071       DL = dl;
3072       return *this;
3073     }
3074 
3075     CallLoweringInfo &setChain(SDValue InChain) {
3076       Chain = InChain;
3077       return *this;
3078     }
3079 
3080     // setCallee with target/module-specific attributes
3081     CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
3082                                    SDValue Target, ArgListTy &&ArgsList) {
3083       RetTy = ResultType;
3084       Callee = Target;
3085       CallConv = CC;
3086       NumFixedArgs = ArgsList.size();
3087       Args = std::move(ArgsList);
3088 
3089       DAG.getTargetLoweringInfo().markLibCallAttributes(
3090           &(DAG.getMachineFunction()), CC, Args);
3091       return *this;
3092     }
3093 
3094     CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
3095                                 SDValue Target, ArgListTy &&ArgsList) {
3096       RetTy = ResultType;
3097       Callee = Target;
3098       CallConv = CC;
3099       NumFixedArgs = ArgsList.size();
3100       Args = std::move(ArgsList);
3101       return *this;
3102     }
3103 
3104     CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
3105                                 SDValue Target, ArgListTy &&ArgsList,
3106                                 ImmutableCallSite Call) {
3107       RetTy = ResultType;
3108 
3109       IsInReg = Call.hasRetAttr(Attribute::InReg);
3110       DoesNotReturn =
3111           Call.doesNotReturn() ||
3112           (!Call.isInvoke() &&
3113            isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
3114       IsVarArg = FTy->isVarArg();
3115       IsReturnValueUsed = !Call.getInstruction()->use_empty();
3116       RetSExt = Call.hasRetAttr(Attribute::SExt);
3117       RetZExt = Call.hasRetAttr(Attribute::ZExt);
3118 
3119       Callee = Target;
3120 
3121       CallConv = Call.getCallingConv();
3122       NumFixedArgs = FTy->getNumParams();
3123       Args = std::move(ArgsList);
3124 
3125       CS = Call;
3126 
3127       return *this;
3128     }
3129 
3130     CallLoweringInfo &setInRegister(bool Value = true) {
3131       IsInReg = Value;
3132       return *this;
3133     }
3134 
3135     CallLoweringInfo &setNoReturn(bool Value = true) {
3136       DoesNotReturn = Value;
3137       return *this;
3138     }
3139 
3140     CallLoweringInfo &setVarArg(bool Value = true) {
3141       IsVarArg = Value;
3142       return *this;
3143     }
3144 
3145     CallLoweringInfo &setTailCall(bool Value = true) {
3146       IsTailCall = Value;
3147       return *this;
3148     }
3149 
3150     CallLoweringInfo &setDiscardResult(bool Value = true) {
3151       IsReturnValueUsed = !Value;
3152       return *this;
3153     }
3154 
3155     CallLoweringInfo &setConvergent(bool Value = true) {
3156       IsConvergent = Value;
3157       return *this;
3158     }
3159 
3160     CallLoweringInfo &setSExtResult(bool Value = true) {
3161       RetSExt = Value;
3162       return *this;
3163     }
3164 
3165     CallLoweringInfo &setZExtResult(bool Value = true) {
3166       RetZExt = Value;
3167       return *this;
3168     }
3169 
3170     CallLoweringInfo &setIsPatchPoint(bool Value = true) {
3171       IsPatchPoint = Value;
3172       return *this;
3173     }
3174 
3175     CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) {
3176       IsPostTypeLegalization = Value;
3177       return *this;
3178     }
3179 
3180     ArgListTy &getArgs() {
3181       return Args;
3182     }
3183   };
3184 
3185   /// This function lowers an abstract call to a function into an actual call.
3186   /// This returns a pair of operands.  The first element is the return value
3187   /// for the function (if RetTy is not VoidTy).  The second element is the
3188   /// outgoing token chain. It calls LowerCall to do the actual lowering.
3189   std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
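
  // Typical client-side use (sketch; Chain, Callee, RetTy, dl and DAG come
  // from the surrounding lowering code):
  //
  //   TargetLowering::ArgListTy Args;  // filled with ArgListEntry values
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(dl)
  //       .setChain(Chain)
  //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
  //   std::pair<SDValue, SDValue> Result = LowerCallTo(CLI);
  //   // Result.first is the return value, Result.second the token chain.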
3190 
3191   /// This hook must be implemented to lower calls into the specified
3192   /// DAG. The outgoing arguments to the call are described by the Outs array,
3193   /// and the values to be returned by the call are described by the Ins
3194   /// array. The implementation should fill in the InVals array with legal-type
3195   /// return values from the call, and return the resulting token chain value.
3196   virtual SDValue
3197     LowerCall(CallLoweringInfo &/*CLI*/,
3198               SmallVectorImpl<SDValue> &/*InVals*/) const {
3199     llvm_unreachable("Not Implemented");
3200   }
3201 
3202   /// Target-specific cleanup for formal ByVal parameters.
3203   virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
3204 
3205   /// This hook should be implemented to check whether the return values
3206   /// described by the Outs array can fit into the return registers.  If false
3207   /// is returned, an sret-demotion is performed.
3208   virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
3209                               MachineFunction &/*MF*/, bool /*isVarArg*/,
3210                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
3211                LLVMContext &/*Context*/) const
3212   {
3213     // Return true by default to get preexisting behavior.
3214     return true;
3215   }
3216 
3217   /// This hook must be implemented to lower outgoing return values, described
3218   /// by the Outs array, into the specified DAG. The implementation should
3219   /// return the resulting token chain value.
3220   virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
3221                               bool /*isVarArg*/,
3222                               const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
3223                               const SmallVectorImpl<SDValue> & /*OutVals*/,
3224                               const SDLoc & /*dl*/,
3225                               SelectionDAG & /*DAG*/) const {
3226     llvm_unreachable("Not Implemented");
3227   }
3228 
3229   /// Return true if result of the specified node is used by a return node
3230   /// only. It also computes and returns the input chain for the tail call.
3231   ///
3232   /// This is used to determine whether it is possible to codegen a libcall as
3233   /// tail call at legalization time.
3234   virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
3235     return false;
3236   }
3237 
3238   /// Return true if the target may be able to emit the call instruction as a tail
3239   /// call. This is used by optimization passes to determine if it's profitable
3240   /// to duplicate return instructions to enable tailcall optimization.
3241   virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
3242     return false;
3243   }
3244 
3245   /// Return the builtin name for the __builtin___clear_cache intrinsic.
3246   /// Default is to invoke the clear cache library call.
3247   virtual const char *getClearCacheBuiltinName() const {
3248     return "__clear_cache";
3249   }
3250 
3251   /// Return the register ID of the name passed in. Used by named register
3252   /// global variables extension. There is no target-independent behaviour
3253   /// so the default action is to bail.
3254   virtual unsigned getRegisterByName(const char* RegName, EVT VT,
3255                                      SelectionDAG &DAG) const {
3256     report_fatal_error("Named registers not implemented for this target");
3257   }
3258 
3259   /// Return the type that should be used to zero or sign extend a
3260   /// zeroext/signext integer return value.  FIXME: Some C calling conventions
3261   /// require the return type to be promoted, but this is not true all the time,
3262   /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
3263   /// conventions. The frontend should handle this and include all of the
3264   /// necessary information.
3265   virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
3266                                        ISD::NodeType /*ExtendKind*/) const {
3267     EVT MinVT = getRegisterType(Context, MVT::i32);
3268     return VT.bitsLT(MinVT) ? MinVT : VT;
3269   }
3270 
3271   /// For some targets, an LLVM struct type must be broken down into multiple
3272   /// simple types, but the calling convention specifies that the entire struct
3273   /// must be passed in a block of consecutive registers.
3274   virtual bool
3275   functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
3276                                             bool isVarArg) const {
3277     return false;
3278   }
3279 
3280   /// Returns a 0 terminated array of registers that can be safely used as
3281   /// scratch registers.
3282   virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
3283     return nullptr;
3284   }
3285 
3286   /// This callback is used to prepare for a volatile or atomic load.
3287   /// It takes a chain node as input and returns the chain for the load itself.
3288   ///
3289   /// Having a callback like this is necessary for targets like SystemZ,
3290   /// which allows a CPU to reuse the result of a previous load indefinitely,
3291   /// even if a cache-coherent store is performed by another CPU.  The default
3292   /// implementation does nothing.
3293   virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
3294                                               SelectionDAG &DAG) const {
3295     return Chain;
3296   }
3297 
3298   /// This callback is used to inspect load/store instructions and add
3299   /// target-specific MachineMemOperand flags to them.  The default
3300   /// implementation does nothing.
3301   virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
3302     return MachineMemOperand::MONone;
3303   }
3304 
3305   /// This callback is invoked by the type legalizer to legalize nodes with an
3306   /// illegal operand type but legal result types.  It replaces the
3307   /// LowerOperation callback in the type Legalizer.  The reason we cannot do
3308   /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
3309   /// use this callback.
3310   ///
3311   /// TODO: Consider merging with ReplaceNodeResults.
3312   ///
3313   /// The target places new result values for the node in Results (their number
3314   /// and types must exactly match those of the original return values of
3315   /// the node), or leaves Results empty, which indicates that the node is not
3316   /// to be custom lowered after all.
3317   /// The default implementation calls LowerOperation.
3318   virtual void LowerOperationWrapper(SDNode *N,
3319                                      SmallVectorImpl<SDValue> &Results,
3320                                      SelectionDAG &DAG) const;
3321 
3322   /// This callback is invoked for operations that are unsupported by the
3323   /// target, which are registered to use 'custom' lowering, and whose defined
3324   /// values are all legal.  If the target has no operations that require custom
3325   /// lowering, it need not implement this.  The default implementation of this
3326   /// aborts.
3327   virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
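
  // A sketch of a typical override: dispatch on the opcode and return a
  // replacement (lowerGlobalAddress/lowerSelectCC are hypothetical private
  // helpers of the target, not part of this interface):
  //
  //   SDValue MyTargetLowering::LowerOperation(SDValue Op,
  //                                            SelectionDAG &DAG) const {
  //     switch (Op.getOpcode()) {
  //     case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
  //     case ISD::SELECT_CC:     return lowerSelectCC(Op, DAG);
  //     default:
  //       llvm_unreachable("unexpected custom lowering request");
  //     }
  //   }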
3328 
3329   /// This callback is invoked when a node result type is illegal for the
3330   /// target, and the operation was registered to use 'custom' lowering for that
3331   /// result type.  The target places new result values for the node in Results
3332   /// (their number and types must exactly match those of the original return
3333   /// values of the node), or leaves Results empty, which indicates that the
3334   /// node is not to be custom lowered after all.
3335   ///
3336   /// If the target has no operations that require custom lowering, it need not
3337   /// implement this.  The default implementation aborts.
3338   virtual void ReplaceNodeResults(SDNode * /*N*/,
3339                                   SmallVectorImpl<SDValue> &/*Results*/,
3340                                   SelectionDAG &/*DAG*/) const {
3341     llvm_unreachable("ReplaceNodeResults not implemented for this target!");
3342   }
3343 
3344   /// This method returns the name of a target specific DAG node.
3345   virtual const char *getTargetNodeName(unsigned Opcode) const;
3346 
3347   /// This method returns a target specific FastISel object, or null if the
3348   /// target does not support "fast" ISel.
3349   virtual FastISel *createFastISel(FunctionLoweringInfo &,
3350                                    const TargetLibraryInfo *) const {
3351     return nullptr;
3352   }
3353 
3354   bool verifyReturnAddressArgumentIsConstant(SDValue Op,
3355                                              SelectionDAG &DAG) const;
3356 
3357   //===--------------------------------------------------------------------===//
3358   // Inline Asm Support hooks
3359   //
3360 
3361   /// This hook allows the target to expand an inline asm call to be explicit
3362   /// llvm code if it wants to.  This is useful for turning simple inline asms
3363   /// into LLVM intrinsics, which gives the compiler more information about the
3364   /// behavior of the code.
3365   virtual bool ExpandInlineAsm(CallInst *) const {
3366     return false;
3367   }

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any register in a class.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or unknown type.
  };

  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m".  TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst.  This gets
    /// modified as the asm is processed.
    Value *CallOperandVal = nullptr;

    /// The ValueType for the operand value.
    MVT ConstraintVT = MVT::Other;

    /// Copy constructor for copying from a ConstraintInfo.
    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
        : InlineAsm::ConstraintInfo(std::move(Info)) {}

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;
  };

  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values.  If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
                                                const TargetRegisterInfo *TRI,
                                                ImmutableCallSite CS) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the specific
  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
  /// If the actual operand being passed in is available, it can be passed in as
  /// Op, otherwise an empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(StringRef Constraint) const;

  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints.  On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const;
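
  // Illustrative sketch only: a hypothetical target might map the single-letter
  // 'r' constraint to one of its register classes (MyTarget::GPR32RegClass is
  // an assumption, not a real class) and defer everything else to the base:
  //
  //   std::pair<unsigned, const TargetRegisterClass *>
  //   MyTargetLowering::getRegForInlineAsmConstraint(
  //       const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  //     if (Constraint.size() == 1 && Constraint[0] == 'r')
  //       return std::make_pair(0U, &MyTarget::GPR32RegClass);
  //     return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  //   }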

  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
    if (ConstraintCode == "i")
      return InlineAsm::Constraint_i;
    else if (ConstraintCode == "m")
      return InlineAsm::Constraint_m;
    return InlineAsm::Constraint_Unknown;
  }
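
  // For illustration, a target with an additional memory constraint letter
  // (here "Q", which some targets use) might extend the mapping like so; this
  // is a sketch, not part of the interface:
  //
  //   unsigned MyTargetLowering::getInlineAsmMemConstraint(
  //       StringRef ConstraintCode) const {
  //     if (ConstraintCode == "Q")
  //       return InlineAsm::Constraint_Q;
  //     return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  //   }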

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand.  This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;
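
  // Sketch of a hypothetical override that accepts a 16-bit unsigned immediate
  // for a target-specific "I" constraint (the constraint letter and class name
  // are assumptions):
  //
  //   void MyTargetLowering::LowerAsmOperandForConstraint(
  //       SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
  //       SelectionDAG &DAG) const {
  //     if (Constraint == "I")
  //       if (auto *C = dyn_cast<ConstantSDNode>(Op))
  //         if (isUInt<16>(C->getZExtValue())) {
  //           Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
  //                                               Op.getValueType()));
  //           return;
  //         }
  //     TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
  //   }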

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //
  SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                    bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;
  SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                    bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SDIV lowering for
  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
  /// assumes SDIV is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;
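
  // For example, the standard power-of-2 expansion of an i32 `x sdiv 8` that a
  // target might reproduce or refine here is:
  //   t   = sra x, 31    ; all-ones if x is negative, else zero
  //   u   = srl t, 29    ; rounding bias: 7 if x is negative, else 0
  //   a   = add x, u
  //   res = sra a, 3
  // with every node created along the way also pushed into Created.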

  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }
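
  // For illustration, a target on which fdiv is much more expensive than fmul
  // might require at least two uses of a common divisor before converting the
  // divisions to multiplications by its reciprocal:
  //
  //   unsigned MyTargetLowering::combineRepeatedFPDivisors() const { return 2; }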

  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return either a square root or its reciprocal estimate value for the input
  /// operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one or two constants.
  /// The boolean Reciprocal is used to select whether the estimate is for the
  /// square root of the input operand or the reciprocal of its square root.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                  int Enabled, int &RefinementSteps,
                                  bool &UseOneConstNR, bool Reciprocal) const {
    return SDValue();
  }
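
  // Illustrative sketch: a target with a hardware reciprocal-square-root
  // estimate for f32 might return that estimate and request two refinement
  // steps.  MYTGTISD::FRSQRTE is a hypothetical target node, not part of LLVM:
  //
  //   SDValue MyTargetLowering::getSqrtEstimate(
  //       SDValue Operand, SelectionDAG &DAG, int Enabled,
  //       int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const {
  //     EVT VT = Operand.getValueType();
  //     if (VT == MVT::f32) {
  //       if (RefinementSteps == ReciprocalEstimate::Unspecified)
  //         RefinementSteps = 2;  // two Newton-Raphson iterations for f32
  //       UseOneConstNR = true;   // use the one-constant N-R variant
  //       return DAG.getNode(MYTGTISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  //     }
  //     return SDValue();         // no estimate for other types
  //   }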

  /// Return a reciprocal estimate value for the input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                   int Enabled, int &RefinementSteps) const {
    return SDValue();
  }

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
  /// respectively, each computing an n/2-bit part of the result.
  /// \param Result A vector that will be filled with the parts of the result
  ///        in little-endian order.
  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
                      SelectionDAG &DAG, MulExpansionKind Kind,
                      SDValue LL = SDValue(), SDValue LH = SDValue(),
                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Expand a MUL into two nodes, one that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, MulExpansionKind Kind,
                 SDValue LL = SDValue(), SDValue LH = SDValue(),
                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
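
  // Illustrative use, sketched as if inside a target's ReplaceNodeResults with
  // its Results vector in scope and N an ISD::MUL node whose i64 type is
  // illegal on the target: expand into i32 halves and rebuild the pair.
  //
  //   SDValue Lo, Hi;
  //   if (expandMUL(N, Lo, Hi, MVT::i32, DAG,
  //                 TargetLowering::MulExpansionKind::OnlyLegalOrCustom))
  //     Results.push_back(
  //         DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi));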

  /// Expand float(f32) to SINT(i64) conversion.
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns true if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Turn load of vector type into a load of the individual elements.
  /// \param LD load to expand
  /// \returns MERGE_VALUEs of the scalar loads with their chains.
  SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns MERGE_VALUEs of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number of
  /// stored elements. This number is equal to the number of '1' bits
  /// in \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;

  /// Get a pointer to vector element \p Index located in memory for a vector
  /// of type \p VecVT starting at a base address of \p VecPtr. If \p Index is
  /// out of bounds the returned pointer is unspecified, but will be within the
  /// vector bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag.  These instructions are special in various
  /// ways, which require special support to insert.  The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  /// As long as the returned basic block is different (i.e., we created a new
  /// one), the custom inserter is free to modify the rest of \p MBB.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
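
  // A minimal override sketch (MYTGT::SELECT_PSEUDO and the emitSelectPseudo
  // helper are hypothetical): dispatch on the custom-inserted opcodes and
  // expand each into real instructions and control flow.
  //
  //   MachineBasicBlock *MyTargetLowering::EmitInstrWithCustomInserter(
  //       MachineInstr &MI, MachineBasicBlock *MBB) const {
  //     switch (MI.getOpcode()) {
  //     case MYTGT::SELECT_PSEUDO:
  //       return emitSelectPseudo(MI, MBB); // builds a diamond of new blocks
  //     default:
  //       llvm_unreachable("unexpected instruction for custom insertion");
  //     }
  //   }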

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g. to fill in optional defs for
  /// ARM 's' setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode() const {
    return false;
  }

  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }

  /// Lower TLS global address SDNode for the target-independent emulated TLS
  /// model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands a target-specific indirect branch for the case of jump table
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr, SelectionDAG &DAG) const {
    return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
  }

  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose the
  // fact that this can be implemented as a ctlz/srl pair, so that the dag
  // combiner can fold the new nodes.
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
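
  // For example, for x:i32 the value ctlz(x) equals 32 exactly when x == 0, so
  // srl(ctlz(x), 5) is 1 iff x == 0 and 0 otherwise; truncating to i1 yields
  // the result of seteq(x, 0).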

private:
  SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
                               ISD::CondCode Cond, DAGCombinerInfo &DCI,
                               const SDLoc &DL) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H