//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code.  This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
#include <vector>

namespace llvm {
  class CallInst;
  class CCState;
  class FastISel;
  class FunctionLoweringInfo;
  class ImmutableCallSite;
  class IntrinsicInst;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineInstr;
  class MachineJumpTableInfo;
  class Mangler;
  class MCContext;
  class MCExpr;
  class MCSymbol;
  template<typename T> class SmallVectorImpl;
  class DataLayout;
  class TargetRegisterClass;
  class TargetLibraryInfo;
  class TargetLoweringObjectFile;
  class Value;

  namespace Sched {
    enum Preference {
      None,             // No preference
      Source,           // Follow source order.
      RegPressure,      // Scheduling for lowest register pressure.
      Hybrid,           // Scheduling for both latency and register pressure.
      ILP,              // Scheduling for ILP in low register pressure mode.
      VLIW              // Scheduling for VLIW targets.
    };
  }

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
  TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;

public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };
  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector      // This vector should be widened into a larger vector.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal,  // The target supports selects with a scalar condition
                          // and vector values (ex: cmov).
    VectorMaskSelect      // The target supports vector selects with a vector
                          // mask (ex: x86 blends).
  };

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  /// NOTE: The constructor takes ownership of TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM,
                              const TargetLoweringObjectFile *TLOF);
  virtual ~TargetLoweringBase();

protected:
  /// \brief Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }
  const DataLayout *getDataLayout() const { return DL; }
  const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }

  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const;
  unsigned getPointerSizeInBits(uint32_t AS = 0) const;
  unsigned getPointerTypeSizeInBits(Type *Ty) const;
  virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;

  EVT getShiftAmountTy(EVT LHSTy) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy() const {
    return getPointerTy();
  }

  /// Return true if the select operation is expensive for this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const {
    // The default action for one element vectors is to scalarize.
    if (VT.getVectorNumElements() == 1)
      return TypeScalarizeVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

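  // Illustrative sketch (not part of the original header): a hypothetical
  // target whose shuffles stay cheap for more defined values might override
  // this hook, e.g. in its MyTargetLowering class:
  //
  //   bool shouldExpandBuildVectorWithShuffles(
  //       EVT VT, unsigned DefinedValues) const override {
  //     // Assume shuffles remain profitable up to 4 defined scalar values.
  //     return DefinedValues < 5;
  //   }
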
  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if pow2 div is cheaper than a chain of srl/add/sra.
  bool isPow2DivCheap() const { return Pow2DivIsCheap; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient. However, this can be undone by optimizations
  /// in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
    return true;
  }

  /// \brief Return if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #imm-with-one-bit-set;
  ///   %icmpResult = icmp %andResult, 0
  ///   br i1 %icmpResult, label %dest1, label %dest2
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   brOnBitSet %register, #bitNumber, dest
  /// \endcode
  bool isMaskAndBranchFoldingLegal() const {
    return MaskAndBranchFoldingIsLegal;
  }

  /// Return the ValueType of the result of SETCC operations.  Also used to
  /// obtain the target's preferred type for the condition operand of SELECT and
  /// BRCOND nodes.  In the case of BRCOND the argument passed is MVT::Other
  /// since there are no other operands to get a type hint from.
  virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.  Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds.  For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

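  // Illustrative sketch (not part of the original header): DAG code commonly
  // pairs getBooleanContents with getExtendForContent when widening a boolean
  // produced by SETCC. 'TLI', 'DAG', 'DL', 'Cond' and 'WideVT' are assumed to
  // be in scope:
  //
  //   ISD::NodeType ExtOp =
  //       TLI.getExtendForContent(TLI.getBooleanContents(Cond.getValueType()));
  //   SDValue Widened = DAG.getNode(ExtOp, DL, WideVT, Cond);
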
  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference (or
  /// none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type.  For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), 0);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      unsigned I = VT.SimpleTy;
      ValueTypeActions[I] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to.  For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }

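  // Illustrative sketch (not part of the original header): on a hypothetical
  // 32-bit target, querying an illegal integer type walks one expansion step
  // at a time. 'TLI' and 'Ctx' are assumed to be in scope:
  //
  //   EVT VT = MVT::i64;
  //   if (TLI.getTypeAction(Ctx, VT) == TargetLoweringBase::TypeExpandInteger)
  //     VT = TLI.getTypeToTransformTo(Ctx, VT); // i64 -> i32 on this target
  //   // getTypeToExpandTo(Ctx, MVT::i64) loops the same way until it
  //   // reaches a legal type (here, i32).
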
  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

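  // Illustrative sketch (not part of the original header): querying how a
  // wide vector is broken up, per the v8f32 example above. 'TLI' and 'Ctx'
  // are assumed to be in scope:
  //
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = TLI.getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // On an SSE1-like target: NumRegs == 2, IntermediateVT == v4f32.
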
  struct IntrinsicInfo {
    unsigned     opc;         // target opcode
    EVT          memVT;       // memory VT
    const Value* ptrVal;      // value representing memory location
    int          offset;      // offset off of ptrVal
    unsigned     align;       // alignment
    bool         vol;         // is volatile?
    bool         readMem;     // reads memory?
    bool         writeMem;    // writes memory?
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

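  // Illustrative sketch (not part of the original header): a hypothetical
  // target with a memory-touching load intrinsic could describe it like so
  // ('MyTarget::int_my_load' is a made-up intrinsic ID):
  //
  //   bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
  //                           unsigned Intrinsic) const override {
  //     if (Intrinsic != MyTarget::int_my_load)
  //       return false;
  //     Info.opc = ISD::INTRINSIC_W_CHAIN;
  //     Info.memVT = MVT::i32;
  //     Info.ptrVal = I.getArgOperand(0);
  //     Info.offset = 0;
  //     Info.align = 4;
  //     Info.vol = false;
  //     Info.readMem = true;
  //     Info.writeMem = false;
  //     return true;
  //   }
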
  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)OpActions[I][Op];
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

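  // Illustrative sketch (not part of the original header): DAG combines
  // typically guard new nodes with these predicates, e.g.:
  //
  //   // Only form FMA if the target can select or custom-lower it for VT.
  //   if (TLI.isOperationLegalOrCustom(ISD::FMA, VT))
  //     return DAG.getNode(ISD::FMA, DL, VT, A, B, C);
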
  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType];
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
    return VT.isSimple() &&
      getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy]
                                            [MemVT.SimpleTy];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return
      getCondCodeAction(CC, VT) == Legal ||
      getCondCodeAction(CC, VT) == Custom;
  }


  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// Return the EVT corresponding to this LLVM type.  This is fixed by the LLVM
  /// operations except for the pointer size.  If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(PTy->getAddressSpace());

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerTy(PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }

      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                       VTy->getNumElements());
    }
    return EVT::getEVT(Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
    return getValueType(Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area.  This is the actual alignment, not
  /// its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces.  For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type.  For an i140
  /// on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }

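  // Worked example (not part of the original header), per the i140 comment
  // above: BitWidth = 140 and RegWidth = 32, so the integer path computes
  // (140 + 32 - 1) / 32 = 171 / 32 = 5 registers.
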
  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first?  This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT) const {
    return isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  /// \brief Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

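  // Worked example (not part of the original header): lowering compares these
  // limits against the number of stores an inline expansion would need. A
  // 64-byte memset emitted as 8-byte stores needs 64 / 8 = 8 stores; if 8
  // exceeds getMaxStoresPerMemset(OptSize), a libcall is emitted instead.
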
  /// \brief Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also returns
  /// whether the unaligned memory access is "fast" in the third argument by
  /// reference. This is used, for example, in situations where an array
  /// copy/move/set is converted to a sequence of store operations. Its use
  /// helps to ensure that such replacements don't generate code that causes an
  /// alignment error (trap) on the target machine.
  virtual bool allowsUnalignedMemoryAccesses(EVT,
                                             unsigned AddrSpace = 0,
                                             bool * /*Fast*/ = nullptr) const {
    return false;
  }

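  // Illustrative sketch (not part of the original header): a hypothetical
  // target where unaligned 32-bit accesses work but are slow:
  //
  //   bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
  //                                      bool *Fast) const override {
  //     if (VT != MVT::i32)
  //       return false;
  //     if (Fast)
  //       *Fast = false; // supported, but slower than an aligned access
  //     return true;
  //   }
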
  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, the destination alignment can satisfy any
  /// constraint. Similarly, if SrcAlign is zero there is no need to check it
  /// against the alignment requirement, probably because the source does not
  /// need to be loaded. If 'IsMemset' is true, that means it's expanding a
  /// memset. If 'ZeroMemset' is true, that means it's a memset of zero.
  /// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  /// not need to be loaded.  It returns EVT::Other if the type should be
  /// determined using generic target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsMemset*/,
                                  bool /*ZeroMemset*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return whether the target can generate code for jump tables.
  bool supportJumpTables() const {
    return SupportJumpTables;
  }

  /// Return the integer threshold on the number of blocks at which to use jump
  /// tables rather than an if sequence.
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }

  /// If a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to a landing pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// Returns the target's jmp_buf size in bytes (if never set, the default is
  /// 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
  /// is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// Return the minimum stack alignment of an argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// Return the preferred function alignment.
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// Return the preferred loop alignment.
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }

  /// Return whether the DAG builder should automatically insert fences and
  /// reduce ordering for atomics.
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }

  /// Return true if the target stores stack protector cookies at a fixed offset
  /// in some non-standard address space, and populates the address space and
  /// offset as appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// Returns the maximal possible offset which can be used for loads / stores
  /// from the global.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for load-linked/store-conditional atomic expansion.
  /// @{

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

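  // Illustrative sketch (not part of the original header): the expansion pass
  // builds roughly this loop around the two hooks above. 'Builder', 'TLI',
  // 'Addr', 'Incr', 'Ord', 'LoopBB' and 'ExitBB' are assumed to be in scope,
  // and performAtomicOp is a hypothetical helper computing the new value:
  //
  //   Value *Loaded = TLI->emitLoadLinked(Builder, Addr, Ord);
  //   Value *NewVal = performAtomicOp(Builder, Loaded, Incr);
  //   Value *Status = TLI->emitStoreConditional(Builder, NewVal, Addr, Ord);
  //   Value *Retry  = Builder.CreateICmpNE(
  //       Status, ConstantInt::get(Status->getType(), 0));
  //   Builder.CreateCondBr(Retry, LoopBB, ExitBB); // loop until SC succeeds
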
  /// Return true if the given (atomic) instruction should be expanded by the
  /// IR-level AtomicExpandLoadLinked pass into a loop involving
  /// load-linked/store-conditional pairs. Atomic stores will be expanded in the
  /// same way as "atomic xchg" operations which ignore their output if needed.
  virtual bool shouldExpandAtomicInIR(Instruction *Inst) const {
    return false;
  }


  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

  /// \brief Reset the operation actions based on target options.
  virtual void resetOperationActions() {}

protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type.  See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the version without _.  Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the version without _.  Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// Indicate whether the target can generate code for jump tables.
  void setSupportJumpTables(bool Val) {
    SupportJumpTables = Val;
  }

  /// Indicate the minimum number of blocks at which to generate jump tables
  /// rather than an if sequence.
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// If set to a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception address on entry to a landing pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception typeid on entry to a landing pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }

  /// Tells the code generator not to expand operations into sequences that use
  /// select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand a sequence of operations into
  /// separate sequences that increase the amount of flow control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }

  /// Tells the code generator whether integer divide is cheap. If it is not,
  /// an integer divide should, where possible, be replaced by an alternate
  /// sequence of instructions not containing an integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }

  /// Tells the code generator that it shouldn't generate srl/add/sra for a
  /// signed divide by power of two, and let the target handle it.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.SimpleTy] = RC;
  }

  /// Remove all register classes.
  void clearRegisterClasses() {
    memset(RegClassForVT, 0,MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));

    AvailableRegClasses.clear();
  }

  /// \brief Remove all operation actions.
  void clearOperationActions() {
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties();

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }

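  // Illustrative sketch (not part of the original header): a hypothetical
  // target constructor wires these configuration methods up roughly as
  // follows ('MyTarget::GPR32RegClass' is a made-up register class):
  //
  //   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand); // no divide insn
  //   setOperationAction(ISD::ROTR, MVT::i32, Custom); // see LowerOperation
  //   computeRegisterProperties();
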
  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT VT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
  }

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }

  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

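  // Worked example (not part of the original header): for a given VT and
  // IdxMode the two actions share one byte. Setting the indexed load action
  // to Custom (3) and the store action to Expand (2) leaves the byte holding
  // (3 << 4) | 2 == 0x32, which the getIndexed{Load,Store}Action accessors
  // above unpack again.
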
1154   /// Indicate that the specified condition code is or isn't supported on the
1155   /// target and indicate what to do about it.
setCondCodeAction(ISD::CondCode CC,MVT VT,LegalizeAction Action)1156   void setCondCodeAction(ISD::CondCode CC, MVT VT,
1157                          LegalizeAction Action) {
1158     assert(VT < MVT::LAST_VALUETYPE &&
1159            (unsigned)CC < array_lengthof(CondCodeActions) &&
1160            "Table isn't big enough!");
1161     // The lower 4 bits of the SimpleTy select one of the sixteen 2-bit fields
1162     // in the 32-bit value, and the remaining upper bits index into the second
1163     // dimension of the array to select which 32-bit word to use.
1164     uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
1165     CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
1166     CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
1167   }
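
  // Editor's note, worked example of the packing above: for a value type with
  // SimpleTy == 19, Shift = 2 * (19 & 0xF) = 6, so the action occupies bits
  // 7:6 of CondCodeActions[CC][19 >> 4], i.e. of the second 32-bit word.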
1168 
1169   /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
1170   /// to trying a larger integer/fp until it can find one that works. If that
1171   /// default is insufficient, this method can be used by the target to override
1172   /// the default.
1173   void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1174     PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1175   }
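
  // Editor's example (a sketch): pair this with a Promote action when the
  // next-larger-type default is wrong, e.g. for a hypothetical MMX-like unit:
  //   setOperationAction(ISD::AND, MVT::v8i8, Promote);
  //   AddPromotedToType(ISD::AND, MVT::v8i8, MVT::v1i64); // not v8i16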
1176 
1177   /// Targets should invoke this method for each target independent node that
1178   /// they want to provide a custom DAG combiner for by implementing the
1179   /// PerformDAGCombine virtual method.
1180   void setTargetDAGCombine(ISD::NodeType NT) {
1181     assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1182     TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
1183   }
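
  // Editor's example (a sketch): a target that wants PerformDAGCombine called
  // for shifts would register them in its constructor:
  //   setTargetDAGCombine(ISD::SHL);
  //   setTargetDAGCombine(ISD::SRL);
  // Each call sets bit (NT & 7) of byte (NT >> 3) in TargetDAGCombineArray.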
1184 
1185   /// Set the target's required jmp_buf buffer size (in bytes); default is 200
1186   void setJumpBufSize(unsigned Size) {
1187     JumpBufSize = Size;
1188   }
1189 
1190   /// Set the target's required jmp_buf buffer alignment (in bytes); default is
1191   /// 0
1192   void setJumpBufAlignment(unsigned Align) {
1193     JumpBufAlignment = Align;
1194   }
1195 
1196   /// Set the target's minimum function alignment (in log2(bytes))
1197   void setMinFunctionAlignment(unsigned Align) {
1198     MinFunctionAlignment = Align;
1199   }
1200 
1201   /// Set the target's preferred function alignment.  This should be set if
1202   /// there is a performance benefit to higher-than-minimum alignment (in
1203   /// log2(bytes))
1204   void setPrefFunctionAlignment(unsigned Align) {
1205     PrefFunctionAlignment = Align;
1206   }
1207 
1208   /// Set the target's preferred loop alignment. The default of zero means
1209   /// that the target does not care about loop alignment.  The alignment is
1210   /// specified in log2(bytes).
1211   void setPrefLoopAlignment(unsigned Align) {
1212     PrefLoopAlignment = Align;
1213   }
1214 
1215   /// Set the minimum stack alignment of an argument (in log2(bytes)).
1216   void setMinStackArgumentAlignment(unsigned Align) {
1217     MinStackArgumentAlignment = Align;
1218   }
1219 
1220   /// Set if the DAG builder should automatically insert fences and reduce the
1221   /// order of atomic memory operations to Monotonic.
1222   void setInsertFencesForAtomic(bool fence) {
1223     InsertFencesForAtomic = fence;
1224   }
1225 
1226 public:
1227   //===--------------------------------------------------------------------===//
1228   // Addressing mode description hooks (used by LSR etc).
1229   //
1230 
1231   /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
1232   /// instructions reading the address. This allows as much computation as
1233   /// possible to be done in the address mode for that operand. This hook lets
1234   /// targets also pass back when this should be done on intrinsics which
1235   /// load/store.
1236   virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
1237                                     SmallVectorImpl<Value*> &/*Ops*/,
1238                                     Type *&/*AccessTy*/) const {
1239     return false;
1240   }
1241 
1242   /// This represents an addressing mode of:
1243   ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1244   /// If BaseGV is null, there is no BaseGV.
1245   /// If BaseOffs is zero, there is no base offset.
1246   /// If HasBaseReg is false, there is no base register.
1247   /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
1248   /// no scale.
1249   struct AddrMode {
1250     GlobalValue *BaseGV;
1251     int64_t      BaseOffs;
1252     bool         HasBaseReg;
1253     int64_t      Scale;
1254     AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
1255   };
1256 
1257   /// Return true if the addressing mode represented by AM is legal for this
1258   /// target, for a load/store of the specified type.
1259   ///
1260   /// The type may be VoidTy, in which case only return true if the addressing
1261   /// mode is legal for a load/store of any legal type.  TODO: Handle
1262   /// pre/postinc as well.
1263   virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
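
  // Editor's example (a sketch): an x86-style mode [%base + 4*%index + 8]
  // would be queried roughly as follows (Ctx is an LLVMContext):
  //   AddrMode AM;
  //   AM.HasBaseReg = true;  AM.Scale = 4;  AM.BaseOffs = 8;
  //   bool OK = isLegalAddressingMode(AM, Type::getInt32Ty(Ctx));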
1264 
1265   /// \brief Return the cost of the scaling factor used in the addressing mode
1266   /// represented by AM for this target, for a load/store of the specified type.
1267   ///
1268   /// If the AM is supported, the return value must be >= 0.
1269   /// If the AM is not supported, it returns a negative value.
1270   /// TODO: Handle pre/postinc as well.
1271   virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
1272     // Default: assume that any scaling factor used in a legal AM is free.
1273     if (isLegalAddressingMode(AM, Ty)) return 0;
1274     return -1;
1275   }
1276 
1277   /// Return true if the specified immediate is a legal icmp immediate, that is,
1278   /// the target has icmp instructions which can compare a register against the
1279   /// immediate without having to materialize the immediate into a register.
1280   virtual bool isLegalICmpImmediate(int64_t) const {
1281     return true;
1282   }
1283 
1284   /// Return true if the specified immediate is a legal add immediate, that is, the
1285   /// target has add instructions which can add a register with the immediate
1286   /// without having to materialize the immediate into a register.
1287   virtual bool isLegalAddImmediate(int64_t) const {
1288     return true;
1289   }
1290 
1291   /// Return true if it's significantly cheaper to shift a vector by a uniform
1292   /// scalar than by an amount which will vary across each lane. On x86, for
1293   /// example, there is a "psllw" instruction for the former case, but no simple
1294   /// instruction for a general "a << b" operation on vectors.
1295   virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
1296     return false;
1297   }
1298 
1299   /// Return true if it's free to truncate a value of type Ty1 to type
1300   /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
1301   /// by referencing its sub-register AX.
1302   virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1303     return false;
1304   }
1305 
1306   /// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
1307   /// whether a call is in tail position. Typically this means that both results
1308   /// would be assigned to the same register or stack slot, but it could mean
1309   /// the target performs adequate checks of its own before proceeding with the
1310   /// tail call.
1311   virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
1312     return false;
1313   }
1314 
1315   virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
1316     return false;
1317   }
1318 
1319   /// Return true if any actual instruction that defines a value of type Ty1
1320   /// implicitly zero-extends the value to Ty2 in the result register.
1321   ///
1322   /// This does not necessarily include registers defined in unknown ways, such
1323   /// as incoming arguments, or copies from unknown virtual registers. Also, if
1324   /// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
1325   /// truncate instructions. e.g. on x86-64, all instructions that define 32-bit
1326   /// values implicitly zero-extend the result out to 64 bits.
1327   virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1328     return false;
1329   }
1330 
1331   virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
1332     return false;
1333   }
1334 
1335   /// Return true if the target can combine, into a paired load, two loaded
1336   /// values of type LoadedType that lie next to each other in memory.
1337   /// RequiredAlignment gives the minimal alignment constraints that must be met
1338   /// to be able to select this paired load.
1339   ///
1340   /// This information is *not* used to generate actual paired loads, but it is
1341   /// used to generate a sequence of loads that is easier to combine into a
1342   /// paired load.
1343   /// For instance, something like this:
1344   /// a = load i64* addr
1345   /// b = trunc i64 a to i32
1346   /// c = lshr i64 a, 32
1347   /// d = trunc i64 c to i32
1348   /// will be optimized into:
1349   /// b = load i32* addr1
1350   /// d = load i32* addr2
1351   /// Where addr1 = addr2 +/- sizeof(i32).
1352   ///
1353   /// In other words, unless the target performs a post-isel load combining,
1354   /// this information should not be provided because it will generate more
1355   /// loads.
1356   virtual bool hasPairedLoad(Type * /*LoadedType*/,
1357                              unsigned & /*RequiredAlignment*/) const {
1358     return false;
1359   }
1360 
1361   virtual bool hasPairedLoad(EVT /*LoadedType*/,
1362                              unsigned & /*RequiredAlignment*/) const {
1363     return false;
1364   }
1365 
1366   /// Return true if zero-extending the specified node Val to type VT2 is free
1367   /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
1368   /// because it's folded such as X86 zero-extending loads).
1369   virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1370     return isZExtFree(Val.getValueType(), VT2);
1371   }
1372 
1373   /// Return true if an fneg operation is free to the point where it is never
1374   /// worthwhile to replace it with a bitwise operation.
1375   virtual bool isFNegFree(EVT VT) const {
1376     assert(VT.isFloatingPoint());
1377     return false;
1378   }
1379 
1380   /// Return true if an fabs operation is free to the point where it is never
1381   /// worthwhile to replace it with a bitwise operation.
1382   virtual bool isFAbsFree(EVT VT) const {
1383     assert(VT.isFloatingPoint());
1384     return false;
1385   }
1386 
1387   /// Return true if an FMA operation is faster than a pair of fmul and fadd
1388   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
1389   /// returns true, otherwise fmuladd is expanded to fmul + fadd.
1390   ///
1391   /// NOTE: This may be called before legalization on types for which FMAs are
1392   /// not legal, but should return true if those types will eventually legalize
1393   /// to types that support FMAs. After legalization, it will only be called on
1394   /// types that support FMAs (via Legal or Custom actions)
1395   virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
1396     return false;
1397   }
1398 
1399   /// Return true if it's profitable to narrow operations of type VT1 to
1400   /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
1401   /// i32 to i16.
1402   virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1403     return false;
1404   }
1405 
1406   /// \brief Return true if it is beneficial to convert a load of a constant to
1407   /// just the constant itself.
1408   /// On some targets it might be more efficient to use a combination of
1409   /// arithmetic instructions to materialize the constant instead of loading it
1410   /// from a constant pool.
1411   virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
1412                                                  Type *Ty) const {
1413     return false;
1414   }
1415   //===--------------------------------------------------------------------===//
1416   // Runtime Library hooks
1417   //
1418 
1419   /// Rename the default libcall routine name for the specified libcall.
1420   void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1421     LibcallRoutineNames[Call] = Name;
1422   }
1423 
1424   /// Get the libcall routine name for the specified libcall.
1425   const char *getLibcallName(RTLIB::Libcall Call) const {
1426     return LibcallRoutineNames[Call];
1427   }
1428 
1429   /// Override the default CondCode to be used to test the result of the
1430   /// comparison libcall against zero.
1431   void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1432     CmpLibcallCCs[Call] = CC;
1433   }
1434 
1435   /// Get the CondCode that's to be used to test the result of the comparison
1436   /// libcall against zero.
1437   ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1438     return CmpLibcallCCs[Call];
1439   }
1440 
1441   /// Set the CallingConv that should be used for the specified libcall.
1442   void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1443     LibcallCallingConvs[Call] = CC;
1444   }
1445 
1446   /// Get the CallingConv that should be used for the specified libcall.
1447   CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1448     return LibcallCallingConvs[Call];
1449   }
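
  // Editor's example (a sketch; "__my_eqsf2" is a hypothetical routine name):
  // a target using a custom soft-float comparison might configure all three
  // hooks together:
  //   setLibcallName(RTLIB::OEQ_F32, "__my_eqsf2");
  //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);
  //   setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);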
1450 
1451 private:
1452   const TargetMachine &TM;
1453   const DataLayout *DL;
1454   const TargetLoweringObjectFile &TLOF;
1455 
1456   /// True if this is a little endian target.
1457   bool IsLittleEndian;
1458 
1459   /// Tells the code generator not to expand operations into sequences that use
1460   /// select operations, if possible.
1461   bool SelectIsExpensive;
1462 
1463   /// Tells the code generator that the target has multiple (allocatable)
1464   /// condition registers that can be used to store the results of comparisons
1465   /// for use by selects and conditional branches. With multiple condition
1466   /// registers, the code generator will not aggressively sink comparisons into
1467   /// the blocks of their users.
1468   bool HasMultipleConditionRegisters;
1469 
1470   /// Tells the code generator that the target has BitExtract instructions.
1471   /// The code generator will aggressively sink "shift"s into the blocks of
1472   /// their users if the users will generate "and" instructions which can be
1473   /// combined with "shift" to BitExtract instructions.
1474   bool HasExtractBitsInsn;
1475 
1476   /// Tells the code generator not to expand integer divides by constants into a
1477   /// sequence of muls, adds, and shifts.  This is a hack until a real cost
1478   /// model is in place.  If we ever optimize for size, this will be set to true
1479   /// unconditionally.
1480   bool IntDivIsCheap;
1481 
1482   /// Tells the code generator to bypass slow divide or remainder
1483   /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
1484   /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
1485   /// div/rem when the operands are positive and less than 256.
1486   DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
1487 
1488   /// Tells the code generator that it shouldn't generate srl/add/sra for a
1489   /// signed divide by a power of two, and should let the target handle it.
1490   bool Pow2DivIsCheap;
1491 
1492   /// Tells the code generator that it shouldn't generate extra flow control
1493   /// instructions and should attempt to combine flow control instructions via
1494   /// predication.
1495   bool JumpIsExpensive;
1496 
1497   /// This target prefers to use _setjmp to implement llvm.setjmp.
1498   ///
1499   /// Defaults to false.
1500   bool UseUnderscoreSetJmp;
1501 
1502   /// This target prefers to use _longjmp to implement llvm.longjmp.
1503   ///
1504   /// Defaults to false.
1505   bool UseUnderscoreLongJmp;
1506 
1507   /// Whether the target can generate code for jumptables.  If it's not true,
1508   /// then each jumptable must be lowered into if-then-else's.
1509   bool SupportJumpTables;
1510 
1511   /// The minimum number of case blocks at which jump tables are used.
1512   int MinimumJumpTableEntries;
1513 
1514   /// Information about the contents of the high-bits in boolean values held in
1515   /// a type wider than i1. See getBooleanContents.
1516   BooleanContent BooleanContents;
1517 
1518   /// Information about the contents of the high-bits in boolean values
1519   /// produced by floating-point comparisons. See getBooleanContents.
1520   BooleanContent BooleanFloatContents;
1521 
1522   /// Information about the contents of the high-bits in boolean vector values
1523   /// when the element type is wider than i1. See getBooleanContents.
1524   BooleanContent BooleanVectorContents;
1525 
1526   /// The target scheduling preference: shortest possible total cycles or lowest
1527   /// register usage.
1528   Sched::Preference SchedPreferenceInfo;
1529 
1530   /// The size, in bytes, of the target's jmp_buf buffers
1531   unsigned JumpBufSize;
1532 
1533   /// The alignment, in bytes, of the target's jmp_buf buffers
1534   unsigned JumpBufAlignment;
1535 
1536   /// The minimum alignment that any argument on the stack needs to have.
1537   unsigned MinStackArgumentAlignment;
1538 
1539   /// The minimum function alignment (used when optimizing for size, and to
1540   /// prevent explicitly provided alignment from leading to incorrect code).
1541   unsigned MinFunctionAlignment;
1542 
1543   /// The preferred function alignment (used when alignment unspecified and
1544   /// optimizing for speed).
1545   unsigned PrefFunctionAlignment;
1546 
1547   /// The preferred loop alignment.
1548   unsigned PrefLoopAlignment;
1549 
1550   /// Whether the DAG builder should automatically insert fences and reduce
1551   /// ordering for atomics.  (This will be set for most architectures with
1552   /// weak memory ordering.)
1553   bool InsertFencesForAtomic;
1554 
1555   /// If set to a physical register, this specifies the register that
1556   /// llvm.stacksave/llvm.stackrestore should save and restore.
1557   unsigned StackPointerRegisterToSaveRestore;
1558 
1559   /// If set to a physical register, this specifies the register that receives
1560   /// the exception address on entry to a landing pad.
1561   unsigned ExceptionPointerRegister;
1562 
1563   /// If set to a physical register, this specifies the register that receives
1564   /// the exception typeid on entry to a landing pad.
1565   unsigned ExceptionSelectorRegister;
1566 
1567   /// This indicates the default register class to use for each ValueType the
1568   /// target supports natively.
1569   const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1570   unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1571   MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
1572 
1573   /// This indicates the "representative" register class to use for each
1574   /// ValueType the target supports natively. This information is used by the
1575   /// scheduler to track register pressure. By default, the representative
1576   /// register class is the largest legal super-reg register class of the
1577   /// register class of the specified type. e.g. On x86, i8, i16, and i32's
1578   /// representative class would be GR32.
1579   const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
1580 
1581   /// This indicates the "cost" of the "representative" register class for each
1582   /// ValueType. The cost is used by the scheduler to approximate register
1583   /// pressure.
1584   uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1585 
1586   /// For any value types we are promoting or expanding, this contains the value
1587   /// type that we are changing to.  For Expanded types, this contains one step
1588   /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
1589   /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
1590   /// the same type (e.g. i32 -> i32).
1591   MVT TransformToType[MVT::LAST_VALUETYPE];
1592 
1593   /// For each operation and each value type, keep a LegalizeAction that
1594   /// indicates how instruction selection should deal with the operation.  Most
1595   /// operations are Legal (aka, supported natively by the target), but
1596   /// operations that are not should be described.  Note that operations on
1597   /// non-legal value types are not described here.
1598   uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1599 
1600   /// For each load extension type and each value type, keep a LegalizeAction
1601   /// that indicates how instruction selection should deal with a load of a
1602   /// specific value type and extension type.
1603   uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
1604 
1605   /// For each value type pair keep a LegalizeAction that indicates whether a
1606   /// truncating store of a specific value type and truncating type is legal.
1607   uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
1608 
1609   /// For each indexed mode and each value type, keep a pair of LegalizeAction
1610   /// that indicates how instruction selection should deal with the load /
1611   /// store.
1612   ///
1613   /// The first dimension is the value_type for the reference. The second
1614   /// dimension represents the various modes for load / store.
1615   uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
1616 
1617   /// For each condition code (ISD::CondCode) keep a LegalizeAction that
1618   /// indicates how instruction selection should deal with the condition code.
1619   ///
1620   /// Because each CC action takes up 2 bits, we need to have the array size be
1621   /// large enough to fit all of the value types. This can be done by rounding
1622   /// up the MVT::LAST_VALUETYPE value to the next multiple of 16.
1623   uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16];
1624 
1625   ValueTypeActionImpl ValueTypeActions;
1626 
1627 public:
1628   LegalizeKind
1629   getTypeConversion(LLVMContext &Context, EVT VT) const {
1630     // If this is a simple type, use the ComputeRegisterProp mechanism.
1631     if (VT.isSimple()) {
1632       MVT SVT = VT.getSimpleVT();
1633       assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
1634       MVT NVT = TransformToType[SVT.SimpleTy];
1635       LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
1636 
1637       assert(
1638         (LA == TypeLegal ||
1639          ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
1640          && "Promote may not follow Expand or Promote");
1641 
1642       if (LA == TypeSplitVector)
1643         return LegalizeKind(LA, EVT::getVectorVT(Context,
1644                                                  SVT.getVectorElementType(),
1645                                                  SVT.getVectorNumElements()/2));
1646       if (LA == TypeScalarizeVector)
1647         return LegalizeKind(LA, SVT.getVectorElementType());
1648       return LegalizeKind(LA, NVT);
1649     }
1650 
1651     // Handle Extended Scalar Types.
1652     if (!VT.isVector()) {
1653       assert(VT.isInteger() && "Float types must be simple");
1654       unsigned BitSize = VT.getSizeInBits();
1655       // First promote to a power-of-two size, then expand if necessary.
1656       if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
1657         EVT NVT = VT.getRoundIntegerType(Context);
1658         assert(NVT != VT && "Unable to round integer VT");
1659         LegalizeKind NextStep = getTypeConversion(Context, NVT);
1660         // Avoid multi-step promotion.
1661         if (NextStep.first == TypePromoteInteger) return NextStep;
1662         // Return rounded integer type.
1663         return LegalizeKind(TypePromoteInteger, NVT);
1664       }
1665 
1666       return LegalizeKind(TypeExpandInteger,
1667                           EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
1668     }
1669 
1670     // Handle vector types.
1671     unsigned NumElts = VT.getVectorNumElements();
1672     EVT EltVT = VT.getVectorElementType();
1673 
1674     // Vectors with only one element are always scalarized.
1675     if (NumElts == 1)
1676       return LegalizeKind(TypeScalarizeVector, EltVT);
1677 
1678     // Try to widen vector elements until the element type is a power of two and
1679     // promote it to a legal type later on, for example:
1680     // <3 x i8> -> <4 x i8> -> <4 x i32>
1681     if (EltVT.isInteger()) {
1682       // Vectors with a number of elements that is not a power of two are always
1683       // widened, for example <3 x i8> -> <4 x i8>.
1684       if (!VT.isPow2VectorType()) {
1685         NumElts = (unsigned)NextPowerOf2(NumElts);
1686         EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
1687         return LegalizeKind(TypeWidenVector, NVT);
1688       }
1689 
1690       // Examine the element type.
1691       LegalizeKind LK = getTypeConversion(Context, EltVT);
1692 
1693       // If type is to be expanded, split the vector.
1694       //  <4 x i140> -> <2 x i140>
1695       if (LK.first == TypeExpandInteger)
1696         return LegalizeKind(TypeSplitVector,
1697                             EVT::getVectorVT(Context, EltVT, NumElts / 2));
1698 
1699       // Promote the integer element types until a legal vector type is found
1700       // or until the element integer type is too big. If a legal type was not
1701       // found, fallback to the usual mechanism of widening/splitting the
1702       // vector.
1703       EVT OldEltVT = EltVT;
1704       while (1) {
1705         // Increase the bitwidth of the element to the next pow-of-two
1706         // (which is greater than 8 bits).
1707         EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
1708                                  ).getRoundIntegerType(Context);
1709 
1710         // Stop trying when getting a non-simple element type.
1711         // Note that vector elements may be greater than legal vector element
1712         // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
1713         // systems.
1714         if (!EltVT.isSimple()) break;
1715 
1716         // Build a new vector type and check if it is legal.
1717         MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1718         // Found a legal promoted vector type.
1719         if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
1720           return LegalizeKind(TypePromoteInteger,
1721                               EVT::getVectorVT(Context, EltVT, NumElts));
1722       }
1723 
1724       // Reset the type to the unexpanded type if we did not find a legal vector
1725       // type with a promoted vector element type.
1726       EltVT = OldEltVT;
1727     }
1728 
1729     // Try to widen the vector until a legal type is found.
1730     // If there is no wider legal type, split the vector.
1731     while (1) {
1732       // Round up to the next power of 2.
1733       NumElts = (unsigned)NextPowerOf2(NumElts);
1734 
1735       // If there is no simple vector type with this many elements then there
1736       // cannot be a larger legal vector type.  Note that this assumes that
1737       // there are no skipped intermediate vector types in the simple types.
1738       if (!EltVT.isSimple()) break;
1739       MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1740       if (LargerVector == MVT()) break;
1741 
1742       // If this type is legal then widen the vector.
1743       if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
1744         return LegalizeKind(TypeWidenVector, LargerVector);
1745     }
1746 
1747     // Widen odd vectors to next power of two.
1748     if (!VT.isPow2VectorType()) {
1749       EVT NVT = VT.getPow2VectorType(Context);
1750       return LegalizeKind(TypeWidenVector, NVT);
1751     }
1752 
1753     // Vectors with illegal element types are expanded.
1754     EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
1755     return LegalizeKind(TypeSplitVector, NVT);
1756   }
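
  // Editor's note, worked example: for an extended scalar such as i17 on a
  // typical target, BitSize == 17 is not a power of two, so the type is first
  // rounded up to i32; if i32 is Legal, the result is
  // LegalizeKind(TypePromoteInteger, i32). A request for i64 on a 32-bit
  // target would instead yield LegalizeKind(TypeExpandInteger, i32).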
1757 
1758 private:
1759   std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
1760 
1761   /// Targets can specify ISD nodes that they would like PerformDAGCombine
1762   /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
1763   /// array.
1764   unsigned char
1765   TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
1766 
1767   /// For operations that must be promoted to a specific type, this holds the
1768   /// destination type.  This map should be sparse, so don't hold it as an
1769   /// array.
1770   ///
1771   /// Targets add entries to this map with AddPromotedToType(..), clients access
1772   /// this with getTypeToPromoteTo(..).
1773   std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
1774     PromoteToType;
1775 
1776   /// Stores the name of each libcall.
1777   const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
1778 
1779   /// The ISD::CondCode that should be used to test the result of each of the
1780   /// comparison libcalls against zero.
1781   ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1782 
1783   /// Stores the CallingConv that should be used for each libcall.
1784   CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
1785 
1786 protected:
1787   /// \brief Specify maximum number of store instructions per memset call.
1788   ///
1789   /// When lowering \@llvm.memset this field specifies the maximum number of
1790   /// store operations that may be substituted for the call to memset. Targets
1791   /// must set this value based on the cost threshold for that target. Targets
1792   /// should assume that the memset will be done using as many of the largest
1793   /// store operations first, followed by smaller ones, if necessary, per
1794   /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
1795   /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
1796   /// store.  This only applies to setting a constant array of a constant size.
1797   unsigned MaxStoresPerMemset;
1798 
1799   /// Maximum number of store operations that may be substituted for the call
1800   /// to memset, used for functions with OptSize attribute.
1801   unsigned MaxStoresPerMemsetOptSize;
1802 
1803   /// \brief Specify maximum number of store instructions per memcpy call.
1804   ///
1805   /// When lowering \@llvm.memcpy this field specifies the maximum number of
1806   /// store operations that may be substituted for a call to memcpy. Targets
1807   /// must set this value based on the cost threshold for that target. Targets
1808   /// should assume that the memcpy will be done using as many of the largest
1809   /// store operations first, followed by smaller ones, if necessary, per
1810   /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1811   /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
1812   /// and one 1-byte store. This only applies to copying a constant array of
1813   /// constant size.
1814   unsigned MaxStoresPerMemcpy;
1815 
1816   /// Maximum number of store operations that may be substituted for a call to
1817   /// memcpy, used for functions with OptSize attribute.
1818   unsigned MaxStoresPerMemcpyOptSize;
1819 
1820   /// \brief Specify maximum number of store instructions per memmove call.
1821   ///
1822   /// When lowering \@llvm.memmove this field specifies the maximum number of
1823   /// store instructions that may be substituted for a call to memmove. Targets
1824   /// must set this value based on the cost threshold for that target. Targets
1825   /// should assume that the memmove will be done using as many of the largest
1826   /// store operations first, followed by smaller ones, if necessary, per
1827   /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
1828   /// with 8-bit alignment would result in nine 1-byte stores.  This only
1829   /// applies to copying a constant array of constant size.
1830   unsigned MaxStoresPerMemmove;
1831 
1832   /// Maximum number of store instructions that may be substituted for a call to
1833   /// memmove, used for functions with OptSize attribute.
1834   unsigned MaxStoresPerMemmoveOptSize;
1835 
1836   /// Tells the code generator that select is more expensive than a branch if
1837   /// the branch is usually predicted right.
1838   bool PredictableSelectIsExpensive;
1839 
1840   /// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
1841   /// a mask of a single bit, a compare, and a branch into a single instruction.
1842   bool MaskAndBranchFoldingIsLegal;
1843 
1844 protected:
1845   /// Return true if the value types that can be represented by the specified
1846   /// register class are all legal.
1847   bool isLegalRC(const TargetRegisterClass *RC) const;
1848 
1849   /// Replace/modify any TargetFrameIndex operands with a target-dependent
1850   /// sequence of memory operands that is recognized by PrologEpilogInserter.
1851   MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
1852 };
1853 
1854 /// This class defines information used to lower LLVM code to legal SelectionDAG
1855 /// operators that the target instruction selector can accept natively.
1856 ///
1857 /// This class also defines callbacks that targets must implement to lower
1858 /// target-specific constructs to SelectionDAG operators.
1859 class TargetLowering : public TargetLoweringBase {
1860   TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
1861   void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
1862 
1863 public:
1864   /// NOTE: The constructor takes ownership of TLOF.
1865   explicit TargetLowering(const TargetMachine &TM,
1866                           const TargetLoweringObjectFile *TLOF);
1867 
1868   /// Returns true (by value), and the base pointer, offset pointer, and
1869   /// addressing mode (by reference), if the node's address can be legally
1870   /// represented as a pre-indexed load / store address.
1871   virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
1872                                          SDValue &/*Offset*/,
1873                                          ISD::MemIndexedMode &/*AM*/,
1874                                          SelectionDAG &/*DAG*/) const {
1875     return false;
1876   }
1877 
1878   /// Returns true (by value), and the base pointer, offset pointer, and
1879   /// addressing mode (by reference), if this node can be combined with a
1880   /// load / store to form a post-indexed load / store.
1881   virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
1882                                           SDValue &/*Base*/,
1883                                           SDValue &/*Offset*/,
1884                                           ISD::MemIndexedMode &/*AM*/,
1885                                           SelectionDAG &/*DAG*/) const {
1886     return false;
1887   }
1888 
1889   /// Return the entry encoding for a jump table in the current function.  The
1890   /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
1891   virtual unsigned getJumpTableEncoding() const;
1892 
1893   virtual const MCExpr *
1894   LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
1895                             const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
1896                             MCContext &/*Ctx*/) const {
1897     llvm_unreachable("Need to implement this hook if target has custom JTIs");
1898   }
1899 
1900   /// Returns relocation base for the given PIC jumptable.
1901   virtual SDValue getPICJumpTableRelocBase(SDValue Table,
1902                                            SelectionDAG &DAG) const;
1903 
1904   /// This returns the relocation base for the given PIC jumptable, the same as
1905   /// getPICJumpTableRelocBase, but as an MCExpr.
1906   virtual const MCExpr *
1907   getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
1908                                unsigned JTI, MCContext &Ctx) const;
1909 
1910   /// Return true if folding a constant offset with the given GlobalAddress is
1911   /// legal.  It is frequently not legal in PIC relocation models.
1912   virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
1913 
1914   bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
1915                             SDValue &Chain) const;
1916 
1917   void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
1918                            SDValue &NewLHS, SDValue &NewRHS,
1919                            ISD::CondCode &CCCode, SDLoc DL) const;
1920 
1921   /// Returns a pair of (return value, chain).
1922   std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
1923                                           EVT RetVT, const SDValue *Ops,
1924                                           unsigned NumOps, bool isSigned,
1925                                           SDLoc dl, bool doesNotReturn = false,
1926                                           bool isReturnValueUsed = true) const;
1927 
1928   //===--------------------------------------------------------------------===//
1929   // TargetLowering Optimization Methods
1930   //
1931 
1932   /// A convenience struct that encapsulates a DAG, and two SDValues for
1933   /// returning information from TargetLowering to its clients that want to
1934   /// combine.
1935   struct TargetLoweringOpt {
1936     SelectionDAG &DAG;
1937     bool LegalTys;
1938     bool LegalOps;
1939     SDValue Old;
1940     SDValue New;
1941 
1942     explicit TargetLoweringOpt(SelectionDAG &InDAG,
1943                                bool LT, bool LO) :
1944       DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
1945 
1946     bool LegalTypes() const { return LegalTys; }
1947     bool LegalOperations() const { return LegalOps; }
1948 
1949     bool CombineTo(SDValue O, SDValue N) {
1950       Old = O;
1951       New = N;
1952       return true;
1953     }
1954 
1955     /// Check to see if the specified operand of the specified instruction is a
1956     /// constant integer.  If so, check to see if there are any bits set in the
1957     /// constant that are not demanded.  If so, shrink the constant and return
1958     /// true.
1959     bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
1960 
1961     /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.  This
1962     /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
1963     /// generalized for targets with other types of implicit widening casts.
1964     bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
1965                           SDLoc dl);
1966   };
1967 
1968   /// Look at Op.  At this point, we know that only the DemandedMask bits of the
1969   /// result of Op are ever used downstream.  If we can use this information to
1970   /// simplify Op, create a new simplified DAG node and return true, returning
1971   /// the original and new nodes in Old and New.  Otherwise, analyze the
1972   /// expression and return a mask of KnownOne and KnownZero bits for the
1973   /// expression (used to simplify the caller).  The KnownZero/One bits may only
1974   /// be accurate for those bits in the DemandedMask.
1975   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
1976                             APInt &KnownZero, APInt &KnownOne,
1977                             TargetLoweringOpt &TLO, unsigned Depth = 0) const;
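
  // Editor's note, example: if Op is (and X, 0xFF) and DemandedMask is 0xFF,
  // every demanded bit of the constant is known one, so the AND is redundant
  // and this routine can CombineTo(Op, X) and return true.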
1978 
1979   /// Determine which of the bits specified in Mask are known to be either zero
1980   /// or one and return them in the KnownZero/KnownOne bitsets.
1981   virtual void computeKnownBitsForTargetNode(const SDValue Op,
1982                                              APInt &KnownZero,
1983                                              APInt &KnownOne,
1984                                              const SelectionDAG &DAG,
1985                                              unsigned Depth = 0) const;
1986 
1987   /// This method can be implemented by targets that want to expose additional
1988   /// information about sign bits to the DAG Combiner.
1989   virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
1990                                                    const SelectionDAG &DAG,
1991                                                    unsigned Depth = 0) const;
1992 
1993   struct DAGCombinerInfo {
1994     void *DC;  // The DAG Combiner object.
1995     CombineLevel Level;
1996     bool CalledByLegalizer;
1997   public:
1998     SelectionDAG &DAG;
1999 
2000     DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2001       : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2002 
2003     bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2004     bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
2005     bool isAfterLegalizeVectorOps() const {
2006       return Level == AfterLegalizeDAG;
2007     }
2008     CombineLevel getDAGCombineLevel() { return Level; }
2009     bool isCalledByLegalizer() const { return CalledByLegalizer; }
2010 
2011     void AddToWorklist(SDNode *N);
2012     void RemoveFromWorklist(SDNode *N);
2013     SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
2014                       bool AddTo = true);
2015     SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2016     SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2017 
2018     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2019   };
2020 
2021   /// Return true if N is a constant or constant vector equal to the true value
2022   /// from getBooleanContents().
2023   bool isConstTrueVal(const SDNode *N) const;
2024 
2025   /// Return true if N is a constant or constant vector equal to the false value
2026   /// from getBooleanContents().
2027   bool isConstFalseVal(const SDNode *N) const;
2028 
2029   /// Try to simplify a setcc built with the specified operands and cc. If it is
2030   /// unable to simplify it, return a null SDValue.
2031   SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
2032                           ISD::CondCode Cond, bool foldBooleans,
2033                           DAGCombinerInfo &DCI, SDLoc dl) const;
2034 
2035   /// Returns true (and the GlobalValue and the offset) if the node is a
2036   /// GlobalAddress + offset.
2037   virtual bool
2038   isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2039 
2040   /// This method will be invoked for all target nodes and for any
2041   /// target-independent nodes that the target has registered with invoke it
2042   /// for.
2043   ///
2044   /// The semantics are as follows:
2045   /// Return Value:
2046   ///   SDValue.Val == 0   - No change was made
2047   ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
2048   ///   otherwise          - N should be replaced by the returned Operand.
2049   ///
2050   /// In addition, methods provided by DAGCombinerInfo may be used to perform
2051   /// more complex transformations.
2052   ///
2053   virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
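
  // Editor's example (a sketch; performShlCombine is a hypothetical helper):
  //   SDValue XXXTargetLowering::PerformDAGCombine(SDNode *N,
  //                                                DAGCombinerInfo &DCI) const {
  //     switch (N->getOpcode()) {
  //     case ISD::SHL: return performShlCombine(N, DCI);
  //     default:       return SDValue();   // no change
  //     }
  //   }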
2054 
2055   /// Return true if it is profitable to move a following shift through this
2056   /// node, adjusting any immediate operands as necessary to preserve semantics.
2057   /// This transformation may not be desirable if it disrupts a particularly
2058   /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
2059   /// By default, it returns true.
2060   virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
2061     return true;
2062   }
2063 
2064   /// Return true if the target has native support for the specified value type
2065   /// and it is 'desirable' to use the type for the given node type. e.g. On x86
2066   /// i16 is legal, but undesirable since i16 instruction encodings are longer
2067   /// and some i16 instructions are slow.
2068   virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
2069     // By default, assume all legal types are desirable.
2070     return isTypeLegal(VT);
2071   }
2072 
2073   /// Return true if it is profitable for dag combiner to transform a floating
2074   /// point op of specified opcode to an equivalent op of an integer
2075   /// type. e.g. f32 load -> i32 load can be profitable on ARM.
2076   virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
2077                                                  EVT /*VT*/) const {
2078     return false;
2079   }
2080 
2081   /// This method queries the target whether it is beneficial for dag combiner to
2082   /// promote the specified node. If true, it should return the desired
2083   /// promotion type by reference.
2084   virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
2085     return false;
2086   }
2087 
2088   //===--------------------------------------------------------------------===//
2089   // Lowering methods - These methods must be implemented by targets so that
2090   // the SelectionDAGBuilder code knows how to lower these.
2091   //
2092 
2093   /// This hook must be implemented to lower the incoming (formal) arguments,
2094   /// described by the Ins array, into the specified DAG. The implementation
2095   /// should fill in the InVals array with legal-type argument values, and
2096   /// return the resulting token chain value.
2097   ///
2098   virtual SDValue
2099     LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2100                          bool /*isVarArg*/,
2101                          const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
2102                          SDLoc /*dl*/, SelectionDAG &/*DAG*/,
2103                          SmallVectorImpl<SDValue> &/*InVals*/) const {
2104     llvm_unreachable("Not Implemented");
2105   }
2106 
2107   struct ArgListEntry {
2108     SDValue Node;
2109     Type* Ty;
2110     bool isSExt     : 1;
2111     bool isZExt     : 1;
2112     bool isInReg    : 1;
2113     bool isSRet     : 1;
2114     bool isNest     : 1;
2115     bool isByVal    : 1;
2116     bool isInAlloca : 1;
2117     bool isReturned : 1;
2118     uint16_t Alignment;
2119 
2120     ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
2121       isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
2122       isReturned(false), Alignment(0) { }
2123 
2124     void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
2125   };
2126   typedef std::vector<ArgListEntry> ArgListTy;
2127 
2128   /// This structure contains all information that is necessary for lowering
2129   /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
2130   /// needs to lower a call, and targets will see this struct in their LowerCall
2131   /// implementation.
2132   struct CallLoweringInfo {
2133     SDValue Chain;
2134     Type *RetTy;
2135     bool RetSExt           : 1;
2136     bool RetZExt           : 1;
2137     bool IsVarArg          : 1;
2138     bool IsInReg           : 1;
2139     bool DoesNotReturn     : 1;
2140     bool IsReturnValueUsed : 1;
2141 
2142     // IsTailCall should be modified by implementations of
2143     // TargetLowering::LowerCall that perform tail call conversions.
2144     bool IsTailCall;
2145 
2146     unsigned NumFixedArgs;
2147     CallingConv::ID CallConv;
2148     SDValue Callee;
2149     ArgListTy Args;
2150     SelectionDAG &DAG;
2151     SDLoc DL;
2152     ImmutableCallSite *CS;
2153     SmallVector<ISD::OutputArg, 32> Outs;
2154     SmallVector<SDValue, 32> OutVals;
2155     SmallVector<ISD::InputArg, 32> Ins;
2156 
2157     CallLoweringInfo(SelectionDAG &DAG)
2158       : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
2159         IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
2160         IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
2161         DAG(DAG), CS(nullptr) {}
2162 
2163     CallLoweringInfo &setDebugLoc(SDLoc dl) {
2164       DL = dl;
2165       return *this;
2166     }
2167 
2168     CallLoweringInfo &setChain(SDValue InChain) {
2169       Chain = InChain;
2170       return *this;
2171     }
2172 
2173     CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
2174                                 SDValue Target, ArgListTy &&ArgsList,
2175                                 unsigned FixedArgs = -1) {
2176       RetTy = ResultType;
2177       Callee = Target;
2178       CallConv = CC;
2179       NumFixedArgs =
2180         (FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
2181       Args = std::move(ArgsList);
2182       return *this;
2183     }
2184 
2185     CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
2186                                 SDValue Target, ArgListTy &&ArgsList,
2187                                 ImmutableCallSite &Call) {
2188       RetTy = ResultType;
2189 
2190       IsInReg = Call.paramHasAttr(0, Attribute::InReg);
2191       DoesNotReturn = Call.doesNotReturn();
2192       IsVarArg = FTy->isVarArg();
2193       IsReturnValueUsed = !Call.getInstruction()->use_empty();
2194       RetSExt = Call.paramHasAttr(0, Attribute::SExt);
2195       RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
2196 
2197       Callee = Target;
2198 
2199       CallConv = Call.getCallingConv();
2200       NumFixedArgs = FTy->getNumParams();
2201       Args = std::move(ArgsList);
2202 
2203       CS = &Call;
2204 
2205       return *this;
2206     }
2207 
2208     CallLoweringInfo &setInRegister(bool Value = true) {
2209       IsInReg = Value;
2210       return *this;
2211     }
2212 
2213     CallLoweringInfo &setNoReturn(bool Value = true) {
2214       DoesNotReturn = Value;
2215       return *this;
2216     }
2217 
2218     CallLoweringInfo &setVarArg(bool Value = true) {
2219       IsVarArg = Value;
2220       return *this;
2221     }
2222 
2223     CallLoweringInfo &setTailCall(bool Value = true) {
2224       IsTailCall = Value;
2225       return *this;
2226     }
2227 
2228     CallLoweringInfo &setDiscardResult(bool Value = true) {
2229       IsReturnValueUsed = !Value;
2230       return *this;
2231     }
2232 
2233     CallLoweringInfo &setSExtResult(bool Value = true) {
2234       RetSExt = Value;
2235       return *this;
2236     }
2237 
2238     CallLoweringInfo &setZExtResult(bool Value = true) {
2239       RetZExt = Value;
2240       return *this;
2241     }
2242 
2243     ArgListTy &getArgs() {
2244       return Args;
2245     }
2246   };
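
  // Editor's example (a sketch; Chain, Callee, RetTy, dl and Args are assumed
  // to be in scope): clients of LowerCallTo build the struct fluently:
  //   CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(dl).setChain(Chain)
  //      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
  //      .setDiscardResult(true);
  //   std::pair<SDValue, SDValue> Result = LowerCallTo(CLI);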
2247 
2248   /// This function lowers an abstract call to a function into an actual call.
2249   /// This returns a pair of operands.  The first element is the return value
2250   /// for the function (if RetTy is not VoidTy).  The second element is the
2251   /// outgoing token chain. It calls LowerCall to do the actual lowering.
2252   std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
2253 
2254   /// This hook must be implemented to lower calls into the specified
2255   /// DAG. The outgoing arguments to the call are described by the Outs array,
2256   /// and the values to be returned by the call are described by the Ins
2257   /// array. The implementation should fill in the InVals array with legal-type
2258   /// return values from the call, and return the resulting token chain value.
2259   virtual SDValue
2260     LowerCall(CallLoweringInfo &/*CLI*/,
2261               SmallVectorImpl<SDValue> &/*InVals*/) const {
2262     llvm_unreachable("Not Implemented");
2263   }
2264 
2265   /// Target-specific cleanup for formal ByVal parameters.
2266   virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
2267 
2268   /// This hook should be implemented to check whether the return values
2269   /// described by the Outs array can fit into the return registers.  If false
2270   /// is returned, an sret-demotion is performed.
2271   virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
2272                               MachineFunction &/*MF*/, bool /*isVarArg*/,
2273                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2274                LLVMContext &/*Context*/) const
2275   {
2276     // Return true by default to get preexisting behavior.
2277     return true;
2278   }
2279 
2280   /// This hook must be implemented to lower outgoing return values, described
2281   /// by the Outs array, into the specified DAG. The implementation should
2282   /// return the resulting token chain value.
2283   virtual SDValue
2284     LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2285                 bool /*isVarArg*/,
2286                 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2287                 const SmallVectorImpl<SDValue> &/*OutVals*/,
2288                 SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
2289     llvm_unreachable("Not Implemented");
2290   }
2291 
2292   /// Return true if the result of the specified node is used by a return node
2293   /// only. It also computes and returns the input chain for the tail call.
2294   ///
2295   /// This is used to determine whether it is possible to codegen a libcall as
2296   /// tail call at legalization time.
2297   virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
2298     return false;
2299   }
2300 
2301   /// Return true if the target may be able to emit the call instruction as a tail
2302   /// call. This is used by optimization passes to determine if it's profitable
2303   /// to duplicate return instructions to enable tailcall optimization.
2304   virtual bool mayBeEmittedAsTailCall(CallInst *) const {
2305     return false;
2306   }
2307 
2308   /// Return the builtin name for the __builtin___clear_cache intrinsic.
2309   /// The default is to invoke the clear cache library call.
2310   virtual const char * getClearCacheBuiltinName() const {
2311     return "__clear_cache";
2312   }
2313 
2314   /// Return the register ID of the name passed in. Used by the named register
2315   /// global variables extension. There is no target-independent behaviour
2316   /// so the default action is to bail.
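  ///
  /// A sketch of an override, assuming a hypothetical MyTarget that exposes
  /// its stack pointer (StringSwitch is from llvm/ADT/StringSwitch.h):
  /// \code
  ///   unsigned MyTargetLowering::getRegisterByName(const char *RegName,
  ///                                                EVT VT) const {
  ///     unsigned Reg = StringSwitch<unsigned>(RegName)
  ///                        .Case("sp", MyTarget::SP)  // hypothetical register
  ///                        .Default(0);
  ///     if (Reg)
  ///       return Reg;
  ///     report_fatal_error("Invalid register name global variable");
  ///   }
  /// \endcode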
  virtual unsigned getRegisterByName(const char* RegName, EVT VT) const {
    report_fatal_error("Named registers not implemented for this target");
  }

  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer argument or return value.  FIXME: Most C calling
  /// conventions require the return type to be promoted, but this is not true
  /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
  /// calling conventions. The frontend should handle this and include all of
  /// the necessary information.
  virtual MVT getTypeForExtArgOrReturn(MVT VT,
                                       ISD::NodeType /*ExtendKind*/) const {
    MVT MinVT = getRegisterType(MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }

  /// For some targets, an LLVM struct type must be broken down into multiple
  /// simple types, but the calling convention specifies that the entire struct
  /// must be passed in a block of consecutive registers.
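  /// This is the case, for example, for the hard-float ARM calling
  /// conventions, which require a homogeneous floating-point aggregate to be
  /// allocated to consecutive VFP registers or passed entirely on the stack.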
  virtual bool
  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
                                            bool isVarArg) const {
    return false;
  }

  /// Returns a 0-terminated array of registers that can be safely used as
  /// scratch registers.
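  ///
  /// A sketch of an override (MyTarget::R12 is a hypothetical register that
  /// the calling convention leaves free); note the terminating 0:
  /// \code
  ///   const MCPhysReg *
  ///   MyTargetLowering::getScratchRegisters(CallingConv::ID) const {
  ///     static const MCPhysReg ScratchRegs[] = { MyTarget::R12, 0 };
  ///     return ScratchRegs;
  ///   }
  /// \endcode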
  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
    return nullptr;
  }

  /// This callback is used to prepare for a volatile or atomic load.
  /// It takes a chain node as input and returns the chain for the load itself.
  ///
  /// Having a callback like this is necessary for targets like SystemZ,
  /// which allows a CPU to reuse the result of a previous load indefinitely,
  /// even if a cache-coherent store is performed by another CPU.  The default
  /// implementation does nothing.
  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
                                              SelectionDAG &DAG) const {
    return Chain;
  }

  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types.  It replaces the
  /// LowerOperation callback in the type Legalizer.  The reason we cannot do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal.  If the target has no operations that require
  /// custom lowering, it need not implement this.  The default implementation
  /// of this aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for
  /// that result type.  The target places new result values for the node in
  /// Results (their number and types must exactly match those of the original
  /// return values of the node), or leaves Results empty, which indicates
  /// that the node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need
  /// not implement this.  The default implementation aborts.
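  ///
  /// A sketch of an override for a hypothetical target on which the i16
  /// result of an FP_TO_SINT is illegal and must be computed in i32; note
  /// that the value pushed into Results keeps the node's original result
  /// type:
  /// \code
  ///   void MyTargetLowering::ReplaceNodeResults(
  ///       SDNode *N, SmallVectorImpl<SDValue> &Results,
  ///       SelectionDAG &DAG) const {
  ///     switch (N->getOpcode()) {
  ///     default:
  ///       llvm_unreachable("Don't know how to custom type legalize this!");
  ///     case ISD::FP_TO_SINT: {
  ///       // Compute the result in the wider legal type, then truncate back
  ///       // so Results matches the original result type exactly.
  ///       SDValue Wide = DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), MVT::i32,
  ///                                  N->getOperand(0));
  ///       Results.push_back(DAG.getNode(ISD::TRUNCATE, SDLoc(N),
  ///                                     N->getValueType(0), Wide));
  ///       break;
  ///     }
  ///     }
  ///   }
  /// \endcode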
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// This method returns the name of a target-specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target-specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return nullptr;
  }

  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
                                             SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// LLVM code if it wants to.  This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.
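  /// For example, the X86 backend uses this hook to turn simple "bswap $0"
  /// inline asm statements into calls to the llvm.bswap intrinsic.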
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any register(s) in class.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or unknown type.
  };

  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m".  TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst.  This gets
    /// modified as the asm is processed.
    Value *CallOperandVal;

    /// The ValueType for the operand value.
    MVT ConstraintVT;

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;

    /// Copy constructor for copying from a ConstraintInfo.
    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintType(TargetLowering::C_Unknown),
        CallOperandVal(nullptr), ConstraintVT(MVT::Other) {
    }
  };

  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values.  If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the
  /// specific AsmOperandInfo, setting OpInfo.ConstraintCode and
  /// OpInfo.ConstraintType.  If the actual operand being passed in is
  /// available, it can be passed in as Op, otherwise an empty SDValue can be
  /// passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(const std::string &Constraint) const;

  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register number of 0 and the register
  /// class pointer.
  ///
  /// This should only be used for C_Register constraints.  On error, this
  /// returns a register number of 0 and a null register class pointer.
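  ///
  /// A sketch of an override, assuming a hypothetical MyTarget with a single
  /// general-purpose register class:
  /// \code
  ///   std::pair<unsigned, const TargetRegisterClass *>
  ///   MyTargetLowering::getRegForInlineAsmConstraint(
  ///       const std::string &Constraint, MVT VT) const {
  ///     if (Constraint.size() == 1 && Constraint[0] == 'r')
  ///       return std::make_pair(0U, &MyTarget::GPRRegClass);
  ///     // Defer {regname} constraints to the generic implementation.
  ///     return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
  ///   }
  /// \endcode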
  virtual std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT VT) const;

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand.  This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  /// Given an exact SDIV by a constant, create a multiplication with the
  /// multiplicative inverse of the constant.
  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
                         SelectionDAG &DAG) const;

  /// Given an ISD::SDIV node expressing a divide by constant, return a DAG
  /// expression that will generate the same value by multiplying by a magic
  /// number.
  SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                    bool IsAfterLegalization,
                    std::vector<SDNode *> *Created) const;

  /// Given an ISD::UDIV node expressing a divide by constant, return a DAG
  /// expression that will generate the same value by multiplying by a magic
  /// number.
  SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                    bool IsAfterLegalization,
                    std::vector<SDNode *> *Created) const;

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL into two nodes.  One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
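  ///
  /// A sketch of a call site, e.g. from a target's ReplaceNodeResults when an
  /// i64 multiply must be built from i32 halves (N, Results, and DAG follow
  /// the surrounding hook; the MVT choices are illustrative):
  /// \code
  ///   SDValue Lo, Hi;
  ///   if (expandMUL(N, Lo, Hi, MVT::i32, DAG))
  ///     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N),
  ///                                   MVT::i64, Lo, Hi));
  /// \endcode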
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, SDValue LL = SDValue(),
                 SDValue LH = SDValue(), SDValue RL = SDValue(),
                 SDValue RH = SDValue()) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag.  These instructions are special in various
  /// ways, which require special support to insert.  The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
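  ///
  /// A sketch of an override dispatching on the pseudo's opcode
  /// (MyTarget::SELECT_PSEUDO and emitSelect are hypothetical; emitSelect
  /// would build the diamond control flow for a select):
  /// \code
  ///   MachineBasicBlock *
  ///   MyTargetLowering::EmitInstrWithCustomInserter(
  ///       MachineInstr *MI, MachineBasicBlock *MBB) const {
  ///     switch (MI->getOpcode()) {
  ///     case MyTarget::SELECT_PSEUDO:
  ///       return emitSelect(MI, MBB);
  ///     default:
  ///       llvm_unreachable("Unexpected instr type to insert");
  ///     }
  ///   }
  /// \endcode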
  virtual MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g. to fill in optional defs for
  /// ARM 's'-setting instructions.
  virtual void
  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(Type* ReturnType, AttributeSet attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI);

} // end llvm namespace

#endif