//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code.  This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class BranchProbability;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class GISelKnownBits;
class IntrinsicInst;
struct KnownBits;
class LegacyDivergenceAnalysis;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

  enum Preference {
    None,             // No preference
    Source,           // Follow source order.
    RegPressure,      // Scheduling for lowest register pressure.
    Hybrid,           // Scheduling for both latency and register pressure.
    ILP,              // Scheduling for ILP in low register pressure mode.
    VLIW              // Scheduling for VLIW targets.
  };

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, this memset clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source, or a default value if
                     // the memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
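
// Illustrative usage (hypothetical values): a target deciding how to lower a
// 16-byte memcpy might build a MemOp and query its alignment before picking
// an access width:
//
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   if (Op.isAligned(Align(4)))
//     ...; // Both source and destination allow 32-bit accesses.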

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    LibCall,    // Don't try to expand this to other ops, always use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };
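
  // Targets typically configure these actions in their TargetLowering
  // constructor via setOperationAction() (declared further down this class);
  // an illustrative sketch:
  //
  //   setOperationAction(ISD::SDIV,   MVT::i32, Expand);  // No HW divide.
  //   setOperationAction(ISD::FSIN,   MVT::f64, LibCall); // Always call libm.
  //   setOperationAction(ISD::SELECT, MVT::i32, Custom);  // LowerOperation().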

  /// This enum indicates whether types are legal for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts; the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };
232 
233   /// Enum that describes what type of support for selects the target has.
234   enum SelectSupportKind {
235     ScalarValSelect,      // The target supports scalar selects (ex: cmov).
236     ScalarCondVectorVal,  // The target supports selects with a scalar condition
237                           // and vector values (ex: cmov).
238     VectorMaskSelect      // The target supports vector selects with a vector
239                           // mask (ex: x86 blends).
240   };
241 
242   /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
243   /// to, if at all. Exists because different targets have different levels of
244   /// support for these atomic instructions, and also have different options
245   /// w.r.t. what they should expand to.
246   enum class AtomicExpansionKind {
247     None,    // Don't expand the instruction.
248     LLSC,    // Expand the instruction into loadlinked/storeconditional; used
249              // by ARM/AArch64.
250     LLOnly,  // Expand the (load) instruction into just a load-linked, which has
251              // greater atomic guarantees than a normal load.
252     CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
253     MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
254   };
255 
256   /// Enum that specifies when a multiplication should be expanded.
257   enum class MulExpansionKind {
258     Always,            // Always expand the instruction.
259     OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
260                        // or custom.
261   };
262 
263   /// Enum that specifies when a float negation is beneficial.
264   enum class NegatibleCost {
265     Cheaper = 0,    // Negated expression is cheaper.
266     Neutral = 1,    // Negated expression has the same cost.
267     Expensive = 2   // Negated expression is more expensive.
268   };
269 
270   class ArgListEntry {
271   public:
272     Value *Val = nullptr;
273     SDValue Node = SDValue();
274     Type *Ty = nullptr;
275     bool IsSExt : 1;
276     bool IsZExt : 1;
277     bool IsInReg : 1;
278     bool IsSRet : 1;
279     bool IsNest : 1;
280     bool IsByVal : 1;
281     bool IsByRef : 1;
282     bool IsInAlloca : 1;
283     bool IsPreallocated : 1;
284     bool IsReturned : 1;
285     bool IsSwiftSelf : 1;
286     bool IsSwiftError : 1;
287     bool IsCFGuardTarget : 1;
288     MaybeAlign Alignment = None;
289     Type *ByValType = nullptr;
290     Type *PreallocatedType = nullptr;
291 
ArgListEntry()292     ArgListEntry()
293         : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
294           IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
295           IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
296           IsSwiftError(false), IsCFGuardTarget(false) {}
297 
298     void setAttributes(const CallBase *Call, unsigned ArgIdx);
299   };
300   using ArgListTy = std::vector<ArgListEntry>;
301 
markLibCallAttributes(MachineFunction * MF,unsigned CC,ArgListTy & Args)302   virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
303                                      ArgListTy &Args) const {};
304 
getExtendForContent(BooleanContent Content)305   static ISD::NodeType getExtendForContent(BooleanContent Content) {
306     switch (Content) {
307     case UndefinedBooleanContent:
308       // Extend by adding rubbish bits.
309       return ISD::ANY_EXTEND;
310     case ZeroOrOneBooleanContent:
311       // Extend by adding zero bits.
312       return ISD::ZERO_EXTEND;
313     case ZeroOrNegativeOneBooleanContent:
314       // Extend by copying the sign bit.
315       return ISD::SIGN_EXTEND;
316     }
317     llvm_unreachable("Invalid content kind");
318   }
319 
320   explicit TargetLoweringBase(const TargetMachine &TM);
321   TargetLoweringBase(const TargetLoweringBase &) = delete;
322   TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
323   virtual ~TargetLoweringBase() = default;
324 
325   /// Return true if the target support strict float operation
isStrictFPEnabled()326   bool isStrictFPEnabled() const {
327     return IsStrictFPEnabled;
328   }
329 
330 protected:
331   /// Initialize all of the actions to default values.
332   void initActions();
333 
334 public:
getTargetMachine()335   const TargetMachine &getTargetMachine() const { return TM; }
336 
useSoftFloat()337   virtual bool useSoftFloat() const { return false; }
338 
339   /// Return the pointer type for the given address space, defaults to
340   /// the pointer type from the data layout.
341   /// FIXME: The default needs to be removed once all the code is updated.
342   virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
343     return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
344   }
345 
346   /// Return the in-memory pointer type for the given address space, defaults to
347   /// the pointer type from the data layout.  FIXME: The default needs to be
348   /// removed once all the code is updated.
349   MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
350     return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
351   }
352 
353   /// Return the type for frame index, which is determined by
354   /// the alloca address space specified through the data layout.
getFrameIndexTy(const DataLayout & DL)355   MVT getFrameIndexTy(const DataLayout &DL) const {
356     return getPointerTy(DL, DL.getAllocaAddrSpace());
357   }
358 
359   /// Return the type for code pointers, which is determined by the program
360   /// address space specified through the data layout.
getProgramPointerTy(const DataLayout & DL)361   MVT getProgramPointerTy(const DataLayout &DL) const {
362     return getPointerTy(DL, DL.getProgramAddressSpace());
363   }
364 
365   /// Return the type for operands of fence.
366   /// TODO: Let fence operands be of i32 type and remove this.
getFenceOperandTy(const DataLayout & DL)367   virtual MVT getFenceOperandTy(const DataLayout &DL) const {
368     return getPointerTy(DL);
369   }
370 
371   /// EVT is not used in-tree, but is used by out-of-tree target.
372   /// A documentation for this function would be nice...
373   virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
374 
375   EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
376                        bool LegalTypes = true) const;
377 
378   /// Return the preferred type to use for a shift opcode, given the shifted
379   /// amount type is \p ShiftValueTy.
380   LLVM_READONLY
getPreferredShiftAmountTy(LLT ShiftValueTy)381   virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
382     return ShiftValueTy;
383   }
384 
385   /// Returns the type to be used for the index operand of:
386   /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
387   /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
getVectorIdxTy(const DataLayout & DL)388   virtual MVT getVectorIdxTy(const DataLayout &DL) const {
389     return getPointerTy(DL);
390   }
391 
392   /// This callback is used to inspect load/store instructions and add
393   /// target-specific MachineMemOperand flags to them.  The default
394   /// implementation does nothing.
getTargetMMOFlags(const Instruction & I)395   virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const {
396     return MachineMemOperand::MONone;
397   }
398 
399   MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI,
400                                                   const DataLayout &DL) const;
401   MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
402                                                    const DataLayout &DL) const;
403   MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
404                                                     const DataLayout &DL) const;
405 
isSelectSupported(SelectSupportKind)406   virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
407     return true;
408   }
409 
410   /// Return true if it is profitable to convert a select of FP constants into
411   /// a constant pool load whose address depends on the select condition. The
412   /// parameter may be used to differentiate a select with FP compare from
413   /// integer compare.
reduceSelectOfFPConstantLoads(EVT CmpOpVT)414   virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
415     return true;
416   }
417 
418   /// Return true if multiple condition registers are available.
hasMultipleConditionRegisters()419   bool hasMultipleConditionRegisters() const {
420     return HasMultipleConditionRegisters;
421   }
422 
423   /// Return true if the target has BitExtract instructions.
hasExtractBitsInsn()424   bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
425 
426   /// Return the preferred vector type legalization action.
427   virtual TargetLoweringBase::LegalizeTypeAction
getPreferredVectorAction(MVT VT)428   getPreferredVectorAction(MVT VT) const {
429     // The default action for one element vectors is to scalarize
430     if (VT.getVectorElementCount().isScalar())
431       return TypeScalarizeVector;
432     // The default action for an odd-width vector is to widen.
433     if (!VT.isPow2VectorType())
434       return TypeWidenVector;
435     // The default action for other vectors is to promote
436     return TypePromoteInteger;
437   }
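
  // Under these defaults a v1i64 is scalarized to i64 and a v3i32 is widened
  // to v4i32; remaining power-of-two vectors take TypePromoteInteger (e.g. a
  // v4i16 would move toward wider integer elements, subject to what the
  // target actually marks legal). Targets override this hook to steer
  // individual types differently.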

  // Return true if the half type should be passed around as i16, but promoted
  // to float around arithmetic. The default behavior is to pass around as
  // float and convert around loads/stores/bitcasts and other places where
  // the size matters.
  virtual bool softPromoteHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if the target has indicated at least one type should be
  /// bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns the map of slow types for division or remainder with their
  /// corresponding fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  virtual BranchProbability getPredictableBranchThreshold() const;

  /// Return true if the following transform is beneficial:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations in
  /// dag combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const {
    // Don't do if we could do an indexed load on the original type, but not on
    // the new one.
    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
      return true;

    MVT LoadMVT = LoadVT.getSimpleVT();

    // Don't bother doing this if it's just going to be promoted again later, as
    // doing so might interfere with other combines.
    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
      return false;

    bool Fast = false;
    return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
                              MMO, &Fast) && Fast;
  }

  /// Return true if the following transform is beneficial:
  /// (store (y (conv x)), y*) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Return true if it is reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const SelectionDAG &DAG) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// Return true if the ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if the instruction generated for equality comparison is folded
  /// with the instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if it is safe to transform an integer-domain bitwise operation
  /// into the equivalent floating-point operation. This should be set to true
  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
  /// type.
  virtual bool hasBitPreservingFPLogic(EVT VT) const {
    return false;
  }

  /// Return true if it is cheaper to split the store of a merged integer value
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  /// (X & Y) == Y ---> (~X & Y) == 0
  /// (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  /// (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }
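
  // A target whose and-not instruction also sets condition flags (for
  // example, AArch64's BICS) would plausibly override this to return true,
  // possibly restricted by Y's type. An illustrative sketch for a
  // hypothetical target:
  //
  //   bool MyTargetLowering::hasAndNotCompare(SDValue Y) const {
  //     // Flag-setting and-not only exists for 32/64-bit GPRs here.
  //     EVT VT = Y.getValueType();
  //     return VT == MVT::i32 || VT == MVT::i64;
  //   }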

  /// Return true if the target has a bitwise and-not operation:
  /// X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  /// Mask:    x &  (-1 << y)  (the instcombine canonical form)
  /// Shifts:  x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether the given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing an endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const {
    return false;
  }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.  Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds.  For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows the target to decide the register class of a specific value that
  /// is live outside the defining block.
  /// Returns true if the value needs a uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type.  For example, on
  /// i386 the rep register class for i8, i16, and i32 is GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if SHIFT instructions should be expanded to SHIFT_PARTS
  /// instructions, and false if a library call is preferred (e.g. for code-size
  /// reasons).
  virtual bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to.  For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
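
  // Worked example: on a hypothetical 32-bit target whose widest integer
  // register is i32, getTypeToTransformTo() maps i128 -> i64 (one expansion
  // step halves the type), while getTypeToExpandTo() loops until the type is
  // legal, so i128 -> i64 -> i32 and the call returns i32.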

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }

  struct IntrinsicInfo {
    unsigned     opc = 0;          // target opcode
    EVT          memVT;            // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    int          offset = 0;       // offset off of ptrVal
    uint64_t     size = 0;         // the size of the memory location
                                   // (taken from memVT if zero)
    MaybeAlign align = Align(1);   // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }
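
  // For example, a hypothetical DSP-style target that only implements Q15
  // arithmetic could override isSupportedFixedPointOperation() to accept
  // ISD::SMULFIX on MVT::i16 only when Scale == 15; then
  // getFixedPointOperationAction(ISD::SMULFIX, MVT::i16, 15) remains Legal
  // while every other scale falls back to Expand.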

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
      default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }
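
  // The .def expansion above generates one case per constrained operation,
  // e.g. ISD::STRICT_FADD inherits the action registered for ISD::FADD, and
  // the strict comparison opcodes inherit the action of ISD::SETCC.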

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions. LegalOnly is an optional convenience for code paths traversed
  /// pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional convenience
  /// for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
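
  // Quick arithmetic check: with a 64-bit index type, case values spanning
  // [10, 70] give Range = 70 - 10 + 1 = 61 <= 64, so the range fits in a
  // machine word; values spanning [0, 100] give Range = 101 and do not.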
1166 
1167   /// Return true if lowering to a jump table is suitable for a set of case
1168   /// clusters which may contain \p NumCases cases, \p Range range of values.
1169   virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
1170                                       uint64_t Range, ProfileSummaryInfo *PSI,
1171                                       BlockFrequencyInfo *BFI) const;
1172 
1173   /// Return true if lowering to a bit test is suitable for a set of case
1174   /// clusters which contains \p NumDests unique destinations, \p Low and
1175   /// \p High as its lowest and highest case values, and expects \p NumCmps
1176   /// case value comparisons. Check if the number of destinations, comparison
1177   /// metric, and range are all suitable.
isSuitableForBitTests(unsigned NumDests,unsigned NumCmps,const APInt & Low,const APInt & High,const DataLayout & DL)1178   bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
1179                              const APInt &Low, const APInt &High,
1180                              const DataLayout &DL) const {
1181     // FIXME: I don't think NumCmps is the correct metric: a single case and a
1182     // range of cases both require only one branch to lower. Just looking at the
1183     // number of clusters and destinations should be enough to decide whether to
1184     // build bit tests.
1185 
1186     // To lower a range with bit tests, the range must fit the bitwidth of a
1187     // machine word.
1188     if (!rangeFitsInWord(Low, High, DL))
1189       return false;
1190 
1191     // Decide whether it's profitable to lower this range with bit tests. Each
1192     // destination requires a bit test and branch, and there is an overall range
1193     // check branch. For a small number of clusters, separate comparisons might
1194     // be cheaper, and for many destinations, splitting the range might be
1195     // better.
1196     return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
1197            (NumDests == 3 && NumCmps >= 6);
1198   }
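
  // For example, under the heuristic above, a switch with three distinct
  // destinations is only worth lowering to bit tests once it needs at least
  // six case-value comparisons and its values span at most one machine word
  // (a 64-bit index width is assumed in this sketch):
  //
  //   APInt Low(64, 0), High(64, 63);  // 64-value range; fits a 64-bit word
  //   isSuitableForBitTests(3, 5, Low, High, DL);  // false: too few cmps
  //   isSuitableForBitTests(3, 6, Low, High, DL);  // true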

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }
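
  // The lookup above packs one 4-bit LegalizeAction per extension kind into
  // each LoadExtActions entry. As a sketch, decoding the action for a
  // sign-extending load (assuming ISD::SEXTLOAD has enum value 2):
  //
  //   unsigned Shift = 4 * ISD::SEXTLOAD;  // bit offset 8
  //   LegalizeAction Action =
  //       (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);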

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation is legal or has a
  /// custom lowering on this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked load should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
  }

  /// Return true if the specified indexed masked load is legal on this target.
  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked store should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
  }

  /// Return true if the specified indexed masked store is legal on this target.
  bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Returns true if VT is a legal index type for masked gathers/scatters on
  /// this target, i.e. an extend of a gather/scatter index to VT can be
  /// removed.
  virtual bool shouldRemoveExtendFromGSIndex(EVT VT) const { return false; }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    // Otherwise, step through successively larger types of the same class
    // until one is found that is both legal and not itself promoted.
    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
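
  // A sketch of the autopromotion walk above: if a target marks ISD::CTLZ as
  // Promote for MVT::i16 without an explicit AddPromotedToType entry, the
  // loop tries MVT::i32, MVT::i64, ... and returns the first type that is
  // both legal and not itself promoted for the operation; on a typical
  // 32/64-bit target that is MVT::i32:
  //
  //   MVT PromotedVT = getTypeToPromoteTo(ISD::CTLZ, MVT::i16);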

  /// Return the EVT corresponding to this LLVM type. The mapping is fixed by
  /// the LLVM type system, except for pointers, whose size depends on the
  /// target.  If AllowUnknown is true, this will return MVT::Other for types
  /// with no EVT counterpart (e.g. structs), otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }
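
  // Examples of the mapping above (Ctx is an LLVMContext; the pointer result
  // depends on the DataLayout, a 64-bit target is assumed here):
  //
  //   getValueType(DL, Type::getInt32Ty(Ctx));    // MVT::i32
  //   getValueType(DL, Type::getInt8PtrTy(Ctx));  // MVT::i64 (pointer width)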

  /// Return the EVT corresponding to this LLVM type when it is held in memory,
  /// using the target's in-memory pointer type. See getValueType.
  EVT getMemValueType(const DataLayout &DL, Type *Ty,
                      bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerMemTy(DL, PTy->getAddressSpace());
    else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
      Type *Elm = VTy->getElementType();
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getElementCount());
    }

    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area.  This is the actual alignment, not
  /// its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces.  For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type.  For an i140
  /// on a 32-bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
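
  // The i140 case from the comment above, as a sketch (Ctx is an LLVMContext;
  // a target with 32-bit registers is assumed):
  //
  //   EVT VT = EVT::getIntegerVT(Ctx, 140);
  //   unsigned NumRegs = getNumRegisters(Ctx, VT);  // (140 + 31) / 32 == 5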

  /// Certain combinations of ABIs, targets and features require that types
  /// are legal for some operations and not for other operations.
  /// For MIPS, all vector types must be passed through the integer register
  /// set.
  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                            CallingConv::ID CC, EVT VT) const {
    return getRegisterType(Context, VT);
  }

  /// Certain targets require unusual breakdowns of certain types. For MIPS,
  /// this occurs when a vector type is used, as vectors are passed through the
  /// integer register set.
  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                                 CallingConv::ID CC,
                                                 EVT VT) const {
    return getNumRegisters(Context, VT);
  }

  /// Certain targets have context-sensitive alignment requirements, where one
  /// type has the alignment requirement of another type.
  virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
                                              DataLayout DL) const {
    return DL.getABITypeAlign(ArgTy);
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// Return true if it is profitable to reduce a load to a smaller type.
  /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    // By default, assume that it is cheaper to extract a subvector from a wide
    // vector load rather than creating multiple narrow vector loads.
    if (NewVT.isVector() && !Load->hasOneUse())
      return false;

    return true;
  }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first?  This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }

  /// Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations to be glued together
  ///
  /// This function returns the maximum number of store operations permitted
  /// to glue together during lowering of llvm.memcpy. The value is set by
  /// the target at the performance threshold for such a replacement.
  virtual unsigned getMaxGluedStoresPerMemcpy() const {
    return MaxGluedStoresPerMemcpy;
  }

  /// Get maximum # of load operations permitted for memcmp
  ///
  /// This function returns the maximum number of load operations permitted
  /// to replace a call to memcmp. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
  }

  /// Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also returns
  /// whether the unaligned memory access is "fast" in the last argument by
  /// reference. This is used, for example, in situations where an array
  /// copy/move/set is converted to a sequence of store operations. Its use
  /// helps to ensure that such replacements don't generate code that causes an
  /// alignment error (trap) on the target machine.
  virtual bool allowsMisalignedMemoryAccesses(
      EVT, unsigned AddrSpace = 0, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool * /*Fast*/ = nullptr) const {
    return false;
  }

  /// LLT handling variant.
  virtual bool allowsMisalignedMemoryAccesses(
      LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool * /*Fast*/ = nullptr) const {
    return false;
  }

  /// This function returns true if the memory access is aligned or if the
  /// target allows this specific unaligned memory access. If the access is
  /// allowed, the optional final parameter returns if the access is also fast
  /// (as defined by the target).
  bool allowsMemoryAccessForAlignment(
      LLVMContext &Context, const DataLayout &DL, EVT VT,
      unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const;

  /// Return true if the memory access of this type is aligned or if the target
  /// allows this specific unaligned access for the given MachineMemOperand.
  /// If the access is allowed, the optional final parameter returns if the
  /// access is also fast (as defined by the target).
  bool allowsMemoryAccessForAlignment(LLVMContext &Context,
                                      const DataLayout &DL, EVT VT,
                                      const MachineMemOperand &MMO,
                                      bool *Fast = nullptr) const;

  /// Return true if the target supports a memory access of this type for the
  /// given address space and alignment. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
  virtual bool
  allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                     unsigned AddrSpace = 0, Align Alignment = Align(1),
                     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
                     bool *Fast = nullptr) const;

  /// Return true if the target supports a memory access of this type for the
  /// given MachineMemOperand. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          const MachineMemOperand &MMO,
                          bool *Fast = nullptr) const;

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  virtual EVT
  getOptimalMemOpType(const MemOp &Op,
                      const AttributeList & /*FuncAttributes*/) const {
    return MVT::Other;
  }

  /// LLT returning variant.
  virtual LLT
  getOptimalMemOpLLT(const MemOp &Op,
                     const AttributeList & /*FuncAttributes*/) const {
    return LLT();
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Return the lower limit for the number of entries in a jump table.
  virtual unsigned getMinimumJumpTableEntries() const;

  /// Return lower limit of the density in a jump table.
  unsigned getMinimumJumpTableDensity(bool OptForSize) const;

  /// Return upper limit for number of entries in a jump table.
  /// Zero if no limit.
  unsigned getMaximumJumpTableSize() const;

  virtual bool isJumpTableRelative() const;

  /// If a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  Register getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  virtual Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    return Register();
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  virtual Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    return Register();
  }

  virtual bool needsFixedCatchObjects() const {
    report_fatal_error("Funclet EH is not implemented for this target");
  }

  /// Return the minimum stack alignment of an argument.
  Align getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  Align getMinFunctionAlignment() const { return MinFunctionAlignment; }

  /// Return the preferred function alignment.
  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }

  /// Return the preferred loop alignment.
  virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
    return PrefLoopAlignment;
  }

  /// Should loops be aligned even when the function is marked OptSize (but not
  /// MinSize).
  virtual bool alignLoopsWithOptSize() const {
    return false;
  }

  /// If the target has a standard location for the stack protector guard,
  /// returns the address of that location. Otherwise, returns nullptr.
  /// DEPRECATED: please override useLoadStackGuardNode and customize
  ///             LOAD_STACK_GUARD, or customize \@llvm.stackguard().
  virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;

  /// Inserts necessary declarations for SSP (stack protection) purpose.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual void insertSSPDeclarations(Module &M) const;

  /// Return the variable that's previously inserted by insertSSPDeclarations,
  /// if any, otherwise return nullptr. Should be used only when
  /// getIRStackGuard returns nullptr.
  virtual Value *getSDagStackGuard(const Module &M) const;

  /// If this function returns true, stack protection checks should XOR the
  /// frame pointer (or whichever pointer is used to address locals) into the
  /// stack guard value before checking it. getIRStackGuard must return nullptr
  /// if this returns true.
  virtual bool useStackGuardXorFP() const { return false; }

  /// If the target has a standard stack protection check function that
  /// performs validation and error handling, returns the function. Otherwise,
  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual Function *getSSPStackGuardCheck(const Module &M) const;

protected:
  Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                            bool UseTLS) const;

public:
  /// Returns the target-specific address of the unsafe stack pointer.
  virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;

  /// Returns true if the target emits stack probes through a probe symbol.
  virtual bool hasStackProbeSymbol(MachineFunction &MF) const { return false; }

  /// Returns true if the target emits stack probes inline.
  virtual bool hasInlineStackProbe(MachineFunction &MF) const { return false; }

  /// Returns the name of the symbol used to emit stack probes, or the empty
  /// string if not applicable.
  virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
    return "";
  }

  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
  /// are happy to sink it into basic blocks. A cast may be free, but not
  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;

  /// Return true if the pointer arguments to CI should be aligned by aligning
  /// the object whose address is being passed. If so then MinSize is set to the
  /// minimum size the object must be to be aligned and PrefAlign is set to the
  /// preferred alignment.
  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      unsigned & /*PrefAlign*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
                                              Type *Ty) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// Returns the maximum atomic operation size (in bits) supported by
  /// the backend. Atomic operations greater than this size (as well
  /// as ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  unsigned getMaxAtomicSizeInBitsSupported() const {
    return MaxAtomicSizeInBitsSupported;
  }

  /// Returns the size of the smallest cmpxchg or ll/sc instruction
  /// the backend supports.  Any smaller operations are widened in
  /// AtomicExpandPass.
  ///
  /// Note that *unlike* operations above the maximum size, atomic ops
  /// are still natively supported below the minimum; they just
  /// require a more complex expansion.
  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }

  /// Whether the target supports unaligned atomic operations.
  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }

  /// Whether AtomicExpandPass should automatically insert fences and reduce
  /// ordering for this atomic. This should be true for most architectures with
  /// weak memory ordering. Defaults to false.
  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
    return false;
  }

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Perform a masked atomicrmw using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend.
  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
                                              AtomicRMWInst *AI,
                                              Value *AlignedAddr, Value *Incr,
                                              Value *Mask, Value *ShiftAmt,
                                              AtomicOrdering Ord) const {
    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
  }

  /// Perform a masked cmpxchg using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend.
  virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
      IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
      Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
    llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
  }

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
  ///   if shouldInsertFencesForAtomic returns true.
  ///
  /// Inst is the original atomic instruction, prior to other expansions that
  /// may be performed.
  ///
  /// This function should either return a nullptr, or a pointer to an IR-level
  ///   Instruction*. Even complex fence sequences can be represented by a
  ///   single Instruction* through an intrinsic to be lowered later.
  /// Backends should override this method to produce target-specific
  ///   intrinsics for their fences.
  /// FIXME: Please note that the default implementation here in terms of
  ///   IR-level fences exists for historical/compatibility reasons and is
  ///   *unsound* ! Fences cannot, in general, be used to restore sequential
  ///   consistency. For example, consider:
  /// atomic<int> x = y = 0;
  /// int r1, r2, r3, r4;
  /// Thread 0:
  ///   x.store(1);
  /// Thread 1:
  ///   y.store(1);
  /// Thread 2:
  ///   r1 = x.load();
  ///   r2 = y.load();
  /// Thread 3:
  ///   r3 = y.load();
  ///   r4 = x.load();
  ///  r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
  ///  seq_cst. But if they are lowered to monotonic accesses, no amount of
  ///  IR-level fences can prevent it.
  /// @{
  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                        AtomicOrdering Ord) const {
    if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }

  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
                                         Instruction *Inst,
                                         AtomicOrdering Ord) const {
    if (isAcquireOrStronger(Ord))
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }
  /// @}

  // Emits code that executes when the comparison result in the ll/sc
  // expansion of a cmpxchg instruction is such that the store-conditional will
  // not execute.  This makes it possible to balance out the load-linked with
  // a dedicated instruction, if desired.
  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}

  /// Returns true if the given (atomic) store should be expanded by the
  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return false;
  }

  /// Returns true if arguments should be sign-extended in lib calls.
  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
    return IsSigned;
  }

  /// Returns true if arguments should be extended in lib calls.
  virtual bool shouldExtendTypeInLibCall(EVT Type) const {
    return true;
  }

  /// Returns how the given (atomic) load should be expanded by the
  /// IR-level AtomicExpand pass.
  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the given atomic cmpxchg should be expanded by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the IR-level AtomicExpand pass should expand the given
  /// AtomicRMW, if at all. Default is to never expand.
  virtual AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
    return RMW->isFloatingPointOperation() ?
      AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
  }

  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as fetch_add of 0) can be turned into a fence followed by an
  /// atomic load. This may sound useless, but it makes it possible for the
  /// processor to keep the cacheline shared, dramatically improving
  /// performance. And such idempotent RMWs are useful for implementing some
  /// kinds of locks, see for example (justification + benchmarks):
  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
  /// This method tries doing that transformation, returning the atomic load if
  /// it succeeds, and nullptr otherwise.
  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
  /// another round of expansion.
  virtual LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }

  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
  /// SIGN_EXTEND, or ANY_EXTEND).
  virtual ISD::NodeType getExtendForAtomicOps() const {
    return ISD::ZERO_EXTEND;
  }

  /// Returns how the platform's atomic compare and swap expects its comparison
  /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
  /// separate from getExtendForAtomicOps, which is concerned with the
  /// sign-extension of the instruction's output, whereas here we are concerned
  /// with the sign-extension of the input. For targets with compare-and-swap
  /// instructions (or sub-word comparisons in their LL/SC loop expansions),
  /// the input can be ANY_EXTEND, but the output will still have a specific
  /// extension.
  virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
    return ISD::ANY_EXTEND;
  }

  /// @}

  /// Returns true if we should normalize
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
  /// that it saves us from materializing N0 and N1 in an integer register.
  /// Targets that are able to perform and/or on flags should return false here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
      Action != TypeSplitVector;
  }

  virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }

  /// Return true if a select of constants (select Cond, C1, C2) should be
  /// transformed into simple math ops with the condition value. For example:
  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
    return false;
  }

  /// Return true if it is profitable to transform an integer
  /// multiplication-by-constant into simpler operations like shifts and adds.
  /// This may be true if the target does not directly support the
  /// multiplication operation for the specified type or the sequence of simpler
  /// ops is faster than the multiply.
  virtual bool decomposeMulByConstant(LLVMContext &Context,
                                      EVT VT, SDValue C) const {
    return false;
  }

  /// Return true if it is more correct/profitable to use strict FP_TO_INT
  /// conversion operations - canonicalizing the FP source value instead of
  /// converting all cases and then selecting based on value.
  /// This may be true if the target throws exceptions for out of bounds
  /// conversions or has fast FP CMOV.
  virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
                                        bool IsSigned) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //
protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type.  See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate the minimum number of entries required to generate a jump table.
  void setMinimumJumpTableEntries(unsigned Val);

  /// Indicate the maximum number of entries in jump tables.
  /// Set to zero to generate unlimited jump tables.
  void setMaximumJumpTableSize(unsigned);

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  void setStackPointerRegisterToSaveRestore(Register R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand logic operations on comparison
  /// predicates into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true);

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }
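
  // Typical use from a target constructor whose 64-bit divide is much slower
  // than its 32-bit divide (the pattern some X86 subtargets use): when both
  // operands fit in 32 bits, a runtime check and a 32-bit divide are emitted
  // instead.
  //
  //   addBypassSlowDiv(64, 32);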

  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    RegClassForVT[VT.SimpleTy] = RC;
  }
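
  // Typical use from a target constructor (the register class is
  // target-specific; ARM's general-purpose registers shown for illustration):
  //
  //   addRegisterClass(MVT::i32, &ARM::GPRRegClass);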

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties(const TargetRegisterInfo *TRI);

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it. Note that VT may refer to either
  /// the type of a result or that of an operand of Op.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
  }
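
  // Typical uses from a target's TargetLowering constructor (illustrative
  // opcode/type pairs; the right choices depend on the ISA):
  //
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand);  // no divide insn
  //   setOperationAction(ISD::FSIN, MVT::f64, Expand);  // becomes a libcall
  //   setOperationAction(ISD::BR_JT, MVT::Other, Custom);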

  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
           MemVT.isValid() && "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    unsigned Shift = 4 * ExtType;
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
  }
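
  // For example, a target without a sign-extending 16-bit load would mark the
  // i16->i32 extending load for expansion (a plain load plus shifts):
  //
  //   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Expand);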

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
  }

  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
    setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) {
    setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
  }

  /// Indicate that the specified indexed masked load does or does not work with
  /// the specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode masked loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
                                  LegalizeAction Action) {
    setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
  }

  /// Indicate that the specified indexed masked store does or does not work
  /// with the specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode masked stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
                                   LegalizeAction Action) {
    setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
  }

  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    // The lower 3 bits of the SimpleTy select the Nth 4-bit group within the
    // 32-bit value, and the upper bits index into the second dimension of the
    // array to select which 32-bit value to use.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
    CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
  }
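
  // A sketch of the packing above: assuming MVT::i32 has SimpleTy value 5,
  // the action lands in word 5 >> 3 == 0 at bit offset 4 * (5 & 0x7) == 20:
  //
  //   setCondCodeAction(ISD::SETUGT, MVT::i32, Expand);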
2230 
2231   /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2232   /// to trying a larger integer/fp until it can find one that works. If that
2233   /// default is insufficient, this method can be used by the target to override
2234   /// the default.
2235   void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2236     PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2237   }
2238 
2239   /// Convenience method to set an operation to Promote and specify the type
2240   /// in a single call.
2241   void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2242     setOperationAction(Opc, OrigVT, Promote);
2243     AddPromotedToType(Opc, OrigVT, DestVT);
2244   }
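  // For instance, a target that only supports bitwise AND on 32-bit registers
  // might promote the i16 form in a single call (hypothetical sketch):
  //
  //   setOperationPromotedToType(ISD::AND, MVT::i16, MVT::i32);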
2245 
2246   /// Targets should invoke this method for each target independent node that
2247   /// they want to provide a custom DAG combiner for by implementing the
2248   /// PerformDAGCombine virtual method.
2249   void setTargetDAGCombine(ISD::NodeType NT) {
2250     assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
2251     TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
2252   }
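  // A target that wants PerformDAGCombine callbacks for shift nodes would
  // register them like so (illustrative sketch):
  //
  //   setTargetDAGCombine(ISD::SHL);
  //   setTargetDAGCombine(ISD::SRL);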
2253 
2254   /// Set the target's minimum function alignment.
2255   void setMinFunctionAlignment(Align Alignment) {
2256     MinFunctionAlignment = Alignment;
2257   }
2258 
2259   /// Set the target's preferred function alignment.  This should be set if
2260   /// there is a performance benefit to higher-than-minimum alignment.
2261   void setPrefFunctionAlignment(Align Alignment) {
2262     PrefFunctionAlignment = Alignment;
2263   }
2264 
2265   /// Set the target's preferred loop alignment. A default alignment of one
2266   /// means the target does not care about loop alignment. The target may also
2267   /// override getPrefLoopAlignment to provide per-loop values.
2268   void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2269 
2270   /// Set the minimum stack alignment of an argument.
2271   void setMinStackArgumentAlignment(Align Alignment) {
2272     MinStackArgumentAlignment = Alignment;
2273   }
2274 
2275   /// Set the maximum atomic operation size supported by the
2276   /// backend. Atomic operations greater than this size (as well as
2277   /// ones that are not naturally aligned) will be expanded by
2278   /// AtomicExpandPass into an __atomic_* library call.
2279   void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2280     MaxAtomicSizeInBitsSupported = SizeInBits;
2281   }
2282 
2283   /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2284   void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2285     MinCmpXchgSizeInBits = SizeInBits;
2286   }
2287 
2288   /// Sets whether unaligned atomic operations are supported.
2289   void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2290     SupportsUnalignedAtomics = UnalignedSupported;
2291   }
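  // Taken together, a hypothetical 64-bit target with 32-bit ll/sc support
  // might configure its atomics as follows (illustrative sketch):
  //
  //   setMaxAtomicSizeInBitsSupported(64); // wider ops become __atomic_* calls
  //   setMinCmpXchgSizeInBits(32);         // narrower cmpxchg ops get widened
  //   setSupportsUnalignedAtomics(false);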
2292 
2293 public:
2294   //===--------------------------------------------------------------------===//
2295   // Addressing mode description hooks (used by LSR etc).
2296   //
2297 
2298   /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2299   /// instructions reading the address. This allows as much computation as
2300   /// possible to be done in the address mode for that operand. This hook also
2301   /// lets targets indicate when this should be done for intrinsics which
2302   /// load/store.
2303   virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2304                                     SmallVectorImpl<Value*> &/*Ops*/,
2305                                     Type *&/*AccessTy*/) const {
2306     return false;
2307   }
2308 
2309   /// This represents an addressing mode of:
2310   ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2311   /// If BaseGV is null,  there is no BaseGV.
2312   /// If BaseOffs is zero, there is no base offset.
2313   /// If HasBaseReg is false, there is no base register.
2314   /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
2315   /// no scale.
2316   struct AddrMode {
2317     GlobalValue *BaseGV = nullptr;
2318     int64_t      BaseOffs = 0;
2319     bool         HasBaseReg = false;
2320     int64_t      Scale = 0;
2321     AddrMode() = default;
2322   };
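  // For example, the address GV + 16 + 4*Idx (a global plus a constant byte
  // offset plus a scaled index register) would be described as (illustrative):
  //
  //   AddrMode AM;
  //   AM.BaseGV = GV;        // the global
  //   AM.BaseOffs = 16;      // constant byte offset
  //   AM.HasBaseReg = false; // no unscaled base register
  //   AM.Scale = 4;          // plus 4 * ScaleReg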
2323 
2324   /// Return true if the addressing mode represented by AM is legal for this
2325   /// target, for a load/store of the specified type.
2326   ///
2327   /// The type may be VoidTy, in which case only return true if the addressing
2328   /// mode is legal for a load/store of any legal type.  TODO: Handle
2329   /// pre/postinc as well.
2330   ///
2331   /// If the address space cannot be determined, it will be -1.
2332   ///
2333   /// TODO: Remove default argument
2334   virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2335                                      Type *Ty, unsigned AddrSpace,
2336                                      Instruction *I = nullptr) const;
2337 
2338   /// Return the cost of the scaling factor used in the addressing mode
2339   /// represented by AM for this target, for a load/store of the specified type.
2340   ///
2341   /// If the AM is supported, the return value must be >= 0.
2342   /// If the AM is not supported, it returns a negative value.
2343   /// TODO: Handle pre/postinc as well.
2344   /// TODO: Remove default argument
2345   virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
2346                                    Type *Ty, unsigned AS = 0) const {
2347     // Default: assume that any scaling factor used in a legal AM is free.
2348     if (isLegalAddressingMode(DL, AM, Ty, AS))
2349       return 0;
2350     return -1;
2351   }
2352 
2353   /// Return true if the specified immediate is a legal icmp immediate, that is,
2354   /// the target has icmp instructions which can compare a register against the
2355   /// immediate without having to materialize the immediate into a register.
2356   virtual bool isLegalICmpImmediate(int64_t) const {
2357     return true;
2358   }
2359 
2360   /// Return true if the specified immediate is a legal add immediate, that is, the
2361   /// target has add instructions which can add a register with the immediate
2362   /// without having to materialize the immediate into a register.
2363   virtual bool isLegalAddImmediate(int64_t) const {
2364     return true;
2365   }
2366 
2367   /// Return true if the specified immediate is legal for the value input of a
2368   /// store instruction.
2369   virtual bool isLegalStoreImmediate(int64_t Value) const {
2370     // Default implementation assumes that at least 0 works since it is likely
2371     // that a zero register exists or a zero immediate is allowed.
2372     return Value == 0;
2373   }
2374 
2375   /// Return true if it's significantly cheaper to shift a vector by a uniform
2376   /// scalar than by an amount which will vary across each lane. On x86 before
2377   /// AVX2 for example, there is a "psllw" instruction for the former case, but
2378   /// no simple instruction for a general "a << b" operation on vectors.
2379   /// This should also apply to lowering for vector funnel shifts (rotates).
2380   virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2381     return false;
2382   }
2383 
2384   /// Given a shuffle vector SVI representing a vector splat, return a new
2385   /// scalar type of size equal to SVI's scalar type if the new type is more
2386   /// profitable. Returns nullptr otherwise. For example under MVE float splats
2387   /// are converted to integer to prevent the need to move from SPR to GPR
2388   /// registers.
2389   virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const {
2390     return nullptr;
2391   }
2392 
2393   /// Given a set of interconnected phis of type 'From' that are loaded/stored
2394   /// or bitcast to type 'To', return true if the set should be converted to
2395   /// 'To'.
2396   virtual bool shouldConvertPhiType(Type *From, Type *To) const {
2397     return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2398            (To->isIntegerTy() || To->isFloatingPointTy());
2399   }
2400 
2401   /// Returns true if the opcode is a commutative binary operation.
2402   virtual bool isCommutativeBinOp(unsigned Opcode) const {
2403     // FIXME: This should get its info from the td file.
2404     switch (Opcode) {
2405     case ISD::ADD:
2406     case ISD::SMIN:
2407     case ISD::SMAX:
2408     case ISD::UMIN:
2409     case ISD::UMAX:
2410     case ISD::MUL:
2411     case ISD::MULHU:
2412     case ISD::MULHS:
2413     case ISD::SMUL_LOHI:
2414     case ISD::UMUL_LOHI:
2415     case ISD::FADD:
2416     case ISD::FMUL:
2417     case ISD::AND:
2418     case ISD::OR:
2419     case ISD::XOR:
2420     case ISD::SADDO:
2421     case ISD::UADDO:
2422     case ISD::ADDC:
2423     case ISD::ADDE:
2424     case ISD::SADDSAT:
2425     case ISD::UADDSAT:
2426     case ISD::FMINNUM:
2427     case ISD::FMAXNUM:
2428     case ISD::FMINNUM_IEEE:
2429     case ISD::FMAXNUM_IEEE:
2430     case ISD::FMINIMUM:
2431     case ISD::FMAXIMUM:
2432       return true;
2433     default: return false;
2434     }
2435   }
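  // A backend with additional commutative target-specific nodes could extend
  // the default, e.g. (hypothetical MyISD opcode, sketch only):
  //
  //   bool MyTargetLowering::isCommutativeBinOp(unsigned Opcode) const {
  //     if (Opcode == MyISD::AVG)
  //       return true;
  //     return TargetLoweringBase::isCommutativeBinOp(Opcode);
  //   }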
2436 
2437   /// Return true if the node is a math/logic binary operator.
2438   virtual bool isBinOp(unsigned Opcode) const {
2439     // A commutative binop must be a binop.
2440     if (isCommutativeBinOp(Opcode))
2441       return true;
2442     // These are non-commutative binops.
2443     switch (Opcode) {
2444     case ISD::SUB:
2445     case ISD::SHL:
2446     case ISD::SRL:
2447     case ISD::SRA:
2448     case ISD::SDIV:
2449     case ISD::UDIV:
2450     case ISD::SREM:
2451     case ISD::UREM:
2452     case ISD::FSUB:
2453     case ISD::FDIV:
2454     case ISD::FREM:
2455       return true;
2456     default:
2457       return false;
2458     }
2459   }
2460 
2461   /// Return true if it's free to truncate a value of type FromTy to type
2462   /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
2463   /// by referencing its sub-register AX.
2464   /// Targets must return false when FromTy <= ToTy.
2465   virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2466     return false;
2467   }
2468 
2469   /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2470   /// whether a call is in tail position. Typically this means that both results
2471   /// would be assigned to the same register or stack slot, but it could mean
2472   /// the target performs adequate checks of its own before proceeding with the
2473   /// tail call.  Targets must return false when FromTy <= ToTy.
2474   virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2475     return false;
2476   }
2477 
2478   virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
2479     return false;
2480   }
2481 
2482   virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2483 
2484   /// Return true if the extension represented by \p I is free.
2485   /// Unlike the is[Z|FP]ExtFree family which is based on types,
2486   /// this method can use the context provided by \p I to decide
2487   /// whether or not \p I is free.
2488   /// This method extends the behavior of the is[Z|FP]ExtFree family.
2489   /// In other words, if is[Z|FP]ExtFree returns true, then this method
2490   /// returns true as well. The converse is not true.
2491   /// The target can perform the adequate checks by overriding isExtFreeImpl.
2492   /// \pre \p I must be a sign, zero, or fp extension.
2493   bool isExtFree(const Instruction *I) const {
2494     switch (I->getOpcode()) {
2495     case Instruction::FPExt:
2496       if (isFPExtFree(EVT::getEVT(I->getType()),
2497                       EVT::getEVT(I->getOperand(0)->getType())))
2498         return true;
2499       break;
2500     case Instruction::ZExt:
2501       if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2502         return true;
2503       break;
2504     case Instruction::SExt:
2505       break;
2506     default:
2507       llvm_unreachable("Instruction is not an extension");
2508     }
2509     return isExtFreeImpl(I);
2510   }
2511 
2512   /// Return true if \p Load and \p Ext can form an ExtLoad.
2513   /// For example, in AArch64
2514   ///   %L = load i8, i8* %ptr
2515   ///   %E = zext i8 %L to i32
2516   /// can be lowered into one load instruction
2517   ///   ldrb w0, [x0]
2518   bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2519                  const DataLayout &DL) const {
2520     EVT VT = getValueType(DL, Ext->getType());
2521     EVT LoadVT = getValueType(DL, Load->getType());
2522 
2523     // If the load has other users and the truncate is not free, the ext
2524     // probably isn't free.
2525     if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2526         !isTruncateFree(Ext->getType(), Load->getType()))
2527       return false;
2528 
2529     // Check whether the target supports casts folded into loads.
2530     unsigned LType;
2531     if (isa<ZExtInst>(Ext))
2532       LType = ISD::ZEXTLOAD;
2533     else {
2534       assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2535       LType = ISD::SEXTLOAD;
2536     }
2537 
2538     return isLoadExtLegal(LType, VT, LoadVT);
2539   }
2540 
2541   /// Return true if any actual instruction that defines a value of type FromTy
2542   /// implicitly zero-extends the value to ToTy in the result register.
2543   ///
2544   /// The function should return true when it is likely that the truncate can
2545   /// be freely folded with an instruction defining a value of FromTy. If
2546   /// the defining instruction is unknown (because you're looking at a
2547   /// function argument, PHI, etc.) then the target may require an
2548   /// explicit truncate, which is not necessarily free, but this function
2549   /// does not deal with those cases.
2550   /// Targets must return false when FromTy >= ToTy.
2551   virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2552     return false;
2553   }
2554 
2555   virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
2556     return false;
2557   }
2558 
2559   /// Return true if sign-extension from FromTy to ToTy is cheaper than
2560   /// zero-extension.
2561   virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2562     return false;
2563   }
2564 
2565   /// Return true if sinking I's operands to the same basic block as I is
2566   /// profitable, e.g. because the operands can be folded into a target
2567   /// instruction during instruction selection. After calling the function
2568   /// \p Ops contains the Uses to sink ordered by dominance (dominating users
2569   /// come first).
2570   virtual bool shouldSinkOperands(Instruction *I,
2571                                   SmallVectorImpl<Use *> &Ops) const {
2572     return false;
2573   }
2574 
2575   /// Return true if the target can combine into a paired load two loaded
2576   /// values of type LoadedType that lie next to each other in memory.
2577   /// RequiredAlignment gives the minimal alignment constraints that must be met
2578   /// to be able to select this paired load.
2579   ///
2580   /// This information is *not* used to generate actual paired loads, but it is
2581   /// used to generate a sequence of loads that is easier to combine into a
2582   /// paired load.
2583   /// For instance, something like this:
2584   /// a = load i64* addr
2585   /// b = trunc i64 a to i32
2586   /// c = lshr i64 a, 32
2587   /// d = trunc i64 c to i32
2588   /// will be optimized into:
2589   /// b = load i32* addr1
2590   /// d = load i32* addr2
2591   /// Where addr1 = addr2 +/- sizeof(i32).
2592   ///
2593   /// In other words, unless the target performs a post-isel load combining,
2594   /// this information should not be provided because it will generate more
2595   /// loads.
2596   virtual bool hasPairedLoad(EVT /*LoadedType*/,
2597                              Align & /*RequiredAlignment*/) const {
2598     return false;
2599   }
2600 
2601   /// Return true if the target has a vector blend instruction.
2602   virtual bool hasVectorBlend() const { return false; }
2603 
2604   /// Get the maximum supported factor for interleaved memory accesses.
2605   /// Default to be the minimum interleave factor: 2.
2606   virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2607 
2608   /// Lower an interleaved load to target specific intrinsics. Return
2609   /// true on success.
2610   ///
2611   /// \p LI is the vector load instruction.
2612   /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2613   /// \p Indices is the corresponding indices for each shufflevector.
2614   /// \p Factor is the interleave factor.
2615   virtual bool lowerInterleavedLoad(LoadInst *LI,
2616                                     ArrayRef<ShuffleVectorInst *> Shuffles,
2617                                     ArrayRef<unsigned> Indices,
2618                                     unsigned Factor) const {
2619     return false;
2620   }
2621 
2622   /// Lower an interleaved store to target specific intrinsics. Return
2623   /// true on success.
2624   ///
2625   /// \p SI is the vector store instruction.
2626   /// \p SVI is the shufflevector to RE-interleave the stored vector.
2627   /// \p Factor is the interleave factor.
2628   virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
2629                                      unsigned Factor) const {
2630     return false;
2631   }
2632 
2633   /// Return true if zero-extending the specific node Val to type VT2 is free
2634   /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2635   /// because it's folded such as X86 zero-extending loads).
2636   virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2637     return isZExtFree(Val.getValueType(), VT2);
2638   }
2639 
2640   /// Return true if an fpext operation is free (for instance, because
2641   /// single-precision floating-point numbers are implicitly extended to
2642   /// double-precision).
2643   virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2644     assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2645            "invalid fpext types");
2646     return false;
2647   }
2648 
2649   /// Return true if an fpext operation input to an \p Opcode operation is free
2650   /// (for instance, because half-precision floating-point numbers are
2651   /// implicitly extended to single precision) for an FMA instruction.
2652   virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
2653                                EVT DestVT, EVT SrcVT) const {
2654     assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2655            "invalid fpext types");
2656     return isFPExtFree(DestVT, SrcVT);
2657   }
2658 
2659   /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2660   /// extend node) is profitable.
2661   virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2662 
2663   /// Return true if an fneg operation is free to the point where it is never
2664   /// worthwhile to replace it with a bitwise operation.
2665   virtual bool isFNegFree(EVT VT) const {
2666     assert(VT.isFloatingPoint());
2667     return false;
2668   }
2669 
2670   /// Return true if an fabs operation is free to the point where it is never
2671   /// worthwhile to replace it with a bitwise operation.
2672   virtual bool isFAbsFree(EVT VT) const {
2673     assert(VT.isFloatingPoint());
2674     return false;
2675   }
2676 
2677   /// Return true if an FMA operation is faster than a pair of fmul and fadd
2678   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2679   /// returns true, otherwise fmuladd is expanded to fmul + fadd.
2680   ///
2681   /// NOTE: This may be called before legalization on types for which FMAs are
2682   /// not legal, but should return true if those types will eventually legalize
2683   /// to types that support FMAs. After legalization, it will only be called on
2684   /// types that support FMAs (via Legal or Custom actions).
2685   virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
2686                                           EVT) const {
2687     return false;
2688   }
2689 
2690   /// IR version
2691   virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
2692     return false;
2693   }
2694 
2695   /// Returns true if \p N may be combined with other nodes to form an
2696   /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
2697   /// will be distributed into an fadd/fsub.
2698   virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
2699     assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
2700             N->getOpcode() == ISD::FMUL) &&
2701            "unexpected node in FMAD forming combine");
2702     return isOperationLegal(ISD::FMAD, N->getValueType(0));
2703   }
2704 
2705   /// Return true if it's profitable to narrow operations of type VT1 to
2706   /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
2707   /// i32 to i16.
2708   virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
2709     return false;
2710   }
2711 
2712   /// Return true if it is beneficial to convert a load of a constant to
2713   /// just the constant itself.
2714   /// On some targets it might be more efficient to use a combination of
2715   /// arithmetic instructions to materialize the constant instead of loading it
2716   /// from a constant pool.
2717   virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
2718                                                  Type *Ty) const {
2719     return false;
2720   }
2721 
2722   /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2723   /// from this source type with this index. This is needed because
2724   /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
2725   /// the first element, and only the target knows which lowering is cheap.
2726   virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2727                                        unsigned Index) const {
2728     return false;
2729   }
2730 
2731   /// Try to convert an extract element of a vector binary operation into an
2732   /// extract element followed by a scalar operation.
2733   virtual bool shouldScalarizeBinop(SDValue VecOp) const {
2734     return false;
2735   }
2736 
2737   /// Return true if extraction of a scalar element from the given vector type
2738   /// at the given index is cheap. For example, if scalar operations occur on
2739   /// the same register file as vector operations, then an extract element may
2740   /// be a sub-register rename rather than an actual instruction.
2741   virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
2742     return false;
2743   }
2744 
2745   /// Try to convert math with an overflow comparison into the corresponding DAG
2746   /// node operation. Targets may want to override this independently of whether
2747   /// the operation is legal/custom for the given type because it may obscure
2748   /// matching of other patterns.
2749   virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
2750                                     bool MathUsed) const {
2751     // TODO: The default logic is inherited from code in CodeGenPrepare.
2752     // The opcode should not make a difference by default?
2753     if (Opcode != ISD::UADDO)
2754       return false;
2755 
2756     // Allow the transform as long as we have an integer type that is not
2757     // obviously illegal and unsupported and if the math result is used
2758     // besides the overflow check. On some targets (e.g. SPARC), it is
2759   /// not profitable to form an overflow op if the math result has no
2760     // concrete users.
2761     if (VT.isVector())
2762       return false;
2763     return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
2764   }
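  // A target might instead allow the transform for any legal scalar type when
  // the math result is used, e.g. (hypothetical override, sketch only):
  //
  //   bool MyTargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
  //                                               bool MathUsed) const {
  //     return MathUsed && !VT.isVector() && isTypeLegal(VT);
  //   }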
2765 
2766   // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
2767   // even if the vector itself has multiple uses.
2768   virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
2769     return false;
2770   }
2771 
2772   // Return true if CodeGenPrepare should consider splitting the large offset
2773   // of a GEP to make the GEP fit into the addressing mode so that it can be
2774   // sunk into the same blocks as its users.
2775   virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
2776 
2777   /// Return true if creating a shift of the type by the given
2778   /// amount is not profitable.
2779   virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
2780     return false;
2781   }
2782 
2783   //===--------------------------------------------------------------------===//
2784   // Runtime Library hooks
2785   //
2786 
2787   /// Rename the default libcall routine name for the specified libcall.
2788   void setLibcallName(RTLIB::Libcall Call, const char *Name) {
2789     LibcallRoutineNames[Call] = Name;
2790   }
2791 
2792   /// Get the libcall routine name for the specified libcall.
2793   const char *getLibcallName(RTLIB::Libcall Call) const {
2794     return LibcallRoutineNames[Call];
2795   }
2796 
2797   /// Override the default CondCode to be used to test the result of the
2798   /// comparison libcall against zero.
2799   void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
2800     CmpLibcallCCs[Call] = CC;
2801   }
2802 
2803   /// Get the CondCode that's to be used to test the result of the comparison
2804   /// libcall against zero.
2805   ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
2806     return CmpLibcallCCs[Call];
2807   }
2808 
2809   /// Set the CallingConv that should be used for the specified libcall.
2810   void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
2811     LibcallCallingConvs[Call] = CC;
2812   }
2813 
2814   /// Get the CallingConv that should be used for the specified libcall.
2815   CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
2816     return LibcallCallingConvs[Call];
2817   }
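  // Typical use of these hooks from a target constructor (an illustrative
  // sketch; the routine name is made up):
  //
  //   setLibcallName(RTLIB::SREM_I64, "__mytarget_srem64");
  //   setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::C);
  //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);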
2818 
2819   /// Execute target specific actions to finalize target lowering.
2820   /// This is used to set extra flags in MachineFrameInformation and to freeze
2821   /// the set of reserved registers.
2822   /// The default implementation just freezes the set of reserved registers.
2823   virtual void finalizeLowering(MachineFunction &MF) const;
2824 
2825   //===----------------------------------------------------------------------===//
2826   //  GlobalISel Hooks
2827   //===----------------------------------------------------------------------===//
2828   /// Check whether or not \p MI needs to be moved close to its uses.
2829   virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
2830 
2831 
2832 private:
2833   const TargetMachine &TM;
2834 
2835   /// Tells the code generator that the target has multiple (allocatable)
2836   /// condition registers that can be used to store the results of comparisons
2837   /// for use by selects and conditional branches. With multiple condition
2838   /// registers, the code generator will not aggressively sink comparisons into
2839   /// the blocks of their users.
2840   bool HasMultipleConditionRegisters;
2841 
2842   /// Tells the code generator that the target has BitExtract instructions.
2843   /// The code generator will aggressively sink "shift"s into the blocks of
2844   /// their users if the users will generate "and" instructions which can be
2845   /// combined with "shift" to BitExtract instructions.
2846   bool HasExtractBitsInsn;
2847 
2848   /// Tells the code generator to bypass slow divide or remainder
2849   /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
2850   /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
2851   /// div/rem when the operands are positive and less than 256.
2852   DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
2853 
2854   /// Tells the code generator that it shouldn't generate extra flow control
2855   /// instructions and should attempt to combine flow control instructions via
2856   /// predication.
2857   bool JumpIsExpensive;
2858 
2859   /// Information about the contents of the high-bits in boolean values held in
2860   /// a type wider than i1. See getBooleanContents.
2861   BooleanContent BooleanContents;
2862 
2863   /// Information about the contents of the high-bits in boolean values held in
2864   /// a type wider than i1. See getBooleanContents.
2865   BooleanContent BooleanFloatContents;
2866 
2867   /// Information about the contents of the high-bits in boolean vector values
2868   /// when the element type is wider than i1. See getBooleanContents.
2869   BooleanContent BooleanVectorContents;
2870 
2871   /// The target scheduling preference: shortest possible total cycles or lowest
2872   /// register usage.
2873   Sched::Preference SchedPreferenceInfo;
2874 
2875   /// The minimum alignment that any argument on the stack needs to have.
2876   Align MinStackArgumentAlignment;
2877 
2878   /// The minimum function alignment (used when optimizing for size, and to
2879   /// prevent explicitly provided alignment from leading to incorrect code).
2880   Align MinFunctionAlignment;
2881 
2882   /// The preferred function alignment (used when alignment is unspecified and
2883   /// optimizing for speed).
2884   Align PrefFunctionAlignment;
2885 
2886   /// The preferred loop alignment.
2887   Align PrefLoopAlignment;
2888 
2889   /// Size in bits of the maximum atomics size the backend supports.
2890   /// Accesses larger than this will be expanded by AtomicExpandPass.
2891   unsigned MaxAtomicSizeInBitsSupported;
2892 
2893   /// Size in bits of the minimum cmpxchg or ll/sc operation the
2894   /// backend supports.
2895   unsigned MinCmpXchgSizeInBits;
2896 
2897   /// This indicates if the target supports unaligned atomic operations.
2898   bool SupportsUnalignedAtomics;
2899 
2900   /// If set to a physical register, this specifies the register that
2901   /// llvm.stacksave/llvm.stackrestore should save and restore.
2902   Register StackPointerRegisterToSaveRestore;
2903 
2904   /// This indicates the default register class to use for each ValueType the
2905   /// target supports natively.
2906   const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2907   uint16_t NumRegistersForVT[MVT::LAST_VALUETYPE];
2908   MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2909 
2910   /// This indicates the "representative" register class to use for each
2911   /// ValueType the target supports natively. This information is used by the
2912   /// scheduler to track register pressure. By default, the representative
2913   /// register class is the largest legal super-reg register class of the
2914   /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2915   /// representative class would be GR32.
2916   const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
2917 
2918   /// This indicates the "cost" of the "representative" register class for each
2919   /// ValueType. The cost is used by the scheduler to approximate register
2920   /// pressure.
2921   uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
2922 
2923   /// For any value types we are promoting or expanding, this contains the value
2924   /// type that we are changing to.  For Expanded types, this contains one step
2925   /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
2926   /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
2927   /// the same type (e.g. i32 -> i32).
2928   MVT TransformToType[MVT::LAST_VALUETYPE];
2929 
2930   /// For each operation and each value type, keep a LegalizeAction that
2931   /// indicates how instruction selection should deal with the operation.  Most
2932   /// operations are Legal (aka, supported natively by the target), but
2933   /// operations that are not should be described.  Note that operations on
2934   /// non-legal value types are not described here.
2935   LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
2936 
2937   /// For each load extension type and each value type, keep a LegalizeAction
2938   /// that indicates how instruction selection should deal with a load of a
2939   /// specific value type and extension type. Uses 4 bits to store the action
2940   /// for each of the 4 load ext types.
2941   uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2942 
2943   /// For each value type pair keep a LegalizeAction that indicates whether a
2944   /// truncating store of a specific value type and truncating type is legal.
2945   LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2946 
2947   /// For each indexed mode and each value type, keep a quad of LegalizeAction
2948   /// that indicates how instruction selection should deal with the load /
2949   /// store / maskedload / maskedstore.
2950   ///
2951   /// The first dimension is the value_type for the reference. The second
2952   /// dimension represents the various modes for load store.
2953   uint16_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
2954 
2955   /// For each condition code (ISD::CondCode) keep a LegalizeAction that
2956   /// indicates how instruction selection should deal with the condition code.
2957   ///
2958   /// Because each CC action takes up 4 bits, we need to have the array size be
2959   /// large enough to fit all of the value types. This can be done by rounding
2960   /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
2961   uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
2962 
2963   ValueTypeActionImpl ValueTypeActions;
2964 
2965 private:
2966   LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
2967 
2968   /// Targets can specify ISD nodes that they would like PerformDAGCombine
2969   /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
2970   /// array.
2971   unsigned char
2972   TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2973 
2974   /// For operations that must be promoted to a specific type, this holds the
2975   /// destination type.  This map should be sparse, so don't hold it as an
2976   /// array.
2977   ///
2978   /// Targets add entries to this map with AddPromotedToType(..), clients access
2979   /// this with getTypeToPromoteTo(..).
2980   std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2981     PromoteToType;
2982 
2983   /// Stores the name of each libcall.
2984   const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
2985 
2986   /// The ISD::CondCode that should be used to test the result of each of the
2987   /// comparison libcall against zero.
2988   ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2989 
2990   /// Stores the CallingConv that should be used for each libcall.
2991   CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2992 
2993   /// Set default libcall names and calling conventions.
2994   void InitLibcalls(const Triple &TT);
2995 
2996   /// The bits of IndexedModeActions used to store the legalisation actions.
2997   /// We store the data as | ML | MS | L | S |, each field taking 4 bits.
2998   enum IndexedModeActionsBits {
2999     IMAB_Store = 0,
3000     IMAB_Load = 4,
3001     IMAB_MaskedStore = 8,
3002     IMAB_MaskedLoad = 12
3003   };
3004 
3005   void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3006                             LegalizeAction Action) {
3007     assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3008            (unsigned)Action < 0xf && "Table isn't big enough!");
3009     unsigned Ty = (unsigned)VT.SimpleTy;
3010     IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3011     IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
3012   }
3013 
3014   LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3015                                       unsigned Shift) const {
3016     assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3017            "Table isn't big enough!");
3018     unsigned Ty = (unsigned)VT.SimpleTy;
3019     return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3020   }
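  // Worked example of the packing (assuming the call sketched earlier):
  //
  //   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  //
  // stores the Legal nibble in bits [7:4] (IMAB_Load) of
  // IndexedModeActions[MVT::i32][ISD::PRE_INC], and
  //
  //   getIndexedModeAction(ISD::PRE_INC, MVT::i32, IMAB_Load)
  //
  // recovers it by shifting right by 4 and masking with 0xf.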
3021 
3022 protected:
3023   /// Return true if the extension represented by \p I is free.
3024   /// \pre \p I is a sign, zero, or fp extension and
3025   ///      is[Z|FP]ExtFree of the related types is not true.
3026   virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3027 
3028   /// Depth that GatherAllAliases should continue looking for chain
3029   /// dependencies when trying to find a more preferable chain. As an
3030   /// approximation, this should be more than the number of consecutive stores
3031   /// expected to be merged.
3032   unsigned GatherAllAliasesMaxDepth;
3033 
3034   /// \brief Specify maximum number of store instructions per memset call.
3035   ///
3036   /// When lowering \@llvm.memset this field specifies the maximum number of
3037   /// store operations that may be substituted for the call to memset. Targets
3038   /// must set this value based on the cost threshold for that target. Targets
3039   /// should assume that the memset will be done using as many of the largest
3040   /// store operations first, followed by smaller ones, if necessary, per
3041   /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3042   /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3043   /// store.  This only applies to setting a constant array of a constant size.
3044   unsigned MaxStoresPerMemset;
3045   /// Likewise for functions with the OptSize attribute.
3046   unsigned MaxStoresPerMemsetOptSize;
3047 
3048   /// \brief Specify maximum number of store instructions per memcpy call.
3049   ///
3050   /// When lowering \@llvm.memcpy this field specifies the maximum number of
3051   /// store operations that may be substituted for a call to memcpy. Targets
3052   /// must set this value based on the cost threshold for that target. Targets
3053   /// should assume that the memcpy will be done using as many of the largest
3054   /// store operations first, followed by smaller ones, if necessary, per
3055   /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3056   /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
3057   /// and one 1-byte store. This only applies to copying a constant array of
3058   /// constant size.
3059   unsigned MaxStoresPerMemcpy;
3060   /// Likewise for functions with the OptSize attribute.
3061   unsigned MaxStoresPerMemcpyOptSize;
3062   /// \brief Specify max number of store instructions to glue in inlined memcpy.
3063   ///
3064   /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3065   /// of store instructions to keep together. This helps in pairing and
3066   /// vectorization later on.
3067   unsigned MaxGluedStoresPerMemcpy = 0;
3068 
3069   /// \brief Specify maximum number of load instructions per memcmp call.
3070   ///
3071   /// When lowering \@llvm.memcmp this field specifies the maximum number of
3072   /// pairs of load operations that may be substituted for a call to memcmp.
3073   /// Targets must set this value based on the cost threshold for that target.
3074   /// Targets should assume that the memcmp will be done using as many of the
3075   /// largest load operations first, followed by smaller ones, if necessary, per
3076   /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3077   /// with 32-bit alignment would result in one 4-byte load, one 2-byte load,
3078   /// and one 1-byte load. This only applies to comparing a constant array of
3079   /// constant size.
3080   unsigned MaxLoadsPerMemcmp;
3081   /// Likewise for functions with the OptSize attribute.
3082   unsigned MaxLoadsPerMemcmpOptSize;
3083 
3084   /// \brief Specify maximum number of store instructions per memmove call.
3085   ///
3086   /// When lowering \@llvm.memmove this field specifies the maximum number of
3087   /// store instructions that may be substituted for a call to memmove. Targets
3088   /// must set this value based on the cost threshold for that target. Targets
3089   /// should assume that the memmove will be done using as many of the largest
3090   /// store operations first, followed by smaller ones, if necessary, per
3091   /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3092   /// with 8-bit alignment would result in nine 1-byte stores.  This only
3093   /// applies to copying a constant array of constant size.
3094   unsigned MaxStoresPerMemmove;
3095   /// Likewise for functions with the OptSize attribute.
3096   unsigned MaxStoresPerMemmoveOptSize;
3097 
3098   /// Tells the code generator that select is more expensive than a branch if
3099   /// the branch is usually predicted right.
3100   bool PredictableSelectIsExpensive;
3101 
3102   /// \see enableExtLdPromotion.
3103   bool EnableExtLdPromotion;
3104 
3105   /// Return true if the value types that can be represented by the specified
3106   /// register class are all legal.
3107   bool isLegalRC(const TargetRegisterInfo &TRI,
3108                  const TargetRegisterClass &RC) const;
3109 
3110   /// Replace/modify any TargetFrameIndex operands with a target-dependent
3111   /// sequence of memory operands that is recognized by PrologEpilogInserter.
3112   MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3113                                     MachineBasicBlock *MBB) const;
3114 
3115   /// Replace/modify the XRay custom event operands with target-dependent
3116   /// details.
3117   MachineBasicBlock *emitXRayCustomEvent(MachineInstr &MI,
3118                                          MachineBasicBlock *MBB) const;
3119 
3120   /// Replace/modify the XRay typed event operands with target-dependent
3121   /// details.
3122   MachineBasicBlock *emitXRayTypedEvent(MachineInstr &MI,
3123                                         MachineBasicBlock *MBB) const;
3124 
3125   bool IsStrictFPEnabled;
3126 };
3127 
3128 /// This class defines information used to lower LLVM code to legal SelectionDAG
3129 /// operators that the target instruction selector can accept natively.
3130 ///
3131 /// This class also defines callbacks that targets must implement to lower
3132 /// target-specific constructs to SelectionDAG operators.
3133 class TargetLowering : public TargetLoweringBase {
3134 public:
3135   struct DAGCombinerInfo;
3136   struct MakeLibCallOptions;
3137 
3138   TargetLowering(const TargetLowering &) = delete;
3139   TargetLowering &operator=(const TargetLowering &) = delete;
3140 
3141   explicit TargetLowering(const TargetMachine &TM);
3142 
3143   bool isPositionIndependent() const;
3144 
3145   virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3146                                           FunctionLoweringInfo *FLI,
3147                                           LegacyDivergenceAnalysis *DA) const {
3148     return false;
3149   }
3150 
3151   virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
3152     return false;
3153   }
3154 
3155   /// Returns true (by value), and the base pointer, offset pointer, and
3156   /// addressing mode (by reference), if the node's address can be legally
3157   /// represented as a pre-indexed load / store address.
3158   virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3159                                          SDValue &/*Offset*/,
3160                                          ISD::MemIndexedMode &/*AM*/,
3161                                          SelectionDAG &/*DAG*/) const {
3162     return false;
3163   }
3164 
3165   /// Returns true (by value), and the base pointer, offset pointer, and
3166   /// addressing mode (by reference), if this node can be combined with a
3167   /// load / store to form a post-indexed load / store.
3168   virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3169                                           SDValue &/*Base*/,
3170                                           SDValue &/*Offset*/,
3171                                           ISD::MemIndexedMode &/*AM*/,
3172                                           SelectionDAG &/*DAG*/) const {
3173     return false;
3174   }
3175 
3176   /// Returns true if the specified base+offset is a legal indexed addressing
3177   /// mode for this target. \p MI is the load or store instruction that is being
3178   /// considered for transformation.
3179   virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3180                                bool IsPre, MachineRegisterInfo &MRI) const {
3181     return false;
3182   }
3183 
3184   /// Return the entry encoding for a jump table in the current function.  The
3185   /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3186   virtual unsigned getJumpTableEncoding() const;
3187 
3188   virtual const MCExpr *
3189   LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3190                             const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3191                             MCContext &/*Ctx*/) const {
3192     llvm_unreachable("Need to implement this hook if target has custom JTIs");
3193   }
3194 
3195   /// Returns relocation base for the given PIC jumptable.
3196   virtual SDValue getPICJumpTableRelocBase(SDValue Table,
3197                                            SelectionDAG &DAG) const;
3198 
3199   /// This returns the relocation base for the given PIC jumptable, the same as
3200   /// getPICJumpTableRelocBase, but as an MCExpr.
3201   virtual const MCExpr *
3202   getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3203                                unsigned JTI, MCContext &Ctx) const;
3204 
3205   /// Return true if folding a constant offset with the given GlobalAddress is
3206   /// legal.  It is frequently not legal in PIC relocation models.
3207   virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
3208 
3209   bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
3210                             SDValue &Chain) const;
3211 
3212   void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3213                            SDValue &NewRHS, ISD::CondCode &CCCode,
3214                            const SDLoc &DL, const SDValue OldLHS,
3215                            const SDValue OldRHS) const;
3216 
3217   void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3218                            SDValue &NewRHS, ISD::CondCode &CCCode,
3219                            const SDLoc &DL, const SDValue OldLHS,
3220                            const SDValue OldRHS, SDValue &Chain,
3221                            bool IsSignaling = false) const;
3222 
3223   /// Returns a pair of (return value, chain).
3224   /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
3225   std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
3226                                           EVT RetVT, ArrayRef<SDValue> Ops,
3227                                           MakeLibCallOptions CallOptions,
3228                                           const SDLoc &dl,
3229                                           SDValue Chain = SDValue()) const;
3230 
3231   /// Check whether parameters to a call that are passed in callee saved
3232   /// registers are the same as from the calling function.  This needs to be
3233   /// checked for tail call eligibility.
3234   bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
3235       const uint32_t *CallerPreservedMask,
3236       const SmallVectorImpl<CCValAssign> &ArgLocs,
3237       const SmallVectorImpl<SDValue> &OutVals) const;
3238 
3239   //===--------------------------------------------------------------------===//
3240   // TargetLowering Optimization Methods
3241   //
3242 
3243   /// A convenience struct that encapsulates a DAG, and two SDValues for
3244   /// returning information from TargetLowering to its clients that want to
3245   /// combine.
3246   struct TargetLoweringOpt {
3247     SelectionDAG &DAG;
3248     bool LegalTys;
3249     bool LegalOps;
3250     SDValue Old;
3251     SDValue New;
3252 
3253     explicit TargetLoweringOpt(SelectionDAG &InDAG,
3254                                bool LT, bool LO) :
3255       DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
3256 
3257     bool LegalTypes() const { return LegalTys; }
3258     bool LegalOperations() const { return LegalOps; }
3259 
3260     bool CombineTo(SDValue O, SDValue N) {
3261       Old = O;
3262       New = N;
3263       return true;
3264     }
3265   };
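  // A sketch of how a combine might use this struct together with the
  // SimplifyDemandedBits helper declared below (illustrative, not a complete
  // combine):
  //
  //   TargetLoweringOpt TLO(DAG, /*LT=*/true, /*LO=*/true);
  //   KnownBits Known;
  //   if (TLI.SimplifyDemandedBits(Op, DemandedBits, Known, TLO))
  //     DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);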
3266 
3267   /// Determines the optimal series of memory ops to replace the memset / memcpy.
3268   /// Return true if the number of memory ops is below the threshold (Limit).
3269   /// The types of the sequence of memory ops used to perform the
3270   /// memset / memcpy are returned by reference in MemOps.
3271   bool findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
3272                                 const MemOp &Op, unsigned DstAS, unsigned SrcAS,
3273                                 const AttributeList &FuncAttributes) const;
3274 
3275   /// Check to see if the specified operand of the specified instruction is a
3276   /// constant integer.  If so, check to see if there are any bits set in the
3277   /// constant that are not demanded.  If so, shrink the constant and return
3278   /// true.
3279   bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3280                               const APInt &DemandedElts,
3281                               TargetLoweringOpt &TLO) const;
3282 
3283   /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3284   bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3285                               TargetLoweringOpt &TLO) const;
3286 
3287   // Target hook to do target-specific const optimization, which is called by
3288   // ShrinkDemandedConstant. This function should return true if the target
3289   // doesn't want ShrinkDemandedConstant to further optimize the constant.
3290   virtual bool targetShrinkDemandedConstant(SDValue Op,
3291                                             const APInt &DemandedBits,
3292                                             const APInt &DemandedElts,
3293                                             TargetLoweringOpt &TLO) const {
3294     return false;
3295   }
3296 
3297   /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.  This
3298   /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
3299   /// generalized for targets with other types of implicit widening casts.
3300   bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
3301                         TargetLoweringOpt &TLO) const;
3302 
3303   /// Look at Op.  At this point, we know that only the DemandedBits bits of the
3304   /// result of Op are ever used downstream.  If we can use this information to
3305   /// simplify Op, create a new simplified DAG node and return true, returning
3306   /// the original and new nodes in Old and New.  Otherwise, analyze the
3307   /// expression and return a mask of KnownOne and KnownZero bits for the
3308   /// expression (used to simplify the caller).  The KnownZero/One bits may only
3309   /// be accurate for those bits in the Demanded masks.
3310   /// \p AssumeSingleUse When this parameter is true, this function will
3311   ///    attempt to simplify \p Op even if there are multiple uses.
3312   ///    Callers are responsible for correctly updating the DAG based on the
3313   ///    results of this function, because simply replacing TLO.Old
3314   ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
3315   ///    has multiple uses.
3316   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3317                             const APInt &DemandedElts, KnownBits &Known,
3318                             TargetLoweringOpt &TLO, unsigned Depth = 0,
3319                             bool AssumeSingleUse = false) const;
3320 
3321   /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
3322   /// Adds Op back to the worklist upon success.
3323   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3324                             KnownBits &Known, TargetLoweringOpt &TLO,
3325                             unsigned Depth = 0,
3326                             bool AssumeSingleUse = false) const;
3327 
3328   /// Helper wrapper around SimplifyDemandedBits.
3329   /// Adds Op back to the worklist upon success.
3330   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3331                             DAGCombinerInfo &DCI) const;
3332 
3333   /// More limited version of SimplifyDemandedBits that can be used to "look
3334   /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3335   /// bitwise ops etc.
3336   SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3337                                           const APInt &DemandedElts,
3338                                           SelectionDAG &DAG,
3339                                           unsigned Depth) const;
3340 
3341   /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3342   /// elements.
3343   SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3344                                           SelectionDAG &DAG,
3345                                           unsigned Depth = 0) const;
3346 
3347   /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3348   /// bits from only some vector elements.
3349   SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
3350                                                 const APInt &DemandedElts,
3351                                                 SelectionDAG &DAG,
3352                                                 unsigned Depth = 0) const;
3353 
3354   /// Look at Vector Op. At this point, we know that only the DemandedElts
3355   /// elements of the result of Op are ever used downstream.  If we can use
3356   /// this information to simplify Op, create a new simplified DAG node and
3357   /// return true, storing the original and new nodes in TLO.
3358   /// Otherwise, analyze the expression and return a mask of KnownUndef and
3359   /// KnownZero elements for the expression (used to simplify the caller).
3360   /// The KnownUndef/Zero elements may only be accurate for those bits
3361   /// in the DemandedMask.
3362   /// \p AssumeSingleUse When this parameter is true, this function will
3363   ///    attempt to simplify \p Op even if there are multiple uses.
3364   ///    Callers are responsible for correctly updating the DAG based on the
3365   ///    results of this function, because simply replacing TLO.Old
3366   ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
3367   ///    has multiple uses.
3368   bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
3369                                   APInt &KnownUndef, APInt &KnownZero,
3370                                   TargetLoweringOpt &TLO, unsigned Depth = 0,
3371                                   bool AssumeSingleUse = false) const;
3372 
3373   /// Helper wrapper around SimplifyDemandedVectorElts.
3374   /// Adds Op back to the worklist upon success.
3375   bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
3376                                   APInt &KnownUndef, APInt &KnownZero,
3377                                   DAGCombinerInfo &DCI) const;
3378 
3379   /// Determine which of the bits specified in Mask are known to be either zero
3380   /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3381   /// argument allows us to only collect the known bits that are shared by the
3382   /// requested vector elements.
3383   virtual void computeKnownBitsForTargetNode(const SDValue Op,
3384                                              KnownBits &Known,
3385                                              const APInt &DemandedElts,
3386                                              const SelectionDAG &DAG,
3387                                              unsigned Depth = 0) const;
3388 
3389   /// Determine which of the bits specified in Mask are known to be either zero
3390   /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3391   /// argument allows us to only collect the known bits that are shared by the
3392   /// requested vector elements. This is for GISel.
3393   virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
3394                                               Register R, KnownBits &Known,
3395                                               const APInt &DemandedElts,
3396                                               const MachineRegisterInfo &MRI,
3397                                               unsigned Depth = 0) const;
3398 
3399   /// Determine the known alignment for the pointer value \p R. This can
3400   /// typically be inferred from the number of low known 0 bits. However, for a
3401   /// pointer with a non-integral address space, the alignment value may be
3402   /// independent of the known low bits.
3403   virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
3404                                                 Register R,
3405                                                 const MachineRegisterInfo &MRI,
3406                                                 unsigned Depth = 0) const;
3407 
3408   /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
3409   /// Default implementation computes low bits based on alignment
3410   /// information. This should preserve known bits passed into it.
3411   virtual void computeKnownBitsForFrameIndex(int FIOp,
3412                                              KnownBits &Known,
3413                                              const MachineFunction &MF) const;
3414 
3415   /// This method can be implemented by targets that want to expose additional
3416   /// information about sign bits to the DAG Combiner. The DemandedElts
3417   /// argument allows us to only collect the minimum sign bits that are shared
3418   /// by the requested vector elements.
3419   virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
3420                                                    const APInt &DemandedElts,
3421                                                    const SelectionDAG &DAG,
3422                                                    unsigned Depth = 0) const;
3423 
3424   /// This method can be implemented by targets that want to expose additional
3425   /// information about sign bits to GlobalISel combiners. The DemandedElts
3426   /// argument allows us to only collect the minimum sign bits that are shared
3427   /// by the requested vector elements.
3428   virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
3429                                                     Register R,
3430                                                     const APInt &DemandedElts,
3431                                                     const MachineRegisterInfo &MRI,
3432                                                     unsigned Depth = 0) const;
3433 
3434   /// Attempt to simplify any target nodes based on the demanded vector
3435   /// elements, returning true on success. Otherwise, analyze the expression and
3436   /// return a mask of KnownUndef and KnownZero elements for the expression
3437   /// (used to simplify the caller). The KnownUndef/Zero elements may only be
3438   /// accurate for those bits in the DemandedMask.
3439   virtual bool SimplifyDemandedVectorEltsForTargetNode(
3440       SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
3441       APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
3442 
3443   /// Attempt to simplify any target nodes based on the demanded bits/elts,
3444   /// returning true on success. Otherwise, analyze the
3445   /// expression and return a mask of KnownOne and KnownZero bits for the
3446   /// expression (used to simplify the caller).  The KnownZero/One bits may only
3447   /// be accurate for those bits in the Demanded masks.
3448   virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
3449                                                  const APInt &DemandedBits,
3450                                                  const APInt &DemandedElts,
3451                                                  KnownBits &Known,
3452                                                  TargetLoweringOpt &TLO,
3453                                                  unsigned Depth = 0) const;
3454 
3455   /// More limited version of SimplifyDemandedBits that can be used to "look
3456   /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3457   /// bitwise ops etc.
3458   virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
3459       SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
3460       SelectionDAG &DAG, unsigned Depth) const;
3461 
3462   /// Tries to build a legal vector shuffle using the provided parameters
3463   /// or equivalent variations. The Mask argument may be modified as the
3464   /// function tries different variations.
3465   /// Returns an empty SDValue if the operation fails.
3466   SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
3467                                   SDValue N1, MutableArrayRef<int> Mask,
3468                                   SelectionDAG &DAG) const;
3469 
3470   /// This method returns the constant pool value that will be loaded by LD.
3471   /// NOTE: You must check for implicit extensions of the constant by LD.
3472   virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
3473 
3474   /// If \p SNaN is false, \returns true if \p Op is known to never be any
3475   /// NaN. If \p sNaN is true, returns if \p Op is known to never be a signaling
3476   /// NaN.
3477   virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
3478                                             const SelectionDAG &DAG,
3479                                             bool SNaN = false,
3480                                             unsigned Depth = 0) const;
3481   struct DAGCombinerInfo {
3482     void *DC;  // The DAG Combiner object.
3483     CombineLevel Level;
3484     bool CalledByLegalizer;
3485 
3486   public:
3487     SelectionDAG &DAG;
3488 
3489     DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
3490       : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
3491 
3492     bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
3493     bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
3494     bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
3495     CombineLevel getDAGCombineLevel() { return Level; }
3496     bool isCalledByLegalizer() const { return CalledByLegalizer; }
3497 
3498     void AddToWorklist(SDNode *N);
3499     SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
3500     SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
3501     SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
3502 
3503     bool recursivelyDeleteUnusedNodes(SDNode *N);
3504 
3505     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
3506   };
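
  // Illustrative sketch of how a target's PerformDAGCombine consumes this
  // struct (XYZTargetLowering and tryXYZFold are hypothetical):
  //
  //   SDValue
  //   XYZTargetLowering::PerformDAGCombine(SDNode *N,
  //                                        DAGCombinerInfo &DCI) const {
  //     // Only fold while it is still safe to introduce new types.
  //     if (!DCI.isBeforeLegalize())
  //       return SDValue();
  //     if (SDValue Folded = tryXYZFold(N, DCI.DAG))
  //       return DCI.CombineTo(N, Folded);
  //     return SDValue(); // no change
  //   }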
3507 
3508   /// Return true if N is a constant or constant vector equal to the true
3509   /// value from getBooleanContents().
3510   bool isConstTrueVal(const SDNode *N) const;
3511 
3512   /// Return true if N is a constant or constant vector equal to the false
3513   /// value from getBooleanContents().
3514   bool isConstFalseVal(const SDNode *N) const;
3515 
3516   /// Return if \p N is a True value when extended to \p VT.
3517   bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
3518 
3519   /// Try to simplify a setcc built with the specified operands and cc. If it is
3520   /// unable to simplify it, return a null SDValue.
3521   SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
3522                         bool foldBooleans, DAGCombinerInfo &DCI,
3523                         const SDLoc &dl) const;
3524 
3525   // For targets which wrap an address, unwrap for analysis.
3526   virtual SDValue unwrapAddress(SDValue N) const { return N; }
3527 
3528   /// Returns true (and the GlobalValue and the offset) if the node is a
3529   /// GlobalAddress + offset.
3530   virtual bool
3531   isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
3532 
3533   /// This method will be invoked for all target nodes and for any
3534   /// target-independent nodes that the target has registered to be invoked
3535   /// for.
3536   ///
3537   /// The semantics are as follows:
3538   /// Return Value:
3539   ///   SDValue.Val == 0   - No change was made
3540   ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
3541   ///   otherwise          - N should be replaced by the returned Operand.
3542   ///
3543   /// In addition, methods provided by DAGCombinerInfo may be used to perform
3544   /// more complex transformations.
3545   ///
3546   virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
3547 
3548   /// Return true if it is profitable to move this shift by a constant amount
3549   /// through its operand, adjusting any immediate operands as necessary to
3550   /// preserve semantics. This transformation may not be desirable if it
3551   /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
3552   /// extraction in AArch64). By default, it returns true.
3553   ///
3554   /// @param N the shift node
3555   /// @param Level the current DAGCombine legalization level.
3556   virtual bool isDesirableToCommuteWithShift(const SDNode *N,
3557                                              CombineLevel Level) const {
3558     return true;
3559   }
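
  // Illustrative override (a sketch): refuse the commute when it would pull
  // the shift into an (and x, mask) that the target later matches as a
  // bitfield extract:
  //
  //   bool XYZTargetLowering::isDesirableToCommuteWithShift(
  //       const SDNode *N, CombineLevel Level) const {
  //     if (N->getOperand(0).getOpcode() == ISD::AND)
  //       return false; // keep the AND intact for bitfield matching
  //     return true;
  //   }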
3560 
3561   /// Return true if the target has native support for the specified value type
3562   /// and it is 'desirable' to use the type for the given node type. e.g. On x86
3563   /// i16 is legal, but undesirable since i16 instruction encodings are longer
3564   /// and some i16 instructions are slow.
3565   virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
3566     // By default, assume all legal types are desirable.
3567     return isTypeLegal(VT);
3568   }
3569 
3570   /// Return true if it is profitable for dag combiner to transform a floating
3571   /// point op of the specified opcode to an equivalent op of an integer
3572   /// type. e.g. f32 load -> i32 load can be profitable on ARM.
3573   virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
3574                                                  EVT /*VT*/) const {
3575     return false;
3576   }
3577 
3578   /// This method queries the target whether it is beneficial for the dag
3579   /// combiner to promote the specified node. If true, it should return the
3580   /// desired promotion type by reference.
3581   virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
3582     return false;
3583   }
3584 
3585   /// Return true if the target supports swifterror attribute. It optimizes
3586   /// loads and stores to reading and writing a specific register.
3587   virtual bool supportSwiftError() const {
3588     return false;
3589   }
3590 
3591   /// Return true if the target supports that a subset of CSRs for the given
3592   /// machine function is handled explicitly via copies.
3593   virtual bool supportSplitCSR(MachineFunction *MF) const {
3594     return false;
3595   }
3596 
3597   /// Perform necessary initialization to handle a subset of CSRs explicitly
3598   /// via copies. This function is called at the beginning of instruction
3599   /// selection.
3600   virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
3601     llvm_unreachable("Not Implemented");
3602   }
3603 
3604   /// Insert explicit copies in entry and exit blocks. We copy a subset of
3605   /// CSRs to virtual registers in the entry block, and copy them back to
3606   /// physical registers in the exit blocks. This function is called at the end
3607   /// of instruction selection.
3608   virtual void insertCopiesSplitCSR(
3609       MachineBasicBlock *Entry,
3610       const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
3611     llvm_unreachable("Not Implemented");
3612   }
3613 
3614   /// Return the newly negated expression if the cost is not expensive and
3615   /// set the cost in \p Cost to indicate whether it is cheaper or neutral to
3616   /// do the negation.
3617   virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
3618                                        bool LegalOps, bool OptForSize,
3619                                        NegatibleCost &Cost,
3620                                        unsigned Depth = 0) const;
3621 
3622   /// This is the helper function to return the newly negated expression only
3623   /// when the cost is cheaper.
3624   SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
3625                                       bool LegalOps, bool OptForSize,
3626                                       unsigned Depth = 0) const {
3627     NegatibleCost Cost = NegatibleCost::Expensive;
3628     SDValue Neg =
3629         getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
3630     if (Neg && Cost == NegatibleCost::Cheaper)
3631       return Neg;
3632     // Remove the newly created node to avoid side effects on the DAG.
3633     if (Neg && Neg.getNode()->use_empty())
3634       DAG.RemoveDeadNode(Neg.getNode());
3635     return SDValue();
3636   }
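
  // Usage sketch (illustrative): when lowering (fsub -0.0, X), request a
  // negated form of X only if it is strictly cheaper than emitting an
  // explicit FNEG:
  //
  //   if (SDValue NegX = getCheaperNegatedExpression(X, DAG, LegalOperations,
  //                                                  ForCodeSize))
  //     return NegX;
  //   // ...otherwise fall back to DAG.getNode(ISD::FNEG, DL, VT, X).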
3637 
3638   /// This is the helper function to return the newly negated expression if
3639   /// the cost is not expensive.
3640   SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
3641                                bool OptForSize, unsigned Depth = 0) const {
3642     NegatibleCost Cost = NegatibleCost::Expensive;
3643     return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
3644   }
3645 
3646   //===--------------------------------------------------------------------===//
3647   // Lowering methods - These methods must be implemented by targets so that
3648   // the SelectionDAGBuilder code knows how to lower these.
3649   //
3650 
3651   /// Target-specific splitting of values into parts that fit a register
3652   /// storing a legal type
3653   virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
3654                                            SDValue Val, SDValue *Parts,
3655                                            unsigned NumParts, MVT PartVT,
3656                                            Optional<CallingConv::ID> CC) const {
3657     return false;
3658   }
3659 
3660   /// Target-specific combining of register parts into its original value
3661   virtual SDValue
3662   joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
3663                              const SDValue *Parts, unsigned NumParts,
3664                              MVT PartVT, EVT ValueVT,
3665                              Optional<CallingConv::ID> CC) const {
3666     return SDValue();
3667   }
3668 
3669   /// This hook must be implemented to lower the incoming (formal) arguments,
3670   /// described by the Ins array, into the specified DAG. The implementation
3671   /// should fill in the InVals array with legal-type argument values, and
3672   /// return the resulting token chain value.
3673   virtual SDValue LowerFormalArguments(
3674       SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
3675       const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
3676       SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
3677     llvm_unreachable("Not Implemented");
3678   }
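
  // Skeleton of a typical implementation (illustrative; CC_XYZ stands in for
  // a target's TableGen-generated calling-convention function):
  //
  //   SDValue XYZTargetLowering::LowerFormalArguments(
  //       SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
  //       const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
  //       SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  //     SmallVector<CCValAssign, 16> ArgLocs;
  //     CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
  //                    *DAG.getContext());
  //     CCInfo.AnalyzeFormalArguments(Ins, CC_XYZ);
  //     for (CCValAssign &VA : ArgLocs)
  //       InVals.push_back(/* CopyFromReg or frame-index load, per VA */);
  //     return Chain;
  //   }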
3679 
3680   /// This structure contains all information that is necessary for lowering
3681   /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
3682   /// needs to lower a call, and targets will see this struct in their LowerCall
3683   /// implementation.
3684   struct CallLoweringInfo {
3685     SDValue Chain;
3686     Type *RetTy = nullptr;
3687     bool RetSExt           : 1;
3688     bool RetZExt           : 1;
3689     bool IsVarArg          : 1;
3690     bool IsInReg           : 1;
3691     bool DoesNotReturn     : 1;
3692     bool IsReturnValueUsed : 1;
3693     bool IsConvergent      : 1;
3694     bool IsPatchPoint      : 1;
3695     bool IsPreallocated : 1;
3696     bool NoMerge           : 1;
3697 
3698     // IsTailCall should be modified by implementations of
3699     // TargetLowering::LowerCall that perform tail call conversions.
3700     bool IsTailCall = false;
3701 
3702     // Is Call lowering done post SelectionDAG type legalization.
3703     bool IsPostTypeLegalization = false;
3704 
3705     unsigned NumFixedArgs = -1;
3706     CallingConv::ID CallConv = CallingConv::C;
3707     SDValue Callee;
3708     ArgListTy Args;
3709     SelectionDAG &DAG;
3710     SDLoc DL;
3711     const CallBase *CB = nullptr;
3712     SmallVector<ISD::OutputArg, 32> Outs;
3713     SmallVector<SDValue, 32> OutVals;
3714     SmallVector<ISD::InputArg, 32> Ins;
3715     SmallVector<SDValue, 4> InVals;
3716 
3717     CallLoweringInfo(SelectionDAG &DAG)
3718         : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
3719           DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
3720           IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
3721           DAG(DAG) {}
3722 
3723     CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
3724       DL = dl;
3725       return *this;
3726     }
3727 
3728     CallLoweringInfo &setChain(SDValue InChain) {
3729       Chain = InChain;
3730       return *this;
3731     }
3732 
3733     // setCallee with target/module-specific attributes
3734     CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
3735                                    SDValue Target, ArgListTy &&ArgsList) {
3736       RetTy = ResultType;
3737       Callee = Target;
3738       CallConv = CC;
3739       NumFixedArgs = ArgsList.size();
3740       Args = std::move(ArgsList);
3741 
3742       DAG.getTargetLoweringInfo().markLibCallAttributes(
3743           &(DAG.getMachineFunction()), CC, Args);
3744       return *this;
3745     }
3746 
3747     CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
3748                                 SDValue Target, ArgListTy &&ArgsList) {
3749       RetTy = ResultType;
3750       Callee = Target;
3751       CallConv = CC;
3752       NumFixedArgs = ArgsList.size();
3753       Args = std::move(ArgsList);
3754       return *this;
3755     }
3756 
3757     CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
3758                                 SDValue Target, ArgListTy &&ArgsList,
3759                                 const CallBase &Call) {
3760       RetTy = ResultType;
3761 
3762       IsInReg = Call.hasRetAttr(Attribute::InReg);
3763       DoesNotReturn =
3764           Call.doesNotReturn() ||
3765           (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
3766       IsVarArg = FTy->isVarArg();
3767       IsReturnValueUsed = !Call.use_empty();
3768       RetSExt = Call.hasRetAttr(Attribute::SExt);
3769       RetZExt = Call.hasRetAttr(Attribute::ZExt);
3770       NoMerge = Call.hasFnAttr(Attribute::NoMerge);
3771 
3772       Callee = Target;
3773 
3774       CallConv = Call.getCallingConv();
3775       NumFixedArgs = FTy->getNumParams();
3776       Args = std::move(ArgsList);
3777 
3778       CB = &Call;
3779 
3780       return *this;
3781     }
3782 
3783     CallLoweringInfo &setInRegister(bool Value = true) {
3784       IsInReg = Value;
3785       return *this;
3786     }
3787 
3788     CallLoweringInfo &setNoReturn(bool Value = true) {
3789       DoesNotReturn = Value;
3790       return *this;
3791     }
3792 
3793     CallLoweringInfo &setVarArg(bool Value = true) {
3794       IsVarArg = Value;
3795       return *this;
3796     }
3797 
3798     CallLoweringInfo &setTailCall(bool Value = true) {
3799       IsTailCall = Value;
3800       return *this;
3801     }
3802 
3803     CallLoweringInfo &setDiscardResult(bool Value = true) {
3804       IsReturnValueUsed = !Value;
3805       return *this;
3806     }
3807 
3808     CallLoweringInfo &setConvergent(bool Value = true) {
3809       IsConvergent = Value;
3810       return *this;
3811     }
3812 
3813     CallLoweringInfo &setSExtResult(bool Value = true) {
3814       RetSExt = Value;
3815       return *this;
3816     }
3817 
3818     CallLoweringInfo &setZExtResult(bool Value = true) {
3819       RetZExt = Value;
3820       return *this;
3821     }
3822 
3823     CallLoweringInfo &setIsPatchPoint(bool Value = true) {
3824       IsPatchPoint = Value;
3825       return *this;
3826     }
3827 
3828     CallLoweringInfo &setIsPreallocated(bool Value = true) {
3829       IsPreallocated = Value;
3830       return *this;
3831     }
3832 
3833     CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
3834       IsPostTypeLegalization = Value;
3835       return *this;
3836     }
3837 
3838     ArgListTy &getArgs() {
3839       return Args;
3840     }
3841   };
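
  // Typical construction (an illustrative sketch of how callers drive this
  // struct through LowerCallTo):
  //
  //   TargetLowering::ArgListTy Args; // previously filled ArgListEntry list
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(dl)
  //       .setChain(Chain)
  //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
  //       .setDiscardResult(true);
  //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  //   // CallResult.first is the return value (null for void calls);
  //   // CallResult.second is the outgoing token chain.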
3842 
3843   /// This structure is used to pass arguments to makeLibCall function.
3844   struct MakeLibCallOptions {
3845     // By passing the type list before softening to makeLibCall, the target
3846     // hook shouldExtendTypeInLibCall can get the original type before softening.
3847     ArrayRef<EVT> OpsVTBeforeSoften;
3848     EVT RetVTBeforeSoften;
3849     bool IsSExt : 1;
3850     bool DoesNotReturn : 1;
3851     bool IsReturnValueUsed : 1;
3852     bool IsPostTypeLegalization : 1;
3853     bool IsSoften : 1;
3854 
3855     MakeLibCallOptions()
3856         : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
3857           IsPostTypeLegalization(false), IsSoften(false) {}
3858 
3859     MakeLibCallOptions &setSExt(bool Value = true) {
3860       IsSExt = Value;
3861       return *this;
3862     }
3863 
3864     MakeLibCallOptions &setNoReturn(bool Value = true) {
3865       DoesNotReturn = Value;
3866       return *this;
3867     }
3868 
3869     MakeLibCallOptions &setDiscardResult(bool Value = true) {
3870       IsReturnValueUsed = !Value;
3871       return *this;
3872     }
3873 
3874     MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
3875       IsPostTypeLegalization = Value;
3876       return *this;
3877     }
3878 
3879     MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
3880                                                 bool Value = true) {
3881       OpsVTBeforeSoften = OpsVT;
3882       RetVTBeforeSoften = RetVT;
3883       IsSoften = Value;
3884       return *this;
3885     }
3886   };
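
  // Example configuration (illustrative) for a float libcall whose f32
  // operand was softened to i32 by the type legalizer:
  //
  //   TargetLowering::MakeLibCallOptions CallOptions;
  //   EVT OpVT = MVT::f32; // the operand type before softening
  //   CallOptions.setTypeListBeforeSoften(OpVT, MVT::f32, true);
  //   // ...then hand CallOptions to makeLibCall with the i32 operands.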
3887 
3888   /// This function lowers an abstract call to a function into an actual call.
3889   /// This returns a pair of operands.  The first element is the return value
3890   /// for the function (if RetTy is not VoidTy).  The second element is the
3891   /// outgoing token chain. It calls LowerCall to do the actual lowering.
3892   std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
3893 
3894   /// This hook must be implemented to lower calls into the specified
3895   /// DAG. The outgoing arguments to the call are described by the Outs array,
3896   /// and the values to be returned by the call are described by the Ins
3897   /// array. The implementation should fill in the InVals array with legal-type
3898   /// return values from the call, and return the resulting token chain value.
3899   virtual SDValue
3900     LowerCall(CallLoweringInfo &/*CLI*/,
3901               SmallVectorImpl<SDValue> &/*InVals*/) const {
3902     llvm_unreachable("Not Implemented");
3903   }
3904 
3905   /// Target-specific cleanup for formal ByVal parameters.
3906   virtual void HandleByVal(CCState *, unsigned &, Align) const {}
3907 
3908   /// This hook should be implemented to check whether the return values
3909   /// described by the Outs array can fit into the return registers.  If false
3910   /// is returned, an sret-demotion is performed.
3911   virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
3912                               MachineFunction &/*MF*/, bool /*isVarArg*/,
3913                               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
3914                               LLVMContext &/*Context*/) const
3915   {
3916     // Return true by default to get preexisting behavior.
3917     return true;
3918   }
3919 
3920   /// This hook must be implemented to lower outgoing return values, described
3921   /// by the Outs array, into the specified DAG. The implementation should
3922   /// return the resulting token chain value.
3923   virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
3924                               bool /*isVarArg*/,
3925                               const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
3926                               const SmallVectorImpl<SDValue> & /*OutVals*/,
3927                               const SDLoc & /*dl*/,
3928                               SelectionDAG & /*DAG*/) const {
3929     llvm_unreachable("Not Implemented");
3930   }
3931 
3932   /// Return true if the result of the specified node is used by a return node
3933   /// only. It also computes and returns the input chain for the tail call.
3934   ///
3935   /// This is used to determine whether it is possible to codegen a libcall as
3936   /// tail call at legalization time.
3937   virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
3938     return false;
3939   }
3940 
3941   /// Return true if the target may be able to emit the call instruction as a
3942   /// tail call. This is used by optimization passes to determine if it's
3943   /// profitable to duplicate return instructions to enable tailcall optimization.
3944   virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
3945     return false;
3946   }
3947 
3948   /// Return the builtin name for the __builtin___clear_cache intrinsic
3949   /// Default is to invoke the clear cache library call
3950   virtual const char *getClearCacheBuiltinName() const {
3951     return "__clear_cache";
3952   }
3953 
3954   /// Return the register ID of the name passed in. Used by the named register
3955   /// global variables extension. There is no target-independent behaviour,
3956   /// so the default action is to bail.
3957   virtual Register getRegisterByName(const char *RegName, LLT Ty,
3958                                      const MachineFunction &MF) const {
3959     report_fatal_error("Named registers not implemented for this target");
3960   }
3961 
3962   /// Return the type that should be used to zero or sign extend a
3963   /// zeroext/signext integer return value.  FIXME: Some C calling conventions
3964   /// require the return type to be promoted, but this is not true all the time,
3965   /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
3966   /// conventions. The frontend should handle this and include all of the
3967   /// necessary information.
3968   virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
3969                                        ISD::NodeType /*ExtendKind*/) const {
3970     EVT MinVT = getRegisterType(Context, MVT::i32);
3971     return VT.bitsLT(MinVT) ? MinVT : VT;
3972   }
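
  // Worked example of the default behavior: for a zeroext i8 return value on
  // a target whose register type for i32 is i32,
  //
  //   MinVT = getRegisterType(Context, MVT::i32) = i32
  //   i8.bitsLT(i32)  =>  the hook returns i32
  //
  // so the i8 value is extended to i32 before being placed in the return
  // register.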
3973 
3974   /// For some targets, an LLVM struct type must be broken down into multiple
3975   /// simple types, but the calling convention specifies that the entire struct
3976   /// must be passed in a block of consecutive registers.
3977   virtual bool
3978   functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
3979                                             bool isVarArg) const {
3980     return false;
3981   }
3982 
3983   /// For most targets, an LLVM type must be broken down into multiple
3984   /// smaller types. Usually the halves are ordered according to the endianness
3985   /// but for some platforms that would break. So this method will default to
3986   /// matching the endianness but can be overridden.
3987   virtual bool
3988   shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
3989     return DL.isLittleEndian();
3990   }
3991 
3992   /// Returns a 0 terminated array of registers that can be safely used as
3993   /// scratch registers.
3994   virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
3995     return nullptr;
3996   }
3997 
3998   /// This callback is used to prepare for a volatile or atomic load.
3999   /// It takes a chain node as input and returns the chain for the load itself.
4000   ///
4001   /// Having a callback like this is necessary for targets like SystemZ,
4002   /// which allows a CPU to reuse the result of a previous load indefinitely,
4003   /// even if a cache-coherent store is performed by another CPU.  The default
4004   /// implementation does nothing.
4005   virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
4006                                               SelectionDAG &DAG) const {
4007     return Chain;
4008   }
4009 
4010   /// Should SelectionDAG lower an atomic store of the given kind as a normal
4011   /// StoreSDNode (as opposed to an AtomicSDNode)?  NOTE: The intention is to
4012   /// eventually migrate all targets to using StoreSDNodes, but porting is
4013   /// being done one target at a time.
4014   virtual bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
4015     assert(SI.isAtomic() && "violated precondition");
4016     return false;
4017   }
4018 
4019   /// Should SelectionDAG lower an atomic load of the given kind as a normal
4020   /// LoadSDNode (as opposed to an AtomicSDNode)?  NOTE: The intention is to
4021   /// eventually migrate all targets to using LoadSDNodes, but porting is
4022   /// being done one target at a time.
4023   virtual bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
4024     assert(LI.isAtomic() && "violated precondition");
4025     return false;
4026   }
4027 
4028 
4029   /// This callback is invoked by the type legalizer to legalize nodes with an
4030   /// illegal operand type but legal result types.  It replaces the
4031   /// LowerOperation callback in the type Legalizer.  The reason we can not do
4032   /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
4033   /// use this callback.
4034   ///
4035   /// TODO: Consider merging with ReplaceNodeResults.
4036   ///
4037   /// The target places new result values for the node in Results (their number
4038   /// and types must exactly match those of the original return values of
4039   /// the node), or leaves Results empty, which indicates that the node is not
4040   /// to be custom lowered after all.
4041   /// The default implementation calls LowerOperation.
4042   virtual void LowerOperationWrapper(SDNode *N,
4043                                      SmallVectorImpl<SDValue> &Results,
4044                                      SelectionDAG &DAG) const;
4045 
4046   /// This callback is invoked for operations that are unsupported by the
4047   /// target, which are registered to use 'custom' lowering, and whose defined
4048   /// values are all legal.  If the target has no operations that require custom
4049   /// lowering, it need not implement this.  The default implementation of this
4050   /// aborts.
4051   virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
4052 
4053   /// This callback is invoked when a node result type is illegal for the
4054   /// target, and the operation was registered to use 'custom' lowering for that
4055   /// result type.  The target places new result values for the node in Results
4056   /// (their number and types must exactly match those of the original return
4057   /// values of the node), or leaves Results empty, which indicates that the
4058   /// node is not to be custom lowered after all.
4059   ///
4060   /// If the target has no operations that require custom lowering, it need not
4061   /// implement this.  The default implementation aborts.
4062   virtual void ReplaceNodeResults(SDNode * /*N*/,
4063                                   SmallVectorImpl<SDValue> &/*Results*/,
4064                                   SelectionDAG &/*DAG*/) const {
4065     llvm_unreachable("ReplaceNodeResults not implemented for this target!");
4066   }
4067 
4068   /// This method returns the name of a target specific DAG node.
4069   virtual const char *getTargetNodeName(unsigned Opcode) const;
4070 
4071   /// This method returns a target specific FastISel object, or null if the
4072   /// target does not support "fast" ISel.
4073   virtual FastISel *createFastISel(FunctionLoweringInfo &,
4074                                    const TargetLibraryInfo *) const {
4075     return nullptr;
4076   }
4077 
4078   bool verifyReturnAddressArgumentIsConstant(SDValue Op,
4079                                              SelectionDAG &DAG) const;
4080 
4081   //===--------------------------------------------------------------------===//
4082   // Inline Asm Support hooks
4083   //
4084 
4085   /// This hook allows the target to expand an inline asm call to be explicit
4086   /// llvm code if it wants to.  This is useful for turning simple inline asms
4087   /// into LLVM intrinsics, which gives the compiler more information about the
4088   /// behavior of the code.
4089   virtual bool ExpandInlineAsm(CallInst *) const {
4090     return false;
4091   }
4092 
4093   enum ConstraintType {
4094     C_Register,            // Constraint represents specific register(s).
4095     C_RegisterClass,       // Constraint represents any of register(s) in class.
4096     C_Memory,              // Memory constraint.
4097     C_Immediate,           // Requires an immediate.
4098     C_Other,               // Something else.
4099     C_Unknown              // Unsupported constraint.
4100   };
4101 
4102   enum ConstraintWeight {
4103     // Generic weights.
4104     CW_Invalid  = -1,     // No match.
4105     CW_Okay     = 0,      // Acceptable.
4106     CW_Good     = 1,      // Good weight.
4107     CW_Better   = 2,      // Better weight.
4108     CW_Best     = 3,      // Best weight.
4109 
4110     // Well-known weights.
4111     CW_SpecificReg  = CW_Okay,    // Specific register operands.
4112     CW_Register     = CW_Good,    // Register operands.
4113     CW_Memory       = CW_Better,  // Memory operands.
4114     CW_Constant     = CW_Best,    // Constant operand.
4115     CW_Default      = CW_Okay     // Default or don't know type.
4116   };
4117 
4118   /// This contains information for each constraint that we are lowering.
4119   struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
4120     /// This contains the actual string for the code, like "m".  TargetLowering
4121     /// picks the 'best' code from ConstraintInfo::Codes that most closely
4122     /// matches the operand.
4123     std::string ConstraintCode;
4124 
4125     /// Information about the constraint code, e.g. Register, RegisterClass,
4126     /// Memory, Other, Unknown.
4127     TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
4128 
4129     /// If this is the result output operand or a clobber, this is null,
4130     /// otherwise it is the incoming operand to the CallInst.  This gets
4131     /// modified as the asm is processed.
4132     Value *CallOperandVal = nullptr;
4133 
4134     /// The ValueType for the operand value.
4135     MVT ConstraintVT = MVT::Other;
4136 
4137     /// Copy constructor for copying from a ConstraintInfo.
4138     AsmOperandInfo(InlineAsm::ConstraintInfo Info)
4139         : InlineAsm::ConstraintInfo(std::move(Info)) {}
4140 
4141     /// Return true if this is an input operand that is a matching constraint
4142     /// like "4".
4143     bool isMatchingInputConstraint() const;
4144 
4145     /// If this is an input matching constraint, this method returns the output
4146     /// operand it matches.
4147     unsigned getMatchedOperand() const;
4148   };
4149 
4150   using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
4151 
4152   /// Split up the constraint string from the inline assembly value into the
4153   /// specific constraints and their prefixes, and also tie in the associated
4154   /// operand values.  If this returns an empty vector, and if the constraint
4155   /// string itself isn't empty, there was an error parsing.
4156   virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
4157                                                 const TargetRegisterInfo *TRI,
4158                                                 const CallBase &Call) const;
4159 
4160   /// Examine constraint type and operand type and determine a weight value.
4161   /// The operand object must already have been set up with the operand type.
4162   virtual ConstraintWeight getMultipleConstraintMatchWeight(
4163       AsmOperandInfo &info, int maIndex) const;
4164 
4165   /// Examine constraint string and operand type and determine a weight value.
4166   /// The operand object must already have been set up with the operand type.
4167   virtual ConstraintWeight getSingleConstraintMatchWeight(
4168       AsmOperandInfo &info, const char *constraint) const;
4169 
4170   /// Determines the constraint code and constraint type to use for the specific
4171   /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
4172   /// If the actual operand being passed in is available, it can be passed in as
4173   /// Op, otherwise an empty SDValue can be passed.
4174   virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
4175                                       SDValue Op,
4176                                       SelectionDAG *DAG = nullptr) const;
4177 
4178   /// Given a constraint, return the type of constraint it is for this target.
4179   virtual ConstraintType getConstraintType(StringRef Constraint) const;
4180 
4181   /// Given a physical register constraint (e.g.  {edx}), return the register
4182   /// number and the register class for the register.
4183   ///
4184   /// Given a register class constraint, like 'r', if this corresponds directly
4185   /// to an LLVM register class, return a register of 0 and the register class
4186   /// pointer.
4187   ///
4188   /// This should only be used for C_Register constraints.  On error, this
4189   /// returns a register number of 0 and a null register class pointer.
4190   virtual std::pair<unsigned, const TargetRegisterClass *>
4191   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4192                                StringRef Constraint, MVT VT) const;
4193 
4194   virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
4195     if (ConstraintCode == "m")
4196       return InlineAsm::Constraint_m;
4197     return InlineAsm::Constraint_Unknown;
4198   }
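
  // Illustrative override (a sketch; treating "Q" as a target memory
  // constraint is an assumption):
  //
  //   unsigned XYZTargetLowering::getInlineAsmMemConstraint(
  //       StringRef ConstraintCode) const {
  //     if (ConstraintCode == "Q")
  //       return InlineAsm::Constraint_Q;
  //     return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  //   }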
4199 
4200   /// Try to replace an X constraint, which matches anything, with another that
4201   /// has more specific requirements based on the type of the corresponding
4202   /// operand.  This returns null if there is no replacement to make.
4203   virtual const char *LowerXConstraint(EVT ConstraintVT) const;
4204 
4205   /// Lower the specified operand into the Ops vector.  If it is invalid, don't
4206   /// add anything to Ops.
4207   virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
4208                                             std::vector<SDValue> &Ops,
4209                                             SelectionDAG &DAG) const;
4210 
4211   // Lower custom output constraints. If invalid, return SDValue().
4212   virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
4213                                               const SDLoc &DL,
4214                                               const AsmOperandInfo &OpInfo,
4215                                               SelectionDAG &DAG) const;
4216 
4217   //===--------------------------------------------------------------------===//
4218   // Div utility functions
4219   //
4220   SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
4221                     SmallVectorImpl<SDNode *> &Created) const;
4222   SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
4223                     SmallVectorImpl<SDNode *> &Created) const;
4224 
4225   /// Targets may override this function to provide custom SDIV lowering for
4226   /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
4227   /// assumes SDIV is expensive and replaces it with a series of other integer
4228   /// operations.
4229   virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
4230                                 SelectionDAG &DAG,
4231                                 SmallVectorImpl<SDNode *> &Created) const;
4232 
4233   /// Indicate whether this target prefers to combine FDIVs with the same
4234   /// divisor. If the transform should never be done, return zero. If the
4235   /// transform should be done, return the minimum number of divisor uses
4236   /// that must exist.
4237   virtual unsigned combineRepeatedFPDivisors() const {
4238     return 0;
4239   }
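
  // Illustrative override (a sketch): rewrite a/d and b/d into
  // r = 1/d; a*r; b*r once the divisor has at least two uses, since one
  // division plus two multiplies is cheaper than two divisions:
  //
  //   unsigned XYZTargetLowering::combineRepeatedFPDivisors() const {
  //     return 2;
  //   }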
4240 
4241   /// Hooks for building estimates in place of slower divisions and square
4242   /// roots.
4243 
4244   /// Return either a square root or its reciprocal estimate value for the input
4245   /// operand.
4246   /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
4247   /// 'Enabled' as set by a potential default override attribute.
4248   /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
4249   /// refinement iterations required to generate a sufficient (though not
4250   /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
4251   /// The boolean UseOneConstNR output is used to select a Newton-Raphson
4252   /// algorithm implementation that uses either one or two constants.
4253   /// The boolean Reciprocal is used to select whether the estimate is for the
4254   /// square root of the input operand or the reciprocal of its square root.
4255   /// A target may choose to implement its own refinement within this function.
4256   /// If that's true, then return '0' as the number of RefinementSteps to avoid
4257   /// any further refinement of the estimate.
4258   /// An empty SDValue return means no estimate sequence can be created.
4259   virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
4260                                   int Enabled, int &RefinementSteps,
4261                                   bool &UseOneConstNR, bool Reciprocal) const {
4262     return SDValue();
4263   }
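
  // Illustrative override (a sketch; XYZISD::FRSQRTE stands in for a
  // hypothetical hardware reciprocal-square-root-estimate node):
  //
  //   SDValue XYZTargetLowering::getSqrtEstimate(
  //       SDValue Operand, SelectionDAG &DAG, int Enabled,
  //       int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const {
  //     EVT VT = Operand.getValueType();
  //     if (VT != MVT::f32)
  //       return SDValue();
  //     if (RefinementSteps == ReciprocalEstimate::Unspecified)
  //       RefinementSteps = 1; // assume one Newton-Raphson step suffices
  //     UseOneConstNR = true;
  //     return DAG.getNode(XYZISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  //   }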
4264 
4265   /// Return a reciprocal estimate value for the input operand.
4266   /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
4267   /// 'Enabled' as set by a potential default override attribute.
4268   /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
4269   /// refinement iterations required to generate a sufficient (though not
4270   /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
4271   /// A target may choose to implement its own refinement within this function.
4272   /// If that's true, then return '0' as the number of RefinementSteps to avoid
4273   /// any further refinement of the estimate.
4274   /// An empty SDValue return means no estimate sequence can be created.
4275   virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
4276                                    int Enabled, int &RefinementSteps) const {
4277     return SDValue();
4278   }
4279 
4280   /// Return a target-dependent comparison result if the input operand is
4281   /// suitable for use with a square root estimate calculation. For example, the
4282   /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
4283   /// result should be used as the condition operand for a select or branch.
4284   virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
4285                                    const DenormalMode &Mode) const {
4286     return SDValue();
4287   }
4288 
4289   /// Return a target-dependent result if the input operand is not suitable for
4290   /// use with a square root estimate calculation.
4291   virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
4292                                               SelectionDAG &DAG) const {
4293     return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
4294   }
4295 
4296   //===--------------------------------------------------------------------===//
4297   // Legalization utility functions
4298   //
4299 
4300   /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
4301   /// respectively, each computing an n/2-bit part of the result.
4302   /// \param Result A vector that will be filled with the parts of the result
4303   ///        in little-endian order.
4304   /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
4305   ///        if you want to control how low bits are extracted from the LHS.
4306   /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
4307   /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
4308   /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
4309   /// \returns true if the node has been expanded, false if it has not.
4310   bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
4311                       SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
4312                       SelectionDAG &DAG, MulExpansionKind Kind,
4313                       SDValue LL = SDValue(), SDValue LH = SDValue(),
4314                       SDValue RL = SDValue(), SDValue RH = SDValue()) const;
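
  // The expansion follows the schoolbook decomposition. With h = n/2,
  //
  //   LHS = LL + LH*2^h,   RHS = RL + RH*2^h
  //   LHS*RHS = LL*RL + (LL*RH + LH*RL)*2^h + LH*RH*2^n
  //
  // so the low part of the result is the low half of LL*RL, and the high
  // part accumulates the cross products plus the top half of LL*RL
  // (obtained via MULHU/MULHS or a wider multiply, per MulExpansionKind).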
4315 
4316   /// Expand a MUL into two nodes.  One that computes the high bits of
4317   /// the result and one that computes the low bits.
4318   /// \param HiLoVT The value type to use for the Lo and Hi nodes.
4319   /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
4320   ///        if you want to control how low bits are extracted from the LHS.
4321   /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
4322   /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
4323   /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
4324   /// \returns true if the node has been expanded, false if it has not.
4325   bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
4326                  SelectionDAG &DAG, MulExpansionKind Kind,
4327                  SDValue LL = SDValue(), SDValue LH = SDValue(),
4328                  SDValue RL = SDValue(), SDValue RH = SDValue()) const;
4329 
4330   /// Expand funnel shift.
4331   /// \param N Node to expand
4332   /// \param Result output after conversion
4333   /// \returns True, if the expansion was successful, false otherwise
4334   bool expandFunnelShift(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4335 
4336   /// Expand rotations.
4337   /// \param N Node to expand
4338   /// \param AllowVectorOps expand vector rotate, this should only be performed
4339   ///        if the legalization is happening outside of LegalizeVectorOps
4340   /// \param Result output after conversion
4341   /// \returns True, if the expansion was successful, false otherwise
4342   bool expandROT(SDNode *N, bool AllowVectorOps, SDValue &Result,
4343                  SelectionDAG &DAG) const;
4344 
4345   /// Expand float(f32) to SINT(i64) conversion
4346   /// \param N Node to expand
4347   /// \param Result output after conversion
4348   /// \returns True, if the expansion was successful, false otherwise
4349   bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4350 
4351   /// Expand float to UINT conversion
4352   /// \param N Node to expand
4353   /// \param Result output after conversion
4354   /// \param Chain output chain after conversion
4355   /// \returns True, if the expansion was successful, false otherwise
4356   bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
4357                         SelectionDAG &DAG) const;
4358 
4359   /// Expand UINT(i64) to double(f64) conversion
4360   /// \param N Node to expand
4361   /// \param Result output after conversion
4362   /// \param Chain output chain after conversion
4363   /// \returns True, if the expansion was successful, false otherwise
4364   bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
4365                         SelectionDAG &DAG) const;
4366 
4367   /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
4368   SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;
4369 
4370   /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes;
4371   /// vector nodes can only succeed if all operations are legal/custom.
4372   /// \param N Node to expand
4373   /// \param Result output after conversion
4374   /// \returns True, if the expansion was successful, false otherwise
4375   bool expandCTPOP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4376 
4377   /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
4378   /// vector nodes can only succeed if all operations are legal/custom.
4379   /// \param N Node to expand
4380   /// \param Result output after conversion
4381   /// \returns True, if the expansion was successful, false otherwise
4382   bool expandCTLZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4383 
4384   /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
4385   /// vector nodes can only succeed if all operations are legal/custom.
4386   /// \param N Node to expand
4387   /// \param Result output after conversion
4388   /// \returns True, if the expansion was successful, false otherwise
4389   bool expandCTTZ(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
4390 
4391   /// Expand ABS nodes. Expands vector/scalar ABS nodes;
4392   /// vector nodes can only succeed if all operations are legal/custom.
4393   /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param IsNegative indicates that the negated form (-abs(x)) is requested
  /// \returns True if the expansion was successful, false otherwise
  bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG,
                 bool IsNegative = false) const;

  /// Turn a load of a vector type into loads of the individual elements.
  /// \param LD load to expand
  /// \returns BUILD_VECTOR and TokenFactor nodes.
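  ///
  /// A minimal usage sketch (hypothetical caller code):
  /// \code
  ///   std::pair<SDValue, SDValue> Scalarized = TLI.scalarizeVectorLoad(LD, DAG);
  ///   SDValue Value = Scalarized.first;  // BUILD_VECTOR of the elements
  ///   SDValue Chain = Scalarized.second; // TokenFactor of the element loads
  /// \endcode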
  std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns TokenFactor of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number
  /// of stored elements, which equals the number of set ('1') bits in
  /// \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
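  ///
  /// For example (an illustration): with \p DataVT = v4i32,
  /// IsCompressedMemory set, and a mask of <1,0,1,1>, the address advances
  /// by 3 * 4 bytes rather than the full 16.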
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;

  /// Get a pointer to vector element \p Index located in memory for a vector
  /// of type \p VecVT starting at a base address of \p VecPtr. If \p Index is
  /// out of bounds the returned pointer is unspecified, but will be within the
  /// vector bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;

  /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
  /// method accepts integers as its arguments.
  SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
  /// method accepts integers as its arguments.
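  ///
  /// For example (an illustration): i8 uaddsat(200, 100) yields 255 rather
  /// than wrapping to 44; a common expansion selects the saturation constant
  /// when the plain ADD overflows.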
  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
  /// method accepts integers as its arguments.
  SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
  /// method accepts integers as its arguments.
  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
  /// method accepts integers as its arguments.
  /// Note: This method may fail if the division could not be performed
  /// within the type. Clients must retry with a wider type if this happens.
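  ///
  /// Worked example (an illustration): with Scale == 4 (Q4 fixed point),
  /// dividing 2.5 by 0.5 is 40 / 8 in raw bits and computes
  /// (40 << 4) / 8 == 80, i.e. 5.0 in Q4.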
  SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                              SDValue LHS, SDValue RHS,
                              unsigned Scale, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
  /// expansion was successful and populates the Result and Overflow arguments.
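  ///
  /// A minimal usage sketch (hypothetical caller code):
  /// \code
  ///   SDValue Result, Overflow;
  ///   if (TLI.expandMULO(N, Result, Overflow, DAG)) {
  ///     // [US]MULO has two results: the product and the overflow flag.
  ///     DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
  ///     DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Overflow);
  ///   }
  /// \endcode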
  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                  SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_* into an explicit calculation.
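  ///
  /// For example (an illustration): VECREDUCE_ADD of <a,b,c,d> can be
  /// expanded by repeatedly splitting the vector and adding the halves:
  /// <a,b,c,d> -> <a+c, b+d> -> (a+c)+(b+d).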
  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
  SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
  /// Returns true if the expansion was successful.
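  ///
  /// A sketch of the DIV-based form (an illustration): when only the
  /// division is legal, urem(X, Y) becomes sub(X, mul(udiv(X, Y), Y)).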
  bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag.  These instructions are special in various
  /// ways, which require special support to insert.  The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  /// As long as the returned basic block is different (i.e., we created a new
  /// one), the custom inserter is free to modify the rest of \p MBB.
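  ///
  /// A hedged override sketch (MyTargetLowering and the pseudo opcode are
  /// illustrative, not part of this interface):
  /// \code
  ///   MachineBasicBlock *
  ///   MyTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
  ///                                                 MachineBasicBlock *MBB) const {
  ///     switch (MI.getOpcode()) {
  ///     case MyTarget::PSEUDO_SELECT:
  ///       return emitSelectMBB(MI, MBB); // may split MBB and add control flow
  ///     default:
  ///       llvm_unreachable("unexpected instruction for custom insertion");
  ///     }
  ///   }
  /// \endcode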
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g., to fill in optional defs for
  /// ARM 's'-setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode() const {
    return false;
  }

  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }

  /// Lower TLS global address SDNode for target independent emulated TLS model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands target specific indirect branch for the case of JumpTable
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr, SelectionDAG &DAG) const {
    return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
  }

  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose the
  // fact that this can be implemented as a ctlz/srl pair, so that the dag
  // combiner can fold the new nodes.
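  //
  // Worked i32 example (an illustration): ctlz yields 32 iff x == 0, and
  // 32 >> 5 == 1 while any nonzero x gives a count below 32 that shifts
  // to 0, so the ctlz/srl pair computes (x == 0).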
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;

  /// Give targets the chance to reduce the number of distinct addressing modes.
  ISD::MemIndexType getCanonicalIndexType(ISD::MemIndexType IndexType,
                                          EVT MemVT, SDValue Offsets) const;

private:
  SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, DAGCombinerInfo &DCI) const;
  SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                             const SDLoc &DL, DAGCombinerInfo &DCI) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;

  // (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
  SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
      EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
      DAGCombinerInfo &DCI, const SDLoc &DL) const;

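  // The helpers below build the multiplicative-inverse style fold of a
  // urem-by-constant equality test. A worked example (an illustration): for
  // i8, X u% 5 == 0 iff (X * 0xCD) u<= 0x33, since 0xCD is the inverse of 5
  // mod 2^8 and 0x33 == floor(255 / 5).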
  SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;

  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H