//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,  // Conditional select.
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
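  // The EXTR instruction produces a register-width result taken from the
  // concatenation of its two source registers, starting at a given bit
  // position; when both sources are the same register it acts as a rotate
  // right.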
  EXTR,

  // Scalar-to-vector duplication
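  // DUP broadcasts a scalar operand to every lane of the result vector; the
  // DUPLANE<N> variants broadcast a single lane of a vector operand, with <N>
  // giving the element size in bits.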
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
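  // Each result bit is taken from the corresponding bit of one source or the
  // other, according to the corresponding bit of the mask operand.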
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  // Vector widening multiplies: each result element is the product of the
  // corresponding source elements, at twice the source element width.
  SMULL,
  UMULL,

  // Reciprocal estimates.
  FRECPE,
  FRSQRTE,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAlignment) const override;
  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true; otherwise, fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
                           unsigned &RefinementSteps,
                           bool &UseOneConstNR) const override;
  SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
                           unsigned &RefinementSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif