//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
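  // An illustrative expansion (exact relocation specifiers assumed):
  //   movz x0, #:abs_g3:sym
  //   movk x0, #:abs_g2_nc:sym
  //   movk x0, #:abs_g1_nc:sym
  //   movk x0, #:abs_g0_nc:sym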
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
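  // An illustrative descriptor call sequence (relocation names assumed):
  //   adrp x0, :tlsdesc:var
  //   ldr  x1, [x0, #:tlsdesc_lo12:var]
  //   add  x0, x0, #:tlsdesc_lo12:var
  //   .tlsdesccall var
  //   blr  x1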
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
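  // ADRP and ADDlow typically pair up to materialise a small-code-model
  // address, e.g.:
  //   adrp x0, sym
  //   add  x0, x0, :lo12:sym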
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left, right, falsecc, cc, flags.
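  // For example, "a == 5 && b == 7" can lower to a chain like (illustrative):
  //   cmp  w0, #5
  //   ccmp w1, #7, #0, eq  // compare b if a == 5, else set NZCV to #0
  //   b.eq both_true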
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT, but the selection mask need
  // not be uniform within each element; bits are selected individually.
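  // Roughly, BSL(mask, l, r) yields (l & mask) | (r & ~mask), choosing
  // between l and r one bit at a time (operand order assumed here).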
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector saturating and rounding shifts by immediate
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
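  // e.g. a single "scvtf s0, s0" with no GPR-to-FPR transfer (illustrative).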
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte swapping. But sometimes we do
  /// need to reinterpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
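  // e.g. LD2post corresponds to a post-indexed load such as
  //   ld2 { v0.4s, v1.4s }, [x0], #32
  // which also produces the updated base register as an extra result.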
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAlignment) const override;
  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

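  // Interleaved accesses of factors 2-4 can map onto ld2/ld3/ld4 and
  // st2/st3/st4; e.g. a factor-2 interleaved load may become (illustrative):
  //   ld2 { v0.4s, v1.4s }, [x0]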
  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
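  /// For example, "llvm.fmuladd.f64(a, b, c)" may then become a single
  /// "fmadd d0, d0, d1, d2" (illustrative register assignment).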
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }
  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, SDLoc dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char *RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
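    // 'Q' constrains a memory operand to an address held in a single base
    // register with no offset, as needed by e.g. exclusive load/store
    // instructions: asm("ldxr %0, %1" : "=r"(v) : "Q"(*p));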
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif