//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM-specific DAG nodes.
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperDYN,   // WrapperDYN - A wrapper node for TargetGlobalAddress in
                    // DYN mode.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable.

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch, not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.
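                    // An illustrative sketch (not from the original header)
                    // of the canonical ARM PIC materialization this models:
                    //   ldr  r0, .LCPI0_0    ; load pc-relative offset
                    // .LPC0_0:
                    //   add  r0, pc, r0      ; PIC_ADD: add pc at the label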

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.

      BCC_i64,      // Conditional branch on a 64-bit integer comparison.

      RBIT,         // ARM bitreverse instruction.

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag     -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry.
      ADDE,         // Add using carry.
      SUBC,         // Sub with carry.
      SUBE,         // Sub using carry.

      VMOVRRD,      // Double to two GPRs.
      VMOVDRR,      // Two GPRs to double.

      EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
      EH_SJLJ_DISPATCHSETUP,  // SjLj exception handling dispatch setup.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER, // Read the thread pointer (for TLS).

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier (DMB).
      MEMBARRIER_MCR, // Memory barrier (MCR).
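                      // Illustrative lowering sketch (not from the original
                      // header): ARMv7 can use "dmb ish"; the ARMv6 MCR form
                      // is "mcr p15, 0, r0, c7, c10, 5".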

      PRELOAD,      // Preload.

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized.  Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,

      // Bit-field insert:
      BFI,

      // Vector OR with immediate:
      VORRIMM,
      // Vector AND with NOT of immediate:
      VBICIMM,

      // Vector bitwise select:
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD,

      // 64-bit atomic ops (value split into two registers):
      ATOMADD64_DAG,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,
      ATOMCMPXCHG64_DAG
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
  }
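
  // Illustrative sketch (an assumption, not part of the original header):
  // an "inverted mask" here is the complement of a single contiguous run of
  // ones, the shape the BFI/BFC bit-field instructions can encode. One
  // plausible implementation:
  //
  //   bool isBitFieldInvertedMask(unsigned v) {
  //     if (v == 0u || v == ~0u) return false;  // trivial masks don't count
  //     unsigned m = ~v | (~v - 1);  // fill zeros below the run of ones
  //     return (m & (m + 1)) == 0;   // true iff ~v was one contiguous run
  //   }
  //
  // e.g. 0xffff00ff (bits 8..15 clear) qualifies; 0xff00ff00 does not.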

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(EVT VT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    virtual void
    AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;
    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
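
    // Illustrative note (not from the original header): ARM load/store
    // addressing supports register and scaled-register offsets, e.g.
    //   ldr r0, [r1, r2, lsl #2]   ; base + (index << 2), typical for i32[]
    // so an AddrMode whose Scale is a power of two can fold into the
    // shifted-register form, while e.g. Scale == 3 cannot.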

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, i.e. the target has icmp instructions that can
    /// compare a register against the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, i.e. the target has add instructions that can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;
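
    // Illustrative note (an assumption about the A32 encoding, not part of
    // the original header): a "legal" immediate is one the ARM
    // modified-immediate encoding can represent, i.e. an 8-bit value
    // rotated right by an even amount within a 32-bit word. For example:
    //   0xFF, 0xFF00, and 0x3FC00000 are encodable;
    //   0x101 is not (its set bits span more than 8 contiguous bits).
    // For icmp, the cmp/cmn pair can additionally accept Imm when -Imm is
    // encodable.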

    /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
    /// offset, and addressing mode by reference, if the node's address can
    /// be legally represented as a pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - Returns true, and sets the base pointer,
    /// offset, and addressing mode by reference, if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
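
    // Illustrative note (not from the original header): pre- and
    // post-indexed forms fold the base-register update into the memory
    // operation itself, e.g.
    //   ldr r0, [r1, #4]!   ; pre-indexed:  r1 += 4, then load from [r1]
    //   ldr r0, [r1], #4    ; post-indexed: load from [r1], then r1 += 4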

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true, it means one of the asm constraints of the inline asm
    /// instruction being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target-specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
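
    // Illustrative note (an assumption about VFPv3, not part of the
    // original header): VMOV.F32/.F64 can materialize the small set of
    // constants encodable in 8 bits, roughly +/-(16..31)/16 * 2^n for n in
    // [-3, 4] -- e.g. 1.0, 0.5, 2.0, 31.0 -- so this would return true for
    // those on VFPv3 targets, and everything else gets a constant-pool load.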

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;
  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              DebugLoc dl, SDValue &Chain,
                              unsigned ArgOffset) const;

    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned &VARegSize, unsigned &VARegSaveSize) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, DebugLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;
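
    // Illustrative note (not from the original header): getVFPCmp builds a
    // CMPFP / CMPFPw0 node; the flags then reach the core CPSR via FMSTAT,
    // conceptually:
    //   vcmp.f64  d0, d1
    //   vmrs      APSR_nzcv, fpscr   ; copy N,Z,C,V for a following bcc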

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
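
    // Illustrative expansion sketch (not from the original header): on
    // targets with exclusive monitors, an atomic read-modify-write becomes
    // an LDREX/STREX retry loop, conceptually:
    //   loop:
    //     ldrex   r0, [r2]        ; load-exclusive the old value
    //     add     r1, r0, r3      ; apply BinOpcode to the old value
    //     strex   r4, r1, [r2]    ; try to store-exclusive the result
    //     cmp     r4, #0
    //     bne     loop            ; retry if the reservation was lost
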
    MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
                                          MachineBasicBlock *BB,
                                          unsigned Op1,
                                          unsigned Op2,
                                          bool NeedsCarry = false,
                                          bool IsCmpxchg = false) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              bool signExtend,
                                              ARMCC::CondCodes Cond) const;

    void EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB,
                                      MachineBasicBlock *DispatchBB) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };
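
  // Illustrative note (not from the original header): these distinguish how
  // a NEON vector constant gets materialized, e.g.
  //   vmov.i32 q0, #0x000000ff   ; VMOVModImm: encoded bits used directly
  //   vmvn.i32 q0, #0x000000ff   ; VMVNModImm: complement of the encoding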

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif  // ARMISELLOWERING_H