//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperDYN,   // WrapperDYN - A wrapper node for TargetGlobalAddress in
                    // DYN mode.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
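
      // Illustrative use (a sketch): an i64 logical shift right by one can
      // be lowered as SRL_FLAG on the high word followed by RRX on the low
      // word, shifting the saved carry bit into the vacated top bit.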

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
      EH_SJLJ_DISPATCHSETUP,  // SjLj exception handling dispatch setup.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,     // Memory barrier (DMB)
      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation. Otherwise,
    /// returns -1.
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
    bool isBitFieldInvertedMask(unsigned v);
  }
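
  // Illustrative examples (a sketch of the intended behavior; the VFP
  // "modified immediate" form encodes +/-(n/16) * 2^r with 16 <= n <= 31
  // and -3 <= r <= 4):
  //   ARM::getVFPf32Imm(APFloat(1.0f));         // encodable: returns the
  //                                             // 8-bit representation
  //   ARM::getVFPf32Imm(APFloat(0.1f));         // returns -1: not encodable
  //   ARM::isBitFieldInvertedMask(0xffff00ffU); // true: all ones except one
  //                                             // contiguous clear bit-field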

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// getPreIndexedAddressParts - Returns true by value, and sets the base
    /// pointer, offset pointer, and addressing mode by reference, if the
    /// node's address can be legally represented as a pre-indexed load/store
    /// address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - Returns true by value, and sets the base
    /// pointer, offset pointer, and addressing mode by reference, if this
    /// node can be combined with a load/store to form a post-indexed
    /// load/store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
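
    // For illustration: both hooks correspond to ARM writeback addressing
    // modes, e.g.
    //   ldr r0, [r1, #4]!   @ pre-indexed:  r1 += 4 first, then load from r1
    //   ldr r0, [r1], #4    @ post-indexed: load from r1 first, then r1 += 4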

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;


    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector. If it is invalid, don't add anything to Ops. If hasMemory
    /// is true, it means one of the asm constraints of the inline asm
    /// instruction being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;
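
    // A sketch of common ARM inline-asm constraints handled by these hooks
    // (illustrative, not exhaustive):
    //   "r" - general-purpose register     "l" - low registers r0-r7 (Thumb)
    //   "w" - VFP/NEON register            "m" - memory operand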

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;
  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              DebugLoc dl, SDValue &Chain,
                              unsigned ArgOffset) const;

    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned &VARegSize, unsigned &VARegSaveSize) const;
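
    // Illustrative note on the f64 helpers above: under the AAPCS base
    // (soft-float ABI) calling convention, an f64 travels in a pair of core
    // registers, so PassF64ArgInRegs / GetF64FormalArgument split and
    // reassemble doubles via the VMOVRRD / VMOVDRR nodes defined earlier.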

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, DebugLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              bool signExtend,
                                              ARMCC::CondCodes Cond) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };


  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}

#endif  // ARMISELLOWERING_H