//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;
  class ARMSubtarget;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType : unsigned {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.
      INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.

      SSAT,         // Signed saturation

      BCC_i64,

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
      EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      WIN__CHKSTK,  // Windows' __chkstk call to do stack probing.
      WIN__DBZCHK,  // Windows' divide by zero check

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64-bit Unsigned Accumulate Multiply
      SMLAL,        // 64-bit Signed Accumulate Multiply
      UMAAL,        // 64-bit Unsigned Accumulate Accumulate Multiply

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Pseudo-instruction representing a memory copy using ldm/stm
      // instructions.
      MEMCPY,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  } // end namespace ARMISD

  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
  } // end namespace ARM
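
  // For illustration: an "inverted mask" here is (roughly) a value that is
  // all ones except for a single contiguous run of zeros, i.e. the complement
  // of a bit-field, e.g.
  //     isBitFieldInvertedMask(0xffff00ff)   // true:  ~v == 0x0000ff00
  //     isBitFieldInvertedMask(0xff00ff00)   // false: ~v has two runs of ones
  // This is the shape matched when forming BFI/BFC-style bit-field inserts.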

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(const TargetMachine &TM,
                               const ARMSubtarget &STI);

    unsigned getJumpTableEncoding() const override;
    bool useSoftFloat() const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    bool isSelectSupported(SelectSupportKind Kind) const override {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;

    void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                       SDNode *Node) const override;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

    /// allowsMisalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                        unsigned Align,
                                        bool *Fast) const override;

    EVT getOptimalMemOpType(uint64_t Size,
                            unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset,
                            bool MemcpyStrSrc,
                            MachineFunction &MF) const override;

    using TargetLowering::isZExtFree;
    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS) const override;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// getPreIndexedAddressParts - Returns true by value, and the base
    /// pointer, offset, and addressing mode by reference, if the node's
    /// address can be legally represented as a pre-indexed load/store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// getPostIndexedAddressParts - Returns true by value, and the base
    /// pointer, offset, and addressing mode by reference, if this node can be
    /// combined with a load/store to form a post-indexed load/store.
    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                    SDValue &Offset, ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;
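
    // For illustration: pre- and post-indexed forms fold the base-register
    // update into the memory access itself, e.g.
    //     ldr r0, [r1, #4]!   @ pre-indexed:  r1 += 4, then r0 = [r1]
    //     ldr r0, [r1], #4    @ post-indexed: r0 = [r1], then r1 += 4
    // The two hooks above tell the DAG combiner when an address computation
    // can be folded into such a form.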

    void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) const override;

    bool ExpandInlineAsm(CallInst *CI) const override;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    const char *LowerXConstraint(EVT ConstraintVT) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector. If it is invalid, don't add anything to Ops. If hasMemory
    /// is true it means one of the asm constraints of the inline asm
    /// instruction being processed is 'm'.
    void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "Q")
        return InlineAsm::Constraint_Q;
      else if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode.size() == 2) {
        if (ConstraintCode[0] == 'U') {
          switch (ConstraintCode[1]) {
          default:
            break;
          case 'm':
            return InlineAsm::Constraint_Um;
          case 'n':
            return InlineAsm::Constraint_Un;
          case 'q':
            return InlineAsm::Constraint_Uq;
          case 's':
            return InlineAsm::Constraint_Us;
          case 't':
            return InlineAsm::Constraint_Ut;
          case 'v':
            return InlineAsm::Constraint_Uv;
          case 'y':
            return InlineAsm::Constraint_Uy;
          }
        }
      }
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }
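
    // Illustrative use from the C side (assuming Clang's GCC-compatible ARM
    // constraints): "Q" restricts a memory operand to a plain single-register
    // address, as required by exclusive accesses, e.g.
    //     asm volatile("ldrex %0, %1" : "=&r"(V) : "Q"(*Ptr));
    // The hook above maps such constraint strings onto InlineAsm codes.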

    const ARMSubtarget *getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    const TargetRegisterClass *getRegClassFor(MVT VT) const override;

    /// Returns true if a cast between SrcAS and DestAS is a noop.
    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
      // Addrspacecasts are always noops.
      return true;
    }

    bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                unsigned &PrefAlign) const override;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo) const override;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    bool
    isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            unsigned Intrinsic) const override;

    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    /// \brief Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
        Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

    Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                          AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                Value *Addr, AtomicOrdering Ord) const override;
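
    // For illustration: these hooks emit ARM's exclusive load/store pair, from
    // which atomic update loops are built, roughly:
    //     loop:
    //       ldrex   r0, [r1]        @ load-linked
    //       ...compute new value in r3...
    //       strex   r2, r3, [r1]    @ store-conditional; r2 == 0 on success
    //       cmp     r2, #0
    //       bne     loop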

    void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

    Instruction *emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                  bool IsStore, bool IsLoad) const override;
    Instruction *emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                   bool IsStore, bool IsLoad) const override;

    unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

    bool lowerInterleavedLoad(LoadInst *LI,
                              ArrayRef<ShuffleVectorInst *> Shuffles,
                              ArrayRef<unsigned> Indices,
                              unsigned Factor) const override;
    bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                               unsigned Factor) const override;
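
    // Illustrative IR shape (hypothetical values): a factor-2 interleaved
    // load such as
    //     %wide = load <8 x i32>, <8 x i32>* %ptr
    //     %even = shufflevector <8 x i32> %wide, <8 x i32> undef,
    //                           <4 x i32> <i32 0, i32 2, i32 4, i32 6>
    //     %odd  = shufflevector <8 x i32> %wide, <8 x i32> undef,
    //                           <4 x i32> <i32 1, i32 3, i32 5, i32 7>
    // can be lowered to a single NEON vld2 that deinterleaves in one step; the
    // factor limit of 4 above corresponds to vld4/vst4.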

    bool shouldInsertFencesForAtomic(const Instruction *I) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
    bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

    bool useLoadStackGuardNode() const override;

    bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                   unsigned &Cost) const override;

    bool isCheapToSpeculateCttz() const override;
    bool isCheapToSpeculateCtlz() const override;

    bool supportSwiftError() const override {
      return true;
    }

  protected:
    std::pair<const TargetRegisterClass *, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI,
                            MVT VT) const override;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
    // check.
    bool InsertFencesForAtomic;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);
    std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                              SDValue &ARMcc) const;

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                          SDValue &Arg, RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 const SDLoc &dl) const;

    CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                            bool isVarArg) const;
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             const SDLoc &dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
    void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                           SmallVectorImpl<SDValue> &Results) const;
    SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                   SDValue &Chain) const;
    SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

    unsigned getRegisterByName(const char* RegName, EVT VT,
                               SelectionDAG &DAG) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter so it's not clear that there
    /// would be a gain or that the gain would be worthwhile enough to risk
    /// correctness bugs.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }
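
    // Concrete effect: because this returns false, a call such as
    //     %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
    // is expanded to separate fmul and fadd nodes rather than to a fused
    // multiply-add, per the contract described above.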

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                            SDValue ThisVal) const;

    bool supportSplitCSR(MachineFunction *MF) const override {
      return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
             MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
    }
    void initializeSplitCSR(MachineBasicBlock *Entry) const override;
    void insertCopiesSplitCSR(
        MachineBasicBlock *Entry,
        const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                       SDValue &Chain, const Value *OrigArg,
                       unsigned InRegsParamRecordIdx, int ArgOffset,
                       unsigned ArgSize) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              const SDLoc &dl, SDValue &Chain,
                              unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                              bool ForceMutable = false) const;

    SDValue
    LowerCall(TargetLowering::CallLoweringInfo &CLI,
              SmallVectorImpl<SDValue> &InVals) const override;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    void HandleByVal(CCState *, unsigned &, unsigned) const override;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;

    bool CanLowerReturn(CallingConv::ID CallConv,
                        MachineFunction &MF, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(CallInst *CI) const override;

    SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                    SDValue ARMcc, SDValue CCR, SDValue Cmp,
                    SelectionDAG &DAG) const;
    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                      const SDLoc &dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const;

    MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                           MachineBasicBlock *MBB) const;
    MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                           MachineBasicBlock *MBB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  } // end namespace ARM
} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H