//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This
      /// corresponds to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This
      /// corresponds to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular
      /// the operands of this node are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      CALL,

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with a sbb and the value is all
      // one's or all zero's.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0
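
      // For example (illustrative): with the carry flag set by a prior
      // compare, "sbb %eax, %eax" yields EAX = CF ? 0xffffffff : 0,
      // materializing the all-ones / all-zeros mask described above.
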
      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s. Generally DTRT for C/C++ with NaNs.
      FSETCCss, FSETCCsd,

      /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
      /// result in an integer GPR. Needs masking for scalar result.
      FGETSIGNx86,

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch if condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the
      /// popl at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// ANDNP - Bitwise Logical AND NOT of Packed FP values.
      ANDNP,

      /// PSIGN - Copy integer sign.
      PSIGN,

      /// BLENDV - Blend where the selector is an XMM.
      BLENDV,

      /// BLENDxx - Blend where the selector is an immediate.
      BLENDPW,
      BLENDPS,
      BLENDPD,

      /// HADD - Integer horizontal add.
      HADD,

      /// HSUB - Integer horizontal sub.
      HSUB,

      /// FHADD - Floating point horizontal add.
      FHADD,

      /// FHSUB - Floating point horizontal sub.
      FHSUB,

      /// FMAX, FMIN - Floating point max and min.
      ///
      FMAX, FMIN,

      /// FMAXC, FMINC - Commutative FMIN and FMAX.
      FMAXC, FMINC,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,
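
      // For example (illustrative): one Newton-Raphson step refines the
      // RSQRTSS estimate x0 of 1/sqrt(a) to roughly full single precision:
      //   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
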
      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSBASEADDR - Thread Local Storage. A call to get the start address
      // of the TLS block for the current module.
      TLSBASEADDR,

      // TLSCALL - Thread Local Storage. When calling to an OS provided
      // thunk at the address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSEXT_MOVL - Vector move low and sign extend.
      VSEXT_MOVL,

      // VFPEXT - Vector FP extend.
      VFPEXT,

      // VSHLDQ, VSRLDQ - 128-bit vector logical left / right shift.
      VSHLDQ, VSRLDQ,

      // VSHL, VSRL, VSRA - Vector shift elements.
      VSHL, VSRL, VSRA,

      // VSHLI, VSRLI, VSRAI - Vector shift elements by immediate.
      VSHLI, VSRLI, VSRAI,

      // CMPP - Vector packed double/float comparison.
      CMPP,

      // PCMP* - Vector integer comparisons.
      PCMPEQ, PCMPGT,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      ANDN, // ANDN - Bitwise AND NOT with FLAGS results.

      BLSI,   // BLSI - Extract lowest set isolated bit
      BLSMSK, // BLSMSK - Get mask up to lowest set bit
      BLSR,   // BLSR - Reset lowest set bit

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons.
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons.
      TESTP,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      SHUFP,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKL,
      UNPCKH,
      VPERMILP,
      VPERMV,
      VPERMI,
      VPERM2X128,
      VBROADCAST,

      // PMULUDQ - Vector multiply packed unsigned doubleword integers.
      PMULUDQ,

      // FMA nodes.
      FMADD,
      FNMADD,
      FMSUB,
      FNMSUB,
      FMADDSUB,
      FMSUBADD,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // SEG_ALLOCA - For allocating variable amounts of stack space when using
      // segmented stacks. Check if the current stacklet has enough space, and
      // falls back to heap allocation if not.
      SEG_ALLOCA,

      // WIN_FTOL - Windows's _ftol2 runtime routine to do fptoui.
      WIN_FTOL,

      // Memory barrier.
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE,

      // FNSTSW16r - Store FP status word into i16 register.
      FNSTSW16r,

      // SAHF - Store contents of %ah into %eflags.
      SAHF,

      // RDRAND - Get a random integer and indicate whether it is valid in CF.
      RDRAND,

      // PCMP*STRI
      PCMPISTRI,
      PCMPESTRI,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,
      LCMPXCHG16_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,
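
      // For example (illustrative): a v2i64 VZEXT_LOAD is typically matched
      // to "movq (%rax), %xmm0", which loads 64 bits into the low element
      // and zeroes the upper lane.
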
      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff.
      /// It has two inputs (token chain and address) and two outputs (int
      /// value and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain).
      /// FILD_FLAG also produces a flag.
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack
      /// slots. This corresponds to the X86::FLD32m / X86::FLD64m. It takes
      /// a chain operand, ptr to load from, and a ValueType node indicating
      /// the type to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64

      // WARNING: Do not add anything in the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // thought as target memory ops!
    };
  }

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isVEXTRACTF128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128.
    bool isVEXTRACTF128Index(SDNode *N);

    /// isVINSERTF128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128.
    bool isVINSERTF128Index(SDNode *N);

    /// getExtractVEXTRACTF128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128 instructions.
    unsigned getExtractVEXTRACTF128Immediate(SDNode *N);

    /// getInsertVINSERTF128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128 instructions.
    unsigned getInsertVINSERTF128Immediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating
    /// point constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset fits
    /// into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);
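
    // For example (illustrative): under the small code model a global
    // accessed as base + Offset must keep Offset within the signed 32-bit
    // displacement range, so an offset of 1LL << 40 would be rejected.
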
    /// isCalleePop - Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool TailCallOpt);
  }

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;

    /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
    /// jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;

    /// getStackPtrReg - Return the stack pointer register we are using:
    /// either ESP or RSP.
    unsigned getStackPtrReg() const { return X86StackPtr; }

    /// getByValTypeAlignment - Return the desired alignment for ByVal
    /// aggregate function arguments in the caller parameter area. For X86,
    /// aggregates that contain SSE vectors are placed at 16-byte boundaries
    /// while the rest are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(Type *Ty) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for
    /// load and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly, if SrcAlign is zero there is no need to
    /// check it against an alignment requirement, probably because the source
    /// does not need to be loaded. If 'IsZeroVal' is true, it is safe to
    /// return a non-scalar-integer type, e.g. empty string source, constant,
    /// or loaded from memory. 'MemcpyStrSrc' indicates whether the memcpy
    /// source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsZeroVal, bool MemcpyStrSrc,
                        MachineFunction &MF) const;
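
    // For example (illustrative): a 16-byte memset to a 16-byte-aligned
    // destination on an SSE2 target can return MVT::v4i32 here, letting the
    // fill be emitted as one 128-bit store instead of four scalar stores.
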
    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
      return true;
    }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal
    /// result type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support
    /// for the specified value type and it is 'desirable' to use the type for
    /// the given node type. e.g. On x86 i16 is legal, but undesirable since
    /// i16 instruction encodings are longer and some i16 instructions are
    /// slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if the target has native support
    /// for the specified value type and it is 'desirable' to use the type.
    /// e.g. On x86 i16 is legal, but undesirable since i16 instruction
    /// encodings are longer and some i16 instructions are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector. If it is invalid, don't add anything to Ops. If hasMemory
    /// is true it means one of the asm constraints of the inline asm
    /// instruction being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for
    /// the register. This should only be used for C_Register constraints.
    /// On error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified
    /// type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
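
    // For example (illustrative): x86 addressing is Base + Scale*Index + Disp
    // with Scale in {1, 2, 4, 8}, so [%rdi + 4*%rcx + 16] is legal while a
    // scale of 3 would be rejected.
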
    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the
    /// result register. This does not necessarily include registers defined
    /// in unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;
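
    // For example (illustrative): on x86-64 "movl %edi, %eax" writes EAX and
    // implicitly zeroes bits 63:32 of RAX, so no separate zero-extension
    // instruction is needed.
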
    /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster
    /// than a pair of mul and add instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true (and FMAs are legal),
    /// otherwise fmuladd is expanded to mul + add.
    virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can
    /// use this to indicate if there is a suitable VECTOR_SHUFFLE that can
    /// be used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;

    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }

    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }

    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type
    /// is computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
    }

    /// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
    /// for fptoui.
    bool isTargetFTOL() const {
      return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
    }

    /// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
    /// used for fptoui to the given type.
    bool isIntegerTypeFTOL(EVT VT) const {
      return isTargetFTOL() && VT == MVT::i64;
    }

    /// createFastISel - This method returns a target specific FastISel
    /// object, or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as
    /// appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;

    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const X86RegisterInfo *RegInfo;
    const TargetData *TD;

    /// X86StackPtr - X86 physical register used as stack ptr.
    unsigned X86StackPtr;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal fp immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
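
    // For example (illustrative): +0.0 and +1.0 are registered as legal x87
    // immediates because fldz/fld1 materialize them directly, with no
    // constant-pool load.
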
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall,
                                    bool Is64Bit, int FPDiff,
                                    DebugLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned,
                                               bool isReplace) const;

    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
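
    // For example (illustrative): LowerToBT can turn
    //   (and X, (shl 1, N)) != 0
    // into a single "bt %ecx, %eax" that puts bit N of EAX into CF.
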
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
    SDValue PerformTruncateCombine(SDNode* N, SelectionDAG &DAG,
                                   DAGCombinerInfo &DCI) const;

    // Utility functions to help LowerVECTOR_SHUFFLE.
    SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const;
    SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
      LowerCall(CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
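
    // For example (illustrative): a call whose only use is the enclosing
    // "ret" may be emitted as "jmp callee", reusing the caller's frame
    // instead of pushing a new one.
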
    virtual EVT
    getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                             ISD::NodeType ExtendKind) const;

    virtual bool
    CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                   bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const;

    /// Utility function to emit string processing sse4.2 instructions
    /// that return in xmm0.
    /// This takes the instruction to expand, the associated machine basic
    /// block, the number of args, and whether or not the second arg is
    /// in memory or not.
    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
                                unsigned argNum, bool inMem) const;

    /// Utility functions to emit monitor and mwait instructions. These
    /// need to make sure that the arguments to the intrinsic are in the
    /// correct registers.
    MachineBasicBlock *EmitMonitor(MachineInstr *MI,
                                   MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;

    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine
    /// basic block, and the associated X86 opcodes for reg/reg and reg/imm.
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                  MachineInstr *BInstr,
                                                  MachineBasicBlock *BB,
                                                  unsigned regOpc,
                                                  unsigned immOpc,
                                                  unsigned loadOpc,
                                                  unsigned cxchgOpc,
                                                  unsigned notOpc,
                                                  unsigned EAXreg,
                                            const TargetRegisterClass *RC,
                                                  bool Invert = false) const;

    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                  MachineInstr *BInstr,
                                                  MachineBasicBlock *BB,
                                                  unsigned regOpcL,
                                                  unsigned regOpcH,
                                                  unsigned immOpcL,
                                                  unsigned immOpcH,
                                                  bool Invert = false) const;

    /// Utility function to emit atomic min and max. It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(
                                                  MachineInstr *BInstr,
                                                  MachineBasicBlock *BB,
                                                  unsigned cmovOpc) const;

    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool Is64Bit) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;

    /// Convert a comparison if required by the subtarget.
    SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif    // X86ISELLOWERING_H