//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
#define LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H

#include "PPC.h"
#include "PPCSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"

namespace llvm {
  namespace PPCISD {
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FSEL - Traditional three-operand fsel node.
      ///
      FSEL,

      /// FCFID - The FCFID instruction, taking an f64 operand and producing
      /// an f64 value containing the FP representation of the integer that
      /// was temporarily in the f64 operand.
      FCFID,

      /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
      /// operand, producing an f64 value containing the integer representation
      /// of that FP value.
      FCTIDZ, FCTIWZ,

      /// STFIWX - The STFIWX instruction. The first operand is an input token
      /// chain, then an f64 value to store, then an address to store it to.
      STFIWX,

      // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
      // three v4f32 operands and producing a v4f32 result.
      VMADDFP, VNMSUBFP,

      /// VPERM - The PPC VPERM Instruction.
      ///
      VPERM,

      /// Hi/Lo - These represent the high and low 16-bit parts of a global
      /// address respectively. These nodes have two operands, the first of
      /// which must be a TargetGlobalAddress, and the second of which must be
      /// a Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
      /// though these are usually folded into other nodes.
      Hi, Lo,

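      // Illustrative sketch only (not part of this interface): per the
      // description above, address-lowering code might combine the two halves
      // roughly as follows, where GA is a TargetGlobalAddress node and Zero is
      // a zero Constant (all names here are hypothetical):
      //
      //   SDValue Hi = DAG.getNode(PPCISD::Hi, dl, MVT::i32, GA, Zero);
      //   SDValue Lo = DAG.getNode(PPCISD::Lo, dl, MVT::i32, GA, Zero);
      //   SDValue Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, Lo);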

      TOC_ENTRY,

      /// The following three target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.

      /// Restore the TOC from the TOC save area of the current stack frame.
      /// This is basically a hard coded load instruction which additionally
      /// takes/produces a flag.
      TOC_RESTORE,

      /// Like a regular LOAD but additionally taking/producing a flag.
      LOAD,

      /// LOAD into r2 (also taking/producing a flag). Like TOC_RESTORE, this
      /// is a hard coded load instruction.
      LOAD_TOC,

      /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex
      /// to compute an allocation on the stack.
      DYNALLOC,

      /// GlobalBaseReg - On Darwin, this node represents the result of the
      /// mflr at function entry, used for PIC code.
      GlobalBaseReg,

      /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
      /// shift amounts. These nodes are generated by the multi-precision shift
      /// code.
      SRL, SRA, SHL,

      /// EXTSW_32 - This is the EXTSW instruction for use with "32-bit"
      /// registers.
      EXTSW_32,

      /// CALL - A direct function call.
      /// CALL_NOP_SVR4 is a call with the special NOP which follows 64-bit
      /// SVR4 calls.
      CALL_Darwin, CALL_SVR4, CALL_NOP_SVR4,

      /// NOP - Special NOP which follows 64-bit SVR4 calls.
      NOP,

      /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
      /// MTCTR instruction.
      MTCTR,

      /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
      /// BCTRL instruction.
      BCTRL_Darwin, BCTRL_SVR4,

      /// Return with a flag operand, matched by 'blr'
      RET_FLAG,

      /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF
      /// instructions. This copies the bits corresponding to the specified
      /// CRREG into the resultant GPR. Bits corresponding to other CR regs
      /// are undefined.
      MFCR,

      /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
      /// instructions. For lack of a better number, we use the opcode number
      /// encoding for the OPC field to identify the compare. For example, 838
      /// is VCMPGTSH.
      VCMP,

      /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
      /// altivec VCMP*o instructions. For lack of a better number, we use the
      /// opcode number encoding for the OPC field to identify the compare. For
      /// example, 838 is VCMPGTSH.
      VCMPo,

      /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
      /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
      /// condition register to branch on, OPC is the branch opcode to use
      /// (e.g. PPC::BLE), DESTBB is the destination block to branch to, and
      /// INFLAG is an optional input flag argument.
      COND_BRANCH,

      // The following 5 instructions are used only as part of the
      // long double-to-int conversion sequence.

      /// OUTFLAG = MFFS F8RC - This moves the FPSCR (not modelled) into the
      /// register.
      MFFS,

      /// OUTFLAG = MTFSB0 INFLAG - This clears a bit in the FPSCR.
      MTFSB0,

      /// OUTFLAG = MTFSB1 INFLAG - This sets a bit in the FPSCR.
      MTFSB1,

      /// F8RC, OUTFLAG = FADDRTZ F8RC, F8RC, INFLAG - This is an FADD done
      /// with rounding towards zero. It has flags added so it won't move past
      /// the FPSCR-setting instructions.
      FADDRTZ,

      /// MTFSF = F8RC, INFLAG - This moves the register into the FPSCR.
      MTFSF,

      /// LARX - This corresponds to the PPC l{w|d}arx instruction: load and
      /// reserve indexed. This is used to implement atomic operations.
      LARX,

      /// STCX - This corresponds to the PPC stcx. instruction: store
      /// conditional indexed. This is used to implement atomic operations.
      STCX,

      /// TC_RETURN - A tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
      CR6SET,
      CR6UNSET,

      /// STD_32 - This is the STD instruction for use with "32-bit" registers.
      STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,

      /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
      /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
      /// the GPRC input, then stores it through Ptr. Type can be either i16 or
      /// i32.
      STBRX,

      /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
      /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
      /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
      /// or i32.
      LBRX
    };
  }

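  // Illustrative sketch only (not part of this interface): per the VCMP
  // description above, a lowering routine might encode a vcmpgtsh comparison
  // by passing the instruction's opcode number (838) as the OPC operand.
  // Everything here other than PPCISD::VCMP is a hypothetical example:
  //
  //   SDValue Cmp = DAG.getNode(PPCISD::VCMP, dl, MVT::v8i16, LHS, RHS,
  //                             DAG.getConstant(838, MVT::i32));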

  /// Define some predicates that are used for node matching.
  namespace PPC {
    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            bool isUnary);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            bool isUnary);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
    /// shift amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, bool isUnary);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE
    /// operand specifies a splat of a single element that is suitable for
    /// input to VSPLTB/VSPLTH/VSPLTW.
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

    /// isAllNegativeZeroVector - Returns true if all elements of build_vector
    /// are -0.0.
    bool isAllNegativeZeroVector(SDNode *N);

    /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat
    /// the specified isSplatShuffleMask VECTOR_SHUFFLE mask.
    unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted. The ByteSize field indicates
    /// the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
  }

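  // Illustrative sketch only (not part of this interface): how a
  // shuffle-lowering routine might use the splat predicates above.  SVOp is
  // assumed to be a ShuffleVectorSDNode for a v4i32 shuffle; the variable
  // names are hypothetical:
  //
  //   if (PPC::isSplatShuffleMask(SVOp, 4)) {
  //     // Splatting 4-byte elements; the immediate selects which element
  //     // a vspltw should broadcast to all lanes.
  //     unsigned SplatImm = PPC::getVSPLTImmediate(SVOp, 4);
  //     ...
  //   }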

  class PPCTargetLowering : public TargetLowering {
    const PPCSubtarget &PPCSubTarget;

  public:
    explicit PPCTargetLowering(PPCTargetMachine &TM);

    /// getTargetNodeName() - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }

    /// getSetCCResultType - Return the ISD::SETCC ValueType
    virtual EVT getSetCCResultType(EVT VT) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be represented as an indexed [r+r] operation. Returns false if it
    /// can be more efficiently represented with [r+imm].
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegImm - Returns true if the address N can be represented
    /// by a base register plus a signed 16-bit displacement [r+imm], and if it
    /// is not better represented as reg+reg.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    /// SelectAddressRegImmShift - Returns true if the address N can be
    /// represented by a base register plus a signed 14-bit displacement
    /// [r+imm*4]. Suitable for use by STD and friends.
    bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
                                  SelectionDAG &DAG) const;

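    // Illustrative sketch only (not part of this interface): how an
    // instruction selector might use the helpers above to pick between D-form
    // and X-form addressing for a load.  Addr, Disp, Base, Index and the
    // surrounding code are hypothetical:
    //
    //   SDValue Disp, Base, Index;
    //   if (SelectAddressRegImm(Addr, Disp, Base, DAG)) {
    //     // [r+imm]: fold the signed 16-bit displacement into a D-form load.
    //   } else if (SelectAddressRegReg(Addr, Base, Index, DAG)) {
    //     // [r+r]: use an indexed (X-form) load instead.
    //   }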

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *MBB, bool is64Bit,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit,
                                                unsigned Opcode) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// getByValTypeAlignment - Return the desired alignment for ByVal
    /// aggregate function arguments in the caller parameter area. This is the
    /// actual alignment, not its logarithm.
    unsigned getByValTypeAlignment(Type *Ty) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

    /// isLegalAddressImmediate - Return true if the integer value can be used
    /// as the offset of the target addressing mode for load / store of the
    /// given type.
    virtual bool isLegalAddressImmediate(int64_t V, Type *Ty) const;

    /// isLegalAddressImmediate - Return true if the GlobalValue can be used as
    /// the offset of the target addressing mode.
    virtual bool isLegalAddressImmediate(GlobalValue *GV) const;

    virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, it is safe to assume the destination
    /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero,
    /// there is no need to check it against an alignment requirement, probably
    /// because the source does not need to be loaded. If 'IsZeroVal' is true,
    /// it is safe to return a non-scalar-integer type, e.g. when the source is
    /// an empty string, a constant, or loaded from memory. 'MemcpyStrSrc'
    /// indicates whether the memcpy source is constant so it does not need to
    /// be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsZeroVal, bool MemcpyStrSrc,
                        MachineFunction &MF) const;

    /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster
    /// than a pair of mul and add instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true (and FMAs are legal),
    /// otherwise fmuladd is expanded to mul + add.
    virtual bool isFMAFasterThanMulAndAdd(EVT VT) const;

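    // Illustrative sketch only (not part of this interface): how
    // target-independent lowering might consult the hook above when expanding
    // @llvm.fmuladd.  TLI, A, B, C, Res and the surrounding code are
    // hypothetical:
    //
    //   if (TLI.isFMAFasterThanMulAndAdd(VT) &&
    //       TLI.isOperationLegalOrCustom(ISD::FMA, VT))
    //     Res = DAG.getNode(ISD::FMA, dl, VT, A, B, C);
    //   else
    //     Res = DAG.getNode(ISD::FADD, dl, VT,
    //                       DAG.getNode(ISD::FMUL, dl, VT, A, B), C);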

  private:
    SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

    bool
    IsEligibleForTailCallOptimization(SDValue Callee,
                                      CallingConv::ID CalleeCC,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SelectionDAG &DAG) const;

    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG,
                                         int SPDiff,
                                         SDValue Chain,
                                         SDValue &LROpOut,
                                         SDValue &FPOpOut,
                                         bool isDarwinABI,
                                         DebugLoc dl) const;

    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                         const PPCSubtarget &Subtarget) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG,
                       const PPCSubtarget &Subtarget) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                              const PPCSubtarget &Subtarget) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                    const PPCSubtarget &Subtarget) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool isTailCall,
                       bool isVarArg,
                       SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8>
                         &RegsToPass,
                       SDValue InFlag, SDValue Chain,
                       SDValue &Callee,
                       int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual bool
      CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                     bool isVarArg,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     LLVMContext &Context) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    SDValue
      LowerFormalArguments_Darwin(SDValue Chain,
                                  CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::InputArg> &Ins,
                                  DebugLoc dl, SelectionDAG &DAG,
                                  SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerFormalArguments_SVR4(SDValue Chain,
                                CallingConv::ID CallConv, bool isVarArg,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                DebugLoc dl, SelectionDAG &DAG,
                                SmallVectorImpl<SDValue> &InVals) const;

    SDValue
      LowerCall_Darwin(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
                       bool isVarArg, bool isTailCall,
                       const SmallVectorImpl<ISD::OutputArg> &Outs,
                       const SmallVectorImpl<SDValue> &OutVals,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       DebugLoc dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerCall_SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
                     bool isVarArg, bool isTailCall,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     const SmallVectorImpl<SDValue> &OutVals,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     DebugLoc dl, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const;
  };
}

#endif   // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H