//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDILDevices.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Compiler.h"
#include <list>
#include <queue>

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU-specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();

private:
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *ptr, unsigned int addrspace);
  static const Value *getBasePointerValue(const Value *V);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  static bool isCPLoad(const LoadSDNode *N);
  static bool isConstantLoad(const LoadSDNode *N, int cbID);
  static bool isGlobalLoad(const LoadSDNode *N);
  static bool isParamLoad(const LoadSDNode *N);
  static bool isPrivateLoad(const LoadSDNode *N);
  static bool isLocalLoad(const LoadSDNode *N);
  static bool isRegionLoad(const LoadSDNode *N);

  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
                                       SDValue &BaseReg, SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

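/// \brief Select \p N. BUILD_VECTOR and constant nodes get custom handling on
/// R600-class (HD6XXX and earlier) devices; everything else falls through to
/// the TableGen-generated matcher via SelectCode().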
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    return NULL;   // Already selected.
  }
  switch (Opc) {
  default: break;
  case ISD::BUILD_VECTOR: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
      break;
    }
    // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF plus 4
    // INSERT_SUBREGs, which adds a 128-bit register copy when going through
    // the TwoAddressInstructions pass. We want to avoid 128-bit copies as
    // much as possible because they can't be bundled by our scheduler.
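    // Selecting straight to REG_SEQUENCE sidesteps that copy. As an
    // illustration (a sketch; the virtual register names are invented):
    //   (build_vector %a, %b, %c, %d)
    // becomes
    //   REG_SEQUENCE R600_Reg128, %a, sub0, %b, sub1, %c, sub2, %d, sub3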
    SDValue RegSeqArgs[9] = {
      CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
    };
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[2 * i + 1] = N->getOperand(i);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
  case ISD::ConstantFP:
  case ISD::Constant: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // XXX: Custom immediate lowering not implemented yet. Instead we use
    // pseudo instructions defined in SIInstructions.td
    if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
      break;
    }
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo *>(TM.getInstrInfo());

    uint64_t ImmValue = 0;
    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;

    if (N->getOpcode() == ISD::ConstantFP) {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::f64);

      ConstantFPSDNode *C = cast<ConstantFPSDNode>(N);
      APFloat Value = C->getValueAPF();
      float FloatValue = Value.convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = AMDGPU::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = AMDGPU::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = AMDGPU::ONE;
      } else {
        ImmValue = Value.bitcastToAPInt().getZExtValue();
      }
    } else {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::i64);

      ConstantSDNode *C = cast<ConstantSDNode>(N);
      if (C->getZExtValue() == 0) {
        ImmReg = AMDGPU::ZERO;
      } else if (C->getZExtValue() == 1) {
        ImmReg = AMDGPU::ONE_INT;
      } else {
        ImmValue = C->getZExtValue();
      }
    }
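
    // At this point either ImmReg names one of the R600 inline-constant
    // registers (ZERO/HALF/ONE/ONE_INT) or ImmValue holds a literal to be
    // sourced through ALU_LITERAL_X. For example, a use of the float
    // constant 0.5 is rewritten below to read AMDGPU::HALF directly, while
    // 3.14f keeps ALU_LITERAL_X and carries its bit pattern in ImmValue.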
    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
         Use != SDNode::use_end(); Use = Next) {
      Next = llvm::next(Use);
      std::vector<SDValue> Ops;
      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
        Ops.push_back(Use->getOperand(i));
      }

      if (!Use->isMachineOpcode()) {
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          // We can only use literal constants (e.g. AMDGPU::ZERO,
          // AMDGPU::ONE, etc.) in machine opcodes.
          continue;
        }
      } else {
        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
            (TII->get(Use->getMachineOpcode()).TSFlags &
             R600_InstFlag::VECTOR)) {
          continue;
        }

        int ImmIdx =
            TII->getOperandIdx(Use->getMachineOpcode(), R600Operands::IMM);
        assert(ImmIdx != -1);

        // Subtract one from ImmIdx, because the DST operand is usually index
        // 0 for MachineInstrs, but we have no DST in the Ops vector.
        ImmIdx--;

        // Check that we aren't already using an immediate.
        // XXX: It's possible for an instruction to have more than one
        // immediate operand, but this is not supported yet.
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          ConstantSDNode *C = cast<ConstantSDNode>(Use->getOperand(ImmIdx));

          if (C->getZExtValue() != 0) {
            // This instruction is already using an immediate.
            continue;
          }

          // Set the immediate value.
          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
        }
      }
      // Set the immediate register.
      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);

      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
    }
    break;
  }
  }
  SDNode *Result = SelectCode(N);

  // Fold operands of the selected node.
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo *>(TM.getInstrInfo());
    if (Result && Result->isMachineOpcode() &&
        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
        && TII->isALUInstr(Result->getMachineOpcode())) {
      // Fold FNEG/FABS/CONST_ADDRESS.
      // TODO: ISel can generate multiple MachineInstrs; we need to recursively
      // parse Result.
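      // For instance (a sketch, not a verbatim node dump): an ALU node like
      //   MUL_IEEE dst, src0, (FNEG src1)
      // is rewritten by FoldOperands() into
      //   MUL_IEEE dst, src0, src1   with the src1_neg flag operand set to 1,
      // saving a separate negate instruction.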
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);

      // If the node has a single use which is CLAMP_R600, fold the clamp
      // into the node itself.
      if (Result->hasOneUse() && Result->isMachineOpcode()) {
        SDNode *PotentialClamp = *Result->use_begin();
        if (PotentialClamp->isMachineOpcode() &&
            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
          unsigned ClampIdx =
              TII->getOperandIdx(Result->getMachineOpcode(), R600Operands::CLAMP);
          std::vector<SDValue> Ops;
          unsigned NumOp = Result->getNumOperands();
          for (unsigned i = 0; i < NumOp; ++i) {
            Ops.push_back(Result->getOperand(i));
          }
          // ClampIdx counts the DST operand, which Ops does not include.
          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
          Result = CurDAG->SelectNodeTo(PotentialClamp,
              Result->getMachineOpcode(), PotentialClamp->getVTList(),
              Ops.data(), NumOp);
        }
      }
    }
  }

  return Result;
}

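/// \brief Try to fold one source operand of an R600 ALU instruction:
/// CONST_ADDRESS reads become ALU_CONST register reads with a selector
/// operand, FNEG/FABS become the corresponding neg/abs operand flags, and
/// BITCASTs are looked through. Returns true if \p Ops was modified.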
bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, R600Operands::SRC0),
    TII->getOperandIdx(Opcode, R600Operands::SRC1),
    TII->getOperandIdx(Opcode, R600Operands::SRC2)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL),
    TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL),
    TII->getOperandIdx(Opcode, R600Operands::SRC2_SEL)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG),
    TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG),
    TII->getOperandIdx(Opcode, R600Operands::SRC2_NEG)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS),
    TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS),
    -1
  };

  for (unsigned i = 0; i < 3; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue Operand = Ops[OperandIdx[i] - 1];
    switch (Operand.getOpcode()) {
    case AMDGPUISD::CONST_ADDRESS: {
      SDValue CstOffset;
      if (Operand.getValueType().isVector() ||
          !SelectGlobalValueConstantOffset(Operand.getOperand(0), CstOffset))
        break;

      // Gather the constants already read by the other source operands.
      std::vector<unsigned> Consts;
      for (unsigned j = 0; j < 3; j++) {
        int SrcIdx = OperandIdx[j];
        if (SrcIdx < 0)
          break;
        if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) {
          if (Reg->getReg() == AMDGPU::ALU_CONST) {
            ConstantSDNode *Cst = cast<ConstantSDNode>(Ops[SelIdx[j] - 1]);
            Consts.push_back(Cst->getZExtValue());
          }
        }
      }

      ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
      Consts.push_back(Cst->getZExtValue());
      if (!TII->fitsConstReadLimitations(Consts))
        break;

      Ops[OperandIdx[i] - 1] = CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32);
      Ops[SelIdx[i] - 1] = CstOffset;
      return true;
    }
    case ISD::FNEG:
      if (NegIdx[i] < 0)
        break;
      Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
      Ops[NegIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32);
      return true;
    case ISD::FABS:
      if (AbsIdx[i] < 0)
        break;
      Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
      Ops[AbsIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32);
      return true;
    case ISD::BITCAST:
      Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
      return true;
    default:
      break;
    }
  }
  return false;
}

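/// \brief Return true if \p ptr is a pointer value in address space
/// \p addrspace.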
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  // cast<> rather than dyn_cast<>: the result was dereferenced
  // unconditionally anyway, and values reaching here are pointer-typed.
  return cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}

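/// \brief Walk backwards through the operands of \p V (breadth first) until
/// an underlying base object is found: a pointer Argument, a GlobalVariable,
/// or an AllocaInst. Returns NULL if no base pointer is reachable.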
const Value *AMDGPUDAGToDAGISel::getBasePointerValue(const Value *V) {
  if (!V) {
    return NULL;
  }
  const Value *ret = NULL;
  ValueMap<const Value *, bool> ValueBitMap;
  std::queue<const Value *, std::list<const Value *> > ValueQueue;
  ValueQueue.push(V);
  while (!ValueQueue.empty()) {
    V = ValueQueue.front();
    if (ValueBitMap.find(V) == ValueBitMap.end()) {
      ValueBitMap[V] = true;
      if (isa<Argument>(V) && isa<PointerType>(V->getType())) {
        ret = V;
        break;
      } else if (isa<GlobalVariable>(V)) {
        ret = V;
        break;
      } else if (isa<Constant>(V)) {
        const ConstantExpr *CE = dyn_cast<ConstantExpr>(V);
        if (CE) {
          ValueQueue.push(CE->getOperand(0));
        }
      } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
        ret = AI;
        break;
      } else if (const Instruction *I = dyn_cast<Instruction>(V)) {
        uint32_t numOps = I->getNumOperands();
        for (uint32_t x = 0; x < numOps; ++x) {
          ValueQueue.push(I->getOperand(x));
        }
      } else {
        llvm_unreachable("Found a Value that we didn't know how to handle!");
      }
    }
    ValueQueue.pop();
  }
  return ret;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int cbID) {
  if (checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)) {
    return true;
  }
  // Check the MemOperand before dereferencing it; the original code read
  // MMO->getValue() before the null test.
  MachineMemOperand *MMO = N->getMemOperand();
  if (!MMO || !MMO->getValue()) {
    return false;
  }
  const Value *V = MMO->getValue();
  const Value *BV = getBasePointerValue(V);
  if (isa<GlobalValue>(V) || (BV && isa<GlobalValue>(BV))) {
    return checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS);
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

#ifdef DEBUGTMP
#undef INT64_C
#endif
#undef DEBUGTMP

///==== AMDGPU Functions ====///

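/// \brief Match a constant address and return it as a constant-buffer offset.
/// The byte offset is divided by 4 because constant-buffer reads are indexed
/// in 32-bit words.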
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
    SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
    SDValue &BaseReg, SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

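/// \brief Split an address into the base/offset pair a VTX_READ expects.
/// E.g. (add %base, 16) yields Base = %base, Offset = 16, provided the
/// offset fits in 16 bits; otherwise the whole address becomes the base
/// with a zero offset.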
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
    SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  CurDAG->getEntryNode().getDebugLoc(),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

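/// \brief Form a base/offset pair for indirect addressing. A bare constant
/// becomes an offset from INDIRECT_BASE_ADDR; (add/or %reg, C) splits into
/// Base = %reg, Offset = C; anything else uses a zero offset.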
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
    SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

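/// \brief Run target-specific folding over the already-selected DAG: each
/// MachineSDNode is handed to AMDGPUTargetLowering::PostISelFolding(), and
/// any replacement node takes over the original node's uses.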
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  // Go over all selected nodes and try to fold them a bit more.
  const AMDGPUTargetLowering &Lowering =
      static_cast<const AMDGPUTargetLowering &>(TLI);
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ++I) {

    MachineSDNode *Node = dyn_cast<MachineSDNode>(I);
    if (!Node)
      continue;

    SDNode *ResNode = Lowering.PostISelFolding(Node, *CurDAG);
    if (ResNode != Node)
      ReplaceUses(Node, ResNode);
  }
}