//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

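// The body of the TableGen-generated selector (selectImpl() and its match
// tables) is stamped in below from AMDGPUGenGlobalISel.inc. The generated
// code refers to the subtarget as AMDGPUSubtarget, so that name is
// temporarily #define'd to GCNSubtarget for the duration of the include.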
#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

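// Besides the references to the target info objects, the constructor also
// initializes the TableGen-generated predicate and temporary members; the
// PREDICATES_INIT and TEMPORARIES_INIT fragments included below expand into
// those member initializers.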
AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
      ,AMDGPUASI(STI.getAMDGPUAS())
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

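// Re-issue a generic COPY as a target COPY and constrain each virtual
// register operand to the register class implied by its register bank.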
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));
  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

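// Extract the 32-bit low or high half (sub0/sub1) of a 64-bit operand. For a
// register operand this copies the requested sub-register into a fresh
// SGPR_32 virtual register; for an immediate it returns the matching 32-bit
// half of the value.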
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}

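// Select a 64-bit scalar G_ADD. The operands are split into 32-bit halves
// with getSubOperand64, the low halves are added with S_ADD_U32, the high
// halves with S_ADDC_U32 (which consumes the carry from SCC), and the result
// is reassembled with a REG_SEQUENCE, roughly:
//   %lo  = S_ADD_U32  %a.sub0, %b.sub0
//   %hi  = S_ADDC_U32 %a.sub1, %b.sub1
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1
// Other sizes are rejected and left to the caller.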
bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (Size != 64)
    return false;

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
          .add(Lo1)
          .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
          .add(Hi1)
          .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
          .addReg(DstLo)
          .addImm(AMDGPU::sub0)
          .addReg(DstHi)
          .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

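// On AMDGPU a G_GEP is plain pointer arithmetic, so reuse the 64-bit add path.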
bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);
  const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
  if (RC)
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

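// Select a side-effect-free G_INTRINSIC. maxnum, minnum and
// amdgcn.cvt.pkrtz are handled by the TableGen-imported patterns;
// amdgcn.kernarg.segment.ptr is lowered to a COPY from the preloaded
// kernarg segment pointer live-in.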
bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);

  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    MachineFunction *MF = I.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    const ArgDescriptor *InputPtrReg;
    const TargetRegisterClass *RC;
    const DebugLoc &DL = I.getDebugLoc();

    std::tie(InputPtrReg, RC)
      = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
    if (!InputPtrReg)
      report_fatal_error("missing kernarg segment ptr");

    BuildMI(*I.getParent(), &I, DL, TII.get(AMDGPU::COPY))
      .add(I.getOperand(0))
      .addReg(MRI.getLiveInVirtReg(InputPtrReg->getRegister()));
    I.eraseFromParent();
    return true;
  }
  }
  return false;
}

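// Build an EXP or EXP_DONE export with the given target, four data
// registers, VM flag, compression flag and channel enable mask.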
static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
          .addImm(Tgt)
          .addReg(Reg0)
          .addReg(Reg1)
          .addReg(Reg2)
          .addReg(Reg3)
          .addImm(VM)
          .addImm(Compr)
          .addImm(Enabled);
}

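// Select intrinsics with side effects. llvm.amdgcn.exp maps directly onto
// buildEXP; llvm.amdgcn.exp.compr exports two registers of packed data, so
// the two unused source slots are filled with an IMPLICIT_DEF'd VGPR.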
bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
                                                 MachineInstr &I,
                                                 CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  }
  return false;
}

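// Select a G_STORE as a FLAT store sized by the value being stored. The
// address operand is added before the data, followed by the offset, glc and
// slc immediates.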
bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(1))
          .add(I.getOperand(0))
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0); // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

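// Select G_CONSTANT / G_FCONSTANT. The immediate is first normalized to a
// plain Imm operand. 32-bit values become a single S_MOV_B32 / V_MOV_B32,
// chosen by the destination register bank (or class); 64-bit values are
// split into two 32-bit moves that are recombined with a REG_SEQUENCE.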
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
          .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
          .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
              .addReg(LoReg)
              .addImm(AMDGPU::sub0)
              .addReg(HiReg)
              .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

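// Walk the G_GEP feeding a load and record its addressing components: the
// constant immediate offset plus the SGPR and VGPR base parts. Nested GEPs
// are followed recursively, so the entry for the GEP directly feeding the
// load comes first in AddrInfo; a non-GEP pointer def ends the walk.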
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts?  Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

static bool isInstrUniform(const MachineInstr &MI) {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

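// Map a base single-dword SMRD opcode to the multi-dword variant matching
// the load size in bits; 32-bit loads use the base opcode unchanged.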
static unsigned getSmrdOpcode(unsigned BaseOpcode, unsigned LoadSize) {

  if (LoadSize == 32)
    return BaseOpcode;

  switch (BaseOpcode) {
  case AMDGPU::S_LOAD_DWORD_IMM:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM_ci;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM_ci;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM_ci;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM_ci;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_SGPR:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_SGPR;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_SGPR;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_SGPR;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_SGPR;
    }
    break;
  }
  llvm_unreachable("Invalid base smrd opcode or size");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

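// Try to select a uniform constant-address load as an SMRD (scalar) load.
// Three addressing forms are attempted in turn: an encodable immediate
// offset, the SEA_ISLANDS large-immediate form, and an offset held in an
// SGPR; if no GEP info is usable, the original pointer is used with a zero
// offset.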
bool AMDGPUInstructionSelector::selectSMRD(MachineInstr &I,
                                           ArrayRef<GEPInfo> AddrInfo) const {

  if (!I.hasOneMemOperand())
    return false;

  if ((*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      (*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT)
    return false;

  if (!isInstrUniform(I))
    return false;

  if (hasVgprParts(AddrInfo))
    return false;

  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  unsigned Opcode;
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);

  if (!AddrInfo.empty() && AddrInfo[0].SgprParts.size() == 1) {

    const GEPInfo &GEPInfo = AddrInfo[0];

    unsigned PtrReg = GEPInfo.SgprParts[0];
    int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(Subtarget, GEPInfo.Imm);
    if (AMDGPU::isLegalSMRDImmOffset(Subtarget, GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                 .addReg(PtrReg)
                                 .addImm(EncodedImm)
                                 .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (Subtarget.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS &&
        isUInt<32>(EncodedImm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM_ci, LoadSize);
      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                   .addReg(PtrReg)
                                   .addImm(EncodedImm)
                                   .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (isUInt<32>(GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_SGPR, LoadSize);
      unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), OffsetReg)
              .addImm(GEPInfo.Imm);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                   .addReg(PtrReg)
                                   .addReg(OffsetReg)
                                   .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }
  }

  unsigned PtrReg = I.getOperand(1).getReg();
  Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);
  MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                               .addReg(PtrReg)
                               .addImm(0)
                               .addImm(0); // glc
  return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
}

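// Select a G_LOAD: prefer an SMRD scalar load when the address-mode analysis
// allows it, otherwise fall back to a FLAT load sized by the result.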
bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  if (selectSMRD(I, AddrInfo)) {
    I.eraseFromParent();
    return true;
  }

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported\n");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
                               .add(I.getOperand(0))
                               .addReg(PtrReg)
                               .addImm(0)  // offset
                               .addImm(0)  // glc
                               .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

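// Main entry point. Already-selected (non-generic) instructions are left
// alone, except copies, which still need their operands constrained; generic
// opcodes with bespoke handling are dispatched here and everything else is
// handed to the TableGen-generated selectImpl().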
bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}

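// Complex operand renderers referenced by the TableGen patterns. Each returns
// a list of lambdas that append the selected operands; the VOP3 variants
// currently render default (zero) source modifiers, clamp and omod.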
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // src_mods
  }};
}