1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPURegisterInfo.h"
19 #include "AMDGPUSubtarget.h"
20 #include "AMDGPUTargetMachine.h"
21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
22 #include "SIMachineFunctionInfo.h"
23 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
25 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
26 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
27 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
28 #include "llvm/CodeGen/GlobalISel/Utils.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineInstr.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37
38 #define DEBUG_TYPE "amdgpu-isel"
39
40 using namespace llvm;
41 using namespace MIPatternMatch;
42
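// Pull in the TableGen-generated matcher implementation. The generated code
// refers to "AMDGPUSubtarget", so it is temporarily aliased to GCNSubtarget,
// the subtarget type this selector is actually built against.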
43 #define GET_GLOBALISEL_IMPL
44 #define AMDGPUSubtarget GCNSubtarget
45 #include "AMDGPUGenGlobalISel.inc"
46 #undef GET_GLOBALISEL_IMPL
47 #undef AMDGPUSubtarget
48
49 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
50 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
51 const AMDGPUTargetMachine &TM)
52 : InstructionSelector(), TII(*STI.getInstrInfo()),
53 TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
54 STI(STI),
55 EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
56 #define GET_GLOBALISEL_PREDICATES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_PREDICATES_INIT
59 #define GET_GLOBALISEL_TEMPORARIES_INIT
60 #include "AMDGPUGenGlobalISel.inc"
61 #undef GET_GLOBALISEL_TEMPORARIES_INIT
62 {
63 }
64
65 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
66
67 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
68 CodeGenCoverage &CoverageInfo) {
69 MRI = &MF.getRegInfo();
70 InstructionSelector::setupMF(MF, KB, CoverageInfo);
71 }
72
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74 const MachineRegisterInfo &MRI) const {
75 if (Register::isPhysicalRegister(Reg))
76 return Reg == TRI.getVCC();
77
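  // A virtual register at this point may still carry either a register class
  // or a register bank; handle both forms.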
78 auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
79 const TargetRegisterClass *RC =
80 RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
81 if (RC) {
82 const LLT Ty = MRI.getType(Reg);
83 return RC->hasSuperClassEq(TRI.getBoolRC()) &&
84 Ty.isValid() && Ty.getSizeInBits() == 1;
85 }
86
87 const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
88 return RB->getID() == AMDGPU::VCCRegBankID;
89 }
90
91 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
92 const DebugLoc &DL = I.getDebugLoc();
93 MachineBasicBlock *BB = I.getParent();
94 I.setDesc(TII.get(TargetOpcode::COPY));
95
96 const MachineOperand &Src = I.getOperand(1);
97 MachineOperand &Dst = I.getOperand(0);
98 Register DstReg = Dst.getReg();
99 Register SrcReg = Src.getReg();
100
101 if (isVCC(DstReg, *MRI)) {
102 if (SrcReg == AMDGPU::SCC) {
103 const TargetRegisterClass *RC
104 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105 if (!RC)
106 return true;
107 return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
108 }
109
110 if (!isVCC(SrcReg, *MRI)) {
111 // TODO: Should probably leave the copy and let copyPhysReg expand it.
112 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
113 return false;
114
115 const TargetRegisterClass *SrcRC
116 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
117
118 Register MaskedReg = MRI->createVirtualRegister(SrcRC);
119
120 // We can't trust the high bits at this point, so clear them.
121
122 // TODO: Skip masking high bits if def is known boolean.
123
124 unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
125 AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
126 BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
127 .addImm(1)
128 .addReg(SrcReg);
129 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
130 .addImm(0)
131 .addReg(MaskedReg);
132
133 if (!MRI->getRegClassOrNull(SrcReg))
134 MRI->setRegClass(SrcReg, SrcRC);
135 I.eraseFromParent();
136 return true;
137 }
138
139 const TargetRegisterClass *RC =
140 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
141 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
142 return false;
143
144 // Don't constrain the source register to a class so the def instruction
145 // handles it (unless it's undef).
146 //
147     // FIXME: This is a hack. When selecting the def, we need to know
148     // specifically that the result is VCCRegBank, and not just an SGPR
149 // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
150 if (Src.isUndef()) {
151 const TargetRegisterClass *SrcRC =
152 TRI.getConstrainedRegClassForOperand(Src, *MRI);
153 if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
154 return false;
155 }
156
157 return true;
158 }
159
160 for (const MachineOperand &MO : I.operands()) {
161 if (Register::isPhysicalRegister(MO.getReg()))
162 continue;
163
164 const TargetRegisterClass *RC =
165 TRI.getConstrainedRegClassForOperand(MO, *MRI);
166 if (!RC)
167 continue;
168 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
169 }
170 return true;
171 }
172
173 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
174 const Register DefReg = I.getOperand(0).getReg();
175 const LLT DefTy = MRI->getType(DefReg);
176
177 // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
178
179 const RegClassOrRegBank &RegClassOrBank =
180 MRI->getRegClassOrRegBank(DefReg);
181
182 const TargetRegisterClass *DefRC
183 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
184 if (!DefRC) {
185 if (!DefTy.isValid()) {
186 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
187 return false;
188 }
189
190 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
191 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
192 if (!DefRC) {
193 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
194 return false;
195 }
196 }
197
198 // TODO: Verify that all registers have the same bank
199 I.setDesc(TII.get(TargetOpcode::PHI));
200 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
201 }
202
203 MachineOperand
204 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
205 const TargetRegisterClass &SubRC,
206 unsigned SubIdx) const {
207
208 MachineInstr *MI = MO.getParent();
209 MachineBasicBlock *BB = MO.getParent()->getParent();
210 Register DstReg = MRI->createVirtualRegister(&SubRC);
211
212 if (MO.isReg()) {
213 unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
214 Register Reg = MO.getReg();
215 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
216 .addReg(Reg, 0, ComposedSubIdx);
217
218 return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
219 MO.isKill(), MO.isDead(), MO.isUndef(),
220 MO.isEarlyClobber(), 0, MO.isDebug(),
221 MO.isInternalRead());
222 }
223
224 assert(MO.isImm());
225
226 APInt Imm(64, MO.getImm());
227
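  // Split the 64-bit immediate into 32-bit halves: sub0 takes bits [31:0] and
  // sub1 takes bits [63:32].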
228 switch (SubIdx) {
229 default:
230     llvm_unreachable("do not know how to split immediate with this sub index.");
231 case AMDGPU::sub0:
232 return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
233 case AMDGPU::sub1:
234 return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
235 }
236 }
237
238 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
239 switch (Opc) {
240 case AMDGPU::G_AND:
241 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
242 case AMDGPU::G_OR:
243 return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
244 case AMDGPU::G_XOR:
245 return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
246 default:
247 llvm_unreachable("not a bit op");
248 }
249 }
250
251 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
252 MachineOperand &Dst = I.getOperand(0);
253 MachineOperand &Src0 = I.getOperand(1);
254 MachineOperand &Src1 = I.getOperand(2);
255 Register DstReg = Dst.getReg();
256 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
257
258 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
259 if (DstRB->getID() == AMDGPU::VCCRegBankID) {
260 const TargetRegisterClass *RC = TRI.getBoolRC();
261 unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
262 RC == &AMDGPU::SReg_64RegClass);
263 I.setDesc(TII.get(InstOpc));
264
265 // FIXME: Hack to avoid turning the register bank into a register class.
266     // The selector for G_ICMP relies on seeing that the register bank for the
267     // result is VCC. In wave32, if we constrain the registers to SReg_32 here,
268     // it will be ambiguous whether it's a scalar or vector bool.
269 if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
270 MRI->setRegClass(Src0.getReg(), RC);
271 if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
272 MRI->setRegClass(Src1.getReg(), RC);
273
274 return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
275 }
276
277 // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
278 // the result?
279 if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
280 unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
281 I.setDesc(TII.get(InstOpc));
282 // Dead implicit-def of scc
283 I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
284 true, // isImp
285 false, // isKill
286 true)); // isDead
287 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
288 }
289
290 return false;
291 }
292
293 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
294 MachineBasicBlock *BB = I.getParent();
295 MachineFunction *MF = BB->getParent();
296 Register DstReg = I.getOperand(0).getReg();
297 const DebugLoc &DL = I.getDebugLoc();
298 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
299 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
300 const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
301 const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
302
303 if (Size == 32) {
304 if (IsSALU) {
305 const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
306 MachineInstr *Add =
307 BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
308 .add(I.getOperand(1))
309 .add(I.getOperand(2));
310 I.eraseFromParent();
311 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
312 }
313
314 if (STI.hasAddNoCarry()) {
315 const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
316 I.setDesc(TII.get(Opc));
317 I.addOperand(*MF, MachineOperand::CreateImm(0));
318 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
319 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
320 }
321
322 const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
323
324 Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
325 MachineInstr *Add
326 = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
327 .addDef(UnusedCarry, RegState::Dead)
328 .add(I.getOperand(1))
329 .add(I.getOperand(2))
330 .addImm(0);
331 I.eraseFromParent();
332 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
333 }
334
335 assert(!Sub && "illegal sub should not reach here");
336
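  // 64-bit case: add the low and high halves separately (with carry into the
  // high half) and recombine the results with a REG_SEQUENCE.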
337 const TargetRegisterClass &RC
338 = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
339 const TargetRegisterClass &HalfRC
340 = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
341
342 MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
343 MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
344 MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
345 MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
346
347 Register DstLo = MRI->createVirtualRegister(&HalfRC);
348 Register DstHi = MRI->createVirtualRegister(&HalfRC);
349
350 if (IsSALU) {
351 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
352 .add(Lo1)
353 .add(Lo2);
354 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
355 .add(Hi1)
356 .add(Hi2);
357 } else {
358 const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
359 Register CarryReg = MRI->createVirtualRegister(CarryRC);
360 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
361 .addDef(CarryReg)
362 .add(Lo1)
363 .add(Lo2)
364 .addImm(0);
365 MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
366 .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
367 .add(Hi1)
368 .add(Hi2)
369 .addReg(CarryReg, RegState::Kill)
370 .addImm(0);
371
372 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
373 return false;
374 }
375
376 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
377 .addReg(DstLo)
378 .addImm(AMDGPU::sub0)
379 .addReg(DstHi)
380 .addImm(AMDGPU::sub1);
381
382
383 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
384 return false;
385
386 I.eraseFromParent();
387 return true;
388 }
389
390 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
391 MachineInstr &I) const {
392 MachineBasicBlock *BB = I.getParent();
393 MachineFunction *MF = BB->getParent();
394 const DebugLoc &DL = I.getDebugLoc();
395 Register Dst0Reg = I.getOperand(0).getReg();
396 Register Dst1Reg = I.getOperand(1).getReg();
397 const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
398 I.getOpcode() == AMDGPU::G_UADDE;
399 const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
400 I.getOpcode() == AMDGPU::G_USUBE;
401
402 if (isVCC(Dst1Reg, *MRI)) {
403     // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have an
404     // unsigned carry-out despite the _i32 name. These were renamed in VI to _U32.
405 // FIXME: We should probably rename the opcodes here.
406 unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
407 unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
408 I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
409 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
410 I.addOperand(*MF, MachineOperand::CreateImm(0));
411 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
412 }
413
414 Register Src0Reg = I.getOperand(2).getReg();
415 Register Src1Reg = I.getOperand(3).getReg();
416
417 if (HasCarryIn) {
418 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
419 .addReg(I.getOperand(4).getReg());
420 }
421
422 unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
423 unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
424
425 BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
426 .add(I.getOperand(2))
427 .add(I.getOperand(3));
428 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
429 .addReg(AMDGPU::SCC);
430
431 if (!MRI->getRegClassOrNull(Dst1Reg))
432 MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
433
434 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
435 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
436 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
437 return false;
438
439 if (HasCarryIn &&
440 !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
441 AMDGPU::SReg_32RegClass, *MRI))
442 return false;
443
444 I.eraseFromParent();
445 return true;
446 }
447
448 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
449 MachineBasicBlock *BB = I.getParent();
450 Register DstReg = I.getOperand(0).getReg();
451 Register SrcReg = I.getOperand(1).getReg();
452 LLT DstTy = MRI->getType(DstReg);
453 LLT SrcTy = MRI->getType(SrcReg);
454 const unsigned SrcSize = SrcTy.getSizeInBits();
455 const unsigned DstSize = DstTy.getSizeInBits();
456
457 // TODO: Should handle any multiple of 32 offset.
458 unsigned Offset = I.getOperand(2).getImm();
459 if (Offset % DstSize != 0)
460 return false;
461
462 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
463 const TargetRegisterClass *SrcRC =
464 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
465 if (!SrcRC)
466 return false;
467
468 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
469
470 const DebugLoc &DL = I.getDebugLoc();
471 MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
472 .addReg(SrcReg, 0, SubRegs[Offset / DstSize]);
473
474 for (const MachineOperand &MO : Copy->operands()) {
475 const TargetRegisterClass *RC =
476 TRI.getConstrainedRegClassForOperand(MO, *MRI);
477 if (!RC)
478 continue;
479 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
480 }
481 I.eraseFromParent();
482 return true;
483 }
484
485 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
486 MachineBasicBlock *BB = MI.getParent();
487 Register DstReg = MI.getOperand(0).getReg();
488 LLT DstTy = MRI->getType(DstReg);
489 LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
490
491 const unsigned SrcSize = SrcTy.getSizeInBits();
492 if (SrcSize < 32)
493 return selectImpl(MI, *CoverageInfo);
494
495 const DebugLoc &DL = MI.getDebugLoc();
496 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
497 const unsigned DstSize = DstTy.getSizeInBits();
498 const TargetRegisterClass *DstRC =
499 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
500 if (!DstRC)
501 return false;
502
503 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
504 MachineInstrBuilder MIB =
505 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
506 for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
507 MachineOperand &Src = MI.getOperand(I + 1);
508 MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
509 MIB.addImm(SubRegs[I]);
510
511 const TargetRegisterClass *SrcRC
512 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
513 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
514 return false;
515 }
516
517 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
518 return false;
519
520 MI.eraseFromParent();
521 return true;
522 }
523
524 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
525 MachineBasicBlock *BB = MI.getParent();
526 const int NumDst = MI.getNumOperands() - 1;
527
528 MachineOperand &Src = MI.getOperand(NumDst);
529
530 Register SrcReg = Src.getReg();
531 Register DstReg0 = MI.getOperand(0).getReg();
532 LLT DstTy = MRI->getType(DstReg0);
533 LLT SrcTy = MRI->getType(SrcReg);
534
535 const unsigned DstSize = DstTy.getSizeInBits();
536 const unsigned SrcSize = SrcTy.getSizeInBits();
537 const DebugLoc &DL = MI.getDebugLoc();
538 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
539
540 const TargetRegisterClass *SrcRC =
541 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
542 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
543 return false;
544
545 const unsigned SrcFlags = getUndefRegState(Src.isUndef());
546
547 // Note we could have mixed SGPR and VGPR destination banks for an SGPR
548 // source, and this relies on the fact that the same subregister indices are
549 // used for both.
550 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
551 for (int I = 0, E = NumDst; I != E; ++I) {
552 MachineOperand &Dst = MI.getOperand(I);
553 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
554 .addReg(SrcReg, SrcFlags, SubRegs[I]);
555
556 const TargetRegisterClass *DstRC =
557 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
558 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
559 return false;
560 }
561
562 MI.eraseFromParent();
563 return true;
564 }
565
566 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
567 return selectG_ADD_SUB(I);
568 }
569
570 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
571 const MachineOperand &MO = I.getOperand(0);
572
573 // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
574 // regbank check here is to know why getConstrainedRegClassForOperand failed.
575 const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
576 if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
577 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
578 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
579 return true;
580 }
581
582 return false;
583 }
584
585 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
586 MachineBasicBlock *BB = I.getParent();
587
588 Register DstReg = I.getOperand(0).getReg();
589 Register Src0Reg = I.getOperand(1).getReg();
590 Register Src1Reg = I.getOperand(2).getReg();
591 LLT Src1Ty = MRI->getType(Src1Reg);
592
593 unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
594 unsigned InsSize = Src1Ty.getSizeInBits();
595
596 int64_t Offset = I.getOperand(3).getImm();
597 if (Offset % 32 != 0)
598 return false;
599
600 unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
601 if (SubReg == AMDGPU::NoSubRegister)
602 return false;
603
604 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
605 const TargetRegisterClass *DstRC =
606 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
607 if (!DstRC)
608 return false;
609
610 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
611 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
612 const TargetRegisterClass *Src0RC =
613 TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
614 const TargetRegisterClass *Src1RC =
615 TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
616
617 // Deal with weird cases where the class only partially supports the subreg
618 // index.
619 Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
620 if (!Src0RC)
621 return false;
622
623 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
624 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
625 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
626 return false;
627
628 const DebugLoc &DL = I.getDebugLoc();
629 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
630 .addReg(Src0Reg)
631 .addReg(Src1Reg)
632 .addImm(SubReg);
633
634 I.eraseFromParent();
635 return true;
636 }
637
638 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
639 unsigned IntrinsicID = I.getIntrinsicID();
640 switch (IntrinsicID) {
641 case Intrinsic::amdgcn_if_break: {
642 MachineBasicBlock *BB = I.getParent();
643
644     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
645 // SelectionDAG uses for wave32 vs wave64.
646 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
647 .add(I.getOperand(0))
648 .add(I.getOperand(2))
649 .add(I.getOperand(3));
650
651 Register DstReg = I.getOperand(0).getReg();
652 Register Src0Reg = I.getOperand(2).getReg();
653 Register Src1Reg = I.getOperand(3).getReg();
654
655 I.eraseFromParent();
656
657 for (Register Reg : { DstReg, Src0Reg, Src1Reg })
658 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
659
660 return true;
661 }
662 default:
663 return selectImpl(I, *CoverageInfo);
664 }
665 }
666
667 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
668 if (Size != 32 && Size != 64)
669 return -1;
670 switch (P) {
671 default:
672 llvm_unreachable("Unknown condition code!");
673 case CmpInst::ICMP_NE:
674 return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
675 case CmpInst::ICMP_EQ:
676 return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
677 case CmpInst::ICMP_SGT:
678 return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
679 case CmpInst::ICMP_SGE:
680 return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
681 case CmpInst::ICMP_SLT:
682 return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
683 case CmpInst::ICMP_SLE:
684 return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
685 case CmpInst::ICMP_UGT:
686 return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
687 case CmpInst::ICMP_UGE:
688 return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
689 case CmpInst::ICMP_ULT:
690 return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
691 case CmpInst::ICMP_ULE:
692 return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
693 }
694 }
695
696 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
697 unsigned Size) const {
698 if (Size == 64) {
699 if (!STI.hasScalarCompareEq64())
700 return -1;
701
702 switch (P) {
703 case CmpInst::ICMP_NE:
704 return AMDGPU::S_CMP_LG_U64;
705 case CmpInst::ICMP_EQ:
706 return AMDGPU::S_CMP_EQ_U64;
707 default:
708 return -1;
709 }
710 }
711
712 if (Size != 32)
713 return -1;
714
715 switch (P) {
716 case CmpInst::ICMP_NE:
717 return AMDGPU::S_CMP_LG_U32;
718 case CmpInst::ICMP_EQ:
719 return AMDGPU::S_CMP_EQ_U32;
720 case CmpInst::ICMP_SGT:
721 return AMDGPU::S_CMP_GT_I32;
722 case CmpInst::ICMP_SGE:
723 return AMDGPU::S_CMP_GE_I32;
724 case CmpInst::ICMP_SLT:
725 return AMDGPU::S_CMP_LT_I32;
726 case CmpInst::ICMP_SLE:
727 return AMDGPU::S_CMP_LE_I32;
728 case CmpInst::ICMP_UGT:
729 return AMDGPU::S_CMP_GT_U32;
730 case CmpInst::ICMP_UGE:
731 return AMDGPU::S_CMP_GE_U32;
732 case CmpInst::ICMP_ULT:
733 return AMDGPU::S_CMP_LT_U32;
734 case CmpInst::ICMP_ULE:
735 return AMDGPU::S_CMP_LE_U32;
736 default:
737 llvm_unreachable("Unknown condition code!");
738 }
739 }
740
741 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
742 MachineBasicBlock *BB = I.getParent();
743 const DebugLoc &DL = I.getDebugLoc();
744
745 Register SrcReg = I.getOperand(2).getReg();
746 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
747
748 auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
749
750 Register CCReg = I.getOperand(0).getReg();
751 if (!isVCC(CCReg, *MRI)) {
752 int Opcode = getS_CMPOpcode(Pred, Size);
753 if (Opcode == -1)
754 return false;
755 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
756 .add(I.getOperand(2))
757 .add(I.getOperand(3));
758 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
759 .addReg(AMDGPU::SCC);
760 bool Ret =
761 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
762 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
763 I.eraseFromParent();
764 return Ret;
765 }
766
767 int Opcode = getV_CMPOpcode(Pred, Size);
768 if (Opcode == -1)
769 return false;
770
771 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
772 I.getOperand(0).getReg())
773 .add(I.getOperand(2))
774 .add(I.getOperand(3));
775 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
776 *TRI.getBoolRC(), *MRI);
777 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
778 I.eraseFromParent();
779 return Ret;
780 }
781
782 static MachineInstr *
783 buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
784 unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
785 unsigned VM, bool Compr, unsigned Enabled, bool Done) {
786 const DebugLoc &DL = Insert->getDebugLoc();
787 MachineBasicBlock &BB = *Insert->getParent();
788 unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
789 return BuildMI(BB, Insert, DL, TII.get(Opcode))
790 .addImm(Tgt)
791 .addReg(Reg0)
792 .addReg(Reg1)
793 .addReg(Reg2)
794 .addReg(Reg3)
795 .addImm(VM)
796 .addImm(Compr)
797 .addImm(Enabled);
798 }
799
800 static bool isZero(Register Reg, MachineRegisterInfo &MRI) {
801 int64_t C;
802 if (mi_match(Reg, MRI, m_ICst(C)) && C == 0)
803 return true;
804
805 // FIXME: matcher should ignore copies
806 return mi_match(Reg, MRI, m_Copy(m_ICst(C))) && C == 0;
807 }
808
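// The buffer intrinsics' auxiliary operand packs its cache-policy bits as
// glc = bit 0, slc = bit 1, dlc = bit 2, swz = bit 3, which the extractors
// below assume.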
809 static unsigned extractGLC(unsigned AuxiliaryData) {
810 return AuxiliaryData & 1;
811 }
812
813 static unsigned extractSLC(unsigned AuxiliaryData) {
814 return (AuxiliaryData >> 1) & 1;
815 }
816
817 static unsigned extractDLC(unsigned AuxiliaryData) {
818 return (AuxiliaryData >> 2) & 1;
819 }
820
821 static unsigned extractSWZ(unsigned AuxiliaryData) {
822 return (AuxiliaryData >> 3) & 1;
823 }
824
825 static unsigned getBufferStoreOpcode(LLT Ty,
826 const unsigned MemSize,
827 const bool Offen) {
828 const int Size = Ty.getSizeInBits();
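  // MemSize is in bytes, so 8 * MemSize is the width of the store in bits.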
829 switch (8 * MemSize) {
830 case 8:
831 return Offen ? AMDGPU::BUFFER_STORE_BYTE_OFFEN_exact :
832 AMDGPU::BUFFER_STORE_BYTE_OFFSET_exact;
833 case 16:
834 return Offen ? AMDGPU::BUFFER_STORE_SHORT_OFFEN_exact :
835 AMDGPU::BUFFER_STORE_SHORT_OFFSET_exact;
836 default:
837 unsigned Opc = Offen ? AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact :
838 AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact;
839 if (Size > 32)
840 Opc = AMDGPU::getMUBUFOpcode(Opc, Size / 32);
841 return Opc;
842 }
843 }
844
845 static unsigned getBufferStoreFormatOpcode(LLT Ty,
846 const unsigned MemSize,
847 const bool Offen) {
848 bool IsD16Packed = Ty.getScalarSizeInBits() == 16;
849 bool IsD16Unpacked = 8 * MemSize < Ty.getSizeInBits();
850 int NumElts = Ty.isVector() ? Ty.getNumElements() : 1;
851
852 if (IsD16Packed) {
853 switch (NumElts) {
854 case 1:
855 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFEN_exact :
856 AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFSET_exact;
857 case 2:
858 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XY_OFFEN_exact :
859 AMDGPU::BUFFER_STORE_FORMAT_D16_XY_OFFSET_exact;
860 case 3:
861 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_OFFEN_exact :
862 AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_OFFSET_exact;
863 case 4:
864 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_OFFEN_exact :
865 AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_OFFSET_exact;
866 default:
867 return -1;
868 }
869 }
870
871 if (IsD16Unpacked) {
872 switch (NumElts) {
873 case 1:
874 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFEN_exact :
875 AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFSET_exact;
876 case 2:
877 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XY_gfx80_OFFEN_exact :
878 AMDGPU::BUFFER_STORE_FORMAT_D16_XY_gfx80_OFFSET_exact;
879 case 3:
880 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_gfx80_OFFEN_exact :
881 AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_gfx80_OFFSET_exact;
882 case 4:
883 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_gfx80_OFFEN_exact :
884 AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_gfx80_OFFSET_exact;
885 default:
886 return -1;
887 }
888 }
889
890 switch (NumElts) {
891 case 1:
892 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_X_OFFEN_exact :
893 AMDGPU::BUFFER_STORE_FORMAT_X_OFFSET_exact;
894 case 2:
895 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XY_OFFEN_exact :
896 AMDGPU::BUFFER_STORE_FORMAT_XY_OFFSET_exact;
897 case 3:
898 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XYZ_OFFEN_exact :
899 AMDGPU::BUFFER_STORE_FORMAT_XYZ_OFFSET_exact;
900 case 4:
901 return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XYZW_OFFEN_exact :
902 AMDGPU::BUFFER_STORE_FORMAT_XYZW_OFFSET_exact;
903 default:
904 return -1;
905 }
906
907 llvm_unreachable("unhandled buffer store");
908 }
909
910 // TODO: Move this to combiner
911 // Returns base register, imm offset, total constant offset.
912 std::tuple<Register, unsigned, unsigned>
913 AMDGPUInstructionSelector::splitBufferOffsets(MachineIRBuilder &B,
914 Register OrigOffset) const {
915 const unsigned MaxImm = 4095;
916 Register BaseReg;
917 unsigned TotalConstOffset;
918 MachineInstr *OffsetDef;
919
920 std::tie(BaseReg, TotalConstOffset, OffsetDef)
921 = AMDGPU::getBaseWithConstantOffset(*MRI, OrigOffset);
922
923 unsigned ImmOffset = TotalConstOffset;
924
925 // If the immediate value is too big for the immoffset field, put the value
926 // and -4096 into the immoffset field so that the value that is copied/added
927   // for the voffset field is a multiple of 4096, and it stands a better chance
928   // of being CSEd with the copy/add for another similar load/store.
929 // However, do not do that rounding down to a multiple of 4096 if that is a
930 // negative number, as it appears to be illegal to have a negative offset
931 // in the vgpr, even if adding the immediate offset makes it positive.
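  // For example, a total constant offset of 8200 splits into an overflow of
  // 8192 (materialized into the voffset register) and an immediate of 8.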
932 unsigned Overflow = ImmOffset & ~MaxImm;
933 ImmOffset -= Overflow;
934 if ((int32_t)Overflow < 0) {
935 Overflow += ImmOffset;
936 ImmOffset = 0;
937 }
938
939 if (Overflow != 0) {
940 // In case this is in a waterfall loop, insert offset code at the def point
941 // of the offset, not inside the loop.
942 MachineBasicBlock::iterator OldInsPt = B.getInsertPt();
943 MachineBasicBlock &OldMBB = B.getMBB();
944 B.setInstr(*OffsetDef);
945
946 if (!BaseReg) {
947 BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
948 B.buildInstr(AMDGPU::V_MOV_B32_e32)
949 .addDef(BaseReg)
950 .addImm(Overflow);
951 } else {
952 Register OverflowVal = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
953 B.buildInstr(AMDGPU::V_MOV_B32_e32)
954 .addDef(OverflowVal)
955 .addImm(Overflow);
956
957 Register NewBaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
958 TII.getAddNoCarry(B.getMBB(), B.getInsertPt(), B.getDebugLoc(), NewBaseReg)
959 .addReg(BaseReg)
960 .addReg(OverflowVal, RegState::Kill)
961 .addImm(0);
962 BaseReg = NewBaseReg;
963 }
964
965 B.setInsertPt(OldMBB, OldInsPt);
966 }
967
968 return std::make_tuple(BaseReg, ImmOffset, TotalConstOffset);
969 }
970
971 bool AMDGPUInstructionSelector::selectStoreIntrinsic(MachineInstr &MI,
972 bool IsFormat) const {
973 MachineIRBuilder B(MI);
974 MachineFunction &MF = B.getMF();
975 Register VData = MI.getOperand(1).getReg();
976 LLT Ty = MRI->getType(VData);
977
978 int Size = Ty.getSizeInBits();
979 if (Size % 32 != 0)
980 return false;
981
982 // FIXME: Verifier should enforce 1 MMO for these intrinsics.
983 MachineMemOperand *MMO = *MI.memoperands_begin();
984 const int MemSize = MMO->getSize();
985
986 Register RSrc = MI.getOperand(2).getReg();
987 Register VOffset = MI.getOperand(3).getReg();
988 Register SOffset = MI.getOperand(4).getReg();
989 unsigned AuxiliaryData = MI.getOperand(5).getImm();
990 unsigned ImmOffset;
991 unsigned TotalOffset;
992
993 std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
994 if (TotalOffset != 0)
995 MMO = MF.getMachineMemOperand(MMO, TotalOffset, MemSize);
996
997 const bool Offen = !isZero(VOffset, *MRI);
998
999 int Opc = IsFormat ? getBufferStoreFormatOpcode(Ty, MemSize, Offen) :
1000 getBufferStoreOpcode(Ty, MemSize, Offen);
1001 if (Opc == -1)
1002 return false;
1003
1004 MachineInstrBuilder MIB = B.buildInstr(Opc)
1005 .addUse(VData);
1006
1007 if (Offen)
1008 MIB.addUse(VOffset);
1009
1010 MIB.addUse(RSrc)
1011 .addUse(SOffset)
1012 .addImm(ImmOffset)
1013 .addImm(extractGLC(AuxiliaryData))
1014 .addImm(extractSLC(AuxiliaryData))
1015 .addImm(0) // tfe: FIXME: Remove from inst
1016 .addImm(extractDLC(AuxiliaryData))
1017 .addImm(extractSWZ(AuxiliaryData))
1018 .addMemOperand(MMO);
1019
1020 MI.eraseFromParent();
1021
1022 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1023 }
1024
1025 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
1026 switch (MF.getFunction().getCallingConv()) {
1027 case CallingConv::AMDGPU_PS:
1028 return 1;
1029 case CallingConv::AMDGPU_VS:
1030 return 2;
1031 case CallingConv::AMDGPU_GS:
1032 return 3;
1033 case CallingConv::AMDGPU_HS:
1034 case CallingConv::AMDGPU_LS:
1035 case CallingConv::AMDGPU_ES:
1036 report_fatal_error("ds_ordered_count unsupported for this calling conv");
1037 case CallingConv::AMDGPU_CS:
1038 case CallingConv::AMDGPU_KERNEL:
1039 case CallingConv::C:
1040 case CallingConv::Fast:
1041 default:
1042 // Assume other calling conventions are various compute callable functions
1043 return 0;
1044 }
1045 }
1046
1047 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1048 MachineInstr &MI, Intrinsic::ID IntrID) const {
1049 MachineBasicBlock *MBB = MI.getParent();
1050 MachineFunction *MF = MBB->getParent();
1051 const DebugLoc &DL = MI.getDebugLoc();
1052
1053 unsigned IndexOperand = MI.getOperand(7).getImm();
1054 bool WaveRelease = MI.getOperand(8).getImm() != 0;
1055 bool WaveDone = MI.getOperand(9).getImm() != 0;
1056
1057 if (WaveDone && !WaveRelease)
1058 report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1059
1060 unsigned OrderedCountIndex = IndexOperand & 0x3f;
1061 IndexOperand &= ~0x3f;
1062 unsigned CountDw = 0;
1063
1064 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1065 CountDw = (IndexOperand >> 24) & 0xf;
1066 IndexOperand &= ~(0xf << 24);
1067
1068 if (CountDw < 1 || CountDw > 4) {
1069 report_fatal_error(
1070 "ds_ordered_count: dword count must be between 1 and 4");
1071 }
1072 }
1073
1074 if (IndexOperand)
1075 report_fatal_error("ds_ordered_count: bad index operand");
1076
1077 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1078 unsigned ShaderType = getDSShaderTypeValue(*MF);
1079
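  // Pack the DS_ORDERED_COUNT offset field: offset0 holds the ordered-count
  // index shifted left by 2; offset1 packs wave_release (bit 0), wave_done
  // (bit 1), the shader type (bits 3:2), the instruction (bit 4) and, on
  // GFX10+, the dword count minus 1 (bits 7:6). The final offset is
  // offset0 | (offset1 << 8).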
1080 unsigned Offset0 = OrderedCountIndex << 2;
1081 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1082 (Instruction << 4);
1083
1084 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1085 Offset1 |= (CountDw - 1) << 6;
1086
1087 unsigned Offset = Offset0 | (Offset1 << 8);
1088
1089 Register M0Val = MI.getOperand(2).getReg();
1090 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1091 .addReg(M0Val);
1092
1093 Register DstReg = MI.getOperand(0).getReg();
1094 Register ValReg = MI.getOperand(3).getReg();
1095 MachineInstrBuilder DS =
1096 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1097 .addReg(ValReg)
1098 .addImm(Offset)
1099 .cloneMemRefs(MI);
1100
1101 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1102 return false;
1103
1104 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1105 MI.eraseFromParent();
1106 return Ret;
1107 }
1108
1109 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1110 MachineInstr &I) const {
1111 MachineBasicBlock *BB = I.getParent();
1112 unsigned IntrinsicID = I.getIntrinsicID();
1113 switch (IntrinsicID) {
1114 case Intrinsic::amdgcn_exp: {
1115 int64_t Tgt = I.getOperand(1).getImm();
1116 int64_t Enabled = I.getOperand(2).getImm();
1117 int64_t Done = I.getOperand(7).getImm();
1118 int64_t VM = I.getOperand(8).getImm();
1119
1120 MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
1121 I.getOperand(4).getReg(),
1122 I.getOperand(5).getReg(),
1123 I.getOperand(6).getReg(),
1124 VM, false, Enabled, Done);
1125
1126 I.eraseFromParent();
1127 return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
1128 }
1129 case Intrinsic::amdgcn_exp_compr: {
1130 const DebugLoc &DL = I.getDebugLoc();
1131 int64_t Tgt = I.getOperand(1).getImm();
1132 int64_t Enabled = I.getOperand(2).getImm();
1133 Register Reg0 = I.getOperand(3).getReg();
1134 Register Reg1 = I.getOperand(4).getReg();
1135 Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1136 int64_t Done = I.getOperand(5).getImm();
1137 int64_t VM = I.getOperand(6).getImm();
1138
1139 BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1140 MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
1141 true, Enabled, Done);
1142
1143 I.eraseFromParent();
1144 return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
1145 }
1146 case Intrinsic::amdgcn_end_cf: {
1147     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1148 // SelectionDAG uses for wave32 vs wave64.
1149 BuildMI(*BB, &I, I.getDebugLoc(),
1150 TII.get(AMDGPU::SI_END_CF))
1151 .add(I.getOperand(1));
1152
1153 Register Reg = I.getOperand(1).getReg();
1154 I.eraseFromParent();
1155
1156 if (!MRI->getRegClassOrNull(Reg))
1157 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1158 return true;
1159 }
1160 case Intrinsic::amdgcn_raw_buffer_store:
1161 return selectStoreIntrinsic(I, false);
1162 case Intrinsic::amdgcn_raw_buffer_store_format:
1163 return selectStoreIntrinsic(I, true);
1164 case Intrinsic::amdgcn_ds_ordered_add:
1165 case Intrinsic::amdgcn_ds_ordered_swap:
1166 return selectDSOrderedIntrinsic(I, IntrinsicID);
1167 default:
1168 return selectImpl(I, *CoverageInfo);
1169 }
1170 }
1171
1172 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1173 MachineBasicBlock *BB = I.getParent();
1174 const DebugLoc &DL = I.getDebugLoc();
1175
1176 Register DstReg = I.getOperand(0).getReg();
1177 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1178 assert(Size <= 32 || Size == 64);
1179 const MachineOperand &CCOp = I.getOperand(1);
1180 Register CCReg = CCOp.getReg();
1181 if (!isVCC(CCReg, *MRI)) {
1182 unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1183 AMDGPU::S_CSELECT_B32;
1184 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1185 .addReg(CCReg);
1186
1187     // The generic constrainSelectedInstRegOperands doesn't work for the scc
1188     // register bank, because it does not cover the register class we use to
1189     // represent it, so we need to set the register class manually here.
1190 if (!MRI->getRegClassOrNull(CCReg))
1191 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1192 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1193 .add(I.getOperand(2))
1194 .add(I.getOperand(3));
1195
1196 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1197 constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1198 I.eraseFromParent();
1199 return Ret;
1200 }
1201
1202 // Wide VGPR select should have been split in RegBankSelect.
1203 if (Size > 32)
1204 return false;
1205
1206 MachineInstr *Select =
1207 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1208 .addImm(0)
1209 .add(I.getOperand(3))
1210 .addImm(0)
1211 .add(I.getOperand(2))
1212 .add(I.getOperand(1));
1213
1214 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1215 I.eraseFromParent();
1216 return Ret;
1217 }
1218
1219 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
1220 initM0(I);
1221 return selectImpl(I, *CoverageInfo);
1222 }
1223
1224 static int sizeToSubRegIndex(unsigned Size) {
1225 switch (Size) {
1226 case 32:
1227 return AMDGPU::sub0;
1228 case 64:
1229 return AMDGPU::sub0_sub1;
1230 case 96:
1231 return AMDGPU::sub0_sub1_sub2;
1232 case 128:
1233 return AMDGPU::sub0_sub1_sub2_sub3;
1234 case 256:
1235 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1236 default:
1237 if (Size < 32)
1238 return AMDGPU::sub0;
1239 if (Size > 256)
1240 return -1;
1241 return sizeToSubRegIndex(PowerOf2Ceil(Size));
1242 }
1243 }
1244
1245 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1246 Register DstReg = I.getOperand(0).getReg();
1247 Register SrcReg = I.getOperand(1).getReg();
1248 const LLT DstTy = MRI->getType(DstReg);
1249 const LLT SrcTy = MRI->getType(SrcReg);
1250 if (!DstTy.isScalar())
1251 return false;
1252
1253 const LLT S1 = LLT::scalar(1);
1254
1255 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1256 const RegisterBank *DstRB;
1257 if (DstTy == S1) {
1258 // This is a special case. We don't treat s1 for legalization artifacts as
1259 // vcc booleans.
1260 DstRB = SrcRB;
1261 } else {
1262 DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1263 if (SrcRB != DstRB)
1264 return false;
1265 }
1266
1267 unsigned DstSize = DstTy.getSizeInBits();
1268 unsigned SrcSize = SrcTy.getSizeInBits();
1269
1270 const TargetRegisterClass *SrcRC
1271 = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1272 const TargetRegisterClass *DstRC
1273 = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1274
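  // A truncate is selected as a plain (sub)register copy; for sources wider
  // than 32 bits, pick the subregister index covering the low DstSize bits.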
1275 if (SrcSize > 32) {
1276 int SubRegIdx = sizeToSubRegIndex(DstSize);
1277 if (SubRegIdx == -1)
1278 return false;
1279
1280 // Deal with weird cases where the class only partially supports the subreg
1281 // index.
1282 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1283 if (!SrcRC)
1284 return false;
1285
1286 I.getOperand(1).setSubReg(SubRegIdx);
1287 }
1288
1289 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1290 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1291 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1292 return false;
1293 }
1294
1295 I.setDesc(TII.get(TargetOpcode::COPY));
1296 return true;
1297 }
1298
1299 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1300 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1301 Mask = maskTrailingOnes<unsigned>(Size);
1302 int SignedMask = static_cast<int>(Mask);
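  // AMDGPU inline immediates cover -16..64, so masks such as 0x1..0x3f (and
  // 0xffffffff, which reads as -1 when signed) are free to encode in the AND.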
1303 return SignedMask >= -16 && SignedMask <= 64;
1304 }
1305
1306 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1307 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1308 Register Reg, const MachineRegisterInfo &MRI,
1309 const TargetRegisterInfo &TRI) const {
1310 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1311 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1312 return RB;
1313
1314 // Ignore the type, since we don't use vcc in artifacts.
1315 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1316 return &RBI.getRegBankFromRegClass(*RC, LLT());
1317 return nullptr;
1318 }
1319
1320 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1321 bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
1322 const DebugLoc &DL = I.getDebugLoc();
1323 MachineBasicBlock &MBB = *I.getParent();
1324 const Register DstReg = I.getOperand(0).getReg();
1325 const Register SrcReg = I.getOperand(1).getReg();
1326
1327 const LLT DstTy = MRI->getType(DstReg);
1328 const LLT SrcTy = MRI->getType(SrcReg);
1329 const unsigned SrcSize = SrcTy.getSizeInBits();
1330 const unsigned DstSize = DstTy.getSizeInBits();
1331 if (!DstTy.isScalar())
1332 return false;
1333
1334 if (I.getOpcode() == AMDGPU::G_ANYEXT)
1335 return selectCOPY(I);
1336
1337 // Artifact casts should never use vcc.
1338 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1339
1340 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1341 // 64-bit should have been split up in RegBankSelect
1342
1343 // Try to use an and with a mask if it will save code size.
1344 unsigned Mask;
1345 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1346 MachineInstr *ExtI =
1347 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1348 .addImm(Mask)
1349 .addReg(SrcReg);
1350 I.eraseFromParent();
1351 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1352 }
1353
1354 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1355 MachineInstr *ExtI =
1356 BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1357 .addReg(SrcReg)
1358 .addImm(0) // Offset
1359 .addImm(SrcSize); // Width
1360 I.eraseFromParent();
1361 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1362 }
1363
1364 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1365 if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, *MRI))
1366 return false;
1367
1368 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1369 const unsigned SextOpc = SrcSize == 8 ?
1370 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1371 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1372 .addReg(SrcReg);
1373 I.eraseFromParent();
1374 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1375 }
1376
1377 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1378 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1379
1380 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
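    // Here the offset is 0 and the width is SrcSize, hence the SrcSize << 16
    // immediates used below.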
1381 if (DstSize > 32 && SrcSize <= 32) {
1382 // We need a 64-bit register source, but the high bits don't matter.
1383 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1384 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1385 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1386 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1387 .addReg(SrcReg)
1388 .addImm(AMDGPU::sub0)
1389 .addReg(UndefReg)
1390 .addImm(AMDGPU::sub1);
1391
1392 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1393 .addReg(ExtReg)
1394 .addImm(SrcSize << 16);
1395
1396 I.eraseFromParent();
1397 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1398 }
1399
1400 unsigned Mask;
1401 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1402 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1403 .addReg(SrcReg)
1404 .addImm(Mask);
1405 } else {
1406 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1407 .addReg(SrcReg)
1408 .addImm(SrcSize << 16);
1409 }
1410
1411 I.eraseFromParent();
1412 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1413 }
1414
1415 return false;
1416 }
1417
1418 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
1419 MachineBasicBlock *BB = I.getParent();
1420 MachineOperand &ImmOp = I.getOperand(1);
1421
1422 // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
1423 if (ImmOp.isFPImm()) {
1424 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
1425 ImmOp.ChangeToImmediate(Imm.getZExtValue());
1426 } else if (ImmOp.isCImm()) {
1427 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
1428 }
1429
1430 Register DstReg = I.getOperand(0).getReg();
1431 unsigned Size;
1432 bool IsSgpr;
1433 const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
1434 if (RB) {
1435 IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
1436 Size = MRI->getType(DstReg).getSizeInBits();
1437 } else {
1438 const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
1439 IsSgpr = TRI.isSGPRClass(RC);
1440 Size = TRI.getRegSizeInBits(*RC);
1441 }
1442
1443 if (Size != 32 && Size != 64)
1444 return false;
1445
1446 unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1447 if (Size == 32) {
1448 I.setDesc(TII.get(Opcode));
1449 I.addImplicitDefUseOperands(*MF);
1450 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1451 }
1452
1453 const DebugLoc &DL = I.getDebugLoc();
1454
1455 APInt Imm(Size, I.getOperand(1).getImm());
1456
1457 MachineInstr *ResInst;
1458 if (IsSgpr && TII.isInlineConstant(Imm)) {
1459 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1460 .addImm(I.getOperand(1).getImm());
1461 } else {
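    // Materialize the 64-bit constant as two 32-bit moves and recombine the
    // halves with a REG_SEQUENCE.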
1462 const TargetRegisterClass *RC = IsSgpr ?
1463 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
1464 Register LoReg = MRI->createVirtualRegister(RC);
1465 Register HiReg = MRI->createVirtualRegister(RC);
1466
1467 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
1468 .addImm(Imm.trunc(32).getZExtValue());
1469
1470 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
1471 .addImm(Imm.ashr(32).getZExtValue());
1472
1473 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1474 .addReg(LoReg)
1475 .addImm(AMDGPU::sub0)
1476 .addReg(HiReg)
1477 .addImm(AMDGPU::sub1);
1478 }
1479
1480 // We can't call constrainSelectedInstRegOperands here, because it doesn't
1481 // work for target independent opcodes
1482 I.eraseFromParent();
1483 const TargetRegisterClass *DstRC =
1484 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
1485 if (!DstRC)
1486 return true;
1487 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1488 }
1489
1490 static bool isConstant(const MachineInstr &MI) {
1491 return MI.getOpcode() == TargetOpcode::G_CONSTANT;
1492 }
1493
1494 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
1495 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
1496
1497 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
1498
1499 assert(PtrMI);
1500
1501 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
1502 return;
1503
1504 GEPInfo GEPInfo(*PtrMI);
1505
1506 for (unsigned i = 1; i != 3; ++i) {
1507 const MachineOperand &GEPOp = PtrMI->getOperand(i);
1508 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
1509 assert(OpDef);
1510 if (i == 2 && isConstant(*OpDef)) {
1511 // TODO: Could handle constant base + variable offset, but a combine
1512 // probably should have commuted it.
1513 assert(GEPInfo.Imm == 0);
1514 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
1515 continue;
1516 }
1517 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
1518 if (OpBank->getID() == AMDGPU::SGPRRegBankID)
1519 GEPInfo.SgprParts.push_back(GEPOp.getReg());
1520 else
1521 GEPInfo.VgprParts.push_back(GEPOp.getReg());
1522 }
1523
1524 AddrInfo.push_back(GEPInfo);
1525 getAddrModeInfo(*PtrMI, MRI, AddrInfo);
1526 }
1527
1528 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
1529 if (!MI.hasOneMemOperand())
1530 return false;
1531
1532 const MachineMemOperand *MMO = *MI.memoperands_begin();
1533 const Value *Ptr = MMO->getValue();
1534
1535 // UndefValue means this is a load of a kernel input. These are uniform.
1536 // Sometimes LDS instructions have constant pointers.
1537 // If Ptr is null, then that means this mem operand contains a
1538 // PseudoSourceValue like GOT.
1539 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
1540 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
1541 return true;
1542
1543 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
1544 return true;
1545
1546 const Instruction *I = dyn_cast<Instruction>(Ptr);
1547 return I && I->getMetadata("amdgpu.uniform");
1548 }
1549
1550 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
1551 for (const GEPInfo &GEPInfo : AddrInfo) {
1552 if (!GEPInfo.VgprParts.empty())
1553 return true;
1554 }
1555 return false;
1556 }
1557
1558 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
1559 MachineBasicBlock *BB = I.getParent();
1560
1561 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
1562 unsigned AS = PtrTy.getAddressSpace();
1563 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
1564 STI.ldsRequiresM0Init()) {
1565 // If DS instructions require M0 initialization, insert it before selecting.
1566 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1567 .addImm(-1);
1568 }
1569 }
1570
1571 bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
1572 initM0(I);
1573 return selectImpl(I, *CoverageInfo);
1574 }
1575
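// Lower G_BRCOND either to S_CBRANCH_SCC1 for a uniform (SCC) condition or to
// S_CBRANCH_VCCNZ for a divergent (VCC) condition, copying the condition
// register into the corresponding physical register first.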
1576 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
1577 MachineBasicBlock *BB = I.getParent();
1578 MachineOperand &CondOp = I.getOperand(0);
1579 Register CondReg = CondOp.getReg();
1580 const DebugLoc &DL = I.getDebugLoc();
1581
1582 unsigned BrOpcode;
1583 Register CondPhysReg;
1584 const TargetRegisterClass *ConstrainRC;
1585
1586 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
1587 // whether the branch is uniform when selecting the instruction. In
1588 // GlobalISel, we should push that decision into RegBankSelect. Assume for now
1589 // RegBankSelect knows what it's doing if the branch condition is scc, even
1590 // though it currently does not.
1591 if (!isVCC(CondReg, *MRI)) {
1592 if (MRI->getType(CondReg) != LLT::scalar(32))
1593 return false;
1594
1595 CondPhysReg = AMDGPU::SCC;
1596 BrOpcode = AMDGPU::S_CBRANCH_SCC1;
1597 // FIXME: Hack for isSCC tests
1598 ConstrainRC = &AMDGPU::SGPR_32RegClass;
1599 } else {
1600 // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
1601 // We sort of know, based on the register bank, that a VCC producer ands
1602 // inactive lanes with 0. What if there was a logical operation with vcc
1603 // producers in different blocks/with different exec masks?
1604 // FIXME: Should scc->vcc copies and with exec?
1605 CondPhysReg = TRI.getVCC();
1606 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
1607 ConstrainRC = TRI.getBoolRC();
1608 }
1609
1610 if (!MRI->getRegClassOrNull(CondReg))
1611 MRI->setRegClass(CondReg, ConstrainRC);
1612
1613 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
1614 .addReg(CondReg);
1615 BuildMI(*BB, &I, DL, TII.get(BrOpcode))
1616 .addMBB(I.getOperand(1).getMBB());
1617
1618 I.eraseFromParent();
1619 return true;
1620 }
1621
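// A frame index is materialized with a plain 32-bit move into either an SGPR
// or a VGPR, depending on the destination's register bank.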
1622 bool AMDGPUInstructionSelector::selectG_FRAME_INDEX(MachineInstr &I) const {
1623 Register DstReg = I.getOperand(0).getReg();
1624 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1625 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1626 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
1627 if (IsVGPR)
1628 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
1629
1630 return RBI.constrainGenericRegister(
1631 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
1632 }
1633
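// G_PTR_MASK clears the low bits of a pointer; e.g. an operand of 4 clears the
// low four bits with the mask ~((1 << 4) - 1) = 0xfffffffffffffff0. For 64-bit
// pointers only the low half is ANDed and the high half is copied through,
// which is correct as long as the shift amount is below 32, since the high
// 32 bits of the mask are then all ones.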
1634 bool AMDGPUInstructionSelector::selectG_PTR_MASK(MachineInstr &I) const {
1635 uint64_t Align = I.getOperand(2).getImm();
1636 const uint64_t Mask = ~((UINT64_C(1) << Align) - 1);
1637
1638 MachineBasicBlock *BB = I.getParent();
1639
1640 Register DstReg = I.getOperand(0).getReg();
1641 Register SrcReg = I.getOperand(1).getReg();
1642
1643 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1644 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1645 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1646 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1647 unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1648 const TargetRegisterClass &RegRC
1649 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
1650
1651 LLT Ty = MRI->getType(DstReg);
1652
1653 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
1654 *MRI);
1655 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
1656 *MRI);
1657 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1658 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
1659 return false;
1660
1661 const DebugLoc &DL = I.getDebugLoc();
1662 Register ImmReg = MRI->createVirtualRegister(&RegRC);
1663 BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
1664 .addImm(Mask);
1665
1666 if (Ty.getSizeInBits() == 32) {
1667 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
1668 .addReg(SrcReg)
1669 .addReg(ImmReg);
1670 I.eraseFromParent();
1671 return true;
1672 }
1673
1674 Register HiReg = MRI->createVirtualRegister(&RegRC);
1675 Register LoReg = MRI->createVirtualRegister(&RegRC);
1676 Register MaskLo = MRI->createVirtualRegister(&RegRC);
1677
1678 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
1679 .addReg(SrcReg, 0, AMDGPU::sub0);
1680 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
1681 .addReg(SrcReg, 0, AMDGPU::sub1);
1682
1683 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
1684 .addReg(LoReg)
1685 .addReg(ImmReg);
1686 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1687 .addReg(MaskLo)
1688 .addImm(AMDGPU::sub0)
1689 .addReg(HiReg)
1690 .addImm(AMDGPU::sub1);
1691 I.eraseFromParent();
1692 return true;
1693 }
1694
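// Dynamic vector extracts require a scalar index. SGPR sources use
// S_MOVRELS_B32/B64 with the index in M0; VGPR sources use V_MOVRELS_B32, or
// V_MOV_B32 bracketed by S_SET_GPR_IDX_ON/OFF when the subtarget supports
// VGPR index mode.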
1695 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
1696 MachineInstr &MI) const {
1697 Register DstReg = MI.getOperand(0).getReg();
1698 Register SrcReg = MI.getOperand(1).getReg();
1699 Register IdxReg = MI.getOperand(2).getReg();
1700
1701 LLT DstTy = MRI->getType(DstReg);
1702 LLT SrcTy = MRI->getType(SrcReg);
1703
1704 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1705 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1706 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
1707
1708 // The index must be scalar. If it wasn't, RegBankSelect should have moved
1709 // this into a waterfall loop.
1710 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
1711 return false;
1712
1713 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
1714 *MRI);
1715 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
1716 *MRI);
1717 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1718 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1719 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
1720 return false;
1721
1722 MachineBasicBlock *BB = MI.getParent();
1723 const DebugLoc &DL = MI.getDebugLoc();
1724 const bool Is64 = DstTy.getSizeInBits() == 64;
1725
1726 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1727
1728 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
1729 if (DstTy.getSizeInBits() != 32 && !Is64)
1730 return false;
1731
1732 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1733 .addReg(IdxReg);
1734
1735 unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
1736 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
1737 .addReg(SrcReg, 0, SubReg)
1738 .addReg(SrcReg, RegState::Implicit);
1739 MI.eraseFromParent();
1740 return true;
1741 }
1742
1743 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
1744 return false;
1745
1746 if (!STI.useVGPRIndexMode()) {
1747 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1748 .addReg(IdxReg);
1749 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
1750 .addReg(SrcReg, RegState::Undef, SubReg)
1751 .addReg(SrcReg, RegState::Implicit);
1752 MI.eraseFromParent();
1753 return true;
1754 }
1755
1756 BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
1757 .addReg(IdxReg)
1758 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
1759 BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
1760 .addReg(SrcReg, RegState::Undef, SubReg)
1761 .addReg(SrcReg, RegState::Implicit)
1762 .addReg(AMDGPU::M0, RegState::Implicit);
1763 BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
1764
1765 MI.eraseFromParent();
1766 return true;
1767 }
1768
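// Main entry point: dispatch generic opcodes to the hand-written selection
// routines above, mixing in the TableGen-generated selectImpl for the patterns
// it can handle.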
1769 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
1770 if (I.isPHI())
1771 return selectPHI(I);
1772
1773 if (!I.isPreISelOpcode()) {
1774 if (I.isCopy())
1775 return selectCOPY(I);
1776 return true;
1777 }
1778
1779 switch (I.getOpcode()) {
1780 case TargetOpcode::G_AND:
1781 case TargetOpcode::G_OR:
1782 case TargetOpcode::G_XOR:
1783 if (selectG_AND_OR_XOR(I))
1784 return true;
1785 return selectImpl(I, *CoverageInfo);
1786 case TargetOpcode::G_ADD:
1787 case TargetOpcode::G_SUB:
1788 if (selectImpl(I, *CoverageInfo))
1789 return true;
1790 return selectG_ADD_SUB(I);
1791 case TargetOpcode::G_UADDO:
1792 case TargetOpcode::G_USUBO:
1793 case TargetOpcode::G_UADDE:
1794 case TargetOpcode::G_USUBE:
1795 return selectG_UADDO_USUBO_UADDE_USUBE(I);
1796 case TargetOpcode::G_INTTOPTR:
1797 case TargetOpcode::G_BITCAST:
1798 case TargetOpcode::G_PTRTOINT:
1799 return selectCOPY(I);
1800 case TargetOpcode::G_CONSTANT:
1801 case TargetOpcode::G_FCONSTANT:
1802 return selectG_CONSTANT(I);
1803 case TargetOpcode::G_EXTRACT:
1804 return selectG_EXTRACT(I);
1805 case TargetOpcode::G_MERGE_VALUES:
1806 case TargetOpcode::G_BUILD_VECTOR:
1807 case TargetOpcode::G_CONCAT_VECTORS:
1808 return selectG_MERGE_VALUES(I);
1809 case TargetOpcode::G_UNMERGE_VALUES:
1810 return selectG_UNMERGE_VALUES(I);
1811 case TargetOpcode::G_PTR_ADD:
1812 return selectG_PTR_ADD(I);
1813 case TargetOpcode::G_IMPLICIT_DEF:
1814 return selectG_IMPLICIT_DEF(I);
1815 case TargetOpcode::G_INSERT:
1816 return selectG_INSERT(I);
1817 case TargetOpcode::G_INTRINSIC:
1818 return selectG_INTRINSIC(I);
1819 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1820 return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
1821 case TargetOpcode::G_ICMP:
1822 if (selectG_ICMP(I))
1823 return true;
1824 return selectImpl(I, *CoverageInfo);
1825 case TargetOpcode::G_LOAD:
1826 case TargetOpcode::G_ATOMIC_CMPXCHG:
1827 case TargetOpcode::G_ATOMICRMW_XCHG:
1828 case TargetOpcode::G_ATOMICRMW_ADD:
1829 case TargetOpcode::G_ATOMICRMW_SUB:
1830 case TargetOpcode::G_ATOMICRMW_AND:
1831 case TargetOpcode::G_ATOMICRMW_OR:
1832 case TargetOpcode::G_ATOMICRMW_XOR:
1833 case TargetOpcode::G_ATOMICRMW_MIN:
1834 case TargetOpcode::G_ATOMICRMW_MAX:
1835 case TargetOpcode::G_ATOMICRMW_UMIN:
1836 case TargetOpcode::G_ATOMICRMW_UMAX:
1837 case TargetOpcode::G_ATOMICRMW_FADD:
1838 return selectG_LOAD_ATOMICRMW(I);
1839 case TargetOpcode::G_SELECT:
1840 return selectG_SELECT(I);
1841 case TargetOpcode::G_STORE:
1842 return selectG_STORE(I);
1843 case TargetOpcode::G_TRUNC:
1844 return selectG_TRUNC(I);
1845 case TargetOpcode::G_SEXT:
1846 case TargetOpcode::G_ZEXT:
1847 case TargetOpcode::G_ANYEXT:
1848 if (selectImpl(I, *CoverageInfo))
1849 return true;
1850 return selectG_SZA_EXT(I);
1851 case TargetOpcode::G_BRCOND:
1852 return selectG_BRCOND(I);
1853 case TargetOpcode::G_FRAME_INDEX:
1854 return selectG_FRAME_INDEX(I);
1855 case TargetOpcode::G_PTR_MASK:
1856 return selectG_PTR_MASK(I);
1857 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1858 return selectG_EXTRACT_VECTOR_ELT(I);
1859 default:
1860 return selectImpl(I, *CoverageInfo);
1861 }
1862 return false;
1863 }
1864
1865 InstructionSelector::ComplexRendererFns
1866 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
1867 return {{
1868 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
1869 }};
1871 }
1872
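// Peel G_FNEG and G_FABS off of Src and translate them into VOP3 source
// modifier bits, returning the underlying source register and the accumulated
// modifiers; e.g. fneg(fabs(x)) becomes x with NEG | ABS set.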
1873 std::pair<Register, unsigned>
1874 AMDGPUInstructionSelector::selectVOP3ModsImpl(
1875 Register Src) const {
1876 unsigned Mods = 0;
1877 MachineInstr *MI = MRI->getVRegDef(Src);
1878
1879 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
1880 Src = MI->getOperand(1).getReg();
1881 Mods |= SISrcMods::NEG;
1882 MI = MRI->getVRegDef(Src);
1883 }
1884
1885 if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
1886 Src = MI->getOperand(1).getReg();
1887 Mods |= SISrcMods::ABS;
1888 }
1889
1890 return std::make_pair(Src, Mods);
1891 }
1892
1893 ///
1894 /// This will select either an SGPR or VGPR operand and will save us from
1895 /// having to write an extra tablegen pattern.
1896 InstructionSelector::ComplexRendererFns
1897 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
1898 return {{
1899 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
1900 }};
1901 }
1902
1903 InstructionSelector::ComplexRendererFns
1904 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
1905 Register Src;
1906 unsigned Mods;
1907 std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
1908
1909 return {{
1910 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
1911 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
1912 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
1913 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
1914 }};
1915 }
1916
1917 InstructionSelector::ComplexRendererFns
1918 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
1919 return {{
1920 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
1921 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
1922 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
1923 }};
1924 }
1925
1926 InstructionSelector::ComplexRendererFns
1927 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
1928 Register Src;
1929 unsigned Mods;
1930 std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
1931
1932 return {{
1933 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
1934 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
1935 }};
1936 }
1937
1938 InstructionSelector::ComplexRendererFns
1939 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
1940 Register Src;
1941 unsigned Mods;
1942 std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
1943 if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
1944 return None;
1945
1946 return {{
1947 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
1948 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
1949 }};
1950 }
1951
1952 InstructionSelector::ComplexRendererFns
1953 AMDGPUInstructionSelector::selectVOP3OpSelMods0(MachineOperand &Root) const {
1954 // FIXME: Handle clamp and op_sel
1955 return {{
1956 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
1957 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src_mods
1958 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // clamp
1959 }};
1960 }
1961
1962 InstructionSelector::ComplexRendererFns
1963 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
1964 // FIXME: Handle op_sel
1965 return {{
1966 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
1967 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
1968 }};
1969 }
1970
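// Match an SMRD load whose address is a uniform base plus a constant offset
// that is legal for the subtarget's encoded SMEM immediate offset field.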
1971 InstructionSelector::ComplexRendererFns
1972 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
1973 SmallVector<GEPInfo, 4> AddrInfo;
1974 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
1975
1976 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
1977 return None;
1978
1979 const GEPInfo &GEPInfo = AddrInfo[0];
1980
1981 if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
1982 return None;
1983
1984 unsigned PtrReg = GEPInfo.SgprParts[0];
1985 int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
1986 return {{
1987 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
1988 [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
1989 }};
1990 }
1991
1992 InstructionSelector::ComplexRendererFns
1993 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
1994 SmallVector<GEPInfo, 4> AddrInfo;
1995 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
1996
1997 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
1998 return None;
1999
2000 const GEPInfo &GEPInfo = AddrInfo[0];
2001 unsigned PtrReg = GEPInfo.SgprParts[0];
2002 int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
2003 if (!isUInt<32>(EncodedImm))
2004 return None;
2005
2006 return {{
2007 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2008 [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
2009 }};
2010 }
2011
2012 InstructionSelector::ComplexRendererFns
2013 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
2014 MachineInstr *MI = Root.getParent();
2015 MachineBasicBlock *MBB = MI->getParent();
2016
2017 SmallVector<GEPInfo, 4> AddrInfo;
2018 getAddrModeInfo(*MI, *MRI, AddrInfo);
2019
2020 // FIXME: We should shrink the GEP if the offset is known to be <= 32 bits,
2021 // then we can select all ptr + 32-bit offsets, not just immediate offsets.
2022 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2023 return None;
2024
2025 const GEPInfo &GEPInfo = AddrInfo[0];
2026 if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
2027 return None;
2028
2029 // If we make it this far we have a load with a 32-bit immediate offset.
2030 // It is OK to select this using a sgpr offset, because we have already
2031 // failed trying to select this load into one of the _IMM variants since
2032 // the _IMM Patterns are considered before the _SGPR patterns.
2033 unsigned PtrReg = GEPInfo.SgprParts[0];
2034 Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2035 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
2036 .addImm(GEPInfo.Imm);
2037 return {{
2038 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2039 [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
2040 }};
2041 }
2042
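// Try to fold the constant offset of a G_PTR_ADD into the immediate offset
// field of a flat memory access, when the subtarget supports flat instruction
// offsets and the constant is legal for the address space.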
2043 template <bool Signed>
2044 InstructionSelector::ComplexRendererFns
2045 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
2046 MachineInstr *MI = Root.getParent();
2047
2048 InstructionSelector::ComplexRendererFns Default = {{
2049 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2050 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // offset
2051 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // slc
2052 }};
2053
2054 if (!STI.hasFlatInstOffsets())
2055 return Default;
2056
2057 const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
2058 if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
2059 return Default;
2060
2061 Optional<int64_t> Offset =
2062 getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
2063 if (!Offset.hasValue())
2064 return Default;
2065
2066 unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
2067 if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
2068 return Default;
2069
2070 Register BasePtr = OpDef->getOperand(1).getReg();
2071
2072 return {{
2073 [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
2074 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
2075 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // slc
2076 }};
2077 }
2078
2079 InstructionSelector::ComplexRendererFns
2080 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
2081 return selectFlatOffsetImpl<false>(Root);
2082 }
2083
2084 InstructionSelector::ComplexRendererFns
2085 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
2086 return selectFlatOffsetImpl<true>(Root);
2087 }
2088
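// Return true if the pointer info refers to a stack pseudo source value, i.e.
// the access is relative to the stack pointer rather than the scratch wave
// offset.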
2089 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
2090 auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
2091 return PSV && PSV->isStack();
2092 }
2093
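// Select the rsrc/vaddr/soffset/offset operands for a scratch access with a
// VGPR address: either split a pure constant address into a high-bits VGPR
// plus a 12-bit immediate, or fold a frame index and/or constant offset into
// the vaddr and offset fields.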
2094 InstructionSelector::ComplexRendererFns
2095 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
2096 MachineInstr *MI = Root.getParent();
2097 MachineBasicBlock *MBB = MI->getParent();
2098 MachineFunction *MF = MBB->getParent();
2099 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2100
2101 int64_t Offset = 0;
2102 if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {
2103 Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2104
2105 // TODO: Should this be inside the render function? The iterator seems to
2106 // move.
2107 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
2108 HighBits)
2109 .addImm(Offset & ~4095);
2110
2111 return {{[=](MachineInstrBuilder &MIB) { // rsrc
2112 MIB.addReg(Info->getScratchRSrcReg());
2113 },
2114 [=](MachineInstrBuilder &MIB) { // vaddr
2115 MIB.addReg(HighBits);
2116 },
2117 [=](MachineInstrBuilder &MIB) { // soffset
2118 const MachineMemOperand *MMO = *MI->memoperands_begin();
2119 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2120
2121 Register SOffsetReg = isStackPtrRelative(PtrInfo)
2122 ? Info->getStackPtrOffsetReg()
2123 : Info->getScratchWaveOffsetReg();
2124 MIB.addReg(SOffsetReg);
2125 },
2126 [=](MachineInstrBuilder &MIB) { // offset
2127 MIB.addImm(Offset & 4095);
2128 }}};
2129 }
2130
2131 assert(Offset == 0);
2132
2133 // Try to fold a frame index directly into the MUBUF vaddr field, and any
2134 // offsets.
2135 Optional<int> FI;
2136 Register VAddr = Root.getReg();
2137 if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
2138 if (isBaseWithConstantOffset(Root, *MRI)) {
2139 const MachineOperand &LHS = RootDef->getOperand(1);
2140 const MachineOperand &RHS = RootDef->getOperand(2);
2141 const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2142 const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2143 if (LHSDef && RHSDef) {
2144 int64_t PossibleOffset =
2145 RHSDef->getOperand(1).getCImm()->getSExtValue();
2146 if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
2147 (!STI.privateMemoryResourceIsRangeChecked() ||
2148 KnownBits->signBitIsZero(LHS.getReg()))) {
2149 if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
2150 FI = LHSDef->getOperand(1).getIndex();
2151 else
2152 VAddr = LHS.getReg();
2153 Offset = PossibleOffset;
2154 }
2155 }
2156 } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
2157 FI = RootDef->getOperand(1).getIndex();
2158 }
2159 }
2160
2161 // If we don't know this private access is a local stack object, it needs to
2162 // be relative to the entry point's scratch wave offset register.
2163 // TODO: Should split large offsets that don't fit like above.
2164 // TODO: Don't use scratch wave offset just because the offset didn't fit.
2165 Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()
2166 : Info->getScratchWaveOffsetReg();
2167
2168 return {{[=](MachineInstrBuilder &MIB) { // rsrc
2169 MIB.addReg(Info->getScratchRSrcReg());
2170 },
2171 [=](MachineInstrBuilder &MIB) { // vaddr
2172 if (FI.hasValue())
2173 MIB.addFrameIndex(FI.getValue());
2174 else
2175 MIB.addReg(VAddr);
2176 },
2177 [=](MachineInstrBuilder &MIB) { // soffset
2178 MIB.addReg(SOffset);
2179 },
2180 [=](MachineInstrBuilder &MIB) { // offset
2181 MIB.addImm(Offset);
2182 }}};
2183 }
2184
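// A DS offset is legal if it fits in the instruction's 8- or 16-bit offset
// field and, on subtargets where a negative base plus offset misbehaves, the
// base address is known to be non-negative.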
2185 bool AMDGPUInstructionSelector::isDSOffsetLegal(const MachineRegisterInfo &MRI,
2186 const MachineOperand &Base,
2187 int64_t Offset,
2188 unsigned OffsetBits) const {
2189 if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
2190 (OffsetBits == 8 && !isUInt<8>(Offset)))
2191 return false;
2192
2193 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
2194 return true;
2195
2196 // On Southern Islands, instructions with a negative base value and an offset
2197 // don't seem to work.
2198 return KnownBits->signBitIsZero(Base.getReg());
2199 }
2200
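// Match a scratch access whose address is a plain constant that fits in the
// MUBUF immediate offset field, addressed relative to either the stack pointer
// or the scratch wave offset.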
2201 InstructionSelector::ComplexRendererFns
2202 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
2203 MachineOperand &Root) const {
2204 MachineInstr *MI = Root.getParent();
2205 MachineBasicBlock *MBB = MI->getParent();
2206
2207 int64_t Offset = 0;
2208 if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
2209 !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
2210 return {};
2211
2212 const MachineFunction *MF = MBB->getParent();
2213 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2214 const MachineMemOperand *MMO = *MI->memoperands_begin();
2215 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2216
2217 Register SOffsetReg = isStackPtrRelative(PtrInfo)
2218 ? Info->getStackPtrOffsetReg()
2219 : Info->getScratchWaveOffsetReg();
2220 return {{
2221 [=](MachineInstrBuilder &MIB) {
2222 MIB.addReg(Info->getScratchRSrcReg());
2223 }, // rsrc
2224 [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffsetReg); }, // soffset
2225 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
2226 }};
2227 }
2228
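// Match the single-address DS addressing mode: a base register plus a constant
// offset that fits in the 16-bit offset field, otherwise the address itself
// with a zero offset.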
2229 InstructionSelector::ComplexRendererFns
2230 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
2231 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
2232 if (!RootDef) {
2233 return {{
2234 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2235 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }
2236 }};
2237 }
2238
2239 int64_t ConstAddr = 0;
2240 if (isBaseWithConstantOffset(Root, *MRI)) {
2241 const MachineOperand &LHS = RootDef->getOperand(1);
2242 const MachineOperand &RHS = RootDef->getOperand(2);
2243 const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2244 const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2245 if (LHSDef && RHSDef) {
2246 int64_t PossibleOffset =
2247 RHSDef->getOperand(1).getCImm()->getSExtValue();
2248 if (isDSOffsetLegal(*MRI, LHS, PossibleOffset, 16)) {
2249 // (add n0, c0)
2250 return {{
2251 [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
2252 [=](MachineInstrBuilder &MIB) { MIB.addImm(PossibleOffset); }
2253 }};
2254 }
2255 }
2256 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
2257 // Not yet handled.
2260 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
2261 // Not yet handled.
2263 }
2264
2265 return {{
2266 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2267 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }
2268 }};
2269 }
2270
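// The render functions below are custom operand renderers referenced from the
// imported TableGen patterns; they emit immediate operands in the form the
// selected instructions expect (truncated, negated, bit-cast, or popcount
// forms of constants).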
2271 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
2272 const MachineInstr &MI,
2273 int OpIdx) const {
2274 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2275 "Expected G_CONSTANT");
2276 Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), *MRI);
2277 assert(CstVal && "Expected constant value");
2278 MIB.addImm(CstVal.getValue());
2279 }
2280
2281 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
2282 const MachineInstr &MI,
2283 int OpIdx) const {
2284 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2285 "Expected G_CONSTANT");
2286 MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
2287 }
2288
2289 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
2290 const MachineInstr &MI,
2291 int OpIdx) const {
2292 assert(OpIdx == -1);
2293
2294 const MachineOperand &Op = MI.getOperand(1);
2295 if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
2296 MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
2297 else {
2298 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
2299 MIB.addImm(Op.getCImm()->getSExtValue());
2300 }
2301 }
2302
2303 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
2304 const MachineInstr &MI,
2305 int OpIdx) const {
2306 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2307 "Expected G_CONSTANT");
2308 MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
2309 }
2310
2311 /// This only really exists to satisfy DAG type checking machinery, so is a
2312 /// no-op here.
2313 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
2314 const MachineInstr &MI,
2315 int OpIdx) const {
2316 MIB.addImm(MI.getOperand(OpIdx).getImm());
2317 }
2318
2319 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
2320 return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
2321 }
2322
2323 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
2324 return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
2325 }
2326
2327 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
2328 return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
2329 }
2330
2331 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
2332 return TII.isInlineConstant(Imm);
2333 }
2334