//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  void shrinkMIMG(MachineInstr &MI);

public:
  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
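/// For example (illustrative MIR; register numbers are arbitrary):
///   %1:vgpr_32 = V_MOV_B32_e32 305419896
///   %2:vgpr_32 = V_ADD_F32_e32 %1, %3
/// becomes
///   %2:vgpr_32 = V_ADD_F32_e32 305419896, %3   ; the V_MOV_B32 is erased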
static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
      MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                               isUInt<32>(MovSrc.getImm()))) {
          // It's possible to have only one component of a super-reg defined by
          // a single mov, so we need to clear any subregister flag.
          Src0.setSubReg(0);
          Src0.ChangeToImmediate(MovSrc.getImm());
          ConstantFolded = true;
        } else if (MovSrc.isFI()) {
          Src0.setSubReg(0);
          Src0.ChangeToFrameIndex(MovSrc.getIndex());
          ConstantFolded = true;
        } else if (MovSrc.isGlobal()) {
          Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                          MovSrc.getTargetFlags());
          ConstantFolded = true;
        }

        if (ConstantFolded) {
          assert(MRI.use_empty(Reg));
          Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, TII, MRI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
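/// For example, 0x80000000 (a bare sign bit) is not an inline immediate, but
/// its bit reversal is 1, which is, so the value can be produced by a
/// v_bfrev_b32 of 1 instead of a 32-bit literal.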
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

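// Rewrite a scalar compare against a 16-bit literal to the SOPK form, e.g.
// (illustrative) s_cmp_lg_u32 s0, 0x1234 -> s_cmpk_lg_u32 s0, 0x1234, which
// drops the 32-bit literal from the encoding.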
static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}

// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
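// For example (illustrative), an NSA image_sample whose address operands live
// in v4, v5 and v6 can be re-encoded in the non-NSA form using the contiguous
// tuple v[4:6].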
void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA)
    return;

  MachineFunction *MF = MI.getParent()->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords <= 8) {
    RC = &AMDGPU::VReg_256RegClass;
    NewAddrDwords = 8;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }

  unsigned VgprBase = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  for (unsigned i = 0; i < Info->VAddrDwords; ++i) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + i);
    unsigned Vgpr = TRI.getHWRegIndex(Op.getReg());

    if (i == 0) {
      VgprBase = Vgpr;
    } else if (VgprBase + i != Vgpr)
      return;

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  // Further check for implicit tied operands - these may be present if TFE is
  // enabled.
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled so we need to deal with an implicit tied operand.
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // This is the tied operand.
        assert(
            ToUntie == -1 &&
            "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, AMDGPU::MIMGEncGfx10Default,
                            Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < Info->VAddrDwords; ++i)
    MI.RemoveOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (Info->VAddrDwords - 1));
  }
}

/// Attempt to shrink AND/OR/XOR operations requiring non-inlineable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
/// If the inverse of the immediate is legal, use ANDN2, ORN2 or
/// XNOR (as a ^ b == ~(a ^ ~b)).
/// \returns true if the caller should continue the machine function iterator.
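/// A couple of illustrative cases (register choice is arbitrary):
///   s_and_b32 s0, s0, 0xffffdfff  ->  s_bitset0_b32 s0, 13
///   s_and_b32 s0, s0, 0xffffffc0  ->  s_andn2_b32 s0, s0, 63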
static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
                                MachineRegisterInfo &MRI,
                                const SIInstrInfo *TII,
                                MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *Src0 = &MI.getOperand(1);
  MachineOperand *Src1 = &MI.getOperand(2);
  MachineOperand *SrcReg = Src0;
  MachineOperand *SrcImm = Src1;

  if (SrcImm->isImm() &&
      !AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST.hasInv2PiInlineImm())) {
    uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
    uint32_t NewImm = 0;

    if (Opc == AMDGPU::S_AND_B32) {
      if (isPowerOf2_32(~Imm)) {
        NewImm = countTrailingOnes(Imm);
        Opc = AMDGPU::S_BITSET0_B32;
      } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
        NewImm = ~Imm;
        Opc = AMDGPU::S_ANDN2_B32;
      }
    } else if (Opc == AMDGPU::S_OR_B32) {
      if (isPowerOf2_32(Imm)) {
        NewImm = countTrailingZeros(Imm);
        Opc = AMDGPU::S_BITSET1_B32;
      } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
        NewImm = ~Imm;
        Opc = AMDGPU::S_ORN2_B32;
      }
    } else if (Opc == AMDGPU::S_XOR_B32) {
      if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
        NewImm = ~Imm;
        Opc = AMDGPU::S_XNOR_B32;
      }
    } else {
      llvm_unreachable("unexpected opcode");
    }

    if ((Opc == AMDGPU::S_ANDN2_B32 || Opc == AMDGPU::S_ORN2_B32) &&
        SrcImm == Src0) {
      if (!TII->commuteInstruction(MI, false, 1, 2))
        NewImm = 0;
    }

    if (NewImm != 0) {
      if (Register::isVirtualRegister(Dest->getReg()) && SrcReg->isReg()) {
        MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
        MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
        return true;
      }

      if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
        MI.setDesc(TII->get(Opc));
        if (Opc == AMDGPU::S_BITSET0_B32 ||
            Opc == AMDGPU::S_BITSET1_B32) {
          Src0->ChangeToImmediate(NewImm);
          // Remove the immediate and add the tied input.
          MI.getOperand(2).ChangeToRegister(Dest->getReg(), false);
          MI.tieOperands(0, 2);
        } else {
          SrcImm->setImm(NewImm);
        }
      }
    }
  }

  return false;
}

// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
                          unsigned Reg, unsigned SubReg,
                          const SIRegisterInfo &TRI) {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Register::isPhysicalRegister(Reg) &&
        Register::isPhysicalRegister(MO.getReg())) {
      if (TRI.regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Register::isVirtualRegister(Reg)) {
      LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
                            TRI.getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

static bool instReadsReg(const MachineInstr *MI,
                         unsigned Reg, unsigned SubReg,
                         const SIRegisterInfo &TRI) {
  return instAccessReg(MI->uses(), Reg, SubReg, TRI);
}

static bool instModifiesReg(const MachineInstr *MI,
                            unsigned Reg, unsigned SubReg,
                            const SIRegisterInfo &TRI) {
  return instAccessReg(MI->defs(), Reg, SubReg, TRI);
}

static TargetInstrInfo::RegSubRegPair
getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
                  const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
  if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
    if (Register::isPhysicalRegister(Reg)) {
      Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
    } else {
      LaneBitmask LM = TRI.getSubRegIndexLaneMask(Sub);
      Sub = TRI.getSubRegFromChannel(I + countTrailingZeros(LM.getAsInteger()));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}

// Match:
// mov t, x
// mov x, y
// mov y, t
//
// =>
//
// mov t, x (t is potentially dead and move eliminated)
// v_swap_b32 x, y
//
// Returns next valid instruction pointer if we were able to create v_swap_b32.
//
// This shall not be done too early so as not to prevent possible folding
// which may remove the matched moves. It should preferably be done before RA,
// to release saved registers, and possibly also after RA, which can insert
// copies too.
//
// This is really just a generic peephole that is not a canonical shrinking,
// although the requirements match the pass placement and it reduces code size
// too.
static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
                               const SIInstrInfo *TII) {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0) / 4;

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  if (!TRI.isVGPR(MRI, X))
    return nullptr;

  for (MachineOperand &YTop : MRI.use_nodbg_operands(T)) {
    if (YTop.getSubReg() != Tsub)
      continue;

    MachineInstr &MovY = *YTop.getParent();
    if ((MovY.getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY.getOpcode() != AMDGPU::COPY) ||
        MovY.getOperand(1).getSubReg() != Tsub)
      continue;

    Register Y = MovY.getOperand(0).getReg();
    unsigned Ysub = MovY.getOperand(0).getSubReg();

    if (!TRI.isVGPR(MRI, Y) || MovT.getParent() != MovY.getParent())
      continue;

    MachineInstr *MovX = nullptr;
    auto I = std::next(MovT.getIterator()), E = MovT.getParent()->instr_end();
    for (auto IY = MovY.getIterator(); I != E && I != IY; ++I) {
      if (instReadsReg(&*I, X, Xsub, TRI) ||
          instModifiesReg(&*I, Y, Ysub, TRI) ||
          instModifiesReg(&*I, T, Tsub, TRI) ||
          (MovX && instModifiesReg(&*I, X, Xsub, TRI))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub, TRI)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub, TRI)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }
      MovX = &*I;
    }

    if (!MovX || I == E)
      continue;

    LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << MovY);

    for (unsigned I = 0; I < Size; ++I) {
      TargetInstrInfo::RegSubRegPair X1, Y1;
      X1 = getSubRegForIndex(X, Xsub, I, TRI, MRI);
      Y1 = getSubRegForIndex(Y, Ysub, I, TRI, MRI);
      BuildMI(*MovT.getParent(), MovX->getIterator(), MovT.getDebugLoc(),
              TII->get(AMDGPU::V_SWAP_B32))
        .addDef(X1.Reg, 0, X1.SubReg)
        .addDef(Y1.Reg, 0, Y1.SubReg)
        .addReg(Y1.Reg, 0, Y1.SubReg)
        .addReg(X1.Reg, 0, X1.SubReg).getInstr();
    }
    MovX->eraseFromParent();
    MovY.eraseFromParent();
    MachineInstr *Next = &*std::next(MovT.getIterator());
    if (MRI.use_nodbg_empty(T))
      MovT.eraseFromParent();
    else
      Xop.setIsKill(false);

    return Next;
  }

  return nullptr;
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.
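        // For example, v_mov_b32 v0, 0x80000000 can be rewritten as
        // v_bfrev_b32 v0, 1 (illustrative), since reversing the bits of 1
        // yields 0x80000000.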

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            Register::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      if (ST.hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                           MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI, MRI, TII)) {
          Next = NextMI->getIterator();
          continue;
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how long
      // to wait.
      //
      // s_nop N
      // s_nop M
      // =>
      // s_nop (N + M)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          MI.getNumOperands() == 1 && // Don't merge with implicit operands
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP &&
          (*Next).getNumOperands() == 1) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware if the immediate will be free since
      // it won't know if the src == dest constraint will end up being
      // satisfied.
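      // When the destination matches src0, s_add_i32/s_mul_i32 with a 16-bit
      // literal can use the tied SOPK forms, e.g. (illustrative)
      //   s_add_i32 s0, s0, 0x1234  ->  s_addk_i32 s0, 0x1234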
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (Register::isVirtualRegister(Dest->getReg()) && Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*.
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
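      // For example (illustrative): s_mov_b32 s0, 0x1234 -> s_movk_i32 s0, 0x1234.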
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Register::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(ST, MRI, TII, MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          ST.getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!TII->canShrink(MI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        Register DstReg = MI.getOperand(0).getReg();
        if (Register::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, VCCReg);
          continue;
        }
        if (DstReg != VCCReg)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (Register::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);

      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (Register::isVirtualRegister(SDst->getReg()))
            MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Register::isVirtualRegister(Src2->getReg()))
            MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // We can shrink this instruction.
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}
801