//===- SIPeepholeSDWA.cpp - Peephole optimization for SDWA instructions ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
///   V_LSHRREV_B32_e32 %0, 16, %1
///   V_ADD_I32_e32 %2, %0, %3
///   V_LSHLREV_B32_e32 %4, 16, %2
///
/// Replace:
///   V_ADD_I32_sdwa %4, %1, %3
///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <unordered_map>

using namespace llvm;

#define DEBUG_TYPE "si-peephole-sdwa"
STATISTIC(NumSDWAPatternsFound, "Number of SDWA patterns found.");
STATISTIC(NumSDWAInstructionsPeepholed,
          "Number of instructions converted to SDWA.");

namespace {

class SDWAOperand;
class SDWADstOperand;

class SIPeepholeSDWA : public MachineFunctionPass {
public:
  using SDWAOperandsVector = SmallVector<SDWAOperand *, 4>;

private:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  MapVector<MachineInstr *, std::unique_ptr<SDWAOperand>> SDWAOperands;
  MapVector<MachineInstr *, SDWAOperandsVector> PotentialMatches;
  SmallVector<MachineInstr *, 8> ConvertedInstructions;

  Optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
  static char ID;

  SIPeepholeSDWA() : MachineFunctionPass(ID) {
    initializeSIPeepholeSDWAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void matchSDWAOperands(MachineBasicBlock &MBB);
  std::unique_ptr<SDWAOperand> matchSDWAOperand(MachineInstr &MI);
  bool isConvertibleToSDWA(MachineInstr &MI, const GCNSubtarget &ST) const;
  void pseudoOpConvertToVOP2(MachineInstr &MI,
                             const GCNSubtarget &ST) const;
  bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
  void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;

  StringRef getPassName() const override { return "SI Peephole SDWA"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

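/// Base class for a matched SDWA operand pattern. It records which operand of
/// a neighbouring instruction should be rewritten (Replaced) and which
/// register operand should be used instead (Target); subclasses decide
/// whether the rewrite becomes an SDWA source or destination.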
class SDWAOperand {
private:
  MachineOperand *Target; // Operand that would be used in converted instruction
  MachineOperand *Replaced; // Operand that would be replaced by Target

public:
  SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
      : Target(TargetOp), Replaced(ReplacedOp) {
    assert(Target->isReg());
    assert(Replaced->isReg());
  }

  virtual ~SDWAOperand() = default;

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) = 0;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;

  MachineOperand *getTargetOperand() const { return Target; }
  MachineOperand *getReplacedOperand() const { return Replaced; }
  MachineInstr *getParentInst() const { return Target->getParent(); }

  MachineRegisterInfo *getMRI() const {
    return &getParentInst()->getParent()->getParent()->getRegInfo();
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  virtual void print(raw_ostream& OS) const = 0;
  void dump() const { print(dbgs()); }
#endif
};

using namespace AMDGPU::SDWA;

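/// Matched pattern that becomes an SDWA *source* operand: the parent
/// instruction (a shift, bfe, or mask) extracts part of a register, so the
/// single user of its result can instead read the original register with the
/// recorded src_sel and abs/neg/sext modifiers.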
class SDWASrcOperand : public SDWAOperand {
private:
  SdwaSel SrcSel;
  bool Abs;
  bool Neg;
  bool Sext;

public:
  SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
                 bool Sext_ = false)
      : SDWAOperand(TargetOp, ReplacedOp),
        SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getSrcSel() const { return SrcSel; }
  bool getAbs() const { return Abs; }
  bool getNeg() const { return Neg; }
  bool getSext() const { return Sext; }

  uint64_t getSrcMods(const SIInstrInfo *TII,
                      const MachineOperand *SrcOp) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

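/// Matched pattern that becomes an SDWA *destination* operand: the parent
/// instruction (e.g. v_lshlrev) places a value into part of a register, so
/// the instruction defining its input can instead write the final register
/// directly with the recorded dst_sel and dst_unused policy.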
class SDWADstOperand : public SDWAOperand {
private:
  SdwaSel DstSel;
  DstUnused DstUn;

public:

  SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
      : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getDstSel() const { return DstSel; }
  DstUnused getDstUnused() const { return DstUn; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

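/// SDWA destination operand for the dst_unused:UNUSED_PRESERVE case, matched
/// from a v_or_b32 that merges an SDWA result with another value: the bits
/// that the SDWA write leaves untouched are kept from Preserve via a tied
/// implicit use.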
class SDWADstPreserveOperand : public SDWADstOperand {
private:
  MachineOperand *Preserve;

public:
  SDWADstPreserveOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                         MachineOperand *PreserveOp, SdwaSel DstSel_ = DWORD)
      : SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
        Preserve(PreserveOp) {}

  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  MachineOperand *getPreservedOperand() const { return Preserve; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

} // end anonymous namespace

INITIALIZE_PASS(SIPeepholeSDWA, DEBUG_TYPE, "SI Peephole SDWA", false, false)

char SIPeepholeSDWA::ID = 0;

char &llvm::SIPeepholeSDWAID = SIPeepholeSDWA::ID;

FunctionPass *llvm::createSIPeepholeSDWAPass() {
  return new SIPeepholeSDWA();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static raw_ostream& operator<<(raw_ostream &OS, SdwaSel Sel) {
  switch(Sel) {
  case BYTE_0: OS << "BYTE_0"; break;
  case BYTE_1: OS << "BYTE_1"; break;
  case BYTE_2: OS << "BYTE_2"; break;
  case BYTE_3: OS << "BYTE_3"; break;
  case WORD_0: OS << "WORD_0"; break;
  case WORD_1: OS << "WORD_1"; break;
  case DWORD: OS << "DWORD"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const DstUnused &Un) {
  switch(Un) {
  case UNUSED_PAD: OS << "UNUSED_PAD"; break;
  case UNUSED_SEXT: OS << "UNUSED_SEXT"; break;
  case UNUSED_PRESERVE: OS << "UNUSED_PRESERVE"; break;
  }
  return OS;
}

LLVM_DUMP_METHOD
void SDWASrcOperand::print(raw_ostream& OS) const {
  OS << "SDWA src: " << *getTargetOperand()
     << " src_sel:" << getSrcSel()
     << " abs:" << getAbs() << " neg:" << getNeg()
     << " sext:" << getSext() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstOperand::print(raw_ostream& OS) const {
  OS << "SDWA dst: " << *getTargetOperand()
     << " dst_sel:" << getDstSel()
     << " dst_unused:" << getDstUnused() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstPreserveOperand::print(raw_ostream& OS) const {
  OS << "SDWA preserve dst: " << *getTargetOperand()
     << " dst_sel:" << getDstSel()
     << " preserve:" << *getPreservedOperand() << '\n';
}

#endif

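/// Copy the register number, subregister index, and the relevant flags
/// (undef, plus kill for uses or dead for defs) from one register operand to
/// another.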
static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
  assert(To.isReg() && From.isReg());
  To.setReg(From.getReg());
  To.setSubReg(From.getSubReg());
  To.setIsUndef(From.isUndef());
  if (To.isUse()) {
    To.setIsKill(From.isKill());
  } else {
    To.setIsDead(From.isDead());
  }
}

static bool isSameReg(const MachineOperand &LHS, const MachineOperand &RHS) {
  return LHS.isReg() &&
         RHS.isReg() &&
         LHS.getReg() == RHS.getReg() &&
         LHS.getSubReg() == RHS.getSubReg();
}

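/// If the register defined by \p Reg is used by exactly one instruction
/// (ignoring debug uses), and never through a different subregister, return
/// one of its use operands; otherwise return nullptr.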
static MachineOperand *findSingleRegUse(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg() || !Reg->isDef())
    return nullptr;

  MachineOperand *ResMO = nullptr;
  for (MachineOperand &UseMO : MRI->use_nodbg_operands(Reg->getReg())) {
    // If there exists a use of a subreg of Reg then return nullptr
    if (!isSameReg(UseMO, *Reg))
      return nullptr;

    // Check that there is only one instruction that uses Reg
    if (!ResMO) {
      ResMO = &UseMO;
    } else if (ResMO->getParent() != UseMO.getParent()) {
      return nullptr;
    }
  }

  return ResMO;
}

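/// Return the explicit def operand of the unique instruction that defines the
/// register used by \p Reg, or nullptr if there is no unique defining
/// instruction or the register is only defined implicitly.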
static MachineOperand *findSingleRegDef(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg())
    return nullptr;

  MachineInstr *DefInstr = MRI->getUniqueVRegDef(Reg->getReg());
  if (!DefInstr)
    return nullptr;

  for (auto &DefMO : DefInstr->defs()) {
    if (DefMO.isReg() && DefMO.getReg() == Reg->getReg())
      return &DefMO;
  }

  // Ignore implicit defs.
  return nullptr;
}

uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
                                    const MachineOperand *SrcOp) const {
  uint64_t Mods = 0;
  const auto *MI = SrcOp->getParent();
  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      Mods = Mod->getImm();
    }
  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {
      Mods = Mod->getImm();
    }
  }
  if (Abs || Neg) {
    assert(!Sext &&
           "Float and integer src modifiers can't be set simultaneously");
    Mods |= Abs ? SISrcMods::ABS : 0u;
    Mods ^= Neg ? SISrcMods::NEG : 0u;
  } else if (Sext) {
    Mods |= SISrcMods::SEXT;
  }

  return Mods;
}

MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA src operand, the potential instruction is the one that uses
  // the register defined by the parent instruction.
  MachineOperand *PotentialMO = findSingleRegUse(getReplacedOperand(), getMRI());
  if (!PotentialMO)
    return nullptr;

  return PotentialMO->getParent();
}

bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Find the operand in the instruction that matches the replaced operand and
  // rewrite it to the target operand. Set the corresponding src_sel.
  bool IsPreserveSrc = false;
  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  MachineOperand *SrcMods =
      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  assert(Src && (Src->isReg() || Src->isImm()));
  if (!isSameReg(*Src, *getReplacedOperand())) {
    // If this is not src0 then it could be src1
    Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    SrcMods = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

    if (!Src ||
        !isSameReg(*Src, *getReplacedOperand())) {
      // It's possible this Src is a tied operand for
      // UNUSED_PRESERVE, in which case we can either
      // abandon the peephole attempt, or if legal we can
      // copy the target operand into the tied slot
      // if the preserve operation will effectively cause the same
      // result by overwriting the rest of the dst.
      MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
      MachineOperand *DstUnused =
        TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);

      if (Dst &&
          DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
        // This will work if the tied src is accessing WORD_0, and the dst is
        // writing WORD_1. Modifiers don't matter because all the bits that
        // would be impacted are being overwritten by the dst.
        // Any other case will not work.
        SdwaSel DstSel = static_cast<SdwaSel>(
            TII->getNamedImmOperand(MI, AMDGPU::OpName::dst_sel));
        if (DstSel == AMDGPU::SDWA::SdwaSel::WORD_1 &&
            getSrcSel() == AMDGPU::SDWA::SdwaSel::WORD_0) {
          IsPreserveSrc = true;
          auto DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                   AMDGPU::OpName::vdst);
          auto TiedIdx = MI.findTiedOperandIdx(DstIdx);
          Src = &MI.getOperand(TiedIdx);
          SrcSel = nullptr;
          SrcMods = nullptr;
        } else {
          // Not legal to convert this src
          return false;
        }
      }
    }
    assert(Src && Src->isReg());

    if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_FMAC_F32_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
        !isSameReg(*Src, *getReplacedOperand())) {
      // In case of v_mac_f16/32_sdwa this pass can try to apply src operand to
      // src2. This is not allowed.
      return false;
    }

    assert(isSameReg(*Src, *getReplacedOperand()) &&
           (IsPreserveSrc || (SrcSel && SrcMods)));
  }
  copyRegOperand(*Src, *getTargetOperand());
  if (!IsPreserveSrc) {
    SrcSel->setImm(getSrcSel());
    SrcMods->setImm(getSrcMods(TII, Src));
  }
  getTargetOperand()->setIsKill(false);
  return true;
}

MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA dst operand, the potential instruction is the one that
  // defines the register that this operand uses.
  MachineRegisterInfo *MRI = getMRI();
  MachineInstr *ParentMI = getParentInst();

  MachineOperand *PotentialMO = findSingleRegDef(getReplacedOperand(), MRI);
  if (!PotentialMO)
    return nullptr;

  // Check that ParentMI is the only instruction that uses the replaced
  // register.
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(PotentialMO->getReg())) {
    if (&UseInst != ParentMI)
      return nullptr;
  }

  return PotentialMO->getParent();
}

bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Replace the vdst operand in MI with the target operand. Set dst_sel and
  // dst_unused.

  if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_FMAC_F32_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
      getDstSel() != AMDGPU::SDWA::DWORD) {
    // v_mac_f16/32_sdwa only allows dst_sel to be DWORD
    return false;
  }

  MachineOperand *Operand = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  assert(Operand &&
         Operand->isReg() &&
         isSameReg(*Operand, *getReplacedOperand()));
  copyRegOperand(*Operand, *getTargetOperand());
  MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
  assert(DstSel);
  DstSel->setImm(getDstSel());
  MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  assert(DstUnused);
  DstUnused->setImm(getDstUnused());

  // Remove the original instruction because its register definition would
  // conflict with the new instruction.
  getParentInst()->eraseFromParent();
  return true;
}

bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
                                           const SIInstrInfo *TII) {
  // MI should be moved right before v_or_b32.
  // For this we should clear all kill flags on uses of MI src-operands or else
  // we can encounter a problem with a use of a killed operand.
  for (MachineOperand &MO : MI.uses()) {
    if (!MO.isReg())
      continue;
    getMRI()->clearKillFlags(MO.getReg());
  }

  // Move MI before v_or_b32
  auto MBB = MI.getParent();
  MBB->remove(&MI);
  MBB->insert(getParentInst(), &MI);

  // Add implicit use of the preserved register
  MachineInstrBuilder MIB(*MBB->getParent(), MI);
  MIB.addReg(getPreservedOperand()->getReg(),
             RegState::ImplicitKill,
             getPreservedOperand()->getSubReg());

  // Tie dst to the implicit use
  MI.tieOperands(AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst),
                 MI.getNumOperands() - 1);

  // Convert MI as any other SDWADstOperand and remove v_or_b32
  return SDWADstOperand::convertToSDWA(MI, TII);
}

Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
  if (Op.isImm()) {
    return Op.getImm();
  }

  // If this is not an immediate then it can be a copy of an immediate value,
  // e.g.:
  // %1 = S_MOV_B32 255;
  if (Op.isReg()) {
    for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
      if (!isSameReg(Op, Def))
        continue;

      const MachineInstr *DefInst = Def.getParent();
      if (!TII->isFoldableCopy(*DefInst))
        return None;

      const MachineOperand &Copied = DefInst->getOperand(1);
      if (!Copied.isImm())
        return None;

      return Copied.getImm();
    }
  }

  return None;
}

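/// Try to recognize MI as the defining instruction of one SDWA operand
/// pattern (a 16/24-bit shift, a byte/word bfe, a 0xff/0xffff mask, or the
/// v_or_b32 preserve idiom) and return the matched operand, or nullptr if MI
/// matches none of the patterns.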
std::unique_ptr<SDWAOperand>
SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_LSHLREV_B32_e64: {
    // from: v_lshrrev_b32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3

    // from: v_ashrrev_i32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3 sext:1

    // from: v_lshlrev_b32_e32 v1, 16/24, v0
    // to SDWA dst:v1 dst_sel:WORD_1/BYTE_3 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm)
      break;

    if (*Imm != 16 && *Imm != 24)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    if (Register::isPhysicalRegister(Src1->getReg()) ||
        Register::isPhysicalRegister(Dst->getReg()))
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B32_e64) {
      return std::make_unique<SDWADstOperand>(
          Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
    } else {
      return std::make_unique<SDWASrcOperand>(
          Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
          Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
          Opcode != AMDGPU::V_LSHRREV_B32_e64);
    }
    break;
  }

  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_LSHLREV_B16_e64: {
    // from: v_lshrrev_b16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1

    // from: v_ashrrev_i16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1 sext:1

    // from: v_lshlrev_b16_e32 v1, 8, v0
    // to SDWA dst:v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm || *Imm != 8)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Register::isPhysicalRegister(Src1->getReg()) ||
        Register::isPhysicalRegister(Dst->getReg()))
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B16_e64) {
      return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
    } else {
      return std::make_unique<SDWASrcOperand>(
          Src1, Dst, BYTE_1, false, false,
          Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
          Opcode != AMDGPU::V_LSHRREV_B16_e64);
    }
    break;
  }

  case AMDGPU::V_BFE_I32:
  case AMDGPU::V_BFE_U32: {
    // e.g.:
    // from: v_bfe_u32 v1, v0, 8, 8
    // to SDWA src:v0 src_sel:BYTE_1

    // offset | width | src_sel
    // ------------------------
    // 0      | 8     | BYTE_0
    // 0      | 16    | WORD_0
    // 0      | 32    | DWORD ?
    // 8      | 8     | BYTE_1
    // 16     | 8     | BYTE_2
    // 16     | 16    | WORD_1
    // 24     | 8     | BYTE_3

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto Offset = foldToImm(*Src1);
    if (!Offset)
      break;

    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    auto Width = foldToImm(*Src2);
    if (!Width)
      break;

    SdwaSel SrcSel = DWORD;

    if (*Offset == 0 && *Width == 8)
      SrcSel = BYTE_0;
    else if (*Offset == 0 && *Width == 16)
      SrcSel = WORD_0;
    else if (*Offset == 0 && *Width == 32)
      SrcSel = DWORD;
    else if (*Offset == 8 && *Width == 8)
      SrcSel = BYTE_1;
    else if (*Offset == 16 && *Width == 8)
      SrcSel = BYTE_2;
    else if (*Offset == 16 && *Width == 16)
      SrcSel = WORD_1;
    else if (*Offset == 24 && *Width == 8)
      SrcSel = BYTE_3;
    else
      break;

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Register::isPhysicalRegister(Src0->getReg()) ||
        Register::isPhysicalRegister(Dst->getReg()))
      break;

    return std::make_unique<SDWASrcOperand>(
        Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32);
  }

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64: {
    // e.g.:
    // from: v_and_b32_e32 v1, 0x0000ffff/0x000000ff, v0
    // to SDWA src:v0 src_sel:WORD_0/BYTE_0

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto ValSrc = Src1;
    auto Imm = foldToImm(*Src0);

    if (!Imm) {
      Imm = foldToImm(*Src1);
      ValSrc = Src0;
    }

    if (!Imm || (*Imm != 0x0000ffff && *Imm != 0x000000ff))
      break;

    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Register::isPhysicalRegister(ValSrc->getReg()) ||
        Register::isPhysicalRegister(Dst->getReg()))
      break;

    return std::make_unique<SDWASrcOperand>(
        ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
  }

  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64: {
    // Patterns for dst_unused:UNUSED_PRESERVE.
    // e.g., from:
    // v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD
    //                           src0_sel:WORD_1 src1_sel:WORD_1
    // v_add_f16_e32 v3, v1, v2
    // v_or_b32_e32 v4, v0, v3
    // to SDWA preserve dst:v4 dst_sel:WORD_1 dst_unused:UNUSED_PRESERVE preserve:v3

    // Check if one of the operands of v_or_b32 is an SDWA instruction
    using CheckRetType = Optional<std::pair<MachineOperand *, MachineOperand *>>;
    auto CheckOROperandsForSDWA =
      [&](const MachineOperand *Op1, const MachineOperand *Op2) -> CheckRetType {
        if (!Op1 || !Op1->isReg() || !Op2 || !Op2->isReg())
          return CheckRetType(None);

        MachineOperand *Op1Def = findSingleRegDef(Op1, MRI);
        if (!Op1Def)
          return CheckRetType(None);

        MachineInstr *Op1Inst = Op1Def->getParent();
        if (!TII->isSDWA(*Op1Inst))
          return CheckRetType(None);

        MachineOperand *Op2Def = findSingleRegDef(Op2, MRI);
        if (!Op2Def)
          return CheckRetType(None);

        return CheckRetType(std::make_pair(Op1Def, Op2Def));
      };

    MachineOperand *OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    assert(OrSDWA && OrOther);
    auto Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
    if (!Res) {
      OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
      assert(OrSDWA && OrOther);
      Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
      if (!Res)
        break;
    }

    MachineOperand *OrSDWADef = Res->first;
    MachineOperand *OrOtherDef = Res->second;
    assert(OrSDWADef && OrOtherDef);

    MachineInstr *SDWAInst = OrSDWADef->getParent();
    MachineInstr *OtherInst = OrOtherDef->getParent();

    // Check that OtherInst is actually bitwise compatible with SDWAInst,
    // i.e. their destination patterns don't overlap. A compatible instruction
    // is either a regular instruction with compatible bitness or an SDWA
    // instruction with a correct dst_sel.
    // SDWAInst | OtherInst bitness / OtherInst dst_sel
    // -----------------------------------------------------
    // DWORD    | no                    / no
    // WORD_0   | no                    / BYTE_2/3, WORD_1
    // WORD_1   | 8/16-bit instructions / BYTE_0/1, WORD_0
    // BYTE_0   | no                    / BYTE_1/2/3, WORD_1
    // BYTE_1   | 8-bit                 / BYTE_0/2/3, WORD_1
    // BYTE_2   | 8/16-bit              / BYTE_0/1/3, WORD_0
    // BYTE_3   | 8/16/24-bit           / BYTE_0/1/2, WORD_0
    // E.g. if SDWAInst is v_add_f16_sdwa dst_sel:WORD_1 then v_add_f16 is OK
    // but v_add_f32 is not.

    // TODO: add support for non-SDWA instructions as OtherInst.
    // For now this only works with SDWA instructions. For regular instructions
    // there is no way to determine if the instruction writes only 8/16/24 bits
    // out of the full register size, and all registers are at least 32 bits
    // wide.
    if (!TII->isSDWA(*OtherInst))
      break;

    SdwaSel DstSel = static_cast<SdwaSel>(
        TII->getNamedImmOperand(*SDWAInst, AMDGPU::OpName::dst_sel));
    SdwaSel OtherDstSel = static_cast<SdwaSel>(
        TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_sel));

    bool DstSelAgree = false;
    switch (DstSel) {
    case WORD_0: DstSelAgree = ((OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case WORD_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_0: DstSelAgree = ((OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_2: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_3: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == WORD_0));
      break;
    default: DstSelAgree = false;
    }

    if (!DstSelAgree)
      break;

    // Also OtherInst dst_unused should be UNUSED_PAD
    DstUnused OtherDstUnused = static_cast<DstUnused>(
        TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_unused));
    if (OtherDstUnused != DstUnused::UNUSED_PAD)
      break;

    // Create DstPreserveOperand
    MachineOperand *OrDst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    assert(OrDst && OrDst->isReg());

    return std::make_unique<SDWADstPreserveOperand>(
        OrDst, OrSDWADef, OrOtherDef, DstSel);

  }
  }

  return std::unique_ptr<SDWAOperand>(nullptr);
}

#if !defined(NDEBUG)
static raw_ostream& operator<<(raw_ostream &OS, const SDWAOperand &Operand) {
  Operand.print(OS);
  return OS;
}
#endif

void SIPeepholeSDWA::matchSDWAOperands(MachineBasicBlock &MBB) {
  for (MachineInstr &MI : MBB) {
    if (auto Operand = matchSDWAOperand(MI)) {
      LLVM_DEBUG(dbgs() << "Match: " << MI << "To: " << *Operand << '\n');
      SDWAOperands[&MI] = std::move(Operand);
      ++NumSDWAPatternsFound;
    }
  }
}

// Convert the V_ADDC_U32_e64 into V_ADDC_U32_e32, and
// V_ADD_I32_e64 into V_ADD_I32_e32. This allows isConvertibleToSDWA
// to transform the V_ADD_I32_e32 into V_ADD_I32_sdwa.
//
// We are transforming from a VOP3 into a VOP2 form of the instruction.
//   %19:vgpr_32 = V_AND_B32_e32 255,
//       killed %16:vgpr_32, implicit $exec
//   %47:vgpr_32, %49:sreg_64_xexec = V_ADD_I32_e64
//       %26.sub0:vreg_64, %19:vgpr_32, implicit $exec
//   %48:vgpr_32, dead %50:sreg_64_xexec = V_ADDC_U32_e64
//       %26.sub1:vreg_64, %54:vgpr_32, killed %49:sreg_64_xexec, implicit $exec
//
// becomes
//   %47:vgpr_32 = V_ADD_I32_sdwa
//       0, %26.sub0:vreg_64, 0, killed %16:vgpr_32, 0, 6, 0, 6, 0,
//       implicit-def $vcc, implicit $exec
//   %48:vgpr_32 = V_ADDC_U32_e32
//       0, %26.sub1:vreg_64, implicit-def $vcc, implicit $vcc, implicit $exec
void SIPeepholeSDWA::pseudoOpConvertToVOP2(MachineInstr &MI,
                                           const GCNSubtarget &ST) const {
  int Opc = MI.getOpcode();
  assert((Opc == AMDGPU::V_ADD_I32_e64 || Opc == AMDGPU::V_SUB_I32_e64) &&
         "Currently only handles V_ADD_I32_e64 or V_SUB_I32_e64");

  // Can the candidate MI be shrunk?
  if (!TII->canShrink(MI, *MRI))
    return;
  Opc = AMDGPU::getVOPe32(Opc);
  // Find the related ADD instruction.
  const MachineOperand *Sdst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Sdst)
    return;
  MachineOperand *NextOp = findSingleRegUse(Sdst, MRI);
  if (!NextOp)
    return;
  MachineInstr &MISucc = *NextOp->getParent();
  // Can the successor be shrunk?
  if (!TII->canShrink(MISucc, *MRI))
    return;
  int SuccOpc = AMDGPU::getVOPe32(MISucc.getOpcode());
  // Make sure the carry in/out are subsequently unused.
  MachineOperand *CarryIn = TII->getNamedOperand(MISucc, AMDGPU::OpName::src2);
  if (!CarryIn)
    return;
  MachineOperand *CarryOut = TII->getNamedOperand(MISucc, AMDGPU::OpName::sdst);
  if (!CarryOut)
    return;
  if (!MRI->hasOneUse(CarryIn->getReg()) || !MRI->use_empty(CarryOut->getReg()))
    return;
  // Make sure VCC or its subregs are dead before MI.
  MachineBasicBlock &MBB = *MI.getParent();
  auto Liveness = MBB.computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 25);
  if (Liveness != MachineBasicBlock::LQR_Dead)
    return;
  // Check if VCC is referenced in the range (MI,MISucc].
  for (auto I = std::next(MI.getIterator()), E = MISucc.getIterator();
       I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::VCC, TRI))
      return;
  }
  // Make the two new e32 instruction variants.
  // Replace MI with V_{SUB|ADD}_I32_e32
  auto NewMI = BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(Opc));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::vdst));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src1));
  MI.eraseFromParent();
  // Replace MISucc with V_{SUBB|ADDC}_U32_e32
  auto NewInst = BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src0));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src1));
  MISucc.eraseFromParent();
}

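// Check whether MI itself (if already SDWA) or its VOP2/VOP3 opcode has an
// SDWA counterpart supported by the subtarget ST, taking per-target
// restrictions (omod, sdst, VOPC output modifiers, mac/fmac) into account.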
bool SIPeepholeSDWA::isConvertibleToSDWA(MachineInstr &MI,
                                         const GCNSubtarget &ST) const {
  // Check if this is already an SDWA instruction
  unsigned Opc = MI.getOpcode();
  if (TII->isSDWA(Opc))
    return true;

  // Check if this instruction has an opcode that supports SDWA
  if (AMDGPU::getSDWAOp(Opc) == -1)
    Opc = AMDGPU::getVOPe32(Opc);

  if (AMDGPU::getSDWAOp(Opc) == -1)
    return false;

  if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->isVOPC(Opc)) {
    if (!ST.hasSDWASdst()) {
      const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst && (SDst->getReg() != AMDGPU::VCC &&
                   SDst->getReg() != AMDGPU::VCC_LO))
        return false;
    }

    if (!ST.hasSDWAOutModsVOPC() &&
        (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod)))
      return false;

  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
             !TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return false;
  }

  if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_FMAC_F16_e32 ||
                           Opc == AMDGPU::V_FMAC_F32_e32 ||
                           Opc == AMDGPU::V_MAC_F16_e32 ||
                           Opc == AMDGPU::V_MAC_F32_e32))
    return false;

  // Check if the target supports this SDWA opcode
  if (TII->pseudoToMCOpcode(Opc) == -1)
    return false;

  // FIXME: has SDWA but requires handling of the implicit VCC use
  if (Opc == AMDGPU::V_CNDMASK_B32_e32)
    return false;

  return true;
}

bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
                                   const SDWAOperandsVector &SDWAOperands) {

  LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);

  // Convert to sdwa
  int SDWAOpcode;
  unsigned Opcode = MI.getOpcode();
  if (TII->isSDWA(Opcode)) {
    SDWAOpcode = Opcode;
  } else {
    SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
    if (SDWAOpcode == -1)
      SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
  }
  assert(SDWAOpcode != -1);

  const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);

  // Create the SDWA version of instruction MI and initialize its operands
  MachineInstrBuilder SDWAInst =
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), SDWADesc);

  // Copy dst; if it is present in the original then it should also be present
  // in SDWA
  MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  if (Dst) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst) != -1);
    SDWAInst.add(*Dst);
  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    assert(Dst &&
           AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.add(*Dst);
  } else {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.addReg(TRI->getVCC(), RegState::Define);
  }

  // Copy src0, initialize src0_modifiers. All sdwa instructions have src0 and
  // src0_modifiers (except for v_nop_sdwa, but it can't get here)
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  assert(
    Src0 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0) != -1 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_modifiers) != -1);
  if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    SDWAInst.addImm(Mod->getImm());
  else
    SDWAInst.addImm(0);
  SDWAInst.add(*Src0);

  // Copy src1 if present, initialize src1_modifiers.
  MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1) {
    assert(
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1) != -1 &&
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_modifiers) != -1);
    if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers))
      SDWAInst.addImm(Mod->getImm());
    else
      SDWAInst.addImm(0);
    SDWAInst.add(*Src1);
  }

  if (SDWAOpcode == AMDGPU::V_FMAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_FMAC_F32_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F32_sdwa) {
    // v_mac_f16/32 has an additional src2 operand tied to vdst
    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    assert(Src2);
    SDWAInst.add(*Src2);
  }

  // Copy clamp if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1);
  MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
  if (Clamp) {
    SDWAInst.add(*Clamp);
  } else {
    SDWAInst.addImm(0);
  }

  // Copy omod if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1) {
    MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
    if (OMod) {
      SDWAInst.add(*OMod);
    } else {
      SDWAInst.addImm(0);
    }
  }

  // Copy dst_sel if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_sel) != -1) {
    MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
    if (DstSel) {
      SDWAInst.add(*DstSel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Copy dst_unused if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_unused) != -1) {
    MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused) {
      SDWAInst.add(*DstUnused);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::DstUnused::UNUSED_PAD);
    }
  }

  // Copy src0_sel if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_sel) != -1);
  MachineOperand *Src0Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  if (Src0Sel) {
    SDWAInst.add(*Src0Sel);
  } else {
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Copy src1_sel if present, initialize otherwise if needed
  if (Src1) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_sel) != -1);
    MachineOperand *Src1Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    if (Src1Sel) {
      SDWAInst.add(*Src1Sel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Check for a preserved register that needs to be copied.
  auto DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  if (DstUnused &&
      DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
    // We expect, if we are here, that the instruction was already in its SDWA
    // form, with a tied operand.
    assert(Dst && Dst->isTied());
    assert(Opcode == static_cast<unsigned int>(SDWAOpcode));
    // We also expect a vdst, since sdst can't preserve.
    auto PreserveDstIdx = AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst);
    assert(PreserveDstIdx != -1);

    auto TiedIdx = MI.findTiedOperandIdx(PreserveDstIdx);
    auto Tied = MI.getOperand(TiedIdx);

    SDWAInst.add(Tied);
    SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
  }

  // Apply all sdwa operand patterns.
  bool Converted = false;
  for (auto &Operand : SDWAOperands) {
    LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
    // There should be no intersection between SDWA operands and potential MIs
    // e.g.:
    // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
    // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
    // v_add_u32 v3, v4, v2
    //
    // In that example it is possible that we would fold the 2nd instruction
    // into the 3rd (v_add_u32_sdwa) and then try to fold the 1st instruction
    // into the 2nd (which was already destroyed). So if an SDWAOperand is
    // also a potential MI then do not apply it.
    if (PotentialMatches.count(Operand->getParentInst()) == 0)
      Converted |= Operand->convertToSDWA(*SDWAInst, TII);
  }
  if (Converted) {
    ConvertedInstructions.push_back(SDWAInst);
  } else {
    SDWAInst->eraseFromParent();
    return false;
  }

  LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
  ++NumSDWAInstructionsPeepholed;

  MI.eraseFromParent();
  return true;
}

// If an instruction was converted to SDWA it should not have immediates or
// SGPR operands (one SGPR is allowed on GFX9). Copy its scalar operands into
// VGPRs.
void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
                                            const GCNSubtarget &ST) const {
  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  unsigned ConstantBusCount = 0;
  for (MachineOperand &Op : MI.explicit_uses()) {
    if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;

    unsigned I = MI.getOperandNo(&Op);
    if (Desc.OpInfo[I].RegClass == -1 ||
        !TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
      continue;

    if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
        TRI->isSGPRReg(*MRI, Op.getReg())) {
      ++ConstantBusCount;
      continue;
    }

    Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                        TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
    if (Op.isImm())
      Copy.addImm(Op.getImm());
    else if (Op.isReg())
      Copy.addReg(Op.getReg(), Op.isKill() ? RegState::Kill : 0,
                  Op.getSubReg());
    Op.ChangeToRegister(VGPR, false);
  }
}

bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (!ST.hasSDWA() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  // Find all SDWA operands in MF.
  bool Ret = false;
  for (MachineBasicBlock &MBB : MF) {
    bool Changed = false;
    do {
      // Preprocess the ADD/SUB pairs so they could be SDWA'ed.
      // Look for a possible ADD or SUB that resulted from a previously lowered
      // V_{ADD|SUB}_U64_PSEUDO. The function pseudoOpConvertToVOP2
      // lowers the pair of instructions into e32 form.
      matchSDWAOperands(MBB);
      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI &&
            (PotentialMI->getOpcode() == AMDGPU::V_ADD_I32_e64 ||
             PotentialMI->getOpcode() == AMDGPU::V_SUB_I32_e64))
          pseudoOpConvertToVOP2(*PotentialMI, ST);
      }
      SDWAOperands.clear();

      // Generate potential match list.
      matchSDWAOperands(MBB);

      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
          PotentialMatches[PotentialMI].push_back(Operand.get());
        }
      }

      for (auto &PotentialPair : PotentialMatches) {
        MachineInstr &PotentialMI = *PotentialPair.first;
        convertToSDWA(PotentialMI, PotentialPair.second);
      }

      PotentialMatches.clear();
      SDWAOperands.clear();

      Changed = !ConvertedInstructions.empty();

      if (Changed)
        Ret = true;
      while (!ConvertedInstructions.empty())
        legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);
    } while (Changed);
  }

  return Ret;
}