//===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements hazard recognizers for scheduling on GCN processors.
//
//===----------------------------------------------------------------------===//

14 #include "GCNHazardRecognizer.h"
15 #include "AMDGPUSubtarget.h"
16 #include "SIDefines.h"
17 #include "SIInstrInfo.h"
18 #include "SIRegisterInfo.h"
19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20 #include "Utils/AMDGPUBaseInfo.h"
21 #include "llvm/ADT/iterator_range.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/CodeGen/MachineOperand.h"
25 #include "llvm/CodeGen/ScheduleDAG.h"
26 #include "llvm/MC/MCInstrDesc.h"
27 #include "llvm/Support/ErrorHandling.h"
28 #include <algorithm>
29 #include <cassert>
30 #include <limits>
31 #include <set>
32 #include <vector>
33
34 using namespace llvm;
35
//===----------------------------------------------------------------------===//
// Hazard Recognizer Implementation
//===----------------------------------------------------------------------===//

GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
  CurrCycleInstr(nullptr),
  MF(MF),
  ST(MF.getSubtarget<GCNSubtarget>()),
  TII(*ST.getInstrInfo()),
  TRI(TII.getRegisterInfo()),
  ClauseUses(TRI.getNumRegUnits()),
  ClauseDefs(TRI.getNumRegUnits()) {
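  // Five is the largest number of wait states required by any hazard checked
  // below (e.g. the VMEM SGPR and DPP EXEC hazards), so the emitted
  // instruction history never needs to be deeper than this.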
  MaxLookAhead = 5;
}

void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
  EmitInstruction(SU->getInstr());
}

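// Record the instruction issued this cycle; AdvanceCycle() folds it into the
// wait-state history once the scheduler moves on.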
void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
  CurrCycleInstr = MI;
}

static bool isDivFMas(unsigned Opcode) {
  return Opcode == AMDGPU::V_DIV_FMAS_F32 || Opcode == AMDGPU::V_DIV_FMAS_F64;
}

static bool isSGetReg(unsigned Opcode) {
  return Opcode == AMDGPU::S_GETREG_B32;
}

static bool isSSetReg(unsigned Opcode) {
  return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32;
}

static bool isRWLane(unsigned Opcode) {
  return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
}

static bool isRFE(unsigned Opcode) {
  return Opcode == AMDGPU::S_RFE_B64;
}

static bool isSMovRel(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_MOVRELS_B32:
  case AMDGPU::S_MOVRELS_B64:
  case AMDGPU::S_MOVRELD_B32:
  case AMDGPU::S_MOVRELD_B64:
    return true;
  default:
    return false;
  }
}

static bool isSendMsgTraceDataOrGDS(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_SENDMSG:
  case AMDGPU::S_SENDMSGHALT:
  case AMDGPU::S_TTRACEDATA:
    return true;
  default:
    // TODO: GDS
    return false;
  }
}

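// Extract the hardware register id encoded in the simm16 operand of an
// s_setreg/s_getreg instruction.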
static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
  const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
                                                     AMDGPU::OpName::simm16);
  return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
}

ScheduleHazardRecognizer::HazardType
GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
  MachineInstr *MI = SU->getInstr();

  if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
    return NoopHazard;

  // FIXME: Should flat be considered vmem?
  if ((SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI))
      && checkVMEMHazards(MI) > 0)
    return NoopHazard;

  if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
    return NoopHazard;

  if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
    return NoopHazard;

  if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
    return NoopHazard;

  if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
    return NoopHazard;

  if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
    return NoopHazard;

  if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
    return NoopHazard;

  if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
    return NoopHazard;

  if (ST.hasReadM0MovRelInterpHazard() &&
      (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode())) &&
      checkReadM0Hazards(MI) > 0)
    return NoopHazard;

  if (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(*MI) &&
      checkReadM0Hazards(MI) > 0)
    return NoopHazard;

  if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)
    return NoopHazard;

  if (checkAnyInstHazards(MI) > 0)
    return NoopHazard;

  return NoHazard;
}

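// Returns the number of no-ops that must be emitted before MI to satisfy all
// of the hazards it is subject to; zero means MI can issue immediately.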
unsigned GCNHazardRecognizer::PreEmitNoops(SUnit *SU) {
  return PreEmitNoops(SU->getInstr());
}

unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
  int WaitStates = std::max(0, checkAnyInstHazards(MI));

  if (SIInstrInfo::isSMRD(*MI))
    return std::max(WaitStates, checkSMRDHazards(MI));

  if (SIInstrInfo::isVALU(*MI))
    WaitStates = std::max(WaitStates, checkVALUHazards(MI));

  if (SIInstrInfo::isVMEM(*MI) || SIInstrInfo::isFLAT(*MI))
    WaitStates = std::max(WaitStates, checkVMEMHazards(MI));

  if (SIInstrInfo::isDPP(*MI))
    WaitStates = std::max(WaitStates, checkDPPHazards(MI));

  if (isDivFMas(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));

  if (isRWLane(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));

  if (MI->isInlineAsm())
    return std::max(WaitStates, checkInlineAsmHazards(MI));

  if (isSGetReg(MI->getOpcode()))
    return std::max(WaitStates, checkGetRegHazards(MI));

  if (isSSetReg(MI->getOpcode()))
    return std::max(WaitStates, checkSetRegHazards(MI));

  if (isRFE(MI->getOpcode()))
    return std::max(WaitStates, checkRFEHazards(MI));

  if (ST.hasReadM0MovRelInterpHazard() && (TII.isVINTRP(*MI) ||
                                           isSMovRel(MI->getOpcode())))
    return std::max(WaitStates, checkReadM0Hazards(MI));

  if (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(*MI))
    return std::max(WaitStates, checkReadM0Hazards(MI));

  return WaitStates;
}

void GCNHazardRecognizer::EmitNoop() {
  EmittedInstrs.push_front(nullptr);
}

void GCNHazardRecognizer::AdvanceCycle() {
  // When the scheduler detects a stall, it will call AdvanceCycle() without
  // emitting any instructions.
  if (!CurrCycleInstr)
    return;

  unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);

  // Keep track of emitted instructions.
  EmittedInstrs.push_front(CurrCycleInstr);

  // Add a nullptr for each additional wait state after the first. Make sure
  // not to add more than getMaxLookAhead() items to the list, since we
  // truncate the list to that size right after this loop.
  for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
       i < e; ++i) {
    EmittedInstrs.push_front(nullptr);
  }

  // getMaxLookAhead() is the largest number of wait states we will ever need
  // to insert, so there is no point in keeping track of more than that many
  // wait states.
  EmittedInstrs.resize(getMaxLookAhead());

  CurrCycleInstr = nullptr;
}

void GCNHazardRecognizer::RecedeCycle() {
  llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

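// Walk the emitted-instruction history from most to least recent and return
// the number of wait states that have elapsed since the newest instruction
// matching IsHazard, or INT_MAX if no match is in the tracked window.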
int GCNHazardRecognizer::getWaitStatesSince(
    function_ref<bool(MachineInstr *)> IsHazard) {
  int WaitStates = 0;
  for (MachineInstr *MI : EmittedInstrs) {
    if (MI) {
      if (IsHazard(MI))
        return WaitStates;

      unsigned Opcode = MI->getOpcode();
      if (Opcode == AMDGPU::DBG_VALUE || Opcode == AMDGPU::IMPLICIT_DEF ||
          Opcode == AMDGPU::INLINEASM)
        continue;
    }
    ++WaitStates;
  }
  return std::numeric_limits<int>::max();
}

int GCNHazardRecognizer::getWaitStatesSinceDef(
    unsigned Reg, function_ref<bool(MachineInstr *)> IsHazardDef) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  auto IsHazardFn = [IsHazardDef, TRI, Reg] (MachineInstr *MI) {
    return IsHazardDef(MI) && MI->modifiesRegister(Reg, TRI);
  };

  return getWaitStatesSince(IsHazardFn);
}

int GCNHazardRecognizer::getWaitStatesSinceSetReg(
    function_ref<bool(MachineInstr *)> IsHazard) {
  auto IsHazardFn = [IsHazard] (MachineInstr *MI) {
    return isSSetReg(MI->getOpcode()) && IsHazard(MI);
  };

  return getWaitStatesSince(IsHazardFn);
}

//===----------------------------------------------------------------------===//
// No-op Hazard Detection
//===----------------------------------------------------------------------===//

static void addRegUnits(const SIRegisterInfo &TRI,
                        BitVector &BV, unsigned Reg) {
  for (MCRegUnitIterator RUI(Reg, &TRI); RUI.isValid(); ++RUI)
    BV.set(*RUI);
}

static void addRegsToSet(const SIRegisterInfo &TRI,
                         iterator_range<MachineInstr::const_mop_iterator> Ops,
                         BitVector &Set) {
  for (const MachineOperand &Op : Ops) {
    if (Op.isReg())
      addRegUnits(TRI, Set, Op.getReg());
  }
}

void GCNHazardRecognizer::addClauseInst(const MachineInstr &MI) {
  // XXX: Do we need to worry about implicit operands?
  addRegsToSet(TRI, MI.defs(), ClauseDefs);
  addRegsToSet(TRI, MI.uses(), ClauseUses);
}

int GCNHazardRecognizer::checkSoftClauseHazards(MachineInstr *MEM) {
  // SMEM soft clauses are only present on VI+, and only matter if XNACK is
  // enabled.
  if (!ST.isXNACKEnabled())
    return 0;

  bool IsSMRD = TII.isSMRD(*MEM);

  resetClause();

  // A soft clause is any group of consecutive SMEM instructions. The
  // instructions in this group may return out of order and/or may be
  // replayed (i.e. the same instruction issued more than once).
  //
  // In order to handle these situations correctly we need to make sure that
  // when a clause has more than one instruction, no instruction in the
  // clause writes to a register that is read by another instruction in the
  // clause (including itself). If we encounter this situation, we need to
  // break the clause by inserting a non-SMEM instruction.
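  //
  // For example (hypothetical sequence), the second load below reads s[4:5],
  // which the first load writes, so a non-SMEM instruction (e.g. s_nop 0)
  // would have to be inserted between them to break the clause:
  //   s_load_dwordx2 s[4:5], s[0:1], 0x0
  //   s_load_dword   s6,     s[4:5], 0x0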

  for (MachineInstr *MI : EmittedInstrs) {
    // When we hit a non-SMEM instruction then we have passed the start of the
    // clause and we can stop.
    if (!MI)
      break;

    if (IsSMRD != SIInstrInfo::isSMRD(*MI))
      break;

    addClauseInst(*MI);
  }

  if (ClauseDefs.none())
    return 0;

  // We need to make sure not to put loads and stores in the same clause if they
  // use the same address. For now, just start a new clause whenever we see a
  // store.
  if (MEM->mayStore())
    return 1;

  addClauseInst(*MEM);

  // If the set of defs and uses intersect then we cannot add this instruction
  // to the clause, so we have a hazard.
  return ClauseDefs.anyCommon(ClauseUses) ? 1 : 0;
}

int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  int WaitStatesNeeded = 0;

  WaitStatesNeeded = checkSoftClauseHazards(SMRD);

  // This SMRD hazard only affects SI.
  if (ST.getGeneration() != AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return WaitStatesNeeded;

  // A read of an SGPR by an SMRD instruction requires 4 wait states when the
  // SGPR was written by a VALU instruction.
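  // For example (hypothetical sequence):
  //   v_readfirstlane_b32 s0, v0     ; VALU writes s0
  //   s_nop 3                        ; 4 wait states
  //   s_load_dword s4, s[0:1], 0x0   ; SMRD reads s0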
  int SmrdSgprWaitStates = 4;
  auto IsHazardDefFn = [this] (MachineInstr *MI) { return TII.isVALU(*MI); };
  auto IsBufferHazardDefFn = [this] (MachineInstr *MI) { return TII.isSALU(*MI); };

  bool IsBufferSMRD = TII.isBufferSMRD(*SMRD);

  for (const MachineOperand &Use : SMRD->uses()) {
    if (!Use.isReg())
      continue;
    int WaitStatesNeededForUse =
        SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    // This fixes what appears to be undocumented hardware behavior in SI where
    // s_mov writing a descriptor and s_buffer_load_dword reading the descriptor
    // needs some number of nops in between. We don't know how many we need, but
    // let's use 4. This wasn't discovered before probably because the only
    // case when this happens is when we expand a 64-bit pointer into a full
    // descriptor and use s_buffer_load_dword instead of s_load_dword, which was
    // probably never encountered in the closed-source land.
    if (IsBufferSMRD) {
      int WaitStatesNeededForUse =
          SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(),
                                                     IsBufferHazardDefFn);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
    }
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkVMEMHazards(MachineInstr *VMEM) {
  if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return 0;

  int WaitStatesNeeded = checkSoftClauseHazards(VMEM);

  // A read of an SGPR by a VMEM instruction requires 5 wait states when the
  // SGPR was written by a VALU instruction.
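  // For example (hypothetical sequence):
  //   v_readfirstlane_b32 s4, v0              ; VALU writes s4
  //   s_nop 4                                 ; 5 wait states
  //   buffer_load_dword v1, off, s[0:3], s4   ; VMEM reads s4 as soffset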
  const int VmemSgprWaitStates = 5;
  auto IsHazardDefFn = [this] (MachineInstr *MI) { return TII.isVALU(*MI); };

  for (const MachineOperand &Use : VMEM->uses()) {
    if (!Use.isReg() || TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
      continue;

    int WaitStatesNeededForUse =
        VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }
  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();

  // Check for DPP VGPR read after VALU VGPR write and EXEC write.
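  // For example (hypothetical sequence):
  //   v_add_f32 v0, v1, v2              ; VALU writes v0
  //   s_nop 1                           ; 2 wait states
  //   v_mov_b32_dpp v3, v0 row_shl:1    ; DPP reads v0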
  int DppVgprWaitStates = 2;
  int DppExecWaitStates = 5;
  int WaitStatesNeeded = 0;
  auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };

  for (const MachineOperand &Use : DPP->uses()) {
    if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
      continue;
    int WaitStatesNeededForUse =
        DppVgprWaitStates - getWaitStatesSinceDef(Use.getReg());
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  WaitStatesNeeded = std::max(
      WaitStatesNeeded,
      DppExecWaitStates - getWaitStatesSinceDef(AMDGPU::EXEC, IsHazardDefFn));

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
  const SIInstrInfo *TII = ST.getInstrInfo();

  // v_div_fmas requires 4 wait states after a write to vcc from a VALU
  // instruction.
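  // For example (hypothetical sequence):
  //   v_cmp_eq_u32 vcc, v0, v1          ; VALU writes vcc
  //   s_nop 3                           ; 4 wait states
  //   v_div_fmas_f32 v2, v3, v4, v5     ; reads vcc implicitly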
  const int DivFMasWaitStates = 4;
  auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
  int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn);

  return DivFMasWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned GetRegHWReg = getHWReg(TII, *GetRegInstr);

  const int GetRegWaitStates = 2;
  auto IsHazardFn = [TII, GetRegHWReg] (MachineInstr *MI) {
    return GetRegHWReg == getHWReg(TII, *MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);

  return GetRegWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned HWReg = getHWReg(TII, *SetRegInstr);

  const int SetRegWaitStates =
      ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ? 1 : 2;
  auto IsHazardFn = [TII, HWReg] (MachineInstr *MI) {
    return HWReg == getHWReg(TII, *MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
  return SetRegWaitStates - WaitStatesNeeded;
}

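// Return the operand index of the store-data operand that is subject to the
// more-than-8-byte store hazard (see checkVALUHazards), or -1 if MI cannot
// create such a hazard.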
int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
  if (!MI.mayStore())
    return -1;

  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
  int VDataRCID = -1;
  if (VDataIdx != -1)
    VDataRCID = Desc.OpInfo[VDataIdx].RegClass;

  if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
    // There is no hazard if the instruction does not use vector regs
    // (like wbinvl1)
    if (VDataIdx == -1)
      return -1;
    // For MUBUF/MTBUF instructions this hazard only exists if the
    // instruction is not using a register in the soffset field.
    const MachineOperand *SOffset =
        TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
    // If we have no soffset operand, then assume this field has been
    // hardcoded to zero.
    if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
        (!SOffset || !SOffset->isReg()))
      return VDataIdx;
  }

  // MIMG instructions create a hazard if they don't use a 256-bit T# and
  // the store size is greater than 8 bytes and they have more than two bits
  // of their dmask set.
  // All our MIMG definitions use a 256-bit T#, so we can skip checking for them.
  if (TII->isMIMG(MI)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
    assert(SRsrcIdx != -1 &&
           AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
    (void)SRsrcIdx;
  }

  if (TII->isFLAT(MI)) {
    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
    if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
      return DataIdx;
  }

  return -1;
}

int GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
                                                const MachineRegisterInfo &MRI) {
  // Helper to check for the hazard where VMEM instructions that store more
  // than 8 bytes can have their store data overwritten by the next
  // instruction.
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const int VALUWaitStates = 1;
  int WaitStatesNeeded = 0;

  if (!TRI->isVGPR(MRI, Def.getReg()))
    return WaitStatesNeeded;
  unsigned Reg = Def.getReg();
  auto IsHazardFn = [this, Reg, TRI] (MachineInstr *MI) {
    int DataIdx = createsVALUHazard(*MI);
    return DataIdx >= 0 &&
           TRI->regsOverlap(MI->getOperand(DataIdx).getReg(), Reg);
  };
  int WaitStatesNeededForDef =
      VALUWaitStates - getWaitStatesSince(IsHazardFn);
  WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
  // This checks for the hazard where VMEM instructions that store more than
  // 8 bytes can have their store data overwritten by the next instruction.
  if (!ST.has12DWordStoreHazard())
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  int WaitStatesNeeded = 0;

  for (const MachineOperand &Def : VALU->defs()) {
    WaitStatesNeeded = std::max(WaitStatesNeeded, checkVALUHazardsHelper(Def, MRI));
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
  // This checks for hazards associated with inline asm statements.
  // Since inline asms can contain just about anything, we use this
  // to call/leverage other check*Hazard routines. Note that
  // this function doesn't attempt to address all possible inline asm
  // hazards (good luck), but is a collection of what has been
  // problematic thus far.

  // See checkVALUHazards().
  if (!ST.has12DWordStoreHazard())
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  int WaitStatesNeeded = 0;

  for (unsigned I = InlineAsm::MIOp_FirstOperand, E = IA->getNumOperands();
       I != E; ++I) {
    const MachineOperand &Op = IA->getOperand(I);
    if (Op.isReg() && Op.isDef()) {
      WaitStatesNeeded = std::max(WaitStatesNeeded, checkVALUHazardsHelper(Op, MRI));
    }
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  const MachineOperand *LaneSelectOp =
      TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);

  if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
    return 0;

  unsigned LaneSelectReg = LaneSelectOp->getReg();
  auto IsHazardFn = [TII] (MachineInstr *MI) {
    return TII->isVALU(*MI);
  };

  const int RWLaneWaitStates = 4;
  int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn);
  return RWLaneWaitStates - WaitStatesSince;
}

int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
  if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return 0;

  const SIInstrInfo *TII = ST.getInstrInfo();

  const int RFEWaitStates = 1;

  auto IsHazardFn = [TII] (MachineInstr *MI) {
    return getHWReg(TII, *MI) == AMDGPU::Hwreg::ID_TRAPSTS;
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
  return RFEWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkAnyInstHazards(MachineInstr *MI) {
  if (MI->isDebugInstr())
    return 0;

  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  if (!ST.hasSMovFedHazard())
    return 0;

  // Check for any instruction reading an SGPR after a write from
  // s_mov_fed_b32.
  int MovFedWaitStates = 1;
  int WaitStatesNeeded = 0;

  for (const MachineOperand &Use : MI->uses()) {
    if (!Use.isReg() || TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
      continue;
    auto IsHazardFn = [] (MachineInstr *MI) {
      return MI->getOpcode() == AMDGPU::S_MOV_FED_B32;
    };
    int WaitStatesNeededForUse =
        MovFedWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardFn);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}

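// VINTRP, MOVREL and sendmsg-class instructions read m0; on subtargets with
// the read-m0 hazards, a SALU write to m0 needs one intervening wait state
// before such a read.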
int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const int SMovRelWaitStates = 1;
  auto IsHazardFn = [TII] (MachineInstr *MI) {
    return TII->isSALU(*MI);
  };
  return SMovRelWaitStates - getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn);
}
