1 //===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Insert wait instructions for memory reads and writes.
11 ///
12 /// Memory reads and writes are issued asynchronously, so we need to insert
13 /// S_WAITCNT instructions when we want to access any of their results or
14 /// overwrite any register that's used asynchronously.
15 ///
16 /// TODO: This pass currently keeps one timeline per hardware counter. A more
17 /// finely-grained approach that keeps one timeline per event type could
18 /// sometimes get away with generating weaker s_waitcnt instructions. For
19 /// example, when both SMEM and LDS are in flight and we need to wait for
20 /// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
21 /// but the pass will currently generate a conservative lgkmcnt(0) because
22 /// multiple event types are in flight.
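///
/// A small illustrative sequence (hypothetical code, for exposition only):
///
///   s_load_dword s0, ...   ; SMEM op, increments lgkmcnt
///   ds_read_b32 v0, ...    ; LDS op, also increments lgkmcnt
///   v_add_f32 v1, v0, v0   ; needs only the LDS result
///
/// Waiting with lgkmcnt(1) would suffice if only the LDS timeline mattered,
/// but because SMEM and LDS events share the counter and may complete out of
/// order, the pass emits the conservative lgkmcnt(0).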
23 //
24 //===----------------------------------------------------------------------===//
25
26 #include "AMDGPU.h"
27 #include "AMDGPUSubtarget.h"
28 #include "SIDefines.h"
29 #include "SIInstrInfo.h"
30 #include "SIMachineFunctionInfo.h"
31 #include "SIRegisterInfo.h"
32 #include "Utils/AMDGPUBaseInfo.h"
33 #include "llvm/ADT/DenseMap.h"
34 #include "llvm/ADT/DenseSet.h"
35 #include "llvm/ADT/MapVector.h"
36 #include "llvm/ADT/PostOrderIterator.h"
37 #include "llvm/ADT/STLExtras.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFunction.h"
41 #include "llvm/CodeGen/MachineFunctionPass.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineMemOperand.h"
45 #include "llvm/CodeGen/MachineOperand.h"
46 #include "llvm/CodeGen/MachinePostDominators.h"
47 #include "llvm/CodeGen/MachineRegisterInfo.h"
48 #include "llvm/InitializePasses.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/Pass.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/DebugCounter.h"
53 #include "llvm/Support/ErrorHandling.h"
54 #include "llvm/Support/raw_ostream.h"
55 #include <algorithm>
56 #include <cassert>
57 #include <cstdint>
58 #include <cstring>
59 #include <memory>
60 #include <utility>
61
62 using namespace llvm;
63
64 #define DEBUG_TYPE "si-insert-waitcnts"
65
66 DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE"-forceexp",
67 "Force emit s_waitcnt expcnt(0) instrs");
68 DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE"-forcelgkm",
69 "Force emit s_waitcnt lgkmcnt(0) instrs");
70 DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE"-forcevm",
71 "Force emit s_waitcnt vmcnt(0) instrs");
72
73 static cl::opt<bool> ForceEmitZeroFlag(
74 "amdgpu-waitcnt-forcezero",
75 cl::desc("Force all waitcnt instrs to be emitted as s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
76 cl::init(false), cl::Hidden);
77
78 namespace {
79
80 template <typename EnumT>
81 class enum_iterator
82 : public iterator_facade_base<enum_iterator<EnumT>,
83 std::forward_iterator_tag, const EnumT> {
84 EnumT Value;
85 public:
86 enum_iterator() = default;
87   enum_iterator(EnumT Value) : Value(Value) {}
88
89   enum_iterator &operator++() {
90 Value = static_cast<EnumT>(Value + 1);
91 return *this;
92 }
93
94   bool operator==(const enum_iterator &RHS) const { return Value == RHS.Value; }
95
96   EnumT operator*() const { return Value; }
97 };
98
99 // Class of object that encapsulates the latest instruction counter score
100 // associated with each operand. Used to determine whether an
101 // s_waitcnt instruction needs to be emitted.
102
103 #define CNT_MASK(t) (1u << (t))
104
105 enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };
106
107 iterator_range<enum_iterator<InstCounterType>> inst_counter_types() {
108 return make_range(enum_iterator<InstCounterType>(VM_CNT),
109 enum_iterator<InstCounterType>(NUM_INST_CNTS));
110 }
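// A minimal usage sketch: inst_counter_types() lets callers iterate every
// counter without hard-coding the enum range, e.g.
//   for (auto T : inst_counter_types())
//     ForceEmitWaitcnt[T] = false;
// This relies on the enum values being contiguous from VM_CNT up to the
// NUM_INST_CNTS sentinel.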
111
112 using RegInterval = std::pair<int, int>;
113
114 struct {
115 unsigned VmcntMax;
116 unsigned ExpcntMax;
117 unsigned LgkmcntMax;
118 unsigned VscntMax;
119 } HardwareLimits;
120
121 struct {
122 unsigned VGPR0;
123 unsigned VGPRL;
124 unsigned SGPR0;
125 unsigned SGPRL;
126 } RegisterEncoding;
127
128 enum WaitEventType {
129 VMEM_ACCESS, // vector-memory read & write
130 VMEM_READ_ACCESS, // vector-memory read
131   VMEM_WRITE_ACCESS, // vector-memory write
132 LDS_ACCESS, // lds read & write
133 GDS_ACCESS, // gds read & write
134 SQ_MESSAGE, // send message
135 SMEM_ACCESS, // scalar-memory read & write
136 EXP_GPR_LOCK, // export holding on its data src
137 GDS_GPR_LOCK, // GDS holding on its data and addr src
138 EXP_POS_ACCESS, // write to export position
139 EXP_PARAM_ACCESS, // write to export parameter
140 VMW_GPR_LOCK, // vector-memory write holding on its data src
141 NUM_WAIT_EVENTS,
142 };
143
144 static const unsigned WaitEventMaskForInst[NUM_INST_CNTS] = {
145 (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
146 (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
147 (1 << SQ_MESSAGE),
148 (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
149 (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS),
150 (1 << VMEM_WRITE_ACCESS)
151 };
152
153 // The mapping is:
154 // 0 .. SQ_MAX_PGM_VGPRS-1 real VGPRs
155 // SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1 extra VGPR-like slots
156 // NUM_ALL_VGPRS .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
157 // We reserve a fixed number of VGPR slots in the scoring tables for
158 // special tokens like SCMEM_LDS (needed for buffer load to LDS).
159 enum RegisterMapping {
160 SQ_MAX_PGM_VGPRS = 256, // Maximum programmable VGPRs across all targets.
161 SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
162 NUM_EXTRA_VGPRS = 1, // A reserved slot for DS.
163 EXTRA_VGPR_LDS = 0, // This is a placeholder the Shader algorithm uses.
164 NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
165 };
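// Worked example of the mapping (values follow from the enum above): vgpr v5
// occupies slot 5, the LDS token occupies slot SQ_MAX_PGM_VGPRS +
// EXTRA_VGPR_LDS == 256, and sgpr s3 occupies slot NUM_ALL_VGPRS + 3 == 260.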
166
167 // Enumerate different types of result-returning VMEM operations. Although
168 // s_waitcnt orders them all with a single vmcnt counter, in the absence of
169 // s_waitcnt only instructions of the same VmemType are guaranteed to write
170 // their results in order -- so there is no need to insert an s_waitcnt between
171 // two instructions of the same type that write the same vgpr.
172 enum VmemType {
173 // BUF instructions and MIMG instructions without a sampler.
174 VMEM_NOSAMPLER,
175 // MIMG instructions with a sampler.
176 VMEM_SAMPLER,
177 };
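// For example (illustrative): two buffer loads writing the same vgpr are both
// VMEM_NOSAMPLER and complete in order, so no s_waitcnt is needed between
// them, whereas a buffer load followed by an image-sample write to the same
// vgpr mixes VMEM_NOSAMPLER and VMEM_SAMPLER and does require a wait.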
178
179 VmemType getVmemType(const MachineInstr &Inst) {
180 assert(SIInstrInfo::isVMEM(Inst));
181 if (!SIInstrInfo::isMIMG(Inst))
182 return VMEM_NOSAMPLER;
183 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
184 return AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler
185 ? VMEM_SAMPLER
186 : VMEM_NOSAMPLER;
187 }
188
189 void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
190 switch (T) {
191 case VM_CNT:
192 Wait.VmCnt = std::min(Wait.VmCnt, Count);
193 break;
194 case EXP_CNT:
195 Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
196 break;
197 case LGKM_CNT:
198 Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
199 break;
200 case VS_CNT:
201 Wait.VsCnt = std::min(Wait.VsCnt, Count);
202 break;
203 default:
204 llvm_unreachable("bad InstCounterType");
205 }
206 }
207
208 // This object maintains the current score brackets of each wait counter, and
209 // a per-register scoreboard for each wait counter.
210 //
211 // We also maintain the latest score for every event type that can change the
212 // waitcnt in order to know if there are multiple types of events within
213 // the brackets. When multiple event types are in flight within a bracket,
214 // the wait count may be decremented out of order, so we need to insert an
215 // "s_waitcnt 0" before use.
216 class WaitcntBrackets {
217 public:
218   WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {}
219
220   static unsigned getWaitCountMax(InstCounterType T) {
221 switch (T) {
222 case VM_CNT:
223 return HardwareLimits.VmcntMax;
224 case LGKM_CNT:
225 return HardwareLimits.LgkmcntMax;
226 case EXP_CNT:
227 return HardwareLimits.ExpcntMax;
228 case VS_CNT:
229 return HardwareLimits.VscntMax;
230 default:
231 break;
232 }
233 return 0;
234 }
235
236   unsigned getScoreLB(InstCounterType T) const {
237 assert(T < NUM_INST_CNTS);
238 return ScoreLBs[T];
239 }
240
241   unsigned getScoreUB(InstCounterType T) const {
242 assert(T < NUM_INST_CNTS);
243 return ScoreUBs[T];
244 }
245
246 // Mapping from event to counter.
247   InstCounterType eventCounter(WaitEventType E) {
248 if (WaitEventMaskForInst[VM_CNT] & (1 << E))
249 return VM_CNT;
250 if (WaitEventMaskForInst[LGKM_CNT] & (1 << E))
251 return LGKM_CNT;
252 if (WaitEventMaskForInst[VS_CNT] & (1 << E))
253 return VS_CNT;
254 assert(WaitEventMaskForInst[EXP_CNT] & (1 << E));
255 return EXP_CNT;
256 }
257
258   unsigned getRegScore(int GprNo, InstCounterType T) {
259 if (GprNo < NUM_ALL_VGPRS) {
260 return VgprScores[T][GprNo];
261 }
262 assert(T == LGKM_CNT);
263 return SgprScores[GprNo - NUM_ALL_VGPRS];
264 }
265
266 bool merge(const WaitcntBrackets &Other);
267
268 RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
269 const MachineRegisterInfo *MRI,
270 const SIRegisterInfo *TRI, unsigned OpNo) const;
271
272 bool counterOutOfOrder(InstCounterType T) const;
273 bool simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
274 bool simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
275 void determineWait(InstCounterType T, unsigned ScoreToWait,
276 AMDGPU::Waitcnt &Wait) const;
277 void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
278 void applyWaitcnt(InstCounterType T, unsigned Count);
279 void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
280 const MachineRegisterInfo *MRI, WaitEventType E,
281 MachineInstr &MI);
282
283   bool hasPending() const { return PendingEvents != 0; }
284   bool hasPendingEvent(WaitEventType E) const {
285 return PendingEvents & (1 << E);
286 }
287
288   bool hasMixedPendingEvents(InstCounterType T) const {
289 unsigned Events = PendingEvents & WaitEventMaskForInst[T];
290 // Return true if more than one bit is set in Events.
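    // e.g. Events == 0b0110: Events - 1 == 0b0101, so the AND keeps 0b0100
    // (nonzero); with a single bit set the AND is always 0.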
291 return Events & (Events - 1);
292 }
293
294   bool hasPendingFlat() const {
295 return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
296 LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
297 (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
298 LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
299 }
300
301   void setPendingFlat() {
302 LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
303 LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
304 }
305
306 // Return true if there might be pending writes to the specified vgpr by VMEM
307 // instructions with types different from V.
308   bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const {
309 assert(GprNo < NUM_ALL_VGPRS);
310 return VgprVmemTypes[GprNo] & ~(1 << V);
311 }
312
313   void clearVgprVmemTypes(int GprNo) {
314 assert(GprNo < NUM_ALL_VGPRS);
315 VgprVmemTypes[GprNo] = 0;
316 }
317
318 void print(raw_ostream &);
319   void dump() { print(dbgs()); }
320
321 private:
322 struct MergeInfo {
323 unsigned OldLB;
324 unsigned OtherLB;
325 unsigned MyShift;
326 unsigned OtherShift;
327 };
328 static bool mergeScore(const MergeInfo &M, unsigned &Score,
329 unsigned OtherScore);
330
331   void setScoreLB(InstCounterType T, unsigned Val) {
332 assert(T < NUM_INST_CNTS);
333 ScoreLBs[T] = Val;
334 }
335
336   void setScoreUB(InstCounterType T, unsigned Val) {
337 assert(T < NUM_INST_CNTS);
338 ScoreUBs[T] = Val;
339 if (T == EXP_CNT) {
340 unsigned UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT);
341 if (ScoreLBs[T] < UB && UB < ScoreUBs[T])
342 ScoreLBs[T] = UB;
343 }
344 }
345
346   void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
347 if (GprNo < NUM_ALL_VGPRS) {
348 VgprUB = std::max(VgprUB, GprNo);
349 VgprScores[T][GprNo] = Val;
350 } else {
351 assert(T == LGKM_CNT);
352 SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS);
353 SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
354 }
355 }
356
357 void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
358 const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
359 unsigned OpNo, unsigned Val);
360
361 const GCNSubtarget *ST = nullptr;
362 unsigned ScoreLBs[NUM_INST_CNTS] = {0};
363 unsigned ScoreUBs[NUM_INST_CNTS] = {0};
364 unsigned PendingEvents = 0;
365 // Remember the last flat memory operation.
366 unsigned LastFlat[NUM_INST_CNTS] = {0};
367 // wait_cnt scores for every vgpr.
368 // Keep track of the VgprUB and SgprUB to make merge at join efficient.
369 int VgprUB = -1;
370 int SgprUB = -1;
371 unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
372 // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
373 unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0};
374 // Bitmask of the VmemTypes of VMEM instructions that might have a pending
375 // write to each vgpr.
376 unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};
377 };
378
379 class SIInsertWaitcnts : public MachineFunctionPass {
380 private:
381 const GCNSubtarget *ST = nullptr;
382 const SIInstrInfo *TII = nullptr;
383 const SIRegisterInfo *TRI = nullptr;
384 const MachineRegisterInfo *MRI = nullptr;
385 AMDGPU::IsaVersion IV;
386
387 DenseSet<MachineInstr *> TrackedWaitcntSet;
388 DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
389 MachinePostDominatorTree *PDT;
390
391 struct BlockInfo {
392 MachineBasicBlock *MBB;
393 std::unique_ptr<WaitcntBrackets> Incoming;
394 bool Dirty = true;
395
396     explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
397 };
398
399 MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;
400
401   // ForceEmitZeroWaitcnts: force all waitcnt instrs to be emitted as s_waitcnt 0
402   // because of the amdgpu-waitcnt-forcezero flag
403 bool ForceEmitZeroWaitcnts;
404 bool ForceEmitWaitcnt[NUM_INST_CNTS];
405
406 public:
407 static char ID;
408
409   SIInsertWaitcnts() : MachineFunctionPass(ID) {
410 (void)ForceExpCounter;
411 (void)ForceLgkmCounter;
412 (void)ForceVMCounter;
413 }
414
415 bool runOnMachineFunction(MachineFunction &MF) override;
416
417   StringRef getPassName() const override {
418 return "SI insert wait instructions";
419 }
420
421   void getAnalysisUsage(AnalysisUsage &AU) const override {
422 AU.setPreservesCFG();
423 AU.addRequired<MachinePostDominatorTree>();
424 MachineFunctionPass::getAnalysisUsage(AU);
425 }
426
427   bool isForceEmitWaitcnt() const {
428 for (auto T : inst_counter_types())
429 if (ForceEmitWaitcnt[T])
430 return true;
431 return false;
432 }
433
434   void setForceEmitWaitcnt() {
435 // For non-debug builds, ForceEmitWaitcnt has been initialized to false;
436 // For debug builds, get the debug counter info and adjust if need be
437 #ifndef NDEBUG
438 if (DebugCounter::isCounterSet(ForceExpCounter) &&
439 DebugCounter::shouldExecute(ForceExpCounter)) {
440 ForceEmitWaitcnt[EXP_CNT] = true;
441 } else {
442 ForceEmitWaitcnt[EXP_CNT] = false;
443 }
444
445 if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
446 DebugCounter::shouldExecute(ForceLgkmCounter)) {
447 ForceEmitWaitcnt[LGKM_CNT] = true;
448 } else {
449 ForceEmitWaitcnt[LGKM_CNT] = false;
450 }
451
452 if (DebugCounter::isCounterSet(ForceVMCounter) &&
453 DebugCounter::shouldExecute(ForceVMCounter)) {
454 ForceEmitWaitcnt[VM_CNT] = true;
455 } else {
456 ForceEmitWaitcnt[VM_CNT] = false;
457 }
458 #endif // NDEBUG
459 }
460
461 bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
462 bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
463 bool generateWaitcntInstBefore(MachineInstr &MI,
464 WaitcntBrackets &ScoreBrackets,
465 MachineInstr *OldWaitcntInstr);
466 void updateEventWaitcntAfter(MachineInstr &Inst,
467 WaitcntBrackets *ScoreBrackets);
468 bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
469 WaitcntBrackets &ScoreBrackets);
470 };
471
472 } // end anonymous namespace
473
474 RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
475 const SIInstrInfo *TII,
476 const MachineRegisterInfo *MRI,
477 const SIRegisterInfo *TRI,
478 unsigned OpNo) const {
479 const MachineOperand &Op = MI->getOperand(OpNo);
480 assert(Op.isReg());
481 if (!TRI->isInAllocatableClass(Op.getReg()) || TRI->isAGPR(*MRI, Op.getReg()))
482 return {-1, -1};
483
484 // A use via a PW operand does not need a waitcnt.
485 // A partial write is not a WAW.
486 assert(!Op.getSubReg() || !Op.isUndef());
487
488 RegInterval Result;
489
490 unsigned Reg = TRI->getEncodingValue(AMDGPU::getMCReg(Op.getReg(), *ST));
491
492 if (TRI->isVGPR(*MRI, Op.getReg())) {
493 assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
494 Result.first = Reg - RegisterEncoding.VGPR0;
495 assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
496 } else if (TRI->isSGPRReg(*MRI, Op.getReg())) {
497 assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
498 Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
499 assert(Result.first >= NUM_ALL_VGPRS &&
500 Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
501 }
502 // TODO: Handle TTMP
503 // else if (TRI->isTTMP(*MRI, Reg.getReg())) ...
504 else
505 return {-1, -1};
506
507 const TargetRegisterClass *RC = TII->getOpRegClass(*MI, OpNo);
508 unsigned Size = TRI->getRegSizeInBits(*RC);
509 Result.second = Result.first + ((Size + 16) / 32);
510
511 return Result;
512 }
513
514 void WaitcntBrackets::setExpScore(const MachineInstr *MI,
515 const SIInstrInfo *TII,
516 const SIRegisterInfo *TRI,
517 const MachineRegisterInfo *MRI, unsigned OpNo,
518 unsigned Val) {
519 RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo);
520 assert(TRI->isVGPR(*MRI, MI->getOperand(OpNo).getReg()));
521 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
522 setRegScore(RegNo, EXP_CNT, Val);
523 }
524 }
525
526 void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
527 const SIRegisterInfo *TRI,
528 const MachineRegisterInfo *MRI,
529 WaitEventType E, MachineInstr &Inst) {
530 InstCounterType T = eventCounter(E);
531 unsigned CurrScore = getScoreUB(T) + 1;
532 if (CurrScore == 0)
533 report_fatal_error("InsertWaitcnt score wraparound");
534   // PendingEvents and ScoreUB need to be updated regardless of whether this
535   // event changes the score of a register or not.
536   // Examples include vm_cnt for a buffer store or lgkm_cnt for a send-message.
537 PendingEvents |= 1 << E;
538 setScoreUB(T, CurrScore);
539
540 if (T == EXP_CNT) {
541 // Put score on the source vgprs. If this is a store, just use those
542 // specific register(s).
543 if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
544 int AddrOpIdx =
545 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
546 // All GDS operations must protect their address register (same as
547 // export.)
548 if (AddrOpIdx != -1) {
549 setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
550 }
551
552 if (Inst.mayStore()) {
553 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
554 AMDGPU::OpName::data0) != -1) {
555 setExpScore(
556 &Inst, TII, TRI, MRI,
557 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0),
558 CurrScore);
559 }
560 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
561 AMDGPU::OpName::data1) != -1) {
562 setExpScore(&Inst, TII, TRI, MRI,
563 AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
564 AMDGPU::OpName::data1),
565 CurrScore);
566 }
567 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1 &&
568 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
569 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
570 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
571 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
572 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
573 Inst.getOpcode() != AMDGPU::DS_APPEND &&
574 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
575 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
576 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
577 const MachineOperand &Op = Inst.getOperand(I);
578 if (Op.isReg() && !Op.isDef() && TRI->isVGPR(*MRI, Op.getReg())) {
579 setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
580 }
581 }
582 }
583 } else if (TII->isFLAT(Inst)) {
584 if (Inst.mayStore()) {
585 setExpScore(
586 &Inst, TII, TRI, MRI,
587 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
588 CurrScore);
589 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
590 setExpScore(
591 &Inst, TII, TRI, MRI,
592 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
593 CurrScore);
594 }
595 } else if (TII->isMIMG(Inst)) {
596 if (Inst.mayStore()) {
597 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
598 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
599 setExpScore(
600 &Inst, TII, TRI, MRI,
601 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
602 CurrScore);
603 }
604 } else if (TII->isMTBUF(Inst)) {
605 if (Inst.mayStore()) {
606 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
607 }
608 } else if (TII->isMUBUF(Inst)) {
609 if (Inst.mayStore()) {
610 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
611 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
612 setExpScore(
613 &Inst, TII, TRI, MRI,
614 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
615 CurrScore);
616 }
617 } else {
618 if (TII->isEXP(Inst)) {
619 // For export the destination registers are really temps that
620 // can be used as the actual source after export patching, so
621 // we need to treat them like sources and set the EXP_CNT
622 // score.
623 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
624 MachineOperand &DefMO = Inst.getOperand(I);
625 if (DefMO.isReg() && DefMO.isDef() &&
626 TRI->isVGPR(*MRI, DefMO.getReg())) {
627 setRegScore(
628 TRI->getEncodingValue(AMDGPU::getMCReg(DefMO.getReg(), *ST)),
629 EXP_CNT, CurrScore);
630 }
631 }
632 }
633 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
634 MachineOperand &MO = Inst.getOperand(I);
635 if (MO.isReg() && !MO.isDef() && TRI->isVGPR(*MRI, MO.getReg())) {
636 setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
637 }
638 }
639 }
640 #if 0 // TODO: check if this is handled by MUBUF code above.
641 } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
642 Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
643 Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
644 MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
645 unsigned OpNo;//TODO: find the OpNo for this operand;
646 RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo);
647 for (int RegNo = Interval.first; RegNo < Interval.second;
648 ++RegNo) {
649 setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
650 }
651 #endif
652 } else {
653 // Match the score to the destination registers.
654 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
655 auto &Op = Inst.getOperand(I);
656 if (!Op.isReg() || !Op.isDef())
657 continue;
658 RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I);
659 if (T == VM_CNT) {
660 if (Interval.first >= NUM_ALL_VGPRS)
661 continue;
662 if (SIInstrInfo::isVMEM(Inst)) {
663 VmemType V = getVmemType(Inst);
664 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo)
665 VgprVmemTypes[RegNo] |= 1 << V;
666 }
667 }
668 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
669 setRegScore(RegNo, T, CurrScore);
670 }
671 }
672 if (TII->isDS(Inst) && Inst.mayStore()) {
673 setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
674 }
675 }
676 }
677
678 void WaitcntBrackets::print(raw_ostream &OS) {
679 OS << '\n';
680 for (auto T : inst_counter_types()) {
681 unsigned LB = getScoreLB(T);
682 unsigned UB = getScoreUB(T);
683
684 switch (T) {
685 case VM_CNT:
686 OS << " VM_CNT(" << UB - LB << "): ";
687 break;
688 case LGKM_CNT:
689 OS << " LGKM_CNT(" << UB - LB << "): ";
690 break;
691 case EXP_CNT:
692 OS << " EXP_CNT(" << UB - LB << "): ";
693 break;
694 case VS_CNT:
695 OS << " VS_CNT(" << UB - LB << "): ";
696 break;
697 default:
698 OS << " UNKNOWN(" << UB - LB << "): ";
699 break;
700 }
701
702 if (LB < UB) {
703 // Print vgpr scores.
704 for (int J = 0; J <= VgprUB; J++) {
705 unsigned RegScore = getRegScore(J, T);
706 if (RegScore <= LB)
707 continue;
708 unsigned RelScore = RegScore - LB - 1;
709 if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
710 OS << RelScore << ":v" << J << " ";
711 } else {
712 OS << RelScore << ":ds ";
713 }
714 }
715 // Also need to print sgpr scores for lgkm_cnt.
716 if (T == LGKM_CNT) {
717 for (int J = 0; J <= SgprUB; J++) {
718 unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
719 if (RegScore <= LB)
720 continue;
721 unsigned RelScore = RegScore - LB - 1;
722 OS << RelScore << ":s" << J << " ";
723 }
724 }
725 }
726 OS << '\n';
727 }
728 OS << '\n';
729 }
730
731 /// Simplify the waitcnt, in the sense of removing redundant counts, and return
732 /// whether a waitcnt instruction is needed at all.
733 bool WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
734 return simplifyWaitcnt(VM_CNT, Wait.VmCnt) |
735 simplifyWaitcnt(EXP_CNT, Wait.ExpCnt) |
736 simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt) |
737 simplifyWaitcnt(VS_CNT, Wait.VsCnt);
738 }
739
740 bool WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
741 unsigned &Count) const {
742 const unsigned LB = getScoreLB(T);
743 const unsigned UB = getScoreUB(T);
744 if (Count < UB && UB - Count > LB)
745 return true;
746
747 Count = ~0u;
748 return false;
749 }
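// Example (illustrative): with LB == 2 and UB == 5, a candidate vmcnt(3)
// requires only that scores <= UB - 3 == 2 have completed, which the bracket
// lower bound already guarantees, so the count is redundant and is reset to
// ~0u ("no wait").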
750
751 void WaitcntBrackets::determineWait(InstCounterType T, unsigned ScoreToWait,
752 AMDGPU::Waitcnt &Wait) const {
753 // If the score of src_operand falls within the bracket, we need an
754 // s_waitcnt instruction.
755 const unsigned LB = getScoreLB(T);
756 const unsigned UB = getScoreUB(T);
757 if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
758 if ((T == VM_CNT || T == LGKM_CNT) &&
759 hasPendingFlat() &&
760 !ST->hasFlatLgkmVMemCountInOrder()) {
761 // If there is a pending FLAT operation, and this is a VMem or LGKM
762 // waitcnt and the target can report early completion, then we need
763 // to force a waitcnt 0.
764 addWait(Wait, T, 0);
765 } else if (counterOutOfOrder(T)) {
766       // The counter can get decremented out-of-order when there
767       // are multiple types of events in the bracket, so emit an s_waitcnt
768       // with a conservative value of 0 for the counter.
769 addWait(Wait, T, 0);
770 } else {
771 // If a counter has been maxed out avoid overflow by waiting for
772 // MAX(CounterType) - 1 instead.
773 unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
774 addWait(Wait, T, NeededWait);
775 }
776 }
777 }
778
779 void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
780 applyWaitcnt(VM_CNT, Wait.VmCnt);
781 applyWaitcnt(EXP_CNT, Wait.ExpCnt);
782 applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
783 applyWaitcnt(VS_CNT, Wait.VsCnt);
784 }
785
786 void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
787 const unsigned UB = getScoreUB(T);
788 if (Count >= UB)
789 return;
790 if (Count != 0) {
791 if (counterOutOfOrder(T))
792 return;
793 setScoreLB(T, std::max(getScoreLB(T), UB - Count));
794 } else {
795 setScoreLB(T, UB);
796 PendingEvents &= ~WaitEventMaskForInst[T];
797 }
798 }
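// Example (illustrative): applying vmcnt(1) with UB == 7 raises the VM_CNT
// lower bound to 6, marking everything but the most recent VMEM operation
// complete; a zero count raises LB to UB and clears the counter's pending
// events. A nonzero count proves nothing for an out-of-order counter, so it
// is ignored in that case.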
799
800 // Where there are multiple types of events in the bracket of a counter,
801 // the decrements may occur out of order.
802 bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
803   // Scalar memory reads can always complete out of order.
804 if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
805 return true;
806 return hasMixedPendingEvents(T);
807 }
808
809 INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
810 false)
811 INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
812 INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
813 false)
814
815 char SIInsertWaitcnts::ID = 0;
816
817 char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;
818
819 FunctionPass *llvm::createSIInsertWaitcntsPass() {
820 return new SIInsertWaitcnts();
821 }
822
823 static bool readsVCCZ(const MachineInstr &MI) {
824 unsigned Opc = MI.getOpcode();
825 return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
826 !MI.getOperand(1).isUndef();
827 }
828
829 /// \returns true if the callee inserts an s_waitcnt 0 on function entry.
830 static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
831 // Currently all conventions wait, but this may not always be the case.
832 //
833   // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
834   // sense to omit the wait and do it in the caller.
835 return true;
836 }
837
838 /// \returns true if the callee is expected to wait for any outstanding waits
839 /// before returning.
840 static bool callWaitsOnFunctionReturn(const MachineInstr &MI) {
841 return true;
842 }
843
844 /// Generate an s_waitcnt instruction to be placed before \p MI, if needed.
845 /// Instructions of a given type return their results in order,
846 /// but instructions of different types can complete out of order.
847 /// We rely on this in-order completion
848 /// and simply assign a score to each memory access instruction.
849 /// We keep track of the active "score bracket" to determine
850 /// whether a memory access requires an s_waitcnt,
851 /// and if so, what the value of each counter is.
852 /// The "score bracket" is bounded by the lower bound and upper bound
853 /// scores (*_score_LB and *_score_ub respectively).
854 bool SIInsertWaitcnts::generateWaitcntInstBefore(
855 MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
856 MachineInstr *OldWaitcntInstr) {
857 setForceEmitWaitcnt();
858 bool IsForceEmitWaitcnt = isForceEmitWaitcnt();
859
860 if (MI.isMetaInstruction())
861 return false;
862
863 AMDGPU::Waitcnt Wait;
864
865 // See if this instruction has a forced S_WAITCNT VM.
866 // TODO: Handle other cases of NeedsWaitcntVmBefore()
867 if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
868 MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
869 MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
870 MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
871 MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
872 Wait.VmCnt = 0;
873 }
874
875 // All waits must be resolved at call return.
876 // NOTE: this could be improved with knowledge of all call sites or
877 // with knowledge of the called routines.
878 if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
879 MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
880 (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
881 Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
882 }
883 // Resolve vm waits before gs-done.
884 else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
885 MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
886 ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
887 AMDGPU::SendMsg::ID_GS_DONE)) {
888 Wait.VmCnt = 0;
889 }
890 #if 0 // TODO: the following blocks of logic when we have fence.
891 else if (MI.getOpcode() == SC_FENCE) {
892 const unsigned int group_size =
893 context->shader_info->GetMaxThreadGroupSize();
894 // group_size == 0 means thread group size is unknown at compile time
895 const bool group_is_multi_wave =
896 (group_size == 0 || group_size > target_info->GetWaveFrontSize());
897 const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();
898
899 for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
900 SCRegType src_type = Inst->GetSrcType(i);
901 switch (src_type) {
902 case SCMEM_LDS:
903 if (group_is_multi_wave ||
904 context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
905 EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
906 ScoreBrackets->getScoreUB(LGKM_CNT));
907 // LDS may have to wait for VM_CNT after buffer load to LDS
908 if (target_info->HasBufferLoadToLDS()) {
909 EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
910 ScoreBrackets->getScoreUB(VM_CNT));
911 }
912 }
913 break;
914
915 case SCMEM_GDS:
916 if (group_is_multi_wave || fence_is_global) {
917 EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
918 ScoreBrackets->getScoreUB(EXP_CNT));
919 EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
920 ScoreBrackets->getScoreUB(LGKM_CNT));
921 }
922 break;
923
924 case SCMEM_UAV:
925 case SCMEM_TFBUF:
926 case SCMEM_RING:
927 case SCMEM_SCATTER:
928 if (group_is_multi_wave || fence_is_global) {
929 EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
930 ScoreBrackets->getScoreUB(EXP_CNT));
931 EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
932 ScoreBrackets->getScoreUB(VM_CNT));
933 }
934 break;
935
936 case SCMEM_SCRATCH:
937 default:
938 break;
939 }
940 }
941 }
942 #endif
943
944 // Export & GDS instructions do not read the EXEC mask until after the export
945 // is granted (which can occur well after the instruction is issued).
946 // The shader program must flush all EXP operations on the export-count
947 // before overwriting the EXEC mask.
948 else {
949 if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
950 // Export and GDS are tracked individually, either may trigger a waitcnt
951 // for EXEC.
952 if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
953 ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
954 ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
955 ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
956 Wait.ExpCnt = 0;
957 }
958 }
959
960 if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
961 // The function is going to insert a wait on everything in its prolog.
962       // This still needs to be careful if the call target is a load (e.g. a GOT
963       // load). We also need to check WAW dependency with the saved PC.
964 Wait = AMDGPU::Waitcnt();
965
966 int CallAddrOpIdx =
967 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
968
969 if (MI.getOperand(CallAddrOpIdx).isReg()) {
970 RegInterval CallAddrOpInterval =
971 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, CallAddrOpIdx);
972
973 for (int RegNo = CallAddrOpInterval.first;
974 RegNo < CallAddrOpInterval.second; ++RegNo)
975 ScoreBrackets.determineWait(
976 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
977
978 int RtnAddrOpIdx =
979 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
980 if (RtnAddrOpIdx != -1) {
981 RegInterval RtnAddrOpInterval =
982 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, RtnAddrOpIdx);
983
984 for (int RegNo = RtnAddrOpInterval.first;
985 RegNo < RtnAddrOpInterval.second; ++RegNo)
986 ScoreBrackets.determineWait(
987 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
988 }
989 }
990 } else {
991 // FIXME: Should not be relying on memoperands.
992 // Look at the source operands of every instruction to see if
993 // any of them results from a previous memory operation that affects
994 // its current usage. If so, an s_waitcnt instruction needs to be
995 // emitted.
996 // If the source operand was defined by a load, add the s_waitcnt
997 // instruction.
998 //
999 // Two cases are handled for destination operands:
1000 // 1) If the destination operand was defined by a load, add the s_waitcnt
1001 // instruction to guarantee the right WAW order.
1002       // 2) If a destination operand was used by a recent export/store
1003       // instruction, add an s_waitcnt on exp_cnt to guarantee the correct WAR order.
1004 for (const MachineMemOperand *Memop : MI.memoperands()) {
1005 const Value *Ptr = Memop->getValue();
1006 if (Memop->isStore() && SLoadAddresses.count(Ptr)) {
1007 addWait(Wait, LGKM_CNT, 0);
1008 if (PDT->dominates(MI.getParent(), SLoadAddresses.find(Ptr)->second))
1009 SLoadAddresses.erase(Ptr);
1010 }
1011 unsigned AS = Memop->getAddrSpace();
1012 if (AS != AMDGPUAS::LOCAL_ADDRESS)
1013 continue;
1014 unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
1015 // VM_CNT is only relevant to vgpr or LDS.
1016 ScoreBrackets.determineWait(
1017 VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
1018 if (Memop->isStore()) {
1019 ScoreBrackets.determineWait(
1020 EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
1021 }
1022 }
1023
1024 // Loop over use and def operands.
1025 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
1026 MachineOperand &Op = MI.getOperand(I);
1027 if (!Op.isReg())
1028 continue;
1029 RegInterval Interval =
1030 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I);
1031
1032 const bool IsVGPR = TRI->isVGPR(*MRI, Op.getReg());
1033 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
1034 if (IsVGPR) {
1035 // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
1036 // previous write and this write are the same type of VMEM
1037 // instruction, in which case they're guaranteed to write their
1038 // results in order anyway.
1039 if (Op.isUse() || !SIInstrInfo::isVMEM(MI) ||
1040 ScoreBrackets.hasOtherPendingVmemTypes(RegNo,
1041 getVmemType(MI))) {
1042 ScoreBrackets.determineWait(
1043 VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
1044 ScoreBrackets.clearVgprVmemTypes(RegNo);
1045 }
1046 if (Op.isDef()) {
1047 ScoreBrackets.determineWait(
1048 EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
1049 }
1050 }
1051 ScoreBrackets.determineWait(
1052 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
1053 }
1054 }
1055 }
1056 }
1057
1058 // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
1059 // occurs before the instruction. Doing it here prevents any additional
1060 // S_WAITCNTs from being emitted if the instruction was marked as
1061 // requiring a WAITCNT beforehand.
1062 if (MI.getOpcode() == AMDGPU::S_BARRIER &&
1063 !ST->hasAutoWaitcntBeforeBarrier()) {
1064 Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
1065 }
1066
1067 // TODO: Remove this work-around, enable the assert for Bug 457939
1068 // after fixing the scheduler. Also, the Shader Compiler code is
1069 // independent of target.
1070 if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
1071 if (ScoreBrackets.getScoreLB(LGKM_CNT) <
1072 ScoreBrackets.getScoreUB(LGKM_CNT) &&
1073 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
1074 Wait.LgkmCnt = 0;
1075 }
1076 }
1077
1078 // Early-out if no wait is indicated.
1079 if (!ScoreBrackets.simplifyWaitcnt(Wait) && !IsForceEmitWaitcnt) {
1080 bool Modified = false;
1081 if (OldWaitcntInstr) {
1082 for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
1083 &*II != &MI; II = NextI, ++NextI) {
1084 if (II->isDebugInstr())
1085 continue;
1086
1087 if (TrackedWaitcntSet.count(&*II)) {
1088 TrackedWaitcntSet.erase(&*II);
1089 II->eraseFromParent();
1090 Modified = true;
1091 } else if (II->getOpcode() == AMDGPU::S_WAITCNT) {
1092 int64_t Imm = II->getOperand(0).getImm();
1093 ScoreBrackets.applyWaitcnt(AMDGPU::decodeWaitcnt(IV, Imm));
1094 } else {
1095 assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
1096 assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
1097 auto W = TII->getNamedOperand(*II, AMDGPU::OpName::simm16)->getImm();
1098 ScoreBrackets.applyWaitcnt(AMDGPU::Waitcnt(~0u, ~0u, ~0u, W));
1099 }
1100 }
1101 }
1102 return Modified;
1103 }
1104
1105 if (ForceEmitZeroWaitcnts)
1106 Wait = AMDGPU::Waitcnt::allZero(ST->hasVscnt());
1107
1108 if (ForceEmitWaitcnt[VM_CNT])
1109 Wait.VmCnt = 0;
1110 if (ForceEmitWaitcnt[EXP_CNT])
1111 Wait.ExpCnt = 0;
1112 if (ForceEmitWaitcnt[LGKM_CNT])
1113 Wait.LgkmCnt = 0;
1114 if (ForceEmitWaitcnt[VS_CNT])
1115 Wait.VsCnt = 0;
1116
1117 ScoreBrackets.applyWaitcnt(Wait);
1118
1119 AMDGPU::Waitcnt OldWait;
1120 bool Modified = false;
1121
1122 if (OldWaitcntInstr) {
1123 for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
1124 &*II != &MI; II = NextI, NextI++) {
1125 if (II->isDebugInstr())
1126 continue;
1127
1128 if (II->getOpcode() == AMDGPU::S_WAITCNT) {
1129 unsigned IEnc = II->getOperand(0).getImm();
1130 AMDGPU::Waitcnt IWait = AMDGPU::decodeWaitcnt(IV, IEnc);
1131 OldWait = OldWait.combined(IWait);
1132 if (!TrackedWaitcntSet.count(&*II))
1133 Wait = Wait.combined(IWait);
1134 unsigned NewEnc = AMDGPU::encodeWaitcnt(IV, Wait);
1135 if (IEnc != NewEnc) {
1136 II->getOperand(0).setImm(NewEnc);
1137 Modified = true;
1138 }
1139 Wait.VmCnt = ~0u;
1140 Wait.LgkmCnt = ~0u;
1141 Wait.ExpCnt = ~0u;
1142 } else {
1143 assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
1144 assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
1145
1146 unsigned ICnt = TII->getNamedOperand(*II, AMDGPU::OpName::simm16)
1147 ->getImm();
1148 OldWait.VsCnt = std::min(OldWait.VsCnt, ICnt);
1149 if (!TrackedWaitcntSet.count(&*II))
1150 Wait.VsCnt = std::min(Wait.VsCnt, ICnt);
1151 if (Wait.VsCnt != ICnt) {
1152 TII->getNamedOperand(*II, AMDGPU::OpName::simm16)->setImm(Wait.VsCnt);
1153 Modified = true;
1154 }
1155 Wait.VsCnt = ~0u;
1156 }
1157
1158 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
1159 << "Old Instr: " << MI
1160 << "New Instr: " << *II << '\n');
1161
1162 if (!Wait.hasWait())
1163 return Modified;
1164 }
1165 }
1166
1167 if (Wait.VmCnt != ~0u || Wait.LgkmCnt != ~0u || Wait.ExpCnt != ~0u) {
1168 unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
1169 auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
1170 MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
1171 .addImm(Enc);
1172 TrackedWaitcntSet.insert(SWaitInst);
1173 Modified = true;
1174
1175 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
1176 << "Old Instr: " << MI
1177 << "New Instr: " << *SWaitInst << '\n');
1178 }
1179
1180 if (Wait.VsCnt != ~0u) {
1181 assert(ST->hasVscnt());
1182
1183 auto SWaitInst =
1184 BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
1185 TII->get(AMDGPU::S_WAITCNT_VSCNT))
1186 .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
1187 .addImm(Wait.VsCnt);
1188 TrackedWaitcntSet.insert(SWaitInst);
1189 Modified = true;
1190
1191 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
1192 << "Old Instr: " << MI
1193 << "New Instr: " << *SWaitInst << '\n');
1194 }
1195
1196 return Modified;
1197 }
1198
1199 // This is a flat memory operation. Check to see if it has memory tokens other
1200 // than LDS. Other address spaces supported by flat memory operations involve
1201 // global memory.
1202 bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
1203 assert(TII->isFLAT(MI));
1204
1205 // All flat instructions use the VMEM counter.
1206 assert(TII->usesVM_CNT(MI));
1207
1208 // If there are no memory operands then conservatively assume the flat
1209 // operation may access VMEM.
1210 if (MI.memoperands_empty())
1211 return true;
1212
1213 // See if any memory operand specifies an address space that involves VMEM.
1214   // Flat operations only support FLAT, LOCAL (LDS), or address spaces
1215 // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION
1216 // (GDS) address space is not supported by flat operations. Therefore, simply
1217 // return true unless only the LDS address space is found.
1218 for (const MachineMemOperand *Memop : MI.memoperands()) {
1219 unsigned AS = Memop->getAddrSpace();
1220 assert(AS != AMDGPUAS::REGION_ADDRESS);
1221 if (AS != AMDGPUAS::LOCAL_ADDRESS)
1222 return true;
1223 }
1224
1225 return false;
1226 }
1227
1228 // This is a flat memory operation. Check to see if it has memory tokens for
1229 // either LDS or FLAT.
1230 bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
1231 assert(TII->isFLAT(MI));
1232
1233   // Flat instructions such as SCRATCH and GLOBAL do not use the LGKM counter.
1234 if (!TII->usesLGKM_CNT(MI))
1235 return false;
1236
1237 // If there are no memory operands then conservatively assume the flat
1238 // operation may access LDS.
1239 if (MI.memoperands_empty())
1240 return true;
1241
1242 // See if any memory operand specifies an address space that involves LDS.
1243 for (const MachineMemOperand *Memop : MI.memoperands()) {
1244 unsigned AS = Memop->getAddrSpace();
1245 if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
1246 return true;
1247 }
1248
1249 return false;
1250 }
1251
1252 void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
1253 WaitcntBrackets *ScoreBrackets) {
1254 // Now look at the instruction opcode. If it is a memory access
1255 // instruction, update the upper-bound of the appropriate counter's
1256 // bracket and the destination operand scores.
1257 // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
1258 if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
1259 if (TII->isAlwaysGDS(Inst.getOpcode()) ||
1260 TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
1261 ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
1262 ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
1263 } else {
1264 ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1265 }
1266 } else if (TII->isFLAT(Inst)) {
1267 assert(Inst.mayLoadOrStore());
1268
1269 int FlatASCount = 0;
1270
1271 if (mayAccessVMEMThroughFlat(Inst)) {
1272 ++FlatASCount;
1273 if (!ST->hasVscnt())
1274 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
1275 else if (Inst.mayLoad() &&
1276 AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1)
1277 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
1278 else
1279 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
1280 }
1281
1282 if (mayAccessLDSThroughFlat(Inst)) {
1283 ++FlatASCount;
1284 ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1285 }
1286
1287 // A Flat memory operation must access at least one address space.
1288 assert(FlatASCount);
1289
1290     // This is a flat memory operation that accesses both VMEM and LDS, so note
1291     // it - it will require that both the VM and LGKM counters be flushed to zero
1292     // if it is pending when a VM or LGKM dependency occurs.
1293 if (FlatASCount > 1)
1294 ScoreBrackets->setPendingFlat();
1295 } else if (SIInstrInfo::isVMEM(Inst) &&
1296 // TODO: get a better carve out.
1297 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
1298 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
1299 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL &&
1300 Inst.getOpcode() != AMDGPU::BUFFER_GL0_INV &&
1301 Inst.getOpcode() != AMDGPU::BUFFER_GL1_INV) {
1302 if (!ST->hasVscnt())
1303 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
1304 else if ((Inst.mayLoad() &&
1305 AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1) ||
1306 /* IMAGE_GET_RESINFO / IMAGE_GET_LOD */
1307 (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
1308 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
1309 else if (Inst.mayStore())
1310 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
1311
1312 if (ST->vmemWriteNeedsExpWaitcnt() &&
1313 (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
1314 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
1315 }
1316 } else if (TII->isSMRD(Inst)) {
1317 ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1318 } else if (Inst.isCall()) {
1319 if (callWaitsOnFunctionReturn(Inst)) {
1320 // Act as a wait on everything
1321 ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
1322 } else {
1323       // May need to wait for anything.
1324 ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
1325 }
1326 } else if (SIInstrInfo::isEXP(Inst)) {
1327 int Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
1328 if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
1329 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
1330 else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
1331 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
1332 else
1333 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
1334 } else {
1335 switch (Inst.getOpcode()) {
1336 case AMDGPU::S_SENDMSG:
1337 case AMDGPU::S_SENDMSGHALT:
1338 ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
1339 break;
1340 case AMDGPU::S_MEMTIME:
1341 case AMDGPU::S_MEMREALTIME:
1342 ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1343 break;
1344 }
1345 }
1346 }
1347
1348 bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
1349 unsigned OtherScore) {
1350 unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
1351 unsigned OtherShifted =
1352 OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
1353 Score = std::max(MyShifted, OtherShifted);
1354 return OtherShifted > MyShifted;
1355 }
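// Worked example for mergeScore (illustrative): suppose this block's bracket
// has pending count UB - LB == 2 and the other's has 3, so the join uses
// NewUB == LB + 3. Each side's scores are shifted by NewUB minus its own old
// UB, preserving every score's distance from the upper bound (the quantity
// waits are computed from); scores at or below a side's old LB are already
// complete and collapse to 0.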
1356
1357 /// Merge the pending events and associated score brackets of \p Other into
1358 /// this bracket's status.
1359 ///
1360 /// Returns whether the merge resulted in a change that requires tighter waits
1361 /// (i.e. the merged brackets strictly dominate the original brackets).
1362 bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
1363 bool StrictDom = false;
1364
1365 VgprUB = std::max(VgprUB, Other.VgprUB);
1366 SgprUB = std::max(SgprUB, Other.SgprUB);
1367
1368 for (auto T : inst_counter_types()) {
1369 // Merge event flags for this counter
1370 const bool OldOutOfOrder = counterOutOfOrder(T);
1371 const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
1372 const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
1373 if (OtherEvents & ~OldEvents)
1374 StrictDom = true;
1375 PendingEvents |= OtherEvents;
1376
1377 // Merge scores for this counter
1378 const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
1379 const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
1380 const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
1381 if (NewUB < ScoreLBs[T])
1382 report_fatal_error("waitcnt score overflow");
1383
1384 MergeInfo M;
1385 M.OldLB = ScoreLBs[T];
1386 M.OtherLB = Other.ScoreLBs[T];
1387 M.MyShift = NewUB - ScoreUBs[T];
1388 M.OtherShift = NewUB - Other.ScoreUBs[T];
1389
1390 ScoreUBs[T] = NewUB;
1391
1392 StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);
1393
1394 bool RegStrictDom = false;
1395 for (int J = 0; J <= VgprUB; J++) {
1396 RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
1397 }
1398
1399 if (T == VM_CNT) {
1400 for (int J = 0; J <= VgprUB; J++) {
1401 unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
1402 RegStrictDom |= NewVmemTypes != VgprVmemTypes[J];
1403 VgprVmemTypes[J] = NewVmemTypes;
1404 }
1405 }
1406
1407 if (T == LGKM_CNT) {
1408 for (int J = 0; J <= SgprUB; J++) {
1409 RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
1410 }
1411 }
1412
1413 if (RegStrictDom && !OldOutOfOrder)
1414 StrictDom = true;
1415 }
1416
1417 return StrictDom;
1418 }
1419
1420 // Generate s_waitcnt instructions where needed.
1421 bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
1422 MachineBasicBlock &Block,
1423 WaitcntBrackets &ScoreBrackets) {
1424 bool Modified = false;
1425
1426 LLVM_DEBUG({
1427 dbgs() << "*** Block" << Block.getNumber() << " ***";
1428 ScoreBrackets.dump();
1429 });
1430
1431 // Track the correctness of vccz through this basic block. There are two
1432 // reasons why it might be incorrect; see ST->hasReadVCCZBug() and
1433 // ST->partialVCCWritesUpdateVCCZ().
1434 bool VCCZCorrect = true;
1435 if (ST->hasReadVCCZBug()) {
1436 // vccz could be incorrect at a basic block boundary if a predecessor wrote
1437 // to vcc and then issued an smem load.
1438 VCCZCorrect = false;
1439 } else if (!ST->partialVCCWritesUpdateVCCZ()) {
1440 // vccz could be incorrect at a basic block boundary if a predecessor wrote
1441 // to vcc_lo or vcc_hi.
1442 VCCZCorrect = false;
1443 }
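  // Illustrative sequence for the hasReadVCCZBug() case (hypothetical code):
  //   s_mov_b64 vcc, exec       ; vcc write, vccz updated
  //   s_load_dword s0, ...      ; outstanding SMEM may clobber vccz when it
  //                             ; completes
  //   s_cbranch_vccz label      ; may read a corrupt vccz, so the pass inserts
  //                             ; s_waitcnt lgkmcnt(0) and "s_mov_b64 vcc, vcc"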
1444
1445 // Walk over the instructions.
1446 MachineInstr *OldWaitcntInstr = nullptr;
1447
1448 for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
1449 E = Block.instr_end();
1450 Iter != E;) {
1451 MachineInstr &Inst = *Iter;
1452
1453 // Track pre-existing waitcnts from earlier iterations.
1454 if (Inst.getOpcode() == AMDGPU::S_WAITCNT ||
1455 (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
1456 Inst.getOperand(0).isReg() &&
1457 Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL)) {
1458 if (!OldWaitcntInstr)
1459 OldWaitcntInstr = &Inst;
1460 ++Iter;
1461 continue;
1462 }
1463
1464 // Generate an s_waitcnt instruction to be placed before Inst, if needed.
1465 Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr);
1466 OldWaitcntInstr = nullptr;
1467
1468 // Restore vccz if it's not known to be correct already.
1469 bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst);
1470
1471 // Don't examine operands unless we need to track vccz correctness.
1472 if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
1473 if (Inst.definesRegister(AMDGPU::VCC_LO) ||
1474 Inst.definesRegister(AMDGPU::VCC_HI)) {
1475 // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
1476 if (!ST->partialVCCWritesUpdateVCCZ())
1477 VCCZCorrect = false;
1478 } else if (Inst.definesRegister(AMDGPU::VCC)) {
1479 // There is a hardware bug on CI/SI where SMRD instruction may corrupt
1480 // vccz bit, so when we detect that an instruction may read from a
1481 // corrupt vccz bit, we need to:
1482 // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD
1483 // operations to complete.
1484 // 2. Restore the correct value of vccz by writing the current value
1485 // of vcc back to vcc.
1486 if (ST->hasReadVCCZBug() &&
1487 ScoreBrackets.getScoreLB(LGKM_CNT) <
1488 ScoreBrackets.getScoreUB(LGKM_CNT) &&
1489 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
1490 // Writes to vcc while there's an outstanding smem read may get
1491 // clobbered as soon as any read completes.
1492 VCCZCorrect = false;
1493 } else {
1494 // Writes to vcc will fix any incorrect value in vccz.
1495 VCCZCorrect = true;
1496 }
1497 }
1498 }
1499
1500 if (TII->isSMRD(Inst)) {
1501 for (const MachineMemOperand *Memop : Inst.memoperands()) {
1502 const Value *Ptr = Memop->getValue();
1503 SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent()));
1504 }
1505 if (ST->hasReadVCCZBug()) {
1506 // This smem read could complete and clobber vccz at any time.
1507 VCCZCorrect = false;
1508 }
1509 }
1510
1511 updateEventWaitcntAfter(Inst, &ScoreBrackets);
1512
1513 #if 0 // TODO: implement resource type check controlled by options with ub = LB.
1514 // If this instruction generates a S_SETVSKIP because it is an
1515 // indexed resource, and we are on Tahiti, then it will also force
1516 // an S_WAITCNT vmcnt(0)
1517 if (RequireCheckResourceType(Inst, context)) {
1518 // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
1519 ScoreBrackets->setScoreLB(VM_CNT,
1520 ScoreBrackets->getScoreUB(VM_CNT));
1521 }
1522 #endif
1523
1524 LLVM_DEBUG({
1525 Inst.print(dbgs());
1526 ScoreBrackets.dump();
1527 });
1528
1529 // TODO: Remove this work-around after fixing the scheduler and enable the
1530 // assert above.
1531 if (RestoreVCCZ) {
1532 // Restore the vccz bit. Any time a value is written to vcc, the vcc
1533 // bit is updated, so we can restore the bit by reading the value of
1534 // vcc and then writing it back to the register.
1535 BuildMI(Block, Inst, Inst.getDebugLoc(),
1536 TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
1537 TRI->getVCC())
1538 .addReg(TRI->getVCC());
1539 VCCZCorrect = true;
1540 Modified = true;
1541 }
1542
1543 ++Iter;
1544 }
1545
1546 return Modified;
1547 }
1548
1549 bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
1550 ST = &MF.getSubtarget<GCNSubtarget>();
1551 TII = ST->getInstrInfo();
1552 TRI = &TII->getRegisterInfo();
1553 MRI = &MF.getRegInfo();
1554 IV = AMDGPU::getIsaVersion(ST->getCPU());
1555 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1556 PDT = &getAnalysis<MachinePostDominatorTree>();
1557
1558 ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
1559 for (auto T : inst_counter_types())
1560 ForceEmitWaitcnt[T] = false;
1561
1562 HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
1563 HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
1564 HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
1565 HardwareLimits.VscntMax = ST->hasVscnt() ? 63 : 0;
1566
1567 unsigned NumVGPRsMax = ST->getAddressableNumVGPRs();
1568 unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
1569 assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
1570 assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);
1571
1572 RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
1573 RegisterEncoding.VGPRL = RegisterEncoding.VGPR0 + NumVGPRsMax - 1;
1574 RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
1575 RegisterEncoding.SGPRL = RegisterEncoding.SGPR0 + NumSGPRsMax - 1;
1576
1577 TrackedWaitcntSet.clear();
1578 BlockInfos.clear();
1579
1580 // Keep iterating over the blocks in reverse post order, inserting and
1581   // updating s_waitcnt where needed, until a fixed point is reached.
1582 for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
1583 BlockInfos.insert({MBB, BlockInfo(MBB)});
1584
1585 std::unique_ptr<WaitcntBrackets> Brackets;
1586 bool Modified = false;
1587 bool Repeat;
1588 do {
1589 Repeat = false;
1590
1591 for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
1592 ++BII) {
1593 BlockInfo &BI = BII->second;
1594 if (!BI.Dirty)
1595 continue;
1596
1597 if (BI.Incoming) {
1598 if (!Brackets)
1599 Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
1600 else
1601 *Brackets = *BI.Incoming;
1602 } else {
1603 if (!Brackets)
1604 Brackets = std::make_unique<WaitcntBrackets>(ST);
1605 else
1606 *Brackets = WaitcntBrackets(ST);
1607 }
1608
1609 Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
1610 BI.Dirty = false;
1611
1612 if (Brackets->hasPending()) {
1613 BlockInfo *MoveBracketsToSucc = nullptr;
1614 for (MachineBasicBlock *Succ : BI.MBB->successors()) {
1615 auto SuccBII = BlockInfos.find(Succ);
1616 BlockInfo &SuccBI = SuccBII->second;
1617 if (!SuccBI.Incoming) {
1618 SuccBI.Dirty = true;
1619 if (SuccBII <= BII)
1620 Repeat = true;
1621 if (!MoveBracketsToSucc) {
1622 MoveBracketsToSucc = &SuccBI;
1623 } else {
1624 SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
1625 }
1626 } else if (SuccBI.Incoming->merge(*Brackets)) {
1627 SuccBI.Dirty = true;
1628 if (SuccBII <= BII)
1629 Repeat = true;
1630 }
1631 }
1632 if (MoveBracketsToSucc)
1633 MoveBracketsToSucc->Incoming = std::move(Brackets);
1634 }
1635 }
1636 } while (Repeat);
1637
1638 SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
1639
1640 bool HaveScalarStores = false;
1641
1642 for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
1643 ++BI) {
1644 MachineBasicBlock &MBB = *BI;
1645
1646 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
1647 ++I) {
1648 if (!HaveScalarStores && TII->isScalarStore(*I))
1649 HaveScalarStores = true;
1650
1651 if (I->getOpcode() == AMDGPU::S_ENDPGM ||
1652 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
1653 EndPgmBlocks.push_back(&MBB);
1654 }
1655 }
1656
1657 if (HaveScalarStores) {
1658 // If scalar writes are used, the cache must be flushed or else the next
1659 // wave to reuse the same scratch memory can be clobbered.
1660 //
1661 // Insert s_dcache_wb at wave termination points if there were any scalar
1662 // stores, and only if the cache hasn't already been flushed. This could be
1663 // improved by looking across blocks for flushes in postdominating blocks
1664 // from the stores but an explicitly requested flush is probably very rare.
1665 for (MachineBasicBlock *MBB : EndPgmBlocks) {
1666 bool SeenDCacheWB = false;
1667
1668 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
1669 ++I) {
1670 if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
1671 SeenDCacheWB = true;
1672 else if (TII->isScalarStore(*I))
1673 SeenDCacheWB = false;
1674
1675 // FIXME: It would be better to insert this before a waitcnt if any.
1676 if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
1677 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
1678 !SeenDCacheWB) {
1679 Modified = true;
1680 BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
1681 }
1682 }
1683 }
1684 }
1685
1686 if (!MFI->isEntryFunction()) {
1687 // Wait for any outstanding memory operations that the input registers may
1688     // depend on. We can't track them, and it's better to do the wait after the
1689 // costly call sequence.
1690
1691 // TODO: Could insert earlier and schedule more liberally with operations
1692 // that only use caller preserved registers.
1693 MachineBasicBlock &EntryBB = MF.front();
1694 MachineBasicBlock::iterator I = EntryBB.begin();
1695 for (MachineBasicBlock::iterator E = EntryBB.end();
1696 I != E && (I->isPHI() || I->isMetaInstruction()); ++I)
1697 ;
1698 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
1699 if (ST->hasVscnt())
1700 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT_VSCNT))
1701 .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
1702 .addImm(0);
1703
1704 Modified = true;
1705 }
1706
1707 return Modified;
1708 }
1709