/*
 * Copyright © 2018 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include "common/sid.h"

#include <bitset>
#include <map>
#include <optional>
#include <stack>
#include <vector>

namespace aco {

namespace {
/**
 * The general idea of this pass:
 * The CFG is traversed in reverse postorder (forward), and loops are processed
 * several times until no further progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the join of the out-contexts of the predecessors.
 * The context contains a map: gpr -> wait_entry,
 * holding the information about the counter values to be waited for.
 * Note: After merge nodes, multiple counter values may have to be waited for
 * on the same register.
 *
 * The values are updated according to the encountered instructions:
 * - additional events increment the counters of waits of the same type
 * - or erase gprs whose counters are higher than the value to be waited for.
 */

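/* Illustrative example (simplified; the exact counter names and limits depend
 * on the gfx level): a buffer_load_dword writing v0 increments the number of
 * outstanding VMEM operations, so a later instruction reading v0 needs an
 * s_waitcnt vmcnt(0) inserted before it. If a second, independent VMEM load
 * is issued in between, waiting for vmcnt(1) already guarantees the first
 * load has completed, because VMEM loads of the same type retire in order.
 */
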
// TODO: do a more clever insertion of wait_cnt (lgkm_cnt)
// when there is a load followed by a use of a previous load

/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint32_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
   event_sendmsg = 1 << 11,
   event_ldsdir = 1 << 12,
   event_vmem_sample = 1 << 13, /* GFX12+ */
   event_vmem_bvh = 1 << 14,    /* GFX12+ */
   num_events = 15,
};

enum counter_type : uint8_t {
   counter_exp = 1 << wait_type_exp,
   counter_lgkm = 1 << wait_type_lgkm,
   counter_vm = 1 << wait_type_vm,
   counter_vs = 1 << wait_type_vs,
   counter_sample = 1 << wait_type_sample,
   counter_bvh = 1 << wait_type_bvh,
   counter_km = 1 << wait_type_km,
   num_counters = wait_type_num,
};

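/* Tracks the outstanding memory operations that have written or will write a
 * specific register: which events are still pending, which hardware counters
 * they map to, and the smallest counter values that guarantee completion.
 */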
struct wait_entry {
   wait_imm imm;
   uint32_t events;  /* use wait_event notion */
   uint8_t counters; /* use counter_type notion */
   bool wait_on_read : 1;
   bool logical : 1;
   uint8_t vmem_types : 4; /* use vmem_type notion. for counter_vm. */

   wait_entry(wait_event event_, wait_imm imm_, uint8_t counters_, bool logical_,
              bool wait_on_read_)
       : imm(imm_), events(event_), counters(counters_), wait_on_read(wait_on_read_),
         logical(logical_), vmem_types(0)
   {}

   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) || (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read) || (other.vmem_types & ~vmem_types) ||
                     (!other.logical && logical);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read |= other.wait_on_read;
      vmem_types |= other.vmem_types;
      logical &= other.logical;
      return changed;
   }

   void remove_wait(wait_type type, uint32_t type_events)
   {
      counters &= ~(1 << type);
      imm[type] = wait_imm::unset_counter;

      events &= ~type_events | event_flat;
      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~(type_events & event_flat);

      if (type == wait_type_vm)
         vmem_types = 0;
   }

   UNUSED void print(FILE* output) const
   {
      imm.print(output);
      if (events)
         fprintf(output, "events: %u\n", events);
      if (counters)
         fprintf(output, "counters: %u\n", counters);
      if (!wait_on_read)
         fprintf(output, "wait_on_read: %u\n", wait_on_read);
      if (!logical)
         fprintf(output, "logical: %u\n", logical);
      if (vmem_types)
         fprintf(output, "vmem_types: %u\n", vmem_types);
   }
};

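/* Per-target description: which events feed which hardware wait counters,
 * the maximum value of each counter, and which events complete out of order.
 */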
struct target_info {
   wait_imm max_cnt;
   uint32_t events[wait_type_num] = {};
   uint16_t unordered_events;

   target_info(enum amd_gfx_level gfx_level)
   {
      max_cnt = wait_imm::max(gfx_level);
      for (unsigned i = 0; i < wait_type_num; i++)
         max_cnt[i] = max_cnt[i] ? max_cnt[i] - 1 : 0;

      events[wait_type_exp] = event_exp_pos | event_exp_param | event_exp_mrt_null |
                              event_gds_gpr_lock | event_vmem_gpr_lock | event_ldsdir;
      events[wait_type_lgkm] = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
      events[wait_type_vm] = event_vmem | event_flat;
      events[wait_type_vs] = event_vmem_store;
      if (gfx_level >= GFX12) {
         events[wait_type_sample] = event_vmem_sample;
         events[wait_type_bvh] = event_vmem_bvh;
         events[wait_type_km] = event_smem | event_sendmsg;
         events[wait_type_lgkm] &= ~events[wait_type_km];
      }

      for (unsigned i = 0; i < wait_type_num; i++) {
         u_foreach_bit (j, events[i])
            counters[j] |= (1 << i);
      }

      unordered_events = event_smem | (gfx_level < GFX10 ? event_flat : 0);
   }

   uint8_t get_counters_for_event(wait_event event) const { return counters[ffs(event) - 1]; }

private:
   /* Bitfields of counters affected by each event */
   uint8_t counters[num_events] = {};
};

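/* The per-basic-block state of the pass: which counters may be nonzero, the
 * pending wait requirements per register and per storage class, and flags for
 * FLAT instructions whose completion order is unknown.
 */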
struct wait_ctx {
   Program* program;
   enum amd_gfx_level gfx_level;
   const target_info* info;

   uint32_t nonzero = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

   wait_imm barrier_imm[storage_count];
   uint16_t barrier_events[storage_count] = {}; /* use wait_event notion */

   std::map<PhysReg, wait_entry> gpr_map;

   wait_ctx() {}
   wait_ctx(Program* program_, const target_info* info_)
       : program(program_), gfx_level(program_->gfx_level), info(info_)
   {}

   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm) || (~nonzero & other->nonzero);

      nonzero |= other->nonzero;
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (const auto& entry : other->gpr_map) {
         if (entry.second.logical != logical)
            continue;

         using iterator = std::map<PhysReg, wait_entry>::iterator;
         const std::pair<iterator, bool> insert_pair = gpr_map.insert(entry);
         if (insert_pair.second) {
            changed = true;
         } else {
            changed |= insert_pair.first->second.join(entry.second);
         }
      }

      for (unsigned i = 0; i < storage_count; i++) {
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);
         changed |= (other->barrier_events[i] & ~barrier_events[i]) != 0;
         barrier_events[i] |= other->barrier_events[i];
      }

      return changed;
   }

   UNUSED void print(FILE* output) const
   {
      for (unsigned i = 0; i < wait_type_num; i++)
         fprintf(output, "nonzero[%u]: %u\n", i, nonzero & (1 << i) ? 1 : 0);
      fprintf(output, "pending_flat_lgkm: %u\n", pending_flat_lgkm);
      fprintf(output, "pending_flat_vm: %u\n", pending_flat_vm);
      for (const auto& entry : gpr_map) {
         fprintf(output, "gpr_map[%c%u] = {\n", entry.first.reg() >= 256 ? 'v' : 's',
                 entry.first.reg() & 0xff);
         entry.second.print(output);
         fprintf(output, "}\n");
      }

      for (unsigned i = 0; i < storage_count; i++) {
         if (!barrier_imm[i].empty() || barrier_events[i]) {
            fprintf(output, "barriers[%u] = {\n", i);
            barrier_imm[i].print(output);
            fprintf(output, "events: %u\n", barrier_events[i]);
            fprintf(output, "}\n");
         }
      }
   }
};

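/* Map a VMEM instruction to the wait_event it generates: stores use the
 * separate store counter on GFX10+, and sample/BVH operations use their own
 * counters on GFX12+.
 */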
wait_event
get_vmem_event(wait_ctx& ctx, Instruction* instr, uint8_t type)
{
   if (instr->definitions.empty() && ctx.gfx_level >= GFX10)
      return event_vmem_store;
   wait_event ev = event_vmem;
   if (ctx.gfx_level >= GFX12 && type != vmem_nosampler)
      ev = type == vmem_bvh ? event_vmem_bvh : event_vmem_sample;
   return ev;
}

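/* Gather the wait requirements of an instruction's operands and definitions:
 * every register it reads or writes that still has an outstanding entry in
 * the context contributes its counter values to the required wait.
 */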
void
check_instr(wait_ctx& ctx, wait_imm& wait, Instruction* instr)
{
   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.find(PhysReg{op.physReg() + j});
         if (it != ctx.gpr_map.end() && it->second.wait_on_read)
            wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++) {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         wait_imm reg_imm = it->second.imm;

         /* Vector Memory reads and writes decrease the counter in the order they were issued.
          * Before GFX12, they also write VGPRs in order if they're of the same type.
          * TODO: We can do this for GFX12 and different types for GFX11 if we know that the two
          * VMEM loads do not write the same lanes. Since GFX11, we track VMEM operations on the
          * linear CFG, so this is difficult. */
         uint8_t vmem_type = get_vmem_type(ctx.gfx_level, instr);
         if (vmem_type && ctx.gfx_level < GFX12) {
            wait_event event = get_vmem_event(ctx, instr, vmem_type);
            wait_type type = (wait_type)(ffs(ctx.info->get_counters_for_event(event)) - 1);
            if ((it->second.events & ctx.info->events[type]) == event &&
                (type != wait_type_vm || it->second.vmem_types == vmem_type))
               reg_imm[type] = wait_imm::unset_counter;
         }

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->isDS() && (it->second.events & ctx.info->events[wait_type_lgkm]) ==
                                 (instr->ds().gds ? event_gds : event_lds))
            reg_imm.lgkm = wait_imm::unset_counter;

         wait.combine(reg_imm);
      }
   }
}

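/* For an acquire/release barrier, combine the waits recorded for every
 * storage class the barrier covers, skipping events that are already ordered
 * by the hardware at the barrier's scope.
 */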
void
perform_barrier(wait_ctx& ctx, wait_imm& imm, memory_sync_info sync, unsigned semantics)
{
   sync_scope subgroup_scope =
      ctx.program->workgroup_size <= ctx.program->wave_size ? scope_workgroup : scope_subgroup;
   if ((sync.semantics & semantics) && sync.scope > subgroup_scope) {
      unsigned storage = sync.storage;
      while (storage) {
         unsigned idx = u_bit_scan(&storage);

         /* LDS is private to the workgroup */
         sync_scope bar_scope_lds = MIN2(sync.scope, scope_workgroup);

         uint16_t events = ctx.barrier_events[idx];
         if (bar_scope_lds <= subgroup_scope)
            events &= ~event_lds;

         /* Until GFX12, in non-WGP mode, the L1 cache (L0 on GFX10+) keeps all memory operations
          * in-order for the same workgroup. */
         if (ctx.gfx_level < GFX12 && !ctx.program->wgp_mode && sync.scope <= scope_workgroup)
            events &= ~(event_vmem | event_vmem_store | event_smem);

         if (events)
            imm.combine(ctx.barrier_imm[idx]);
      }
   }
}

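/* Request a zero wait on every counter that may currently be nonzero. */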
void
force_waitcnt(wait_ctx& ctx, wait_imm& imm)
{
   u_foreach_bit (i, ctx.nonzero)
      imm[i] = 0;
}

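/* Determine the wait that must be emitted before the given instruction and
 * update the context accordingly: entries whose counters are guaranteed to
 * have reached the waited-for values are pruned from the register and
 * barrier maps.
 */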
void
kill(wait_imm& imm, Instruction* instr, wait_ctx& ctx, memory_sync_info sync_info)
{
   if (instr->opcode == aco_opcode::s_setpc_b64 || (debug_flags & DEBUG_FORCE_WAITCNT)) {
      /* Force emitting waitcnt states right after the instruction if there is
       * something to wait for. This is also applied to s_setpc_b64 to ensure
       * waitcnt states are inserted before jumping to the PS epilog.
       */
      force_waitcnt(ctx, imm);
   }

   /* sendmsg(dealloc_vgprs) releases scratch, so this isn't safe if there is an in-progress
    * scratch store.
    */
   if (ctx.gfx_level >= GFX11 && instr->opcode == aco_opcode::s_sendmsg &&
       instr->salu().imm == sendmsg_dealloc_vgprs) {
      imm.combine(ctx.barrier_imm[ffs(storage_scratch) - 1]);
      imm.combine(ctx.barrier_imm[ffs(storage_vgpr_spill) - 1]);
   }

   /* Make sure POPS coherent memory accesses have reached the L2 cache before letting the
    * overlapping waves proceed into the ordered section.
    */
   if (ctx.program->has_pops_overlapped_waves_wait &&
       (ctx.gfx_level >= GFX11 ? instr->isEXP() && instr->exp().done
                               : (instr->opcode == aco_opcode::s_sendmsg &&
                                  instr->salu().imm == sendmsg_ordered_ps_done))) {
      uint8_t c = counter_vm | counter_vs;
      /* Await SMEM loads too, as it's possible for an application to create them, e.g. using a
       * scalarization loop - pointless and suboptimal for an inherently divergent address of
       * per-pixel data, but it can still be done at least synthetically and must be handled
       * correctly.
       */
      if (ctx.program->has_smem_buffer_or_global_loads)
         c |= counter_lgkm;

      u_foreach_bit (i, c & ctx.nonzero)
         imm[i] = 0;
   }

   check_instr(ctx, imm, instr);

   /* It's required to wait for scalar stores before "writing back" data.
    * It shouldn't cost anything anyway since we're about to do s_endpgm.
    */
   if ((ctx.nonzero & BITFIELD_BIT(wait_type_lgkm)) && instr->opcode == aco_opcode::s_dcache_wb) {
      assert(ctx.gfx_level >= GFX8);
      imm.lgkm = 0;
   }

   if (ctx.gfx_level >= GFX10 && instr->isSMEM()) {
      /* GFX10: A store followed by a load at the same address causes a problem because
       * the load doesn't load the correct values unless we wait for the store first.
       * This is NOT mitigated by an s_nop.
       *
       * TODO: Refine this when we have proper alias analysis.
       */
      if (ctx.pending_s_buffer_store && !instr->smem().definitions.empty() &&
          !instr->smem().sync.can_reorder()) {
         imm.lgkm = 0;
      }
   }

   if (instr->opcode == aco_opcode::ds_ordered_count &&
       ((instr->ds().offset1 | (instr->ds().offset0 >> 8)) & 0x1)) {
      imm.combine(ctx.barrier_imm[ffs(storage_gds) - 1]);
   }

   if (instr->opcode == aco_opcode::p_barrier)
      perform_barrier(ctx, imm, instr->barrier().sync, semantic_acqrel);
   else
      perform_barrier(ctx, imm, sync_info, semantic_release);

   if (!imm.empty()) {
      if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
         imm.vm = 0;
      if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
         imm.lgkm = 0;

      /* reset counters */
      for (unsigned i = 0; i < wait_type_num; i++)
         ctx.nonzero &= imm[i] == 0 ? ~BITFIELD_BIT(i) : UINT32_MAX;

      /* update barrier wait imms */
      for (unsigned i = 0; i < storage_count; i++) {
         wait_imm& bar = ctx.barrier_imm[i];
         uint16_t& bar_ev = ctx.barrier_events[i];
         for (unsigned j = 0; j < wait_type_num; j++) {
            if (bar[j] != wait_imm::unset_counter && imm[j] <= bar[j]) {
               bar[j] = wait_imm::unset_counter;
               bar_ev &= ~ctx.info->events[j] | event_flat;
            }
         }
         if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
            bar_ev &= ~event_flat;
      }

      /* remove all gprs with higher counter from map */
      std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.begin();
      while (it != ctx.gpr_map.end()) {
         for (unsigned i = 0; i < wait_type_num; i++) {
            if (imm[i] != wait_imm::unset_counter && imm[i] <= it->second.imm[i])
               it->second.remove_wait((wait_type)i, ctx.info->events[i]);
         }
         if (!it->second.counters)
            it = ctx.gpr_map.erase(it);
         else
            it++;
      }
   }

   if (imm.vm == 0)
      ctx.pending_flat_vm = false;
   if (imm.lgkm == 0) {
      ctx.pending_flat_lgkm = false;
      ctx.pending_s_buffer_store = false;
   }
}

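/* Record an event for barrier tracking: new events on matching storage
 * classes reset the per-storage wait values to zero, while other in-order
 * events of the same kind increment them (clamped to the counter maximum),
 * since one more operation now precedes the tracked ones.
 */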
void
update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, memory_sync_info sync)
{
   for (unsigned i = 0; i < storage_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      uint16_t& bar_ev = ctx.barrier_events[i];

      /* We re-use barrier_imm/barrier_events to wait for all scratch stores to finish. */
      bool ignore_private = i == (ffs(storage_scratch) - 1) || i == (ffs(storage_vgpr_spill) - 1);

      if (sync.storage & (1 << i) && (!(sync.semantics & semantic_private) || ignore_private)) {
         bar_ev |= event;
         u_foreach_bit (j, counters)
            bar[j] = 0;
      } else if (!(bar_ev & ctx.info->unordered_events) && !(ctx.info->unordered_events & event)) {
         u_foreach_bit (j, counters) {
            if (bar[j] != wait_imm::unset_counter && (bar_ev & ctx.info->events[j]) == event)
               bar[j] = std::min<uint16_t>(bar[j] + 1, ctx.info->max_cnt[j]);
         }
      }
   }
}

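/* Account for a newly issued event: mark its counters as possibly nonzero and
 * increment the stored wait values of existing register entries that wait on
 * in-order events of the same kind.
 */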
void
update_counters(wait_ctx& ctx, wait_event event, memory_sync_info sync = memory_sync_info())
{
   uint8_t counters = ctx.info->get_counters_for_event(event);

   ctx.nonzero |= counters;

   update_barrier_imm(ctx, counters, event, sync);

   if (ctx.info->unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg, wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.info->unordered_events)
         continue;

      assert(entry.events);

      u_foreach_bit (i, counters) {
         if ((entry.events & ctx.info->events[i]) == event)
            entry.imm[i] = std::min<uint16_t>(entry.imm[i] + 1, ctx.info->max_cnt[i]);
      }
   }
}

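/* Pre-GFX10 FLAT loads may decrement either lgkm or vm, so their completion
 * order relative to other operations is unknown: every affected entry must
 * wait for a counter value of zero from now on.
 */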
void
update_counters_for_flat_load(wait_ctx& ctx, memory_sync_info sync = memory_sync_info())
{
   assert(ctx.gfx_level < GFX10);

   ctx.nonzero |= BITFIELD_BIT(wait_type_lgkm) | BITFIELD_BIT(wait_type_vm);

   update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, sync);

   for (std::pair<const PhysReg, wait_entry>& e : ctx.gpr_map) {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}

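/* Create (or merge into) register entries for the registers written or read
 * by a newly issued event, starting with a required wait value of zero for
 * each affected counter.
 */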
void
insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read,
                  uint8_t vmem_types = 0, bool force_linear = false)
{
   uint16_t counters = ctx.info->get_counters_for_event(event);
   wait_imm imm;
   u_foreach_bit (i, counters)
      imm[i] = 0;

   wait_entry new_entry(event, imm, counters, !rc.is_linear() && !force_linear, wait_on_read);
   if (counters & counter_vm)
      new_entry.vmem_types |= vmem_types;

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg() + i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }
}

void
insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event, uint8_t vmem_types = 0)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false, vmem_types);
}

void
insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event, uint8_t vmem_types = 0)
{
   /* We can't safely write to unwritten destination VGPR lanes with DS/VMEM on GFX11 without
    * waiting for the load to finish.
    */
   uint32_t ds_vmem_events =
      event_lds | event_gds | event_vmem | event_vmem_sample | event_vmem_bvh | event_flat;
   bool force_linear = ctx.gfx_level >= GFX11 && (event & ds_vmem_events);

   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true, vmem_types, force_linear);
}

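/* Record the events generated by an instruction: bump the relevant counters
 * and insert wait entries for the registers it reads from or writes to
 * asynchronously.
 */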
void
gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction& exp_instr = instr->exp();

      wait_event ev;
      if (exp_instr.dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr.dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++) {
         if (exp_instr.enabled_mask & (1 << i)) {
            unsigned idx = exp_instr.compressed ? i >> 1 : i;
            assert(idx < exp_instr.operands.size());
            insert_wait_entry(ctx, exp_instr.operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      FLAT_instruction& flat = instr->flat();
      if (ctx.gfx_level < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, flat.sync);
      else
         update_counters(ctx, event_flat, flat.sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction& smem = instr->smem();
      update_counters(ctx, event_smem, smem.sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.gfx_level >= GFX10 && !smem.sync.can_reorder())
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      DS_instruction& ds = instr->ds();
      update_counters(ctx, ds.gds ? event_gds : event_lds, ds.sync);
      if (ds.gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ds.gds ? event_gds : event_lds);

      if (ds.gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::LDSDIR: {
      LDSDIR_instruction& ldsdir = instr->ldsdir();
      update_counters(ctx, event_ldsdir, ldsdir.sync);
      insert_wait_entry(ctx, instr->definitions[0], event_ldsdir);
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL:
   case Format::SCRATCH: {
      uint8_t type = get_vmem_type(ctx.gfx_level, instr);
      wait_event ev = get_vmem_event(ctx, instr, type);

      update_counters(ctx, ev, get_sync_info(instr));

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev, type);

      if (ctx.gfx_level == GFX6 && instr->format != Format::MIMG && instr->operands.size() == 4) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      } else if (ctx.gfx_level == GFX6 && instr->isMIMG() && !instr->operands[2].isUndefined()) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[2], event_vmem_gpr_lock);
      }

      break;
   }
   case Format::SOPP: {
      if (instr->opcode == aco_opcode::s_sendmsg || instr->opcode == aco_opcode::s_sendmsghalt)
         update_counters(ctx, event_sendmsg);
      break;
   }
   case Format::SOP1: {
      if (instr->opcode == aco_opcode::s_sendmsg_rtn_b32 ||
          instr->opcode == aco_opcode::s_sendmsg_rtn_b64) {
         update_counters(ctx, event_sendmsg);
         insert_wait_entry(ctx, instr->definitions[0], event_sendmsg);
      }
      break;
   }
   default: break;
   }
}

void
emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm& imm)
{
   Builder bld(ctx.program, &instructions);
   imm.build_waitcnt(bld);
}

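/* Returns false if the instruction reads a register written earlier in the
 * clause (a read-after-write hazard that would require a wait inside the
 * clause); otherwise marks its definitions as written and returns true.
 */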
bool
check_clause_raw(std::bitset<512>& regs_written, Instruction* instr)
{
   for (Operand op : instr->operands) {
      if (op.isConstant())
         continue;
      for (unsigned i = 0; i < op.size(); i++) {
         if (regs_written[op.physReg().reg() + i])
            return false;
      }
   }

   for (Definition def : instr->definitions) {
      for (unsigned i = 0; i < def.size(); i++)
         regs_written[def.physReg().reg() + i] = 1;
   }

   return true;
}

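/* Process the instructions of a single block: compute the required wait
 * before each instruction, hoist waits needed inside a clause to the start of
 * the clause, and emit s_waitcnt instructions where necessary.
 */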
void
handle_block(Program* program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;

   size_t clause_end = 0;
   for (size_t i = 0; i < block.instructions.size(); i++) {
      aco_ptr<Instruction>& instr = block.instructions[i];

      bool is_wait = queued_imm.unpack(ctx.gfx_level, instr.get());

      memory_sync_info sync_info = get_sync_info(instr.get());
      kill(queued_imm, instr.get(), ctx, sync_info);

      /* At the start of a possible clause, also emit waitcnts for each instruction to avoid
       * splitting the clause.
       */
      if (i >= clause_end || !queued_imm.empty()) {
         std::optional<std::bitset<512>> regs_written;
         for (clause_end = i + 1; clause_end < block.instructions.size(); clause_end++) {
            Instruction* next = block.instructions[clause_end].get();
            if (!should_form_clause(instr.get(), next))
               break;

            if (!regs_written) {
               regs_written.emplace();
               check_clause_raw(*regs_written, instr.get());
            }

            if (!check_clause_raw(*regs_written, next))
               break;

            kill(queued_imm, next, ctx, get_sync_info(next));
         }
      }

      gen(instr.get(), ctx);

      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (instr->isVINTERP_INREG() && queued_imm.exp != wait_imm::unset_counter) {
            instr->vinterp_inreg().wait_exp = MIN2(instr->vinterp_inreg().wait_exp, queued_imm.exp);
            queued_imm.exp = wait_imm::unset_counter;
         }

         if (!queued_imm.empty())
            emit_waitcnt(ctx, new_instructions, queued_imm);

         bool is_ordered_count_acquire =
            instr->opcode == aco_opcode::ds_ordered_count &&
            !((instr->ds().offset1 | (instr->ds().offset0 >> 8)) & 0x1);

         new_instructions.emplace_back(std::move(instr));
         perform_barrier(ctx, queued_imm, sync_info, semantic_acquire);

         if (is_ordered_count_acquire)
            queued_imm.combine(ctx.barrier_imm[ffs(storage_gds) - 1]);
      }
   }

   /* If this is the last block of a program that is followed by another shader part,
    * wait for all memory operations to finish before continuing to the next part.
    */
   if (block.kind & block_kind_end_with_regs)
      force_waitcnt(ctx, queued_imm);

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}

} /* end namespace */

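/* Pass entry point: walk the CFG in reverse postorder, iterate loops until
 * the per-block contexts stop changing, and insert the required s_waitcnt
 * instructions into each block.
 */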
void
insert_waitcnt(Program* program)
{
   target_info info(program->gfx_level);

   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   std::vector<wait_ctx> in_ctx(program->blocks.size(), wait_ctx(program, &info));
   std::vector<wait_ctx> out_ctx(program->blocks.size(), wait_ctx(program, &info));

   std::stack<unsigned, std::vector<unsigned>> loop_header_indices;
   unsigned loop_progress = 0;

   if (program->pending_lds_access) {
      update_barrier_imm(in_ctx[0], info.get_counters_for_event(event_lds), event_lds,
                         memory_sync_info(storage_shared));
   }

   for (Definition def : program->args_pending_vmem) {
      update_counters(in_ctx[0], event_vmem);
      insert_wait_entry(in_ctx[0], def, event_vmem);
   }

   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];

      if (current.kind & block_kind_discard_early_exit) {
         /* Because the jump to the discard early exit block may happen anywhere in a block, it's
          * not possible to join it with its predecessors this way.
          * We emit all required waits when emitting the discard block.
          */
         continue;
      }

      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      if (done[current.index] && !changed) {
         in_ctx[current.index] = std::move(ctx);
         continue;
      } else {
         in_ctx[current.index] = ctx;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = std::move(ctx);
   }
}

} // namespace aco