/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_ir.h"

#include "common/sid.h"

#include <map>
#include <stack>
#include <vector>

namespace aco {

namespace {

/**
 * The general idea of this pass:
 * The CFG is traversed in reverse postorder (forward), and loops are processed
 * several times until no more progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the join of the out-contexts of all predecessors.
 * The context contains a map: gpr -> wait_entry
 * with the information about the counter values that have to be waited for.
 * Note: after merge nodes, it can happen that multiple counter values have to
 * be waited for, for the same register.
 *
 * The values are updated according to the encountered instructions:
 * - additional events increment the counter of waits of the same type
 * - emitted waits remove gprs whose counter values are satisfied by the wait.
 */

// TODO: do a more clever insertion of wait_cnt (lgkm_cnt)
// when there is a load followed by a use of a previous load

/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint16_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
   event_sendmsg = 1 << 11,
   num_events = 12,
};

enum counter_type : uint8_t {
   counter_exp = 1 << 0,
   counter_lgkm = 1 << 1,
   counter_vm = 1 << 2,
   counter_vs = 1 << 3,
   num_counters = 4,
};

static const uint16_t exp_events =
   event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock;
static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
static const uint16_t vm_events = event_vmem | event_flat;
static const uint16_t vs_events = event_vmem_store;

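/* Return the hardware counter(s) (as a counter_type mask) that an event
 * increments. FLAT accesses are counted on both vm_cnt and lgkm_cnt. */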
uint8_t
get_counters_for_event(wait_event ev)
{
   switch (ev) {
   case event_smem:
   case event_lds:
   case event_gds:
   case event_sendmsg: return counter_lgkm;
   case event_vmem: return counter_vm;
   case event_vmem_store: return counter_vs;
   case event_flat: return counter_vm | counter_lgkm;
   case event_exp_pos:
   case event_exp_param:
   case event_exp_mrt_null:
   case event_gds_gpr_lock:
   case event_vmem_gpr_lock: return counter_exp;
   default: return 0;
   }
}

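/* Wait information for a single physical register: the events that may still
 * be writing (or reading) it, the counters those events map to, and the
 * smallest counter values (imm) that guarantee completion. */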
struct wait_entry {
   wait_imm imm;
   uint16_t events;  /* use wait_event notion */
   uint8_t counters; /* use counter_type notion */
   bool wait_on_read : 1;
   bool logical : 1;
   bool has_vmem_nosampler : 1;
   bool has_vmem_sampler : 1;

   wait_entry(wait_event event_, wait_imm imm_, bool logical_, bool wait_on_read_)
       : imm(imm_), events(event_), counters(get_counters_for_event(event_)),
         wait_on_read(wait_on_read_), logical(logical_), has_vmem_nosampler(false),
         has_vmem_sampler(false)
   {}

   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) || (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read) ||
                     (other.has_vmem_nosampler && !has_vmem_nosampler) ||
                     (other.has_vmem_sampler && !has_vmem_sampler);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read |= other.wait_on_read;
      has_vmem_nosampler |= other.has_vmem_nosampler;
      has_vmem_sampler |= other.has_vmem_sampler;
      assert(logical == other.logical);
      return changed;
   }

   void remove_counter(counter_type counter)
   {
      counters &= ~counter;

      if (counter == counter_lgkm) {
         imm.lgkm = wait_imm::unset_counter;
         events &= ~(event_smem | event_lds | event_gds | event_sendmsg);
      }

      if (counter == counter_vm) {
         imm.vm = wait_imm::unset_counter;
         events &= ~event_vmem;
         has_vmem_nosampler = false;
         has_vmem_sampler = false;
      }

      if (counter == counter_exp) {
         imm.exp = wait_imm::unset_counter;
         events &= ~(event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock |
                     event_vmem_gpr_lock);
      }

      if (counter == counter_vs) {
         imm.vs = wait_imm::unset_counter;
         events &= ~event_vmem_store;
      }

      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~event_flat;
   }
};

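/* Per-basic-block analysis context: the current outstanding counter values,
 * pending FLAT/SMEM state, per-storage-class barrier wait information, and a
 * map from physical register to wait_entry. */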
struct wait_ctx {
   Program* program;
   enum chip_class chip_class;
   uint16_t max_vm_cnt;
   uint16_t max_exp_cnt;
   uint16_t max_lgkm_cnt;
   uint16_t max_vs_cnt;
   uint16_t unordered_events = event_smem | event_flat;

   uint8_t vm_cnt = 0;
   uint8_t exp_cnt = 0;
   uint8_t lgkm_cnt = 0;
   uint8_t vs_cnt = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

   wait_imm barrier_imm[storage_count];
   uint16_t barrier_events[storage_count] = {}; /* use wait_event notion */

   std::map<PhysReg, wait_entry> gpr_map;

   wait_ctx() {}
   wait_ctx(Program* program_)
       : program(program_), chip_class(program_->chip_class),
         max_vm_cnt(program_->chip_class >= GFX9 ? 62 : 14), max_exp_cnt(6),
         max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
         max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
         unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0))
   {}

   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = other->exp_cnt > exp_cnt || other->vm_cnt > vm_cnt ||
                     other->lgkm_cnt > lgkm_cnt || other->vs_cnt > vs_cnt ||
                     (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm);

      exp_cnt = std::max(exp_cnt, other->exp_cnt);
      vm_cnt = std::max(vm_cnt, other->vm_cnt);
      lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
      vs_cnt = std::max(vs_cnt, other->vs_cnt);
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (const auto& entry : other->gpr_map) {
         if (entry.second.logical != logical)
            continue;

         using iterator = std::map<PhysReg, wait_entry>::iterator;
         const std::pair<iterator, bool> insert_pair = gpr_map.insert(entry);
         if (insert_pair.second) {
            changed = true;
         } else {
            changed |= insert_pair.first->second.join(entry.second);
         }
      }

      for (unsigned i = 0; i < storage_count; i++) {
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);
         changed |= (other->barrier_events[i] & ~barrier_events[i]) != 0;
         barrier_events[i] |= other->barrier_events[i];
      }

      return changed;
   }

   void wait_and_remove_from_entry(PhysReg reg, wait_entry& entry, counter_type counter)
   {
      entry.remove_counter(counter);
   }
};

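/* Accumulate into 'wait' the counter values that must be reached before
 * 'instr' may execute: reads of registers with wait_on_read entries and
 * writes of registers with any pending entry, except when the pending access
 * is known to complete in-order with the new one (matching VMEM sampler
 * state, or LDS/GDS accesses of the same kind). */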
void
check_instr(wait_ctx& ctx, wait_imm& wait, Instruction* instr)
{
   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         PhysReg reg{op.physReg() + j};
         std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end() || !it->second.wait_on_read)
            continue;

         wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++) {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         /* Vector Memory reads and writes return in the order they were issued */
         bool has_sampler = instr->isMIMG() && !instr->operands[1].isUndefined() &&
                            instr->operands[1].regClass() == s4;
         if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem) &&
             it->second.has_vmem_nosampler == !has_sampler &&
             it->second.has_vmem_sampler == has_sampler)
            continue;

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->isDS() &&
             (it->second.events & lgkm_events) == (instr->ds().gds ? event_gds : event_lds))
            continue;

         wait.combine(it->second.imm);
      }
   }
}

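/* Fold an existing wait instruction (s_waitcnt, or s_waitcnt_vscnt writing
 * sgpr_null) into 'imm' so it can be merged with the waits computed by this
 * pass. Returns true if 'instr' was such a wait instruction. */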
bool
parse_wait_instr(wait_ctx& ctx, wait_imm& imm, Instruction* instr)
{
   if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
       instr->definitions[0].physReg() == sgpr_null) {
      imm.vs = std::min<uint8_t>(imm.vs, instr->sopk().imm);
      return true;
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      imm.combine(wait_imm(ctx.chip_class, instr->sopp().imm));
      return true;
   }
   return false;
}

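/* Apply barrier semantics to 'imm': for every storage class affected by a
 * barrier with the given semantics and a scope beyond the subgroup, combine
 * in the counter values recorded for that storage class. */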
void
perform_barrier(wait_ctx& ctx, wait_imm& imm, memory_sync_info sync, unsigned semantics)
{
   sync_scope subgroup_scope =
      ctx.program->workgroup_size <= ctx.program->wave_size ? scope_workgroup : scope_subgroup;
   if ((sync.semantics & semantics) && sync.scope > subgroup_scope) {
      unsigned storage = sync.storage;
      while (storage) {
         unsigned idx = u_bit_scan(&storage);

         /* LDS is private to the workgroup */
         sync_scope bar_scope_lds = MIN2(sync.scope, scope_workgroup);

         uint16_t events = ctx.barrier_events[idx];
         if (bar_scope_lds <= subgroup_scope)
            events &= ~event_lds;

         /* in non-WGP, the L1 (L0 on GFX10+) cache keeps all memory operations
          * in-order for the same workgroup */
         if (!ctx.program->wgp_mode && sync.scope <= scope_workgroup)
            events &= ~(event_vmem | event_vmem_store | event_smem);

         if (events)
            imm.combine(ctx.barrier_imm[idx]);
      }
   }
}

void
force_waitcnt(wait_ctx& ctx, wait_imm& imm)
{
   if (ctx.vm_cnt)
      imm.vm = 0;
   if (ctx.exp_cnt)
      imm.exp = 0;
   if (ctx.lgkm_cnt)
      imm.lgkm = 0;

   if (ctx.chip_class >= GFX10) {
      if (ctx.vs_cnt)
         imm.vs = 0;
   }
}

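/* Compute the wait required before 'instr' (hazards on its operands and
 * definitions, barrier semantics, and hardware workarounds), then update the
 * context as if that wait had been emitted: lower the counters, clear
 * satisfied barrier state and drop satisfied gpr entries. */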
void
kill(wait_imm& imm, Instruction* instr, wait_ctx& ctx, memory_sync_info sync_info)
{
   if (debug_flags & DEBUG_FORCE_WAITCNT) {
      /* Force emitting waitcnt states right after the instruction if there is
       * something to wait for.
       */
      return force_waitcnt(ctx, imm);
   }

   if (ctx.exp_cnt || ctx.vm_cnt || ctx.lgkm_cnt)
      check_instr(ctx, imm, instr);

   /* It's required to wait for scalar stores before "writing back" data.
    * It shouldn't cost anything anyways since we're about to do s_endpgm.
    */
   if (ctx.lgkm_cnt && instr->opcode == aco_opcode::s_dcache_wb) {
      assert(ctx.chip_class >= GFX8);
      imm.lgkm = 0;
   }

   if (ctx.chip_class >= GFX10 && instr->isSMEM()) {
      /* GFX10: A store followed by a load at the same address causes a problem because
       * the load doesn't load the correct values unless we wait for the store first.
       * This is NOT mitigated by an s_nop.
       *
       * TODO: Refine this when we have proper alias analysis.
       */
      if (ctx.pending_s_buffer_store && !instr->smem().definitions.empty() &&
          !instr->smem().sync.can_reorder()) {
         imm.lgkm = 0;
      }
   }

   if (ctx.program->early_rast && instr->opcode == aco_opcode::exp) {
      if (instr->exp().dest >= V_008DFC_SQ_EXP_POS && instr->exp().dest < V_008DFC_SQ_EXP_PRIM) {

         /* With early_rast, the HW will start clipping and rasterization after the 1st DONE pos
          * export. Wait for all stores (and atomics) to complete, so PS can read them.
          * TODO: This only really applies to DONE pos exports.
          *       Consider setting the DONE bit earlier.
          */
         if (ctx.vs_cnt > 0)
            imm.vs = 0;
         if (ctx.vm_cnt > 0)
            imm.vm = 0;
      }
   }

   if (instr->opcode == aco_opcode::p_barrier)
      perform_barrier(ctx, imm, instr->barrier().sync, semantic_acqrel);
   else
      perform_barrier(ctx, imm, sync_info, semantic_release);

   if (!imm.empty()) {
      if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
         imm.vm = 0;
      if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
         imm.lgkm = 0;

      /* reset counters */
      ctx.exp_cnt = std::min(ctx.exp_cnt, imm.exp);
      ctx.vm_cnt = std::min(ctx.vm_cnt, imm.vm);
      ctx.lgkm_cnt = std::min(ctx.lgkm_cnt, imm.lgkm);
      ctx.vs_cnt = std::min(ctx.vs_cnt, imm.vs);

      /* update barrier wait imms */
      for (unsigned i = 0; i < storage_count; i++) {
         wait_imm& bar = ctx.barrier_imm[i];
         uint16_t& bar_ev = ctx.barrier_events[i];
         if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp) {
            bar.exp = wait_imm::unset_counter;
            bar_ev &= ~exp_events;
         }
         if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm) {
            bar.vm = wait_imm::unset_counter;
            bar_ev &= ~(vm_events & ~event_flat);
         }
         if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm) {
            bar.lgkm = wait_imm::unset_counter;
            bar_ev &= ~(lgkm_events & ~event_flat);
         }
         if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs) {
            bar.vs = wait_imm::unset_counter;
            bar_ev &= ~vs_events;
         }
         if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
            bar_ev &= ~event_flat;
      }

      /* remove all gprs with higher counter from map */
      std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.begin();
      while (it != ctx.gpr_map.end()) {
         if (imm.exp != wait_imm::unset_counter && imm.exp <= it->second.imm.exp)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_exp);
         if (imm.vm != wait_imm::unset_counter && imm.vm <= it->second.imm.vm)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_vm);
         if (imm.lgkm != wait_imm::unset_counter && imm.lgkm <= it->second.imm.lgkm)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_lgkm);
         if (imm.vs != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_vs);
         if (!it->second.counters)
            it = ctx.gpr_map.erase(it);
         else
            it++;
      }
   }

   if (imm.vm == 0)
      ctx.pending_flat_vm = false;
   if (imm.lgkm == 0) {
      ctx.pending_flat_lgkm = false;
      ctx.pending_s_buffer_store = false;
   }
}

void
update_barrier_counter(uint8_t* ctr, unsigned max)
{
   if (*ctr != wait_imm::unset_counter && *ctr < max)
      (*ctr)++;
}

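/* Update the per-storage-class barrier state for a newly issued event:
 * storage classes accessed (non-privately) by the event record a wait value
 * of 0, while existing entries for other storage classes whose pending events
 * are ordered with the new one have their wait values bumped by one, since
 * one more such operation is now outstanding. */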
void
update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, memory_sync_info sync)
{
   for (unsigned i = 0; i < storage_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      uint16_t& bar_ev = ctx.barrier_events[i];
      if (sync.storage & (1 << i) && !(sync.semantics & semantic_private)) {
         bar_ev |= event;
         if (counters & counter_lgkm)
            bar.lgkm = 0;
         if (counters & counter_vm)
            bar.vm = 0;
         if (counters & counter_exp)
            bar.exp = 0;
         if (counters & counter_vs)
            bar.vs = 0;
      } else if (!(bar_ev & ctx.unordered_events) && !(ctx.unordered_events & event)) {
         if (counters & counter_lgkm && (bar_ev & lgkm_events) == event)
            update_barrier_counter(&bar.lgkm, ctx.max_lgkm_cnt);
         if (counters & counter_vm && (bar_ev & vm_events) == event)
            update_barrier_counter(&bar.vm, ctx.max_vm_cnt);
         if (counters & counter_exp && (bar_ev & exp_events) == event)
            update_barrier_counter(&bar.exp, ctx.max_exp_cnt);
         if (counters & counter_vs && (bar_ev & vs_events) == event)
            update_barrier_counter(&bar.vs, ctx.max_vs_cnt);
      }
   }
}

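/* Account for a newly issued instruction of the given event: bump the
 * outstanding hardware counters and the barrier state. Existing gpr entries
 * of the same in-order event can tolerate one more outstanding operation, so
 * their wait values are incremented as well. */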
void
update_counters(wait_ctx& ctx, wait_event event, memory_sync_info sync = memory_sync_info())
{
   uint8_t counters = get_counters_for_event(event);

   if (counters & counter_lgkm && ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (counters & counter_vm && ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;
   if (counters & counter_exp && ctx.exp_cnt <= ctx.max_exp_cnt)
      ctx.exp_cnt++;
   if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
      ctx.vs_cnt++;

   update_barrier_imm(ctx, counters, event, sync);

   if (ctx.unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg, wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.unordered_events)
         continue;

      assert(entry.events);

      if ((counters & counter_exp) && (entry.events & exp_events) == event &&
          entry.imm.exp < ctx.max_exp_cnt)
         entry.imm.exp++;
      if ((counters & counter_lgkm) && (entry.events & lgkm_events) == event &&
          entry.imm.lgkm < ctx.max_lgkm_cnt)
         entry.imm.lgkm++;
      if ((counters & counter_vm) && (entry.events & vm_events) == event &&
          entry.imm.vm < ctx.max_vm_cnt)
         entry.imm.vm++;
      if ((counters & counter_vs) && (entry.events & vs_events) == event &&
          entry.imm.vs < ctx.max_vs_cnt)
         entry.imm.vs++;
   }
}

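/* FLAT loads on GFX9 and earlier increment both vm_cnt and lgkm_cnt, and
 * which counter a completing load decrements is not known in advance, so
 * waits for their results have to use a value of 0. This is tracked via
 * pending_flat_vm/pending_flat_lgkm and enforced in kill(). */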
void
update_counters_for_flat_load(wait_ctx& ctx, memory_sync_info sync = memory_sync_info())
{
   assert(ctx.chip_class < GFX10);

   if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;

   update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, sync);

   /* iterate by reference so the map entries are actually updated */
   for (std::pair<const PhysReg, wait_entry>& e : ctx.gpr_map) {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}

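/* Create a wait_entry for every register of 'rc' starting at 'reg', or join
 * it with an existing entry for that register. */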
void
insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read,
                  bool has_sampler = false)
{
   uint16_t counters = get_counters_for_event(event);
   wait_imm imm;
   if (counters & counter_lgkm)
      imm.lgkm = 0;
   if (counters & counter_vm)
      imm.vm = 0;
   if (counters & counter_exp)
      imm.exp = 0;
   if (counters & counter_vs)
      imm.vs = 0;

   wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);
   new_entry.has_vmem_nosampler = (event & event_vmem) && !has_sampler;
   new_entry.has_vmem_sampler = (event & event_vmem) && has_sampler;

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg() + i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }
}

void
insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event, bool has_sampler = false)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false, has_sampler);
}

void
insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event, bool has_sampler = false)
{
   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true, has_sampler);
}

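/* Record the events generated by 'instr': update the counters and create wait
 * entries for its definitions, and for operands that the hardware may still
 * read after issue (export sources, GDS operands, VMEM data on GFX6). */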
void
gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction& exp_instr = instr->exp();

      wait_event ev;
      if (exp_instr.dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr.dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++) {
         if (exp_instr.enabled_mask & (1 << i)) {
            unsigned idx = exp_instr.compressed ? i >> 1 : i;
            assert(idx < exp_instr.operands.size());
            insert_wait_entry(ctx, exp_instr.operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      FLAT_instruction& flat = instr->flat();
      if (ctx.chip_class < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, flat.sync);
      else
         update_counters(ctx, event_flat, flat.sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction& smem = instr->smem();
      update_counters(ctx, event_smem, smem.sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.chip_class >= GFX10 && !smem.sync.can_reorder())
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      DS_instruction& ds = instr->ds();
      update_counters(ctx, ds.gds ? event_gds : event_lds, ds.sync);
      if (ds.gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ds.gds ? event_gds : event_lds);

      if (ds.gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL: {
      wait_event ev =
         !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
      update_counters(ctx, ev, get_sync_info(instr));

      bool has_sampler = instr->isMIMG() && !instr->operands[1].isUndefined() &&
                         instr->operands[1].regClass() == s4;

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev, has_sampler);

      if (ctx.chip_class == GFX6 && instr->format != Format::MIMG && instr->operands.size() == 4) {
         ctx.exp_cnt++;
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      } else if (ctx.chip_class == GFX6 && instr->isMIMG() && !instr->operands[2].isUndefined()) {
         ctx.exp_cnt++;
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[2], event_vmem_gpr_lock);
      }

      break;
   }
   case Format::SOPP: {
      if (instr->opcode == aco_opcode::s_sendmsg || instr->opcode == aco_opcode::s_sendmsghalt)
         update_counters(ctx, event_sendmsg);
      break;
   }
   default: break;
   }
}

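/* Materialize the accumulated wait: emit s_waitcnt_vscnt (GFX10+) for the
 * vscnt part and a regular s_waitcnt for the rest, then reset 'imm'. */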
void
emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm& imm)
{
   if (imm.vs != wait_imm::unset_counter) {
      assert(ctx.chip_class >= GFX10);
      SOPK_instruction* waitcnt_vs =
         create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
      waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
      waitcnt_vs->imm = imm.vs;
      instructions.emplace_back(waitcnt_vs);
      imm.vs = wait_imm::unset_counter;
   }
   if (!imm.empty()) {
      SOPP_instruction* waitcnt =
         create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt, Format::SOPP, 0, 0);
      waitcnt->imm = imm.pack(ctx.chip_class);
      waitcnt->block = -1;
      instructions.emplace_back(waitcnt);
   }
   imm = wait_imm();
}

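/* Rewrite a single block: existing wait instructions are parsed and dropped,
 * the wait required before each instruction is computed with kill() and
 * emitted in front of it, and the instruction's own events are recorded with
 * gen(). */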
void
handle_block(Program* program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;

   for (aco_ptr<Instruction>& instr : block.instructions) {
      bool is_wait = parse_wait_instr(ctx, queued_imm, instr.get());

      memory_sync_info sync_info = get_sync_info(instr.get());
      kill(queued_imm, instr.get(), ctx, sync_info);

      gen(instr.get(), ctx);

      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (!queued_imm.empty())
            emit_waitcnt(ctx, new_instructions, queued_imm);

         new_instructions.emplace_back(std::move(instr));
         perform_barrier(ctx, queued_imm, sync_info, semantic_acquire);
      }
   }

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}

} /* end namespace */

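/* Entry point of the pass: visit the blocks in reverse postorder, join the
 * out-contexts of the predecessors into each block's in-context, and repeat
 * loop bodies until the contexts reach a fixed point before rewriting the
 * instructions of each block. */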
void
insert_wait_states(Program* program)
{
   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   std::vector<wait_ctx> in_ctx(program->blocks.size(), wait_ctx(program));
   std::vector<wait_ctx> out_ctx(program->blocks.size(), wait_ctx(program));

   std::stack<unsigned, std::vector<unsigned>> loop_header_indices;
   unsigned loop_progress = 0;

   if (program->stage.has(SWStage::VS) && program->info->vs.dynamic_inputs) {
      for (Definition def : program->vs_inputs) {
         update_counters(in_ctx[0], event_vmem);
         insert_wait_entry(in_ctx[0], def, event_vmem);
      }
   }

   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];
      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      if (done[current.index] && !changed) {
         in_ctx[current.index] = std::move(ctx);
         continue;
      } else {
         in_ctx[current.index] = ctx;
      }

      if (current.instructions.empty()) {
         out_ctx[current.index] = std::move(ctx);
         continue;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = std::move(ctx);
   }
}

} // namespace aco