/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <algorithm>
#include <map>
#include <stack>
#include <math.h>

#include "aco_ir.h"

namespace aco {

namespace {
/**
 * The general idea of this pass is:
 * The CFG is traversed in reverse postorder (forward) and loops are processed
 * several times until no progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the joined out-contexts of the predecessors.
 * The context contains a map: gpr -> wait_entry,
 * which holds the information about the cnt values to be waited for.
 * Note: after merge-nodes, it can happen that multiple cnt values
 * have to be waited for at the same register.
 *
 * The values are updated according to the encountered instructions:
 * - additional events increment the counters of waits of the same type
 * - waits erase gprs whose counter values are not lower than the value
 *   waited for.
 */
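
/* A small illustrative example (a sketch; registers arbitrary):
 *
 *    buffer_load_dword v0, ...   ; creates a counter_vm entry for v0 with imm.vm = 0
 *    v_mov_b32 v2, v1            ; unrelated, no wait needed
 *    v_add_f32 v3, v0, v0        ; reads v0 -> the pass inserts
 *                                ;    s_waitcnt vmcnt(0)
 *                                ; before this instruction and erases the entry
 */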

// TODO: do a more clever insertion of wait_cnt (lgkm_cnt) when there is a load followed by a use of a previous load

/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint16_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
   event_sendmsg = 1 << 11,
   num_events = 12,
};

enum counter_type : uint8_t {
   counter_exp = 1 << 0,
   counter_lgkm = 1 << 1,
   counter_vm = 1 << 2,
   counter_vs = 1 << 3,
   num_counters = 4,
};

static const uint16_t exp_events = event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock;
static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
static const uint16_t vm_events = event_vmem | event_flat;
static const uint16_t vs_events = event_vmem_store;
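
/* Note that event_flat appears in both vm_events and lgkm_events: a FLAT
 * access increments both vm_cnt and lgkm_cnt at once, which is why
 * get_counters_for_event() below returns counter_vm | counter_lgkm for it. */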

uint8_t get_counters_for_event(wait_event ev)
{
   switch (ev) {
   case event_smem:
   case event_lds:
   case event_gds:
   case event_sendmsg:
      return counter_lgkm;
   case event_vmem:
      return counter_vm;
   case event_vmem_store:
      return counter_vs;
   case event_flat:
      return counter_vm | counter_lgkm;
   case event_exp_pos:
   case event_exp_param:
   case event_exp_mrt_null:
   case event_gds_gpr_lock:
   case event_vmem_gpr_lock:
      return counter_exp;
   default:
      return 0;
   }
}

uint16_t get_events_for_counter(counter_type ctr)
{
   switch (ctr) {
   case counter_exp:
      return exp_events;
   case counter_lgkm:
      return lgkm_events;
   case counter_vm:
      return vm_events;
   case counter_vs:
      return vs_events;
   }
   return 0;
}

struct wait_imm {
   static const uint8_t unset_counter = 0xff;

   uint8_t vm;
   uint8_t exp;
   uint8_t lgkm;
   uint8_t vs;

   wait_imm() :
      vm(unset_counter), exp(unset_counter), lgkm(unset_counter), vs(unset_counter) {}
   wait_imm(uint16_t vm_, uint16_t exp_, uint16_t lgkm_, uint16_t vs_) :
      vm(vm_), exp(exp_), lgkm(lgkm_), vs(vs_) {}

   wait_imm(enum chip_class chip, uint16_t packed) : vs(unset_counter)
   {
      vm = packed & 0xf;
      if (chip >= GFX9)
         vm |= (packed >> 10) & 0x30;

      exp = (packed >> 4) & 0x7;

      lgkm = (packed >> 8) & 0xf;
      if (chip >= GFX10)
         lgkm |= (packed >> 8) & 0x30;
   }
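
   /* Immediate layout implemented by the constructor above and pack() below:
    *   all chips: vm_cnt[3:0] at bits [3:0], exp_cnt at bits [6:4],
    *              lgkm_cnt[3:0] at bits [11:8]
    *   GFX9+:     vm_cnt[5:4] at bits [15:14]
    *   GFX10+:    lgkm_cnt[5:4] at bits [13:12]
    */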

   uint16_t pack(enum chip_class chip) const
   {
      uint16_t imm = 0;
      assert(exp == unset_counter || exp <= 0x7);
      switch (chip) {
      case GFX10:
      case GFX10_3:
         assert(lgkm == unset_counter || lgkm <= 0x3f);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0x3f) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      case GFX9:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      default:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0xf);
         imm = ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      }
      if (chip < GFX9 && vm == wait_imm::unset_counter)
         imm |= 0xc000; /* should have no effect on pre-GFX9 and now we won't have to worry about the architecture when interpreting the immediate */
      if (chip < GFX10 && lgkm == wait_imm::unset_counter)
         imm |= 0x3000; /* should have no effect on pre-GFX10 and now we won't have to worry about the architecture when interpreting the immediate */
      return imm;
   }

   bool combine(const wait_imm& other)
   {
      bool changed = other.vm < vm || other.exp < exp || other.lgkm < lgkm || other.vs < vs;
      vm = std::min(vm, other.vm);
      exp = std::min(exp, other.exp);
      lgkm = std::min(lgkm, other.lgkm);
      vs = std::min(vs, other.vs);
      return changed;
   }

   bool empty() const
   {
      return vm == unset_counter && exp == unset_counter &&
             lgkm == unset_counter && vs == unset_counter;
   }
};
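
/* Example round trip (a sketch; values arbitrary):
 *   wait_imm(GFX9, wait_imm(1, 2, 3, wait_imm::unset_counter).pack(GFX9))
 * yields vm=1, exp=2, lgkm=3 again; vs is not part of the packed s_waitcnt
 * immediate and is emitted as a separate s_waitcnt_vscnt on GFX10+. */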

struct wait_entry {
   wait_imm imm;
   uint16_t events; /* bitmask of wait_event */
   uint8_t counters; /* bitmask of counter_type */
   bool wait_on_read:1;
   bool logical:1;
   bool has_vmem_nosampler:1;
   bool has_vmem_sampler:1;

   wait_entry(wait_event event, wait_imm imm, bool logical, bool wait_on_read)
      : imm(imm), events(event), counters(get_counters_for_event(event)),
        wait_on_read(wait_on_read), logical(logical),
        has_vmem_nosampler(false), has_vmem_sampler(false) {}

   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) ||
                     (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read) ||
                     (other.has_vmem_nosampler && !has_vmem_nosampler) ||
                     (other.has_vmem_sampler && !has_vmem_sampler);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read |= other.wait_on_read;
      has_vmem_nosampler |= other.has_vmem_nosampler;
      has_vmem_sampler |= other.has_vmem_sampler;
      assert(logical == other.logical);
      return changed;
   }

   void remove_counter(counter_type counter)
   {
      counters &= ~counter;

      if (counter == counter_lgkm) {
         imm.lgkm = wait_imm::unset_counter;
         events &= ~(event_smem | event_lds | event_gds | event_sendmsg);
      }

      if (counter == counter_vm) {
         imm.vm = wait_imm::unset_counter;
         events &= ~event_vmem;
         has_vmem_nosampler = false;
         has_vmem_sampler = false;
      }

      if (counter == counter_exp) {
         imm.exp = wait_imm::unset_counter;
         events &= ~(event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock);
      }

      if (counter == counter_vs) {
         imm.vs = wait_imm::unset_counter;
         events &= ~event_vmem_store;
      }

      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~event_flat;
   }
};

struct wait_ctx {
   Program *program;
   enum chip_class chip_class;
   uint16_t max_vm_cnt;
   uint16_t max_exp_cnt;
   uint16_t max_lgkm_cnt;
   uint16_t max_vs_cnt;
   uint16_t unordered_events = event_smem | event_flat;

   uint8_t vm_cnt = 0;
   uint8_t exp_cnt = 0;
   uint8_t lgkm_cnt = 0;
   uint8_t vs_cnt = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

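   /* For each storage class: the counter values that still have to be awaited
    * before outstanding accesses to that storage class are complete, and the
    * events that produced them. Used by perform_barrier() to turn barriers
    * into waits. */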
   wait_imm barrier_imm[storage_count];
   uint16_t barrier_events[storage_count] = {}; /* bitmask of wait_event */

   std::map<PhysReg,wait_entry> gpr_map;

   /* used for vmem/smem scores */
   bool collect_statistics;
   Instruction *gen_instr;
   std::map<Instruction *, unsigned> unwaited_instrs[num_counters];
   std::map<PhysReg,std::set<Instruction *>> reg_instrs[num_counters];
   std::vector<unsigned> wait_distances[num_events];

   wait_ctx() {}
   wait_ctx(Program *program_)
      : program(program_),
        chip_class(program_->chip_class),
        max_vm_cnt(program_->chip_class >= GFX9 ? 62 : 14),
        max_exp_cnt(6),
        max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
        max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
        unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)),
        collect_statistics(program_->collect_statistics) {}

   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = other->exp_cnt > exp_cnt ||
                     other->vm_cnt > vm_cnt ||
                     other->lgkm_cnt > lgkm_cnt ||
                     other->vs_cnt > vs_cnt ||
                     (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm);

      exp_cnt = std::max(exp_cnt, other->exp_cnt);
      vm_cnt = std::max(vm_cnt, other->vm_cnt);
      lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
      vs_cnt = std::max(vs_cnt, other->vs_cnt);
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (const auto& entry : other->gpr_map)
      {
         if (entry.second.logical != logical)
            continue;

         using iterator = std::map<PhysReg,wait_entry>::iterator;
         const std::pair<iterator, bool> insert_pair = gpr_map.insert(entry);
         if (insert_pair.second) {
            changed = true;
         } else {
            changed |= insert_pair.first->second.join(entry.second);
         }
      }

      for (unsigned i = 0; i < storage_count; i++) {
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);
         changed |= other->barrier_events[i] & ~barrier_events[i];
         barrier_events[i] |= other->barrier_events[i];
      }

      /* these are used for statistics, so don't update "changed" */
      for (unsigned i = 0; i < num_counters; i++) {
         for (const auto& instr : other->unwaited_instrs[i]) {
            using iterator = std::map<Instruction *, unsigned>::iterator;
            const std::pair<iterator, bool> insert_pair = unwaited_instrs[i].insert(instr);
            if (!insert_pair.second) {
               const iterator pos = insert_pair.first;
               pos->second = std::min(pos->second, instr.second);
            }
         }
         for (const auto& instr_pair : other->reg_instrs[i]) {
            const PhysReg reg = instr_pair.first;
            const std::set<Instruction *>& instrs = instr_pair.second;
            reg_instrs[i][reg].insert(instrs.begin(), instrs.end());
         }
      }

      return changed;
   }

   void wait_and_remove_from_entry(PhysReg reg, wait_entry& entry, counter_type counter) {
      if (collect_statistics && (entry.counters & counter)) {
         unsigned counter_idx = ffs(counter) - 1;
         for (Instruction *instr : reg_instrs[counter_idx][reg]) {
            auto pos = unwaited_instrs[counter_idx].find(instr);
            if (pos == unwaited_instrs[counter_idx].end())
               continue;

            unsigned distance = pos->second;
            unsigned events = entry.events & get_events_for_counter(counter);
            while (events) {
               unsigned event_idx = u_bit_scan(&events);
               wait_distances[event_idx].push_back(distance);
            }

            unwaited_instrs[counter_idx].erase(pos);
         }
         reg_instrs[counter_idx][reg].clear();
      }

      entry.remove_counter(counter);
   }

   void advance_unwaited_instrs()
   {
      for (unsigned i = 0; i < num_counters; i++) {
         for (std::pair<Instruction * const, unsigned>& instr : unwaited_instrs[i])
            instr.second++;
      }
   }
};

wait_imm check_instr(Instruction* instr, wait_ctx& ctx)
{
   wait_imm wait;

   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         PhysReg reg{op.physReg() + j};
         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end() || !it->second.wait_on_read)
            continue;

         wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++)
      {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         /* Vector Memory reads and writes return in the order they were issued */
         bool has_sampler = instr->format == Format::MIMG && !instr->operands[1].isUndefined() && instr->operands[1].regClass() == s4;
         if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem) &&
             it->second.has_vmem_nosampler == !has_sampler && it->second.has_vmem_sampler == has_sampler)
            continue;

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->format == Format::DS) {
            bool gds = static_cast<DS_instruction*>(instr)->gds;
            if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds))
               continue;
         }

         wait.combine(it->second.imm);
      }
   }

   return wait;
}

wait_imm parse_wait_instr(wait_ctx& ctx, Instruction *instr)
{
   if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
       instr->definitions[0].physReg() == sgpr_null) {
      wait_imm imm;
      imm.vs = std::min<uint8_t>(imm.vs, static_cast<SOPK_instruction*>(instr)->imm);
      return imm;
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      return wait_imm(ctx.chip_class, static_cast<SOPP_instruction*>(instr)->imm);
   }
   return wait_imm();
}

wait_imm perform_barrier(wait_ctx& ctx, memory_sync_info sync, unsigned semantics)
{
   wait_imm imm;
   sync_scope subgroup_scope = ctx.program->workgroup_size <= ctx.program->wave_size ? scope_workgroup : scope_subgroup;
   if ((sync.semantics & semantics) && sync.scope > subgroup_scope) {
      unsigned storage = sync.storage;
      while (storage) {
         unsigned idx = u_bit_scan(&storage);

         /* LDS is private to the workgroup */
         sync_scope bar_scope_lds = MIN2(sync.scope, scope_workgroup);

         uint16_t events = ctx.barrier_events[idx];
         if (bar_scope_lds <= subgroup_scope)
            events &= ~event_lds;

         /* in non-WGP, the L1/L0 cache keeps all memory operations in-order for the same workgroup */
         if (ctx.chip_class < GFX10 && sync.scope <= scope_workgroup)
            events &= ~(event_vmem | event_vmem_store | event_smem);

         if (events)
            imm.combine(ctx.barrier_imm[idx]);
      }
   }

   return imm;
}

void force_waitcnt(wait_ctx& ctx, wait_imm& imm)
{
   if (ctx.vm_cnt)
      imm.vm = 0;
   if (ctx.exp_cnt)
      imm.exp = 0;
   if (ctx.lgkm_cnt)
      imm.lgkm = 0;

   if (ctx.chip_class >= GFX10) {
      if (ctx.vs_cnt)
         imm.vs = 0;
   }
}

wait_imm kill(Instruction* instr, wait_ctx& ctx, memory_sync_info sync_info)
{
   wait_imm imm;

   if (debug_flags & DEBUG_FORCE_WAITCNT) {
      /* Force emitting waitcnt states right after the instruction if there is
       * something to wait for.
       */
      force_waitcnt(ctx, imm);
   }

   if (ctx.exp_cnt || ctx.vm_cnt || ctx.lgkm_cnt)
      imm.combine(check_instr(instr, ctx));

   imm.combine(parse_wait_instr(ctx, instr));

   /* It's required to wait for scalar stores before "writing back" data.
    * It shouldn't cost anything anyway since we're about to do s_endpgm.
    */
   if (ctx.lgkm_cnt && instr->opcode == aco_opcode::s_dcache_wb) {
      assert(ctx.chip_class >= GFX8);
      imm.lgkm = 0;
   }

   if (ctx.chip_class >= GFX10 && instr->format == Format::SMEM) {
      /* GFX10: A store followed by a load at the same address causes a problem because
       * the load doesn't load the correct values unless we wait for the store first.
       * This is NOT mitigated by an s_nop.
       *
       * TODO: Refine this when we have proper alias analysis.
       */
      SMEM_instruction *smem = static_cast<SMEM_instruction *>(instr);
      if (ctx.pending_s_buffer_store &&
          !smem->definitions.empty() &&
          !smem->sync.can_reorder()) {
         imm.lgkm = 0;
      }
   }

   if (instr->opcode == aco_opcode::p_barrier)
      imm.combine(perform_barrier(ctx, static_cast<Pseudo_barrier_instruction *>(instr)->sync, semantic_acqrel));
   else
      imm.combine(perform_barrier(ctx, sync_info, semantic_release));

   if (!imm.empty()) {
      if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
         imm.vm = 0;
      if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
         imm.lgkm = 0;

      /* reset counters */
      ctx.exp_cnt = std::min(ctx.exp_cnt, imm.exp);
      ctx.vm_cnt = std::min(ctx.vm_cnt, imm.vm);
      ctx.lgkm_cnt = std::min(ctx.lgkm_cnt, imm.lgkm);
      ctx.vs_cnt = std::min(ctx.vs_cnt, imm.vs);

      /* update barrier wait imms */
      for (unsigned i = 0; i < storage_count; i++) {
         wait_imm& bar = ctx.barrier_imm[i];
         uint16_t& bar_ev = ctx.barrier_events[i];
         if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp) {
            bar.exp = wait_imm::unset_counter;
            bar_ev &= ~exp_events;
         }
         if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm) {
            bar.vm = wait_imm::unset_counter;
            bar_ev &= ~(vm_events & ~event_flat);
         }
         if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm) {
            bar.lgkm = wait_imm::unset_counter;
            bar_ev &= ~(lgkm_events & ~event_flat);
         }
         if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs) {
            bar.vs = wait_imm::unset_counter;
            bar_ev &= ~vs_events;
         }
         if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
            bar_ev &= ~event_flat;
      }

      /* remove all gprs with higher counter from map */
      std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.begin();
      while (it != ctx.gpr_map.end())
      {
         if (imm.exp != wait_imm::unset_counter && imm.exp <= it->second.imm.exp)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_exp);
         if (imm.vm != wait_imm::unset_counter && imm.vm <= it->second.imm.vm)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_vm);
         if (imm.lgkm != wait_imm::unset_counter && imm.lgkm <= it->second.imm.lgkm)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_lgkm);
         if (imm.vs != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_vs);
         if (!it->second.counters)
            it = ctx.gpr_map.erase(it);
         else
            it++;
      }
   }

   if (imm.vm == 0)
      ctx.pending_flat_vm = false;
   if (imm.lgkm == 0) {
      ctx.pending_flat_lgkm = false;
      ctx.pending_s_buffer_store = false;
   }

   return imm;
}

void update_barrier_counter(uint8_t *ctr, unsigned max)
{
   if (*ctr != wait_imm::unset_counter && *ctr < max)
      (*ctr)++;
}

void update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, memory_sync_info sync)
{
   for (unsigned i = 0; i < storage_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      uint16_t& bar_ev = ctx.barrier_events[i];
      if (sync.storage & (1 << i) && !(sync.semantics & semantic_private)) {
         bar_ev |= event;
         if (counters & counter_lgkm)
            bar.lgkm = 0;
         if (counters & counter_vm)
            bar.vm = 0;
         if (counters & counter_exp)
            bar.exp = 0;
         if (counters & counter_vs)
            bar.vs = 0;
      } else if (!(bar_ev & ctx.unordered_events) && !(ctx.unordered_events & event)) {
         if (counters & counter_lgkm && (bar_ev & lgkm_events) == event)
            update_barrier_counter(&bar.lgkm, ctx.max_lgkm_cnt);
         if (counters & counter_vm && (bar_ev & vm_events) == event)
            update_barrier_counter(&bar.vm, ctx.max_vm_cnt);
         if (counters & counter_exp && (bar_ev & exp_events) == event)
            update_barrier_counter(&bar.exp, ctx.max_exp_cnt);
         if (counters & counter_vs && (bar_ev & vs_events) == event)
            update_barrier_counter(&bar.vs, ctx.max_vs_cnt);
      }
   }
}

void update_counters(wait_ctx& ctx, wait_event event, memory_sync_info sync=memory_sync_info())
{
   uint8_t counters = get_counters_for_event(event);

   if (counters & counter_lgkm && ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (counters & counter_vm && ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;
   if (counters & counter_exp && ctx.exp_cnt <= ctx.max_exp_cnt)
      ctx.exp_cnt++;
   if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
      ctx.vs_cnt++;

   update_barrier_imm(ctx, counters, event, sync);

   if (ctx.unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.unordered_events)
         continue;

      assert(entry.events);

      if ((counters & counter_exp) && (entry.events & exp_events) == event && entry.imm.exp < ctx.max_exp_cnt)
         entry.imm.exp++;
      if ((counters & counter_lgkm) && (entry.events & lgkm_events) == event && entry.imm.lgkm < ctx.max_lgkm_cnt)
         entry.imm.lgkm++;
      if ((counters & counter_vm) && (entry.events & vm_events) == event && entry.imm.vm < ctx.max_vm_cnt)
         entry.imm.vm++;
      if ((counters & counter_vs) && (entry.events & vs_events) == event && entry.imm.vs < ctx.max_vs_cnt)
         entry.imm.vs++;
   }
}

void update_counters_for_flat_load(wait_ctx& ctx, memory_sync_info sync=memory_sync_info())
{
   assert(ctx.chip_class < GFX10);

   if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;

   update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, sync);

   /* iterate by reference, so the entries are actually updated */
   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map)
   {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}

void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read,
                       bool has_sampler=false)
{
   uint16_t counters = get_counters_for_event(event);
   wait_imm imm;
   if (counters & counter_lgkm)
      imm.lgkm = 0;
   if (counters & counter_vm)
      imm.vm = 0;
   if (counters & counter_exp)
      imm.exp = 0;
   if (counters & counter_vs)
      imm.vs = 0;

   wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);
   new_entry.has_vmem_nosampler = (event & event_vmem) && !has_sampler;
   new_entry.has_vmem_sampler = (event & event_vmem) && has_sampler;

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg()+i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }

   if (ctx.collect_statistics) {
      unsigned counters_todo = counters;
      while (counters_todo) {
         unsigned i = u_bit_scan(&counters_todo);
         ctx.unwaited_instrs[i].insert(std::make_pair(ctx.gen_instr, 0u));
         for (unsigned j = 0; j < rc.size(); j++)
            ctx.reg_instrs[i][PhysReg{reg.reg()+j}].insert(ctx.gen_instr);
      }
   }
}

void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event, bool has_sampler=false)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false, has_sampler);
}

void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event, bool has_sampler=false)
{
   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true, has_sampler);
}

void gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction* exp_instr = static_cast<Export_instruction*>(instr);

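      /* Assumed EXP target numbering (per the GCN/RDNA ISA docs): 0-7 = MRT,
       * 8 = MRTZ, 9 = NULL, 12-15 = POS0-3, 32+ = PARAM. */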
      wait_event ev;
      if (exp_instr->dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr->dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++)
      {
         if (exp_instr->enabled_mask & (1 << i)) {
            unsigned idx = exp_instr->compressed ? i >> 1 : i;
            assert(idx < exp_instr->operands.size());
            insert_wait_entry(ctx, exp_instr->operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      FLAT_instruction *flat = static_cast<FLAT_instruction*>(instr);
      if (ctx.chip_class < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, flat->sync);
      else
         update_counters(ctx, event_flat, flat->sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction *smem = static_cast<SMEM_instruction*>(instr);
      update_counters(ctx, event_smem, smem->sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.chip_class >= GFX10 &&
               !smem->sync.can_reorder())
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      DS_instruction *ds = static_cast<DS_instruction*>(instr);
      update_counters(ctx, ds->gds ? event_gds : event_lds, ds->sync);
      if (ds->gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ds->gds ? event_gds : event_lds);

      if (ds->gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL: {
      wait_event ev = !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
      update_counters(ctx, ev, get_sync_info(instr));

      bool has_sampler = instr->format == Format::MIMG && !instr->operands[1].isUndefined() && instr->operands[1].regClass() == s4;

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev, has_sampler);

      if (ctx.chip_class == GFX6 &&
          instr->format != Format::MIMG &&
          instr->operands.size() == 4) {
         ctx.exp_cnt++;
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      } else if (ctx.chip_class == GFX6 &&
                 instr->format == Format::MIMG &&
                 instr->operands[1].regClass().type() == RegType::vgpr) {
         ctx.exp_cnt++;
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[1], event_vmem_gpr_lock);
      }

      break;
   }
   case Format::SOPP: {
      if (instr->opcode == aco_opcode::s_sendmsg ||
          instr->opcode == aco_opcode::s_sendmsghalt)
         update_counters(ctx, event_sendmsg);
      break;
   }
   default:
      break;
   }
}

void emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm imm)
{
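   /* vs_cnt is not part of the s_waitcnt immediate; on GFX10+ it is waited on
    * with a separate s_waitcnt_vscnt writing sgpr_null. So a queued imm with
    * e.g. vm=0, vs=1 becomes "s_waitcnt_vscnt null, 1" followed by
    * "s_waitcnt vmcnt(0)". */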
   if (imm.vs != wait_imm::unset_counter) {
      assert(ctx.chip_class >= GFX10);
      SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
      waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
      waitcnt_vs->imm = imm.vs;
      instructions.emplace_back(waitcnt_vs);
      imm.vs = wait_imm::unset_counter;
   }
   if (!imm.empty()) {
      SOPP_instruction* waitcnt = create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt, Format::SOPP, 0, 0);
      waitcnt->imm = imm.pack(ctx.chip_class);
      waitcnt->block = -1;
      instructions.emplace_back(waitcnt);
   }
}

void handle_block(Program *program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;

   for (aco_ptr<Instruction>& instr : block.instructions) {
      bool is_wait = !parse_wait_instr(ctx, instr.get()).empty();

      memory_sync_info sync_info = get_sync_info(instr.get());
      queued_imm.combine(kill(instr.get(), ctx, sync_info));

      ctx.gen_instr = instr.get();
      gen(instr.get(), ctx);

      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (!queued_imm.empty()) {
            emit_waitcnt(ctx, new_instructions, queued_imm);
            queued_imm = wait_imm();
         }
         new_instructions.emplace_back(std::move(instr));

         queued_imm.combine(perform_barrier(ctx, sync_info, semantic_acquire));

         if (ctx.collect_statistics)
            ctx.advance_unwaited_instrs();
      }
   }

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}

} /* end namespace */

static uint32_t calculate_score(std::vector<wait_ctx> &ctx_vec, uint32_t event_mask)
{
   double result = 0.0;
   unsigned num_waits = 0;
   while (event_mask) {
      unsigned event_index = u_bit_scan(&event_mask);
      for (const wait_ctx &ctx : ctx_vec) {
         for (unsigned dist : ctx.wait_distances[event_index]) {
            double score = dist;
            /* for many events, excessive distances provide little benefit, so
             * decrease the score in that case. */
            double threshold = INFINITY;
            double inv_strength = 0.000001;
            switch (1 << event_index) {
            case event_smem:
               threshold = 70.0;
               inv_strength = 75.0;
               break;
            case event_vmem:
            case event_vmem_store:
            case event_flat:
               threshold = 230.0;
               inv_strength = 150.0;
               break;
            case event_lds:
               threshold = 16.0;
               break;
            default:
               break;
            }
            if (score > threshold) {
               score -= threshold;
               score = threshold + score / (1.0 + score / inv_strength);
            }

            /* we don't want increases in high scores to hide decreases in low scores,
             * so raise to the power of 0.1 before averaging. */
            result += pow(score, 0.1);
            num_waits++;
         }
      }
   }
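   /* In other words (with d_i the clamped distances collected above):
    *    score = round(10 * ((sum of d_i^0.1) / n)^10)
    * which undoes the compression applied before averaging. */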
   return round(pow(result / num_waits, 10.0) * 10.0);
}

void insert_wait_states(Program* program)
{
   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   std::vector<wait_ctx> in_ctx(program->blocks.size(), wait_ctx(program));
   std::vector<wait_ctx> out_ctx(program->blocks.size(), wait_ctx(program));

   std::stack<unsigned> loop_header_indices;
   unsigned loop_progress = 0;

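   /* Visit the blocks in program order. Loops are processed repeatedly until
    * no context changes any more, by resetting "i" to the loop header at the
    * loop exit (see the comment at the top of this file). */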
   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];
      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      if (done[current.index] && !changed) {
         in_ctx[current.index] = std::move(ctx);
         continue;
      } else {
         in_ctx[current.index] = ctx;
      }

      if (current.instructions.empty()) {
         out_ctx[current.index] = std::move(ctx);
         continue;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = std::move(ctx);
   }

   if (program->collect_statistics) {
      program->statistics[statistic_vmem_score] =
         calculate_score(out_ctx, event_vmem | event_flat | event_vmem_store);
      program->statistics[statistic_smem_score] =
         calculate_score(out_ctx, event_smem);
   }
}

}