1 /*
2 * Copyright © 2019 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir_schedule.h"
25 #include "util/dag.h"
26 #include "util/u_dynarray.h"
27
28 /** @file
29 *
30 * Implements basic-block-level prepass instruction scheduling in NIR to
31 * manage register pressure.
32 *
33 * This is based on the Goodman/Hsu paper (1988, cached copy at
34 * https://people.freedesktop.org/~anholt/scheduling-goodman-hsu.pdf). We
35 * make up the DDG for NIR (which can be mostly done using the NIR def/use
36 * chains for SSA instructions, plus some edges for ordering register writes
37 * vs reads, and some more for ordering intrinsics). Then we pick heads off
38 * of the DDG using their heuristic to emit the NIR instructions back into the
39 * block in their new order.
40 *
41 * The hard case for prepass scheduling on GPUs seems to always be consuming
42 * texture/ubo results. The register pressure heuristic doesn't want to pick
43 * an instr that starts consuming texture results because it usually won't be
44 * the only usage, so that instruction will increase pressure.
45 *
46 * If you always force consumption of tex results, then in a case where a
47 * single sample is used for many outputs, you'll end up picking every other
48 * user and expanding register pressure. The partially_evaluated_path flag
49 * helps tremendously, in that if you happen for whatever reason to pick a
50 * texture sample's output, then you'll try to finish off that sample. Future
51 * work may include doing some local search before locking in a choice, to try
52 * to more reliably find the case where just a few choices going against the
53 * heuristic can manage to free the whole vector.
54 */
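
/* Illustrative usage from a backend (a minimal sketch; the threshold value
 * here is a made-up example rather than a recommendation, see the comment on
 * nir_schedule() at the bottom of this file for how to pick it):
 *
 *    nir_schedule_options options = {
 *       .threshold = 64,
 *    };
 *    nir_schedule(shader, &options);
 */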
55
56 static bool debug;
57
58 /**
59 * Represents a node in the DDG for a NIR instruction.
60 */
61 typedef struct {
62 struct dag_node dag; /* must be first for our u_dynarray_foreach */
63 nir_instr *instr;
64 bool partially_evaluated_path;
65
66 /* Approximate estimate of the delay between starting this instruction and
67 * its results being available.
68 *
69 * Accuracy is not too important, given that we're prepass scheduling here
70 * and just trying to reduce excess dependencies introduced by a register
71 * allocator by stretching out the live intervals of expensive
72 * instructions.
73 */
74 uint32_t delay;
75
76 /* Cost of the maximum-delay path from this node to the leaves. */
77 uint32_t max_delay;
78
79 /* scoreboard->time value when this instruction can be scheduled without
80 * any stalls expected.
81 */
82 uint32_t ready_time;
83 } nir_schedule_node;
84
85 typedef struct {
86 struct dag *dag;
87
88 nir_shader *shader;
89
90 /* Mapping from nir_def * to a struct set of
91 * instructions remaining to be scheduled using the register.
92 */
93 struct hash_table *remaining_uses;
94
95 /* Map from nir_instr to nir_schedule_node * */
96 struct hash_table *instr_map;
97
98 /* Set of nir_def * that have had any instruction scheduled on them. */
99 struct set *live_values;
100
101 /* An abstract approximation of the number of nir_schedule_node->delay
102 * units since the start of the shader.
103 */
104 uint32_t time;
105
106 /* Number of channels currently used by the NIR instructions that have been
107 * scheduled.
108 */
109 int pressure;
110
111 /* Options specified by the backend */
112 const nir_schedule_options *options;
113 } nir_schedule_scoreboard;
114
115 /* When walking the instructions in reverse, we use this flag to swap
116 * before/after in add_dep().
117 */
118 enum direction { F,
119 R };
120
121 struct nir_schedule_class_dep {
122 int klass;
123 nir_schedule_node *node;
124 struct nir_schedule_class_dep *next;
125 };
126
127 typedef struct {
128 nir_schedule_scoreboard *scoreboard;
129
130 /* Map from registers to nir_schedule_node * */
131 struct hash_table *reg_map;
132
133 /* Scheduler nodes for the last instruction involved in each class of dependency.
134 */
135 nir_schedule_node *load_input;
136 nir_schedule_node *store_shared;
137 nir_schedule_node *unknown_intrinsic;
138 nir_schedule_node *discard;
139 nir_schedule_node *jump;
140
141 struct nir_schedule_class_dep *class_deps;
142
143 enum direction dir;
144 } nir_deps_state;
145
146 static void *
147 _mesa_hash_table_search_data(struct hash_table *ht, void *key)
148 {
149 struct hash_entry *entry = _mesa_hash_table_search(ht, key);
150 if (!entry)
151 return NULL;
152 return entry->data;
153 }
154
155 static nir_schedule_node *
156 nir_schedule_get_node(struct hash_table *instr_map, nir_instr *instr)
157 {
158 return _mesa_hash_table_search_data(instr_map, instr);
159 }
160
161 static struct set *
162 nir_schedule_scoreboard_get_reg(nir_schedule_scoreboard *scoreboard,
163 nir_def *reg)
164 {
165 return _mesa_hash_table_search_data(scoreboard->remaining_uses, reg);
166 }
167
168 static struct set *
169 nir_schedule_scoreboard_get_src(nir_schedule_scoreboard *scoreboard, nir_src *src)
170 {
171 return _mesa_hash_table_search_data(scoreboard->remaining_uses, src->ssa);
172 }
173
174 static int
175 nir_schedule_reg_pressure(nir_def *reg)
176 {
177 nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
178 return nir_intrinsic_num_components(decl);
179 }
180
181 static int
182 nir_schedule_def_pressure(nir_def *def)
183 {
184 return def->num_components;
185 }
186
187 static int
188 nir_schedule_src_pressure(nir_src *src)
189 {
190 return nir_schedule_def_pressure(src->ssa);
191 }
192
193 /**
194 * Adds a dependency such that @after must appear in the final program after
195 * @before.
196 *
197 * We add @before as a child of @after, so that DAG heads are the outputs of
198 * the program and we make our scheduling decisions bottom to top.
199 */
200 static void
201 add_dep(nir_deps_state *state,
202 nir_schedule_node *before,
203 nir_schedule_node *after)
204 {
205 if (!before || !after)
206 return;
207
208 assert(before != after);
209
210 if (state->dir == F)
211 dag_add_edge(&before->dag, &after->dag, 0);
212 else
213 dag_add_edge(&after->dag, &before->dag, 0);
214 }
215
216 static void
217 add_read_dep(nir_deps_state *state,
218 nir_schedule_node *before,
219 nir_schedule_node *after)
220 {
221 add_dep(state, before, after);
222 }
223
224 static void
225 add_write_dep(nir_deps_state *state,
226 nir_schedule_node **before,
227 nir_schedule_node *after)
228 {
229 add_dep(state, *before, after);
230 *before = after;
231 }
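
/* Illustrative sketch (in pseudo-NIR) of the ordering these helpers produce
 * for a register r that is written, read, then written again:
 *
 *    store_reg r, a     <- recorded as the last writer of r
 *    b = load_reg r     <- forward pass: read dep on the store above
 *    store_reg r, c     <- forward pass: write dep on the first store;
 *                          reverse pass: the load above is ordered before
 *                          this store
 *
 * so the read stays between the two writes that surround it, as described
 * for nir_schedule_calculate_deps() below.
 */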
232
233 static void
234 nir_schedule_load_reg_deps(nir_intrinsic_instr *load,
235 nir_deps_state *state)
236 {
237 nir_def *reg = load->src[0].ssa;
238 (void)nir_reg_get_decl(reg);
239
240 struct hash_entry *entry = _mesa_hash_table_search(state->reg_map, reg);
241 if (!entry)
242 return;
243 nir_schedule_node *dst_n = entry->data;
244
245 nir_schedule_node *src_n =
246 nir_schedule_get_node(state->scoreboard->instr_map, &load->instr);
247
248 add_dep(state, dst_n, src_n);
249 }
250
251 static void
252 nir_schedule_store_reg_deps(nir_intrinsic_instr *store,
253 nir_deps_state *state)
254 {
255 nir_def *reg = store->src[1].ssa;
256 (void)nir_reg_get_decl(reg);
257
258 nir_schedule_node *dest_n =
259 nir_schedule_get_node(state->scoreboard->instr_map, &store->instr);
260
261 struct hash_entry *entry = _mesa_hash_table_search(state->reg_map, reg);
262 if (!entry) {
263 _mesa_hash_table_insert(state->reg_map, reg, dest_n);
264 return;
265 }
266 nir_schedule_node **before = (nir_schedule_node **)&entry->data;
267
268 add_write_dep(state, before, dest_n);
269 }
270
271 static bool
272 nir_schedule_ssa_deps(nir_def *def, void *in_state)
273 {
274 nir_deps_state *state = in_state;
275 struct hash_table *instr_map = state->scoreboard->instr_map;
276 nir_schedule_node *def_n = nir_schedule_get_node(instr_map, def->parent_instr);
277
278 nir_foreach_use(src, def) {
279 nir_schedule_node *use_n = nir_schedule_get_node(instr_map,
280 nir_src_parent_instr(src));
281
282 add_read_dep(state, def_n, use_n);
283 }
284
285 return true;
286 }
287
288 static struct nir_schedule_class_dep *
289 nir_schedule_get_class_dep(nir_deps_state *state,
290 int klass)
291 {
292 for (struct nir_schedule_class_dep *class_dep = state->class_deps;
293 class_dep != NULL;
294 class_dep = class_dep->next) {
295 if (class_dep->klass == klass)
296 return class_dep;
297 }
298
299 struct nir_schedule_class_dep *class_dep =
300 ralloc(state->reg_map, struct nir_schedule_class_dep);
301
302 class_dep->klass = klass;
303 class_dep->node = NULL;
304 class_dep->next = state->class_deps;
305
306 state->class_deps = class_dep;
307
308 return class_dep;
309 }
310
311 static void
312 nir_schedule_intrinsic_deps(nir_deps_state *state,
313 nir_intrinsic_instr *instr)
314 {
315 nir_schedule_node *n = nir_schedule_get_node(state->scoreboard->instr_map,
316 &instr->instr);
317 const nir_schedule_options *options = state->scoreboard->options;
318 nir_schedule_dependency dep;
319
320 if (options->intrinsic_cb &&
321 options->intrinsic_cb(instr, &dep, options->intrinsic_cb_data)) {
322 struct nir_schedule_class_dep *class_dep =
323 nir_schedule_get_class_dep(state, dep.klass);
324
325 switch (dep.type) {
326 case NIR_SCHEDULE_READ_DEPENDENCY:
327 add_read_dep(state, class_dep->node, n);
328 break;
329 case NIR_SCHEDULE_WRITE_DEPENDENCY:
330 add_write_dep(state, &class_dep->node, n);
331 break;
332 }
333 }
334
335 switch (instr->intrinsic) {
336 case nir_intrinsic_decl_reg:
337 break; /* Nothing to do */
338
339 case nir_intrinsic_load_reg:
340 nir_schedule_load_reg_deps(instr, state);
341 break;
342
343 case nir_intrinsic_store_reg:
344 nir_schedule_store_reg_deps(instr, state);
345 break;
346
347 case nir_intrinsic_load_uniform:
348 case nir_intrinsic_load_ubo:
349 case nir_intrinsic_load_front_face:
350 break;
351
352 case nir_intrinsic_discard:
353 case nir_intrinsic_discard_if:
354 case nir_intrinsic_demote:
355 case nir_intrinsic_demote_if:
356 case nir_intrinsic_terminate:
357 case nir_intrinsic_terminate_if:
358 /* We are adding two dependencies:
359 *
360 * * An individual one that we could use to add a read_dep while handling
361 * nir_instr_type_tex
362 *
363 * * One on the unknown intrinsic set, as we want discard to be
364 * serialized in the same order relative to intervening stores or
365 * atomic accesses to SSBOs and images
366 */
367 add_write_dep(state, &state->discard, n);
368 add_write_dep(state, &state->unknown_intrinsic, n);
369 break;
370
371 case nir_intrinsic_store_output:
372 /* For some hardware and stages, output stores affect the same shared
373 * memory as input loads.
374 */
375 if ((state->scoreboard->options->stages_with_shared_io_memory &
376 (1 << state->scoreboard->shader->info.stage)))
377 add_write_dep(state, &state->load_input, n);
378
379 /* Make sure that preceding discards stay before the store_output */
380 add_read_dep(state, state->discard, n);
381
382 break;
383
384 case nir_intrinsic_load_input:
385 case nir_intrinsic_load_per_vertex_input:
386 add_read_dep(state, state->load_input, n);
387 break;
388
389 case nir_intrinsic_load_shared:
390 case nir_intrinsic_load_shared2_amd:
391 /* Don't move load_shared beyond a following store_shared, as that could
392 * change the loaded value
393 */
394 add_read_dep(state, state->store_shared, n);
395 break;
396
397 case nir_intrinsic_store_shared:
398 case nir_intrinsic_store_shared2_amd:
399 add_write_dep(state, &state->store_shared, n);
400 break;
401
402 case nir_intrinsic_barrier: {
403 const nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
404
405 if (modes & nir_var_mem_shared)
406 add_write_dep(state, &state->store_shared, n);
407
408 /* Serialize against other categories. */
409 add_write_dep(state, &state->unknown_intrinsic, n);
410
411 break;
412 }
413
414 default:
415 /* Attempt to handle other intrinsics that we haven't individually
416 * categorized by serializing them in the same order relative to each
417 * other.
418 */
419 add_write_dep(state, &state->unknown_intrinsic, n);
420 break;
421 }
422 }
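
/* Illustrative sketch of a backend intrinsic_cb matching how it is invoked
 * above. The intrinsic check and the class number are hypothetical; a real
 * backend would use its own intrinsics and class enum:
 *
 *    static bool
 *    my_intrinsic_cb(nir_intrinsic_instr *instr, nir_schedule_dependency *dep,
 *                    void *data)
 *    {
 *       if (instr->intrinsic == SOME_BACKEND_SPECIFIC_INTRINSIC) {
 *          dep->klass = MY_TLB_DEP_CLASS;
 *          dep->type = NIR_SCHEDULE_WRITE_DEPENDENCY;
 *          return true;
 *       }
 *       return false;
 *    }
 *
 * Returning true routes the instruction through nir_schedule_get_class_dep(),
 * so every intrinsic reporting the same klass is ordered as a read/write
 * stream, on top of the built-in categories handled in the switch above.
 */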
423
424 /**
425 * Common code for dependencies that need to be tracked both forward and
426 * backward.
427 *
428 * This is for things like "all reads of r4 have to happen between the r4
429 * writes that surround them".
430 */
431 static void
432 nir_schedule_calculate_deps(nir_deps_state *state, nir_schedule_node *n)
433 {
434 nir_instr *instr = n->instr;
435
436 /* For NIR SSA defs, we only need to do a single pass of making the uses
437 * depend on the def.
438 */
439 if (state->dir == F)
440 nir_foreach_def(instr, nir_schedule_ssa_deps, state);
441
442 /* Make sure any other instructions keep their positions relative to
443 * jumps.
444 */
445 if (instr->type != nir_instr_type_jump)
446 add_read_dep(state, state->jump, n);
447
448 switch (instr->type) {
449 case nir_instr_type_undef:
450 case nir_instr_type_load_const:
451 case nir_instr_type_alu:
452 case nir_instr_type_deref:
453 break;
454
455 case nir_instr_type_tex:
456 /* Don't move texture ops before a discard, as that could increase
457 * memory bandwidth for reading the discarded samples.
458 */
459 add_read_dep(state, state->discard, n);
460 break;
461
462 case nir_instr_type_jump:
463 add_write_dep(state, &state->jump, n);
464 break;
465
466 case nir_instr_type_call:
467 unreachable("Calls should have been lowered");
468 break;
469
470 case nir_instr_type_parallel_copy:
471 unreachable("Parallel copies should have been lowered");
472 break;
473
474 case nir_instr_type_phi:
475 unreachable("nir_schedule() should be called after lowering from SSA");
476 break;
477
478 case nir_instr_type_intrinsic:
479 nir_schedule_intrinsic_deps(state, nir_instr_as_intrinsic(instr));
480 break;
481 }
482 }
483
484 static void
485 calculate_forward_deps(nir_schedule_scoreboard *scoreboard, nir_block *block)
486 {
487 nir_deps_state state = {
488 .scoreboard = scoreboard,
489 .dir = F,
490 .reg_map = _mesa_pointer_hash_table_create(NULL),
491 };
492
493 nir_foreach_instr(instr, block) {
494 nir_schedule_node *node = nir_schedule_get_node(scoreboard->instr_map,
495 instr);
496 nir_schedule_calculate_deps(&state, node);
497 }
498
499 ralloc_free(state.reg_map);
500 }
501
502 static void
503 calculate_reverse_deps(nir_schedule_scoreboard *scoreboard, nir_block *block)
504 {
505 nir_deps_state state = {
506 .scoreboard = scoreboard,
507 .dir = R,
508 .reg_map = _mesa_pointer_hash_table_create(NULL),
509 };
510
511 nir_foreach_instr_reverse(instr, block) {
512 nir_schedule_node *node = nir_schedule_get_node(scoreboard->instr_map,
513 instr);
514 nir_schedule_calculate_deps(&state, node);
515 }
516
517 ralloc_free(state.reg_map);
518 }
519
520 typedef struct {
521 nir_schedule_scoreboard *scoreboard;
522 int regs_freed;
523 } nir_schedule_regs_freed_state;
524
525 static bool
526 nir_schedule_regs_freed_src_cb(nir_src *src, void *in_state)
527 {
528 nir_schedule_regs_freed_state *state = in_state;
529 nir_schedule_scoreboard *scoreboard = state->scoreboard;
530 struct set *remaining_uses = nir_schedule_scoreboard_get_src(scoreboard, src);
531
532 if (remaining_uses->entries == 1 &&
533 _mesa_set_search(remaining_uses, nir_src_parent_instr(src))) {
534 state->regs_freed += nir_schedule_src_pressure(src);
535 }
536
537 return true;
538 }
539
540 static bool
541 nir_schedule_regs_freed_def_cb(nir_def *def, void *in_state)
542 {
543 nir_schedule_regs_freed_state *state = in_state;
544
545 state->regs_freed -= nir_schedule_def_pressure(def);
546
547 return true;
548 }
549
550 static void
551 nir_schedule_regs_freed_load_reg(nir_intrinsic_instr *load,
552 nir_schedule_regs_freed_state *state)
553 {
554 assert(nir_is_load_reg(load));
555
556 if (load->intrinsic == nir_intrinsic_load_reg_indirect)
557 nir_schedule_regs_freed_src_cb(&load->src[1], state);
558
559 nir_schedule_scoreboard *scoreboard = state->scoreboard;
560 nir_def *reg = load->src[0].ssa;
561 struct set *remaining_uses = nir_schedule_scoreboard_get_reg(scoreboard, reg);
562
563 if (remaining_uses->entries == 1 &&
564 _mesa_set_search(remaining_uses, &load->instr)) {
565 state->regs_freed += nir_schedule_reg_pressure(reg);
566 }
567
568 nir_schedule_regs_freed_def_cb(&load->def, state);
569 }
570
571 static void
572 nir_schedule_regs_freed_store_reg(nir_intrinsic_instr *store,
573 nir_schedule_regs_freed_state *state)
574 {
575 assert(nir_is_store_reg(store));
576
577 nir_schedule_regs_freed_src_cb(&store->src[0], state);
578 if (store->intrinsic == nir_intrinsic_store_reg_indirect)
579 nir_schedule_regs_freed_src_cb(&store->src[2], state);
580
581 nir_schedule_scoreboard *scoreboard = state->scoreboard;
582 nir_def *reg = store->src[1].ssa;
583
584 /* Only the first def of a reg counts against register pressure. */
585 if (!_mesa_set_search(scoreboard->live_values, reg))
586 state->regs_freed -= nir_schedule_reg_pressure(reg);
587 }
588
589 static bool
590 nir_schedule_regs_freed_reg_intrin(nir_instr *instr,
591 nir_schedule_regs_freed_state *state)
592 {
593 if (instr->type != nir_instr_type_intrinsic)
594 return false;
595
596 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
597 switch (intrin->intrinsic) {
598 case nir_intrinsic_decl_reg:
599 return true; /* Handled but nothing to do */
600
601 case nir_intrinsic_load_reg:
602 case nir_intrinsic_load_reg_indirect:
603 nir_schedule_regs_freed_load_reg(intrin, state);
604 return true;
605
606 case nir_intrinsic_store_reg:
607 case nir_intrinsic_store_reg_indirect:
608 nir_schedule_regs_freed_store_reg(intrin, state);
609 return true;
610
611 default:
612 return false;
613 }
614 }
615
616 static int
617 nir_schedule_regs_freed(nir_schedule_scoreboard *scoreboard, nir_schedule_node *n)
618 {
619 nir_schedule_regs_freed_state state = {
620 .scoreboard = scoreboard,
621 };
622
623 if (!nir_schedule_regs_freed_reg_intrin(n->instr, &state)) {
624 nir_foreach_src(n->instr, nir_schedule_regs_freed_src_cb, &state);
625 nir_foreach_def(n->instr, nir_schedule_regs_freed_def_cb, &state);
626 }
627
628 return state.regs_freed;
629 }
630
631 /**
632 * Chooses an instruction that will minimise the register pressure as much as
633 * possible. This should only be used as a fallback when the regular scheduling
634 * generates a shader whose register allocation fails.
635 */
636 static nir_schedule_node *
637 nir_schedule_choose_instruction_fallback(nir_schedule_scoreboard *scoreboard)
638 {
639 nir_schedule_node *chosen = NULL;
640
641 /* Find the leader in the ready (shouldn't-stall) set with the minimum
642 * cost.
643 */
644 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
645 if (scoreboard->time < n->ready_time)
646 continue;
647
648 if (!chosen || chosen->max_delay > n->max_delay)
649 chosen = n;
650 }
651 if (chosen) {
652 if (debug) {
653 fprintf(stderr, "chose (ready fallback): ");
654 nir_print_instr(chosen->instr, stderr);
655 fprintf(stderr, "\n");
656 }
657
658 return chosen;
659 }
660
661 /* Otherwise, choose the leader with the minimum cost. */
662 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
663 if (!chosen || chosen->max_delay > n->max_delay)
664 chosen = n;
665 }
666 if (debug) {
667 fprintf(stderr, "chose (leader fallback): ");
668 nir_print_instr(chosen->instr, stderr);
669 fprintf(stderr, "\n");
670 }
671
672 return chosen;
673 }
674
675 /**
676 * Chooses an instruction to schedule using the Goodman/Hsu (1988) CSP (Code
677 * Scheduling for Parallelism) heuristic.
678 *
679 * Picks an instruction on the critical path that's ready to execute without
680 * stalls, if possible, otherwise picks the instruction on the critical path.
681 */
682 static nir_schedule_node *
683 nir_schedule_choose_instruction_csp(nir_schedule_scoreboard *scoreboard)
684 {
685 nir_schedule_node *chosen = NULL;
686
687 /* Find the leader in the ready (shouldn't-stall) set with the maximum
688 * cost.
689 */
690 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
691 if (scoreboard->time < n->ready_time)
692 continue;
693
694 if (!chosen || chosen->max_delay < n->max_delay)
695 chosen = n;
696 }
697 if (chosen) {
698 if (debug) {
699 fprintf(stderr, "chose (ready): ");
700 nir_print_instr(chosen->instr, stderr);
701 fprintf(stderr, "\n");
702 }
703
704 return chosen;
705 }
706
707 /* Otherwise, choose the leader with the maximum cost. */
708 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
709 if (!chosen || chosen->max_delay < n->max_delay)
710 chosen = n;
711 }
712 if (debug) {
713 fprintf(stderr, "chose (leader): ");
714 nir_print_instr(chosen->instr, stderr);
715 fprintf(stderr, "\n");
716 }
717
718 return chosen;
719 }
720
721 /**
722 * Chooses an instruction to schedule using the Goodman/Hsu (1988) CSR (Code
723 * Scheduling for Register pressure) heuristic.
724 */
725 static nir_schedule_node *
726 nir_schedule_choose_instruction_csr(nir_schedule_scoreboard *scoreboard)
727 {
728 nir_schedule_node *chosen = NULL;
729
730 /* Find a ready inst with regs freed and pick the one with max cost. */
731 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
732 if (n->ready_time > scoreboard->time)
733 continue;
734
735 int regs_freed = nir_schedule_regs_freed(scoreboard, n);
736
737 if (regs_freed > 0 && (!chosen || chosen->max_delay < n->max_delay)) {
738 chosen = n;
739 }
740 }
741 if (chosen) {
742 if (debug) {
743 fprintf(stderr, "chose (freed+ready): ");
744 nir_print_instr(chosen->instr, stderr);
745 fprintf(stderr, "\n");
746 }
747
748 return chosen;
749 }
750
751 /* Find a leader with regs freed and pick the one with max cost. */
752 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
753 int regs_freed = nir_schedule_regs_freed(scoreboard, n);
754
755 if (regs_freed > 0 && (!chosen || chosen->max_delay < n->max_delay)) {
756 chosen = n;
757 }
758 }
759 if (chosen) {
760 if (debug) {
761 fprintf(stderr, "chose (regs freed): ");
762 nir_print_instr(chosen->instr, stderr);
763 fprintf(stderr, "\n");
764 }
765
766 return chosen;
767 }
768
769 /* Find a partially evaluated path and try to finish it off */
770 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
771 if (n->partially_evaluated_path &&
772 (!chosen || chosen->max_delay < n->max_delay)) {
773 chosen = n;
774 }
775 }
776 if (chosen) {
777 if (debug) {
778 fprintf(stderr, "chose (partial path): ");
779 nir_print_instr(chosen->instr, stderr);
780 fprintf(stderr, "\n");
781 }
782
783 return chosen;
784 }
785
786 /* Contra the paper, pick a leader with no effect on used regs. This may
787 * open up new opportunities, as otherwise a single-operand instr consuming
788 * a value would never satisfy the regs-freed checks above, blocking that
789 * value from ever being freed. This had a massive effect on reducing spilling on V3D.
790 *
791 * XXX: Should this prioritize ready?
792 */
793 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
794 if (nir_schedule_regs_freed(scoreboard, n) != 0)
795 continue;
796
797 if (!chosen || chosen->max_delay < n->max_delay)
798 chosen = n;
799 }
800 if (chosen) {
801 if (debug) {
802 fprintf(stderr, "chose (regs no-op): ");
803 nir_print_instr(chosen->instr, stderr);
804 fprintf(stderr, "\n");
805 }
806
807 return chosen;
808 }
809
810 /* Pick the max delay of the remaining ready set. */
811 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
812 if (n->ready_time > scoreboard->time)
813 continue;
814 if (!chosen || chosen->max_delay < n->max_delay)
815 chosen = n;
816 }
817 if (chosen) {
818 if (debug) {
819 fprintf(stderr, "chose (ready max delay): ");
820 nir_print_instr(chosen->instr, stderr);
821 fprintf(stderr, "\n");
822 }
823 return chosen;
824 }
825
826 /* Pick the max delay of the remaining leaders. */
827 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
828 if (!chosen || chosen->max_delay < n->max_delay)
829 chosen = n;
830 }
831
832 if (debug) {
833 fprintf(stderr, "chose (max delay): ");
834 nir_print_instr(chosen->instr, stderr);
835 fprintf(stderr, "\n");
836 }
837
838 return chosen;
839 }
840
841 static void
842 dump_state(nir_schedule_scoreboard *scoreboard)
843 {
844 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
845 fprintf(stderr, "maxdel %5d ", n->max_delay);
846 nir_print_instr(n->instr, stderr);
847 fprintf(stderr, "\n");
848
849 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
850 nir_schedule_node *child = (nir_schedule_node *)edge->child;
851
852 fprintf(stderr, " -> (%d parents) ", child->dag.parent_count);
853 nir_print_instr(child->instr, stderr);
854 fprintf(stderr, "\n");
855 }
856 }
857 }
858
859 static void
860 nir_schedule_mark_use(nir_schedule_scoreboard *scoreboard,
861 void *reg_or_def,
862 nir_instr *reg_or_def_parent,
863 int pressure)
864 {
865 /* Make the value live if it's the first time it's been used. */
866 if (!_mesa_set_search(scoreboard->live_values, reg_or_def)) {
867 _mesa_set_add(scoreboard->live_values, reg_or_def);
868 scoreboard->pressure += pressure;
869 }
870
871 /* Make the value dead if it's the last remaining use. Be careful when one
872 * instruction uses a value twice to not decrement pressure twice.
873 */
874 struct set *remaining_uses =
875 _mesa_hash_table_search_data(scoreboard->remaining_uses, reg_or_def);
876 struct set_entry *entry = _mesa_set_search(remaining_uses, reg_or_def_parent);
877 if (entry) {
878 _mesa_set_remove(remaining_uses, entry);
879
880 if (remaining_uses->entries == 0)
881 scoreboard->pressure -= pressure;
882 }
883 }
884
885 static bool
886 nir_schedule_mark_src_scheduled(nir_src *src, void *state)
887 {
888 nir_schedule_scoreboard *scoreboard = state;
889 struct set *remaining_uses = nir_schedule_scoreboard_get_src(scoreboard, src);
890
891 struct set_entry *entry = _mesa_set_search(remaining_uses,
892 nir_src_parent_instr(src));
893 if (entry) {
894 /* Once we've used an SSA value in one instruction, bump the priority of
895 * the other uses so the SSA value can get fully consumed.
896 *
897 * We don't do this for registers, as it would be a hassle and it's
898 * unclear if that would help or not. Also, skip it for constants, as
899 * they're often folded as immediates into backend instructions and have
900 * many unrelated instructions all referencing the same value (0).
901 */
902 if (src->ssa->parent_instr->type != nir_instr_type_load_const) {
903 nir_foreach_use(other_src, src->ssa) {
904 if (nir_src_parent_instr(other_src) == nir_src_parent_instr(src))
905 continue;
906
907 nir_schedule_node *n =
908 nir_schedule_get_node(scoreboard->instr_map,
909 nir_src_parent_instr(other_src));
910
911 if (n && !n->partially_evaluated_path) {
912 if (debug) {
913 fprintf(stderr, " New partially evaluated path: ");
914 nir_print_instr(n->instr, stderr);
915 fprintf(stderr, "\n");
916 }
917
918 n->partially_evaluated_path = true;
919 }
920 }
921 }
922 }
923
924 nir_schedule_mark_use(scoreboard,
925 (void *)src->ssa,
926 nir_src_parent_instr(src),
927 nir_schedule_src_pressure(src));
928
929 return true;
930 }
931
932 static bool
933 nir_schedule_mark_def_scheduled(nir_def *def, void *state)
934 {
935 nir_schedule_scoreboard *scoreboard = state;
936
937 nir_schedule_mark_use(scoreboard, def, def->parent_instr,
938 nir_schedule_def_pressure(def));
939
940 return true;
941 }
942
943 static void
944 nir_schedule_mark_load_reg_scheduled(nir_intrinsic_instr *load,
945 nir_schedule_scoreboard *scoreboard)
946 {
947 assert(nir_is_load_reg(load));
948 nir_def *reg = load->src[0].ssa;
949
950 if (load->intrinsic == nir_intrinsic_load_reg_indirect)
951 nir_schedule_mark_src_scheduled(&load->src[1], scoreboard);
952
953 nir_schedule_mark_use(scoreboard, reg, &load->instr,
954 nir_schedule_reg_pressure(reg));
955
956 nir_schedule_mark_def_scheduled(&load->def, scoreboard);
957 }
958
959 static void
960 nir_schedule_mark_store_reg_scheduled(nir_intrinsic_instr *store,
961 nir_schedule_scoreboard *scoreboard)
962 {
963 assert(nir_is_store_reg(store));
964 nir_def *reg = store->src[1].ssa;
965
966 nir_schedule_mark_src_scheduled(&store->src[0], scoreboard);
967 if (store->intrinsic == nir_intrinsic_store_reg_indirect)
968 nir_schedule_mark_src_scheduled(&store->src[2], scoreboard);
969
970 /* XXX: This is not actually accurate for regs -- the last use of a reg may
971 * have a live interval that extends across control flow. We should
972 * calculate the live ranges of regs, and have scheduler nodes for the CF
973 * nodes that also "use" the reg.
974 */
975 nir_schedule_mark_use(scoreboard, reg, &store->instr,
976 nir_schedule_reg_pressure(reg));
977 }
978
979 static bool
980 nir_schedule_mark_reg_intrin_scheduled(nir_instr *instr,
981 nir_schedule_scoreboard *scoreboard)
982 {
983 if (instr->type != nir_instr_type_intrinsic)
984 return false;
985
986 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
987 switch (intrin->intrinsic) {
988 case nir_intrinsic_decl_reg:
989 return true; /* Handled but nothing to do */
990
991 case nir_intrinsic_load_reg:
992 case nir_intrinsic_load_reg_indirect:
993 nir_schedule_mark_load_reg_scheduled(intrin, scoreboard);
994 return true;
995
996 case nir_intrinsic_store_reg:
997 case nir_intrinsic_store_reg_indirect:
998 nir_schedule_mark_store_reg_scheduled(intrin, scoreboard);
999 return true;
1000
1001 default:
1002 return false;
1003 }
1004 }
1005
1006 static void
1007 nir_schedule_mark_node_scheduled(nir_schedule_scoreboard *scoreboard,
1008 nir_schedule_node *n)
1009 {
1010 if (!nir_schedule_mark_reg_intrin_scheduled(n->instr, scoreboard)) {
1011 nir_foreach_src(n->instr, nir_schedule_mark_src_scheduled, scoreboard);
1012 nir_foreach_def(n->instr, nir_schedule_mark_def_scheduled, scoreboard);
1013 }
1014
1015 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
1016 nir_schedule_node *child = (nir_schedule_node *)edge->child;
1017
1018 child->ready_time = MAX2(child->ready_time,
1019 scoreboard->time + n->delay);
1020
1021 if (child->dag.parent_count == 1) {
1022 if (debug) {
1023 fprintf(stderr, " New DAG head: ");
1024 nir_print_instr(child->instr, stderr);
1025 fprintf(stderr, "\n");
1026 }
1027 }
1028 }
1029
1030 dag_prune_head(scoreboard->dag, &n->dag);
1031
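/* Account for the chosen instruction: jump the clock forward if we would
 * have stalled waiting for its ready_time, then charge one time unit for
 * issuing it.
 */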
1032 scoreboard->time = MAX2(n->ready_time, scoreboard->time);
1033 scoreboard->time++;
1034 }
1035
1036 static void
1037 nir_schedule_instructions(nir_schedule_scoreboard *scoreboard, nir_block *block)
1038 {
1039 while (!list_is_empty(&scoreboard->dag->heads)) {
1040 if (debug) {
1041 fprintf(stderr, "current list:\n");
1042 dump_state(scoreboard);
1043 }
1044
1045 nir_schedule_node *chosen;
1046 if (scoreboard->options->fallback)
1047 chosen = nir_schedule_choose_instruction_fallback(scoreboard);
1048 else if (scoreboard->pressure < scoreboard->options->threshold)
1049 chosen = nir_schedule_choose_instruction_csp(scoreboard);
1050 else
1051 chosen = nir_schedule_choose_instruction_csr(scoreboard);
1052
1053 /* Now that we've scheduled a new instruction, some of its children may
1054 * be promoted to the list of instructions ready to be scheduled.
1055 */
1056 nir_schedule_mark_node_scheduled(scoreboard, chosen);
1057
1058 /* Move the instruction to the end (so our first chosen instructions are
1059 * the start of the program).
1060 */
1061 exec_node_remove(&chosen->instr->node);
1062 exec_list_push_tail(&block->instr_list, &chosen->instr->node);
1063
1064 if (debug)
1065 fprintf(stderr, "\n");
1066 }
1067 }
1068
1069 static uint32_t
1070 nir_schedule_get_delay(nir_schedule_scoreboard *scoreboard, nir_instr *instr)
1071 {
1072 if (scoreboard->options->instr_delay_cb) {
1073 void *cb_data = scoreboard->options->instr_delay_cb_data;
1074 return scoreboard->options->instr_delay_cb(instr, cb_data);
1075 }
1076
1077 switch (instr->type) {
1078 case nir_instr_type_undef:
1079 case nir_instr_type_load_const:
1080 case nir_instr_type_alu:
1081 case nir_instr_type_deref:
1082 case nir_instr_type_jump:
1083 case nir_instr_type_parallel_copy:
1084 case nir_instr_type_call:
1085 case nir_instr_type_phi:
1086 return 1;
1087
1088 case nir_instr_type_intrinsic:
1089 switch (nir_instr_as_intrinsic(instr)->intrinsic) {
1090 case nir_intrinsic_decl_reg:
1091 case nir_intrinsic_load_reg:
1092 case nir_intrinsic_store_reg:
1093 return 0;
1094 case nir_intrinsic_load_ubo:
1095 case nir_intrinsic_load_ssbo:
1096 case nir_intrinsic_load_scratch:
1097 case nir_intrinsic_load_shared:
1098 case nir_intrinsic_image_load:
1099 return 50;
1100 default:
1101 return 1;
1102 }
1103 break;
1104
1105 case nir_instr_type_tex:
1106 /* Pick some large number to try to issue texture fetches early and
1107 * consume their results late.
1108 */
1109 return 100;
1110 }
1111
1112 return 0;
1113 }
1114
1115 static void
1116 nir_schedule_dag_max_delay_cb(struct dag_node *node, void *state)
1117 {
1118 nir_schedule_node *n = (nir_schedule_node *)node;
1119 uint32_t max_delay = 0;
1120
1121 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
1122 nir_schedule_node *child = (nir_schedule_node *)edge->child;
1123 max_delay = MAX2(child->max_delay, max_delay);
1124 }
1125
1126 n->max_delay = MAX2(n->max_delay, max_delay + n->delay);
1127 }
1128
1129 static void
1130 nir_schedule_block(nir_schedule_scoreboard *scoreboard, nir_block *block)
1131 {
1132 void *mem_ctx = ralloc_context(NULL);
1133 scoreboard->instr_map = _mesa_pointer_hash_table_create(mem_ctx);
1134
1135 scoreboard->dag = dag_create(mem_ctx);
1136
1137 nir_foreach_instr(instr, block) {
1138 nir_schedule_node *n =
1139 rzalloc(mem_ctx, nir_schedule_node);
1140
1141 n->instr = instr;
1142 n->delay = nir_schedule_get_delay(scoreboard, instr);
1143 dag_init_node(scoreboard->dag, &n->dag);
1144
1145 _mesa_hash_table_insert(scoreboard->instr_map, instr, n);
1146 }
1147
1148 calculate_forward_deps(scoreboard, block);
1149 calculate_reverse_deps(scoreboard, block);
1150
1151 dag_traverse_bottom_up(scoreboard->dag, nir_schedule_dag_max_delay_cb, NULL);
1152
1153 nir_schedule_instructions(scoreboard, block);
1154
1155 ralloc_free(mem_ctx);
1156 scoreboard->instr_map = NULL;
1157 }
1158
1159 static bool
1160 is_decl_reg(nir_instr *instr)
1161 {
1162 if (instr->type != nir_instr_type_intrinsic)
1163 return false;
1164
1165 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1166 return intrin->intrinsic == nir_intrinsic_decl_reg;
1167 }
1168
1169 static bool
1170 nir_schedule_ssa_def_init_scoreboard(nir_def *def, void *state)
1171 {
1172 nir_schedule_scoreboard *scoreboard = state;
1173 struct set *def_uses = _mesa_pointer_set_create(scoreboard);
1174
1175 _mesa_hash_table_insert(scoreboard->remaining_uses, def, def_uses);
1176
1177 /* We don't consider decl_reg to be a use to avoid extending register live
1178 * ranges any further than needed.
1179 */
1180 if (!is_decl_reg(def->parent_instr))
1181 _mesa_set_add(def_uses, def->parent_instr);
1182
1183 nir_foreach_use(src, def) {
1184 _mesa_set_add(def_uses, nir_src_parent_instr(src));
1185 }
1186
1187 /* XXX: Handle if uses */
1188
1189 return true;
1190 }
1191
1192 static nir_schedule_scoreboard *
1193 nir_schedule_get_scoreboard(nir_shader *shader,
1194 const nir_schedule_options *options)
1195 {
1196 nir_schedule_scoreboard *scoreboard = rzalloc(NULL, nir_schedule_scoreboard);
1197
1198 scoreboard->shader = shader;
1199 scoreboard->live_values = _mesa_pointer_set_create(scoreboard);
1200 scoreboard->remaining_uses = _mesa_pointer_hash_table_create(scoreboard);
1201 scoreboard->options = options;
1202 scoreboard->pressure = 0;
1203
1204 nir_foreach_function_impl(impl, shader) {
1205 nir_foreach_block(block, impl) {
1206 nir_foreach_instr(instr, block) {
1207 nir_foreach_def(instr, nir_schedule_ssa_def_init_scoreboard,
1208 scoreboard);
1209 }
1210
1211 /* XXX: We're ignoring if uses, which may prioritize scheduling other
1212 * uses of the if src even when it doesn't help. That's not many
1213 * values, though, so meh.
1214 */
1215 }
1216 }
1217
1218 return scoreboard;
1219 }
1220
1221 static void
1222 nir_schedule_validate_uses(nir_schedule_scoreboard *scoreboard)
1223 {
1224 #ifdef NDEBUG
1225 return;
1226 #endif
1227
1228 bool any_uses = false;
1229
1230 hash_table_foreach(scoreboard->remaining_uses, entry) {
1231 struct set *remaining_uses = entry->data;
1232
1233 set_foreach(remaining_uses, instr_entry) {
1234 if (!any_uses) {
1235 fprintf(stderr, "Tracked uses remain after scheduling. "
1236 "Affected instructions: \n");
1237 any_uses = true;
1238 }
1239 nir_print_instr(instr_entry->key, stderr);
1240 fprintf(stderr, "\n");
1241 }
1242 }
1243
1244 assert(!any_uses);
1245 }
1246
1247 /**
1248 * Schedules the NIR instructions to try to decrease stalls (for example,
1249 * delaying texture reads) while managing register pressure.
1250 *
1251 * The threshold represents "number of NIR register/SSA def channels live
1252 * before switching the scheduling heuristic to reduce register pressure",
1253 * since most of our GPU architectures are scalar (extending to vector with a
1254 * flag wouldn't be hard). This number should be a bit below the number of
1255 * registers available (counting any that may be occupied by system value
1256 * payload values, for example), since the heuristic may not always be able to
1257 * free a register immediately. The amount below the limit is up to you to
1258 * tune.
1259 */
1260 void
1261 nir_schedule(nir_shader *shader,
1262 const nir_schedule_options *options)
1263 {
1264 nir_schedule_scoreboard *scoreboard = nir_schedule_get_scoreboard(shader,
1265 options);
1266
1267 if (debug) {
1268 fprintf(stderr, "NIR shader before scheduling:\n");
1269 nir_print_shader(shader, stderr);
1270 }
1271
1272 nir_foreach_function_impl(impl, shader) {
1273 nir_foreach_block(block, impl) {
1274 nir_schedule_block(scoreboard, block);
1275 }
1276 }
1277
1278 nir_schedule_validate_uses(scoreboard);
1279
1280 ralloc_free(scoreboard);
1281 }
1282