/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_vla.h"

/*
 * This file implements an out-of-SSA pass as described in "Revisiting
 * Out-of-SSA Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */
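
/*
 * For each function, nir_convert_from_ssa_impl below works in phases: it
 * first inserts empty parallel copies at the ends of blocks whose
 * successors start with phis, isolates every phi with parallel copies,
 * coalesces phi sources and destinations into merge sets, aggressively
 * coalesces the parallel copies themselves, rewrites the remaining SSA
 * defs to registers, and finally lowers each parallel copy back to a
 * sequence of movs.
 */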

struct from_ssa_state {
   nir_builder builder;
   void *dead_ctx;
   struct exec_list dead_instrs;
   bool phi_webs_only;
   struct hash_table *merge_node_table;
   nir_instr *instr;
   bool progress;
};

/* Returns true if def @a comes after def @b.
 *
 * The core observation that makes the Boissinot algorithm efficient
 * is that, given two properly sorted sets, we can check for
 * interference in these sets via a linear walk. This is accomplished
 * by doing a single combined walk over the union of the two sets in
 * DFS order. It doesn't matter which DFS we do so long as we're
 * consistent. Fortunately, the dominance algorithm we ran prior to
 * this pass did such a walk and recorded the pre- and post-indices in
 * the blocks.
 *
 * We treat SSA undefs as always coming before other instruction types.
 */
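
/* For example, if block A is visited before block B in the dominance DFS,
 * then for defs a1 and a2 in A (a1 first) and b1 in B, def_after() below
 * orders them a1 < a2 < b1: same-block defs compare by instruction index
 * and cross-block defs by their block's dom_pre_index.
 */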
static bool
def_after(nir_ssa_def *a, nir_ssa_def *b)
{
   if (a->parent_instr->type == nir_instr_type_ssa_undef)
      return false;

   if (b->parent_instr->type == nir_instr_type_ssa_undef)
      return true;

   /* If they're in the same block, we can rely on whichever instruction
    * comes first in the block.
    */
   if (a->parent_instr->block == b->parent_instr->block)
      return a->parent_instr->index > b->parent_instr->index;

   /* Otherwise, if blocks are distinct, we sort them in DFS pre-order */
   return a->parent_instr->block->dom_pre_index >
          b->parent_instr->block->dom_pre_index;
}

/* Returns true if a dominates b */
static bool
ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
{
   if (a->parent_instr->type == nir_instr_type_ssa_undef) {
      /* SSA undefs always dominate */
      return true;
   } else if (def_after(a, b)) {
      return false;
   } else if (a->parent_instr->block == b->parent_instr->block) {
      return def_after(b, a);
   } else {
      return nir_block_dominates(a->parent_instr->block,
                                 b->parent_instr->block);
   }
}


/* The following data structure, which I have named merge_set, is a way of
 * representing a set of non-interfering registers. This is based on the
 * concept of a "dominance forest" presented in "Fast Copy Coalescing and
 * Live-Range Identification" by Budimlic et al. but the implementation
 * concept is taken from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 *
 * Each SSA definition is associated with a merge_node and the association
 * is represented by a combination of a hash table and the "def" parameter
 * in the merge_node structure. The merge_set stores a linked list of
 * merge_nodes, ordered by a pre-order DFS walk of the dominance tree. (Since
 * the liveness analysis pass indexes the SSA values in dominance order for
 * us, this is an easy thing to keep up.) It is assumed that no pair of the
 * nodes in a given set interfere. Merging two sets or checking for
 * interference can be done in a single linear-time merge-sort walk of the
 * two lists of nodes.
 */
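
/* For example, if set A = {ssa_1, ssa_4} and set B = {ssa_2, ssa_3}, with
 * the numbering reflecting the pre-order DFS of the dominance tree, then
 * merge_merge_sets(A, B) below yields {ssa_1, ssa_2, ssa_3, ssa_4},
 * splicing B's nodes into A in a single merge-sort style walk.
 */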
struct merge_set;

typedef struct {
   struct exec_node node;
   struct merge_set *set;
   nir_ssa_def *def;
} merge_node;

typedef struct merge_set {
   struct exec_list nodes;
   unsigned size;
   bool divergent;
   nir_register *reg;
} merge_set;

#if 0
static void
merge_set_dump(merge_set *set, FILE *fp)
{
   nir_ssa_def *dom[set->size];
   int dom_idx = -1;

   foreach_list_typed(merge_node, node, node, &set->nodes) {
      while (dom_idx >= 0 && !ssa_def_dominates(dom[dom_idx], node->def))
         dom_idx--;

      for (int i = 0; i <= dom_idx; i++)
         fprintf(fp, " ");

      fprintf(fp, "ssa_%d\n", node->def->index);

      dom[++dom_idx] = node->def;
   }
}
#endif

static merge_node *
get_merge_node(nir_ssa_def *def, struct from_ssa_state *state)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry)
      return entry->data;

   merge_set *set = ralloc(state->dead_ctx, merge_set);
   exec_list_make_empty(&set->nodes);
   set->size = 1;
   set->divergent = def->divergent;
   set->reg = NULL;

   merge_node *node = ralloc(state->dead_ctx, merge_node);
   node->set = set;
   node->def = def;
   exec_list_push_head(&set->nodes, &node->node);

   _mesa_hash_table_insert(state->merge_node_table, def, node);

   return node;
}

static bool
merge_nodes_interfere(merge_node *a, merge_node *b)
{
   /* There's no need to check for interference within the same set,
    * because we assume that the sets themselves are already
    * interference-free.
    */
   if (a->set == b->set)
      return false;

   return nir_ssa_defs_interfere(a->def, b->def);
}

/* Merges b into a
 *
 * This algorithm uses def_after to ensure that the sets always stay in the
 * same order as the pre-order DFS done by the liveness algorithm.
 */
static merge_set *
merge_merge_sets(merge_set *a, merge_set *b)
{
   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(bn)) {
      merge_node *a_node = exec_node_data(merge_node, an, node);
      merge_node *b_node = exec_node_data(merge_node, bn, node);

      if (exec_node_is_tail_sentinel(an) ||
          def_after(a_node->def, b_node->def)) {
         struct exec_node *next = bn->next;
         exec_node_remove(bn);
         exec_node_insert_node_before(an, bn);
         exec_node_data(merge_node, bn, node)->set = a;
         bn = next;
      } else {
         an = an->next;
      }
   }

   a->size += b->size;
   b->size = 0;
   a->divergent |= b->divergent;

   return a;
}

/* Checks for any interference between two merge sets
 *
 * This is an implementation of Algorithm 2 in "Revisiting Out-of-SSA
 * Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */
static bool
merge_sets_interfere(merge_set *a, merge_set *b)
{
   /* List of all the nodes which dominate the current node, in dominance
    * order.
    */
   NIR_VLA(merge_node *, dom, a->size + b->size);
   int dom_idx = -1;

   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(an) ||
          !exec_node_is_tail_sentinel(bn)) {

      /* We walk the union of the two sets in the same order as the pre-order
       * DFS done by liveness analysis.
       */
      merge_node *current;
      if (exec_node_is_tail_sentinel(an)) {
         current = exec_node_data(merge_node, bn, node);
         bn = bn->next;
      } else if (exec_node_is_tail_sentinel(bn)) {
         current = exec_node_data(merge_node, an, node);
         an = an->next;
      } else {
         merge_node *a_node = exec_node_data(merge_node, an, node);
         merge_node *b_node = exec_node_data(merge_node, bn, node);

         if (def_after(b_node->def, a_node->def)) {
            current = a_node;
            an = an->next;
         } else {
            current = b_node;
            bn = bn->next;
         }
      }

      /* Because our walk is a pre-order DFS, we can maintain the list of
       * dominating nodes as a simple stack, pushing every node onto the list
       * after we visit it and popping any non-dominating nodes off before we
       * visit the current node.
       */
      while (dom_idx >= 0 &&
             !ssa_def_dominates(dom[dom_idx]->def, current->def))
         dom_idx--;

      /* There are three invariants of this algorithm that are important here:
       *
       * 1. There is no interference within either set a or set b.
       * 2. None of the nodes processed up until this point interfere.
       * 3. All the dominators of `current` have been processed.
       *
       * Because of these invariants, we only need to check the current node
       * against its minimal dominator. If any other node N in the union
       * interferes with current, then N must dominate current because we are
       * in SSA form. If N dominates current then it must also dominate our
       * minimal dominator dom[dom_idx]. Since N is live at current it must
       * also be live at the minimal dominator which means N interferes with
       * the minimal dominator dom[dom_idx] and, by invariants 2 and 3 above,
       * the algorithm would have already terminated. Therefore, if we got
       * here, the only node that can possibly interfere with current is the
       * minimal dominator dom[dom_idx].
       *
       * This is what allows us to do an interference check of the union of
       * the two sets with a single linear-time walk.
       */
      if (dom_idx >= 0 && merge_nodes_interfere(current, dom[dom_idx]))
         return true;

      dom[++dom_idx] = current;
   }

   return false;
}

static bool
add_parallel_copy_to_end_of_block(nir_shader *shader, nir_block *block,
                                  void *dead_ctx)
{
   bool need_end_copy = false;
   if (block->successors[0]) {
      nir_instr *instr = nir_block_first_instr(block->successors[0]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (block->successors[1]) {
      nir_instr *instr = nir_block_first_instr(block->successors[1]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (need_end_copy) {
      /* If one of our successors has at least one phi node, we need to
       * create a parallel copy at the end of the block but before the jump
       * (if there is one).
       */
      nir_parallel_copy_instr *pcopy =
         nir_parallel_copy_instr_create(shader);

      nir_instr_insert(nir_after_block_before_jump(block), &pcopy->instr);
   }

   return true;
}

static nir_parallel_copy_instr *
get_parallel_copy_at_end_of_block(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr == NULL)
      return NULL;

   /* The last instruction may be a jump in which case the parallel copy is
    * right before it.
    */
   if (last_instr->type == nir_instr_type_jump)
      last_instr = nir_instr_prev(last_instr);

   if (last_instr && last_instr->type == nir_instr_type_parallel_copy)
      return nir_instr_as_parallel_copy(last_instr);
   else
      return NULL;
}

/** Isolate phi nodes with parallel copies
 *
 * In order to solve the dependency problems with the sources and
 * destinations of phi nodes, we first isolate them by adding parallel
 * copies to the beginnings and ends of basic blocks. For every block with
 * phi nodes, we add a parallel copy immediately following the last phi
 * node that copies the destinations of all of the phi nodes to new SSA
 * values. We also add a parallel copy to the end of every block that has
 * a successor with phi nodes. For each phi node in each successor, that
 * copy copies the corresponding source of the phi node, and we adjust the
 * phi to use the destination of the parallel copy.
 *
 * In SSA form, each value has exactly one definition. What this does is
 * ensure that each value used in a phi also has exactly one use. The
 * destinations of phis are only used by the parallel copy immediately
 * following the phi nodes. Thanks to the parallel copy at the end of
 * the predecessor block, the sources of phi nodes are the only use of
 * that value. This allows us to immediately assign all the sources and
 * destinations of any given phi node to the same register without worrying
 * about interference at all. We do coalescing to get rid of the parallel
 * copies where possible.
 *
 * Before this pass can be run, we have to iterate over the blocks with
 * add_parallel_copy_to_end_of_block to ensure that the parallel copies at
 * the ends of blocks exist. We can create the ones at the beginnings as
 * we go, but the ones at the ends of blocks need to be created ahead of
 * time because of potential back-edges in the CFG.
 */
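
/* As a sketch (hypothetical SSA numbering), a phi such as
 *
 *    block_2:
 *    ssa_2 = phi block_0: ssa_0, block_1: ssa_1
 *
 * is isolated into
 *
 *    block_0: ssa_3 = pcopy ssa_0      block_1: ssa_4 = pcopy ssa_1
 *
 *    block_2:
 *    ssa_2 = phi block_0: ssa_3, block_1: ssa_4
 *    ssa_5 = pcopy ssa_2
 *    ...all former uses of ssa_2 now use ssa_5...
 *
 * so every phi source and destination has exactly one use.
 */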
static bool
isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
{
   nir_instr *last_phi_instr = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi_instr = instr;
   }

   /* If we don't have any phis, then there's nothing for us to do. */
   if (last_phi_instr == NULL)
      return true;

   /* If we have phi nodes, we need to create a parallel copy at the
    * start of this block but after the phi nodes.
    */
   nir_parallel_copy_instr *block_pcopy =
      nir_parallel_copy_instr_create(shader);
   nir_instr_insert_after(last_phi_instr, &block_pcopy->instr);

   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);
      nir_foreach_phi_src(src, phi) {
         if (nir_src_is_undef(src->src))
            continue;

         nir_parallel_copy_instr *pcopy =
            get_parallel_copy_at_end_of_block(src->pred);
         assert(pcopy);

         nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                                  nir_parallel_copy_entry);
         nir_ssa_dest_init(&pcopy->instr, &entry->dest,
                           phi->dest.ssa.num_components,
                           phi->dest.ssa.bit_size, NULL);
         entry->dest.ssa.divergent = nir_src_is_divergent(src->src);
         exec_list_push_tail(&pcopy->entries, &entry->node);

         assert(src->src.is_ssa);
         nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);

         nir_instr_rewrite_src(&phi->instr, &src->src,
                               nir_src_for_ssa(&entry->dest.ssa));
      }

      nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                               nir_parallel_copy_entry);
      nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
                        phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
                        NULL);
      entry->dest.ssa.divergent = phi->dest.ssa.divergent;
      exec_list_push_tail(&block_pcopy->entries, &entry->node);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               &entry->dest.ssa);

      nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
                            nir_src_for_ssa(&phi->dest.ssa));
   }

   return true;
}

static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      assert(phi->dest.is_ssa);
      merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         if (nir_src_is_undef(src->src))
            continue;

         merge_node *src_node = get_merge_node(src->src.ssa, state);
         if (src_node->set != dest_node->set)
            merge_merge_sets(dest_node->set, src_node->set);
      }
   }

   return true;
}

static void
aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
                                  struct from_ssa_state *state)
{
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      if (!entry->src.is_ssa)
         continue;

      /* Since load_const instructions are SSA only, we can't replace their
       * destinations with registers and, therefore, can't coalesce them.
       */
      if (entry->src.ssa->parent_instr->type == nir_instr_type_load_const)
         continue;

      /* Don't try and coalesce these */
      if (entry->dest.ssa.num_components != entry->src.ssa->num_components)
         continue;

      merge_node *src_node = get_merge_node(entry->src.ssa, state);
      merge_node *dest_node = get_merge_node(&entry->dest.ssa, state);

      if (src_node->set == dest_node->set)
         continue;

      /* TODO: We can probably do better here but for now we should be safe if
       * we just don't coalesce things with different divergence.
       */
      if (dest_node->set->divergent != src_node->set->divergent)
         continue;

      if (!merge_sets_interfere(src_node->set, dest_node->set))
         merge_merge_sets(src_node->set, dest_node->set);
   }
}

static bool
aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
{
   nir_parallel_copy_instr *start_pcopy = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi) {
         if (instr->type != nir_instr_type_parallel_copy)
            break; /* The parallel copy must be right after the phis */

         start_pcopy = nir_instr_as_parallel_copy(instr);

         aggressive_coalesce_parallel_copy(start_pcopy, state);

         break;
      }
   }

   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);

   if (end_pcopy && end_pcopy != start_pcopy)
      aggressive_coalesce_parallel_copy(end_pcopy, state);

   return true;
}

static nir_register *
create_reg_for_ssa_def(nir_ssa_def *def, nir_function_impl *impl)
{
   nir_register *reg = nir_local_reg_create(impl);

   reg->num_components = def->num_components;
   reg->bit_size = def->bit_size;
   reg->num_array_elems = 0;

   return reg;
}

static bool
rewrite_ssa_def(nir_ssa_def *def, void *void_state)
{
   struct from_ssa_state *state = void_state;
   nir_register *reg;

   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry) {
      /* In this case, we're part of a phi web. Use the web's register. */
      merge_node *node = (merge_node *)entry->data;

      /* If it doesn't have a register yet, create one. Note that all of
       * the things in the merge set should be the same so it doesn't
       * matter which node's definition we use.
       */
      if (node->set->reg == NULL) {
         node->set->reg = create_reg_for_ssa_def(def, state->builder.impl);
         node->set->reg->divergent = node->set->divergent;
      }

      reg = node->set->reg;
   } else {
      if (state->phi_webs_only)
         return true;

      /* We leave load_const SSA values alone. They act as immediates to
       * the backend. If it got coalesced into a phi, that's ok.
       */
      if (def->parent_instr->type == nir_instr_type_load_const)
         return true;

      reg = create_reg_for_ssa_def(def, state->builder.impl);
   }

   nir_ssa_def_rewrite_uses_src(def, nir_src_for_reg(reg));
   assert(nir_ssa_def_is_unused(def));

   if (def->parent_instr->type == nir_instr_type_ssa_undef) {
      /* If it's an ssa_undef instruction, remove it since we know we just got
       * rid of all its uses.
       */
      nir_instr *parent_instr = def->parent_instr;
      nir_instr_remove(parent_instr);
      exec_list_push_tail(&state->dead_instrs, &parent_instr->node);
      state->progress = true;
      return true;
   }

   assert(def->parent_instr->type != nir_instr_type_load_const);

   /* At this point we know a priori that this SSA def is part of a
    * nir_dest. We can use exec_node_data to get the dest pointer.
    */
   nir_dest *dest = exec_node_data(nir_dest, def, ssa);

   nir_instr_rewrite_dest(state->instr, dest, nir_dest_for_reg(reg));
   state->progress = true;
   return true;
}

/* Resolves ssa definitions to registers. While we're at it, we also
 * remove phi nodes.
 */
static void
resolve_registers_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      state->instr = instr;
      nir_foreach_ssa_def(instr, rewrite_ssa_def, state);

      if (instr->type == nir_instr_type_phi) {
         nir_instr_remove(instr);
         exec_list_push_tail(&state->dead_instrs, &instr->node);
         state->progress = true;
      }
   }
   state->instr = NULL;
}

static void
emit_copy(nir_builder *b, nir_src src, nir_src dest_src)
{
   assert(!dest_src.is_ssa &&
          dest_src.reg.indirect == NULL &&
          dest_src.reg.base_offset == 0);

   assert(!nir_src_is_divergent(src) || nir_src_is_divergent(dest_src));

   if (src.is_ssa)
      assert(src.ssa->num_components >= dest_src.reg.reg->num_components);
   else
      assert(src.reg.reg->num_components >= dest_src.reg.reg->num_components);

   nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);
   nir_src_copy(&mov->src[0].src, &src);
   mov->dest.dest = nir_dest_for_reg(dest_src.reg.reg);
   mov->dest.write_mask = (1 << dest_src.reg.reg->num_components) - 1;

   nir_builder_instr_insert(b, &mov->instr);
}

/* Resolves a single parallel copy operation into a sequence of movs
 *
 * This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 * However, I never got the algorithm to work as written, so this version
 * is slightly modified.
 *
 * The algorithm works by playing this little shell game with the values.
 * We start by recording where every source value is and which source value
 * each destination value should receive. We then grab any copy whose
 * destination is "empty", i.e. not used as a source, and do the following:
 *  - Find where its source value currently lives
 *  - Emit the move instruction
 *  - Set the location of the source value to the destination
 *  - Mark the location containing the source value
 *  - Mark the destination as no longer needing to be copied
 *
 * When we run out of "empty" destinations, we have a cycle and so we
 * create a temporary register, copy to that register, and mark the value
 * we copied as living in that temporary. Now, the cycle is broken, so we
 * can continue with the above steps.
 */
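
/* For example (a sketch, not taken from the paper), the swap
 *
 *    a, b = pcopy(b, a)
 *
 * has no "empty" destination: each destination is also wanted as a source.
 * Breaking the cycle with a fresh temporary yields something like
 *
 *    tmp = mov b
 *    b   = mov a
 *    a   = mov tmp
 */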
static void
resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
                      struct from_ssa_state *state)
{
   unsigned num_copies = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      num_copies++;
   }

   if (num_copies == 0) {
      /* Hooray, we don't need any copies! */
      nir_instr_remove(&pcopy->instr);
      exec_list_push_tail(&state->dead_instrs, &pcopy->instr.node);
      return;
   }

   /* The register/source corresponding to the given index */
   NIR_VLA_ZERO(nir_src, values, num_copies * 2);

   /* The current location of a given piece of data. We will use -1 for "null" */
   NIR_VLA_FILL(int, loc, num_copies * 2, -1);

   /* The piece of data that the given piece of data is to be copied from. We will use -1 for "null" */
   NIR_VLA_FILL(int, pred, num_copies * 2, -1);

   /* The destinations we have yet to properly fill */
   NIR_VLA(int, to_do, num_copies * 2);
   int to_do_idx = -1;

   state->builder.cursor = nir_before_instr(&pcopy->instr);

   /* Now we set everything up:
    *  - All values get assigned a temporary index
    *  - Current locations are set from sources
    *  - Predecessors are recorded from sources and destinations
    */
   int num_vals = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      int src_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], entry->src))
            src_idx = i;
      }
      if (src_idx < 0) {
         src_idx = num_vals++;
         values[src_idx] = entry->src;
      }

      nir_src dest_src = nir_src_for_reg(entry->dest.reg.reg);

      int dest_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], dest_src)) {
            /* Each destination of a parallel copy instruction should be
             * unique. A destination may get used as a source, so we still
             * have to walk the list. However, the predecessor should not,
             * at this point, be set yet, so we should have -1 here.
             */
            assert(pred[i] == -1);
            dest_idx = i;
         }
      }
      if (dest_idx < 0) {
         dest_idx = num_vals++;
         values[dest_idx] = dest_src;
      }

      loc[src_idx] = src_idx;
      pred[dest_idx] = src_idx;

      to_do[++to_do_idx] = dest_idx;
   }

   /* Currently empty destinations we can go ahead and fill */
   NIR_VLA(int, ready, num_copies * 2);
   int ready_idx = -1;

   /* Mark the ones that are ready for copying. We know an index is a
    * destination if it has a predecessor and it's ready for copying if
    * it's not marked as containing data.
    */
   for (int i = 0; i < num_vals; i++) {
      if (pred[i] != -1 && loc[i] == -1)
         ready[++ready_idx] = i;
   }

   while (to_do_idx >= 0) {
      while (ready_idx >= 0) {
         int b = ready[ready_idx--];
         int a = pred[b];
         emit_copy(&state->builder, values[loc[a]], values[b]);

         /* b has been filled, mark it as not needing to be copied */
         pred[b] = -1;

         /* The next bit only applies if the source and destination have the
          * same divergence. If they differ (it must be convergent ->
          * divergent), then we can't guarantee we won't need the convergent
          * version of it again.
          */
         if (nir_src_is_divergent(values[a]) ==
             nir_src_is_divergent(values[b])) {
            /* If any other copies want a they can find it at b */
            loc[a] = b;

            /* If a needs to be filled... */
            if (pred[a] != -1) {
               /* It's ready for copying now */
               ready[++ready_idx] = a;
            }
         }
      }
      int b = to_do[to_do_idx--];
      if (pred[b] == -1)
         continue;

      /* If we got here, then we don't have any more trivial copies that we
       * can do. We have to break a cycle, so we create a new temporary
       * register for that purpose. Normally, if going out of SSA after
       * register allocation, you would want to avoid creating temporary
       * registers. However, we are going out of SSA before register
       * allocation, so we would rather not create extra register
       * dependencies for the backend to deal with. If it wants, the
       * backend can coalesce the (possibly multiple) temporaries.
       */
      assert(num_vals < num_copies * 2);
      nir_register *reg = nir_local_reg_create(state->builder.impl);
      reg->num_array_elems = 0;
      if (values[b].is_ssa) {
         reg->num_components = values[b].ssa->num_components;
         reg->bit_size = values[b].ssa->bit_size;
      } else {
         reg->num_components = values[b].reg.reg->num_components;
         reg->bit_size = values[b].reg.reg->bit_size;
      }
      reg->divergent = nir_src_is_divergent(values[b]);
      values[num_vals].is_ssa = false;
      values[num_vals].reg.reg = reg;

      emit_copy(&state->builder, values[b], values[num_vals]);
      loc[b] = num_vals;
      ready[++ready_idx] = b;
      num_vals++;
   }

   nir_instr_remove(&pcopy->instr);
   exec_list_push_tail(&state->dead_instrs, &pcopy->instr.node);
}

/* Resolves the parallel copies in a block. Each block can have at most
 * two: one at the beginning, right after all the phi nodes, and one at
 * the end (or right before the final jump if it exists).
 */
static bool
resolve_parallel_copies_block(nir_block *block, struct from_ssa_state *state)
{
   /* At this point, we have removed all of the phi nodes. If a parallel
    * copy existed right after the phi nodes in this block, it is now the
    * first instruction.
    */
   nir_instr *first_instr = nir_block_first_instr(block);
   if (first_instr == NULL)
      return true; /* Empty, nothing to do. */

   if (first_instr->type == nir_instr_type_parallel_copy) {
      nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(first_instr);

      resolve_parallel_copy(pcopy, state);
   }

   /* It's possible that the above code already cleaned up the end parallel
    * copy. However, doing so removed it from the instructions list so we
    * won't find it here. Therefore, it's safe to go ahead and just look
    * for one and clean it up if it exists.
    */
   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);
   if (end_pcopy)
      resolve_parallel_copy(end_pcopy, state);

   return true;
}

static bool
nir_convert_from_ssa_impl(nir_function_impl *impl, bool phi_webs_only)
{
   nir_shader *shader = impl->function->shader;

   struct from_ssa_state state;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.phi_webs_only = phi_webs_only;
   state.merge_node_table = _mesa_pointer_hash_table_create(NULL);
   state.progress = false;
   exec_list_make_empty(&state.dead_instrs);

   nir_foreach_block(block, impl) {
      add_parallel_copy_to_end_of_block(shader, block, state.dead_ctx);
   }

   nir_foreach_block(block, impl) {
      isolate_phi_nodes_block(shader, block, state.dead_ctx);
   }

   /* Mark metadata as dirty before we ask for liveness analysis */
   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   nir_metadata_require(impl, nir_metadata_instr_index |
                              nir_metadata_live_ssa_defs |
                              nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      coalesce_phi_nodes_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      aggressive_coalesce_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_registers_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_parallel_copies_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   /* Clean up dead instructions and the hash tables */
   nir_instr_free_list(&state.dead_instrs);
   _mesa_hash_table_destroy(state.merge_node_table, NULL);
   ralloc_free(state.dead_ctx);
   return state.progress;
}

bool
nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_convert_from_ssa_impl(function->impl, phi_webs_only);
   }

   return progress;
}


static void
place_phi_read(nir_builder *b, nir_register *reg,
               nir_ssa_def *def, nir_block *block, struct set *visited_blocks)
{
   /* Search already visited blocks to avoid back edges in tree */
   if (_mesa_set_search(visited_blocks, block) == NULL) {
      /* Try to go up the single-successor tree */
      bool all_single_successors = true;
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (pred->successors[0] && pred->successors[1]) {
            all_single_successors = false;
            break;
         }
      }

      if (all_single_successors) {
         /* All predecessors of this block have exactly one successor and it
          * is this block so they must eventually lead here without
          * intersecting each other. Place the reads in the predecessors
          * instead of this block.
          */
         _mesa_set_add(visited_blocks, block);

         set_foreach(block->predecessors, entry) {
            place_phi_read(b, reg, def, (nir_block *)entry->key, visited_blocks);
         }
         return;
      }
   }

   b->cursor = nir_after_block_before_jump(block);
   nir_store_reg(b, reg, def, ~0);
}

/** Lower all of the phi nodes in a block to movs to and from a register
 *
 * This provides a very quick-and-dirty out-of-SSA pass that you can run on a
 * single block to convert all of its phis to a register and some movs.
 * The code that is generated, while not optimal for actual codegen in a
 * back-end, is easy to generate, correct, and will turn into the same set of
 * phis after you call regs_to_ssa and do some copy propagation. For each phi
 * node we do the following:
 *
 *  1. For each phi instruction in the block, create a new nir_register
 *
 *  2. Insert movs at the top of the destination block for each phi and
 *     rewrite all uses of the phi to use the mov.
 *
 *  3. For each phi source, insert movs in the predecessor block from the phi
 *     source to the register associated with the phi.
 *
 * Correctness is guaranteed by the fact that we create a new register for
 * each phi and emit movs on both sides of the control-flow edge. Because all
 * the phis have SSA destinations (we assert this) and there is a separate
 * temporary for each phi, all movs inserted in any particular block have
 * unique destinations so the order of operations does not matter.
 *
 * The one intelligent thing this pass does is that it places the moves from
 * the phi sources as high up the predecessor tree as possible instead of in
 * the exact predecessor. This means that, in particular, it will crawl into
 * the deepest nesting of any if-ladders. In order to ensure that doing so is
 * safe, it stops as soon as one of the predecessors has multiple successors.
 */
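
/* As a sketch (hypothetical SSA numbering), the phi
 *
 *    block_2:
 *    ssa_2 = phi block_0: ssa_0, block_1: ssa_1
 *
 * becomes stores and a load through a fresh register r:
 *
 *    block_0: r = mov ssa_0      block_1: r = mov ssa_1
 *
 *    block_2:
 *    ssa_3 = mov r
 *    ...all former uses of ssa_2 now use ssa_3...
 */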
bool
nir_lower_phis_to_regs_block(nir_block *block)
{
   nir_builder b;
   nir_builder_init(&b, nir_cf_node_get_function(&block->cf_node));
   struct set *visited_blocks = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);

   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);

      nir_register *reg = create_reg_for_ssa_def(&phi->dest.ssa, b.impl);

      b.cursor = nir_after_instr(&phi->instr);
      nir_ssa_def *def = nir_load_reg(&b, reg);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);

      nir_foreach_phi_src(src, phi) {
         if (src->src.is_ssa) {
            _mesa_set_add(visited_blocks, src->src.ssa->parent_instr->block);
            place_phi_read(&b, reg, src->src.ssa, src->pred, visited_blocks);
            _mesa_set_clear(visited_blocks, NULL);
         } else {
            b.cursor = nir_after_block_before_jump(src->pred);
            nir_ssa_def *src_ssa =
               nir_ssa_for_src(&b, src->src, phi->dest.ssa.num_components);
            nir_store_reg(&b, reg, src_ssa, ~0);
         }
      }

      nir_instr_remove(&phi->instr);

      progress = true;
   }

   _mesa_set_destroy(visited_blocks, NULL);

   return progress;
}

struct ssa_def_to_reg_state {
   nir_function_impl *impl;
   bool progress;
};

static bool
dest_replace_ssa_with_reg(nir_dest *dest, void *void_state)
{
   struct ssa_def_to_reg_state *state = void_state;

   if (!dest->is_ssa)
      return true;

   nir_register *reg = create_reg_for_ssa_def(&dest->ssa, state->impl);

   nir_ssa_def_rewrite_uses_src(&dest->ssa, nir_src_for_reg(reg));

   nir_instr *instr = dest->ssa.parent_instr;
   *dest = nir_dest_for_reg(reg);
   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &reg->defs);

   state->progress = true;

   return true;
}

static bool
ssa_def_is_local_to_block(nir_ssa_def *def, UNUSED void *state)
{
   nir_block *block = def->parent_instr->block;
   nir_foreach_use(use_src, def) {
      if (use_src->parent_instr->block != block ||
          use_src->parent_instr->type == nir_instr_type_phi) {
         return false;
      }
   }

   if (!list_is_empty(&def->if_uses))
      return false;

   return true;
}

/** Lower all of the SSA defs in a block to registers
 *
 * This performs the very simple operation of blindly replacing all of the SSA
 * defs in the given block with registers. If not used carefully, this may
 * result in phi nodes with register sources, which is technically invalid.
 * Fortunately, the register-based into-SSA pass handles them anyway.
 */
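
/* For example, if a def in this block feeds a phi in a successor block,
 * that phi source becomes a register read, which phis are not supposed to
 * have; the register-based into-SSA pass will rewrite it regardless.
 */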
bool
nir_lower_ssa_defs_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_shader *shader = impl->function->shader;

   struct ssa_def_to_reg_state state = {
      .impl = impl,
      .progress = false,
   };

   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_ssa_undef) {
         /* Undefs are just a read of something never written. */
         nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
         nir_register *reg = create_reg_for_ssa_def(&undef->def, state.impl);
         nir_ssa_def_rewrite_uses_src(&undef->def, nir_src_for_reg(reg));
      } else if (instr->type == nir_instr_type_load_const) {
         /* Constant loads are SSA-only, we need to insert a move */
         nir_load_const_instr *load = nir_instr_as_load_const(instr);
         nir_register *reg = create_reg_for_ssa_def(&load->def, state.impl);
         nir_ssa_def_rewrite_uses_src(&load->def, nir_src_for_reg(reg));

         nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
         mov->src[0].src = nir_src_for_ssa(&load->def);
         mov->dest.dest = nir_dest_for_reg(reg);
         mov->dest.write_mask = (1 << reg->num_components) - 1;
         nir_instr_insert(nir_after_instr(&load->instr), &mov->instr);
      } else if (nir_foreach_ssa_def(instr, ssa_def_is_local_to_block, NULL)) {
         /* If the SSA def produced by this instruction is only in the block
          * in which it is defined and is not used by ifs or phis, then we
          * don't have a reason to convert it to a register.
          */
      } else {
         nir_foreach_dest(instr, dest_replace_ssa_with_reg, &state);
      }
   }

   return state.progress;
}