/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_vla.h"
/*
 * This file implements an out-of-SSA pass as described in "Revisiting
 * Out-of-SSA Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */

struct from_ssa_state {
   nir_builder builder;
   void *dead_ctx;
   bool phi_webs_only;
   struct hash_table *merge_node_table;
   nir_instr *instr;
   bool progress;
};

/* Returns true if def @a comes after def @b.
 *
 * We treat SSA undefs as always coming before other instruction types.
 */
static bool
def_after(nir_ssa_def *a, nir_ssa_def *b)
{
   if (a->parent_instr->type == nir_instr_type_ssa_undef)
      return false;

   if (b->parent_instr->type == nir_instr_type_ssa_undef)
      return true;

   return a->parent_instr->index > b->parent_instr->index;
}

/* Returns true if a dominates b */
static bool
ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
{
   if (a->parent_instr->type == nir_instr_type_ssa_undef) {
      /* SSA undefs always dominate */
      return true;
   } else if (def_after(a, b)) {
      return false;
   } else if (a->parent_instr->block == b->parent_instr->block) {
      return def_after(b, a);
   } else {
      return nir_block_dominates(a->parent_instr->block,
                                 b->parent_instr->block);
   }
}


/* The following data structure, which I have named merge_set, is a way of
 * representing a set of non-interfering registers.  This is based on the
 * concept of a "dominance forest" presented in "Fast Copy Coalescing and
 * Live-Range Identification" by Budimlic et al., but the implementation
 * concept is taken from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 *
 * Each SSA definition is associated with a merge_node and the association
 * is represented by a combination of a hash table and the "def" parameter
 * in the merge_node structure.  The merge_set stores a linked list of
 * merge_nodes in dominance order of the ssa definitions.  (Since the
 * liveness analysis pass indexes the SSA values in dominance order for us,
 * this is an easy invariant to maintain.)  It is assumed that no pair of
 * nodes in a given set interferes.  Merging two sets or checking for
 * interference can be done in a single linear-time merge-sort walk of the
 * two lists of nodes.
 */
struct merge_set;

typedef struct {
   struct exec_node node;
   struct merge_set *set;
   nir_ssa_def *def;
} merge_node;

typedef struct merge_set {
   struct exec_list nodes;
   unsigned size;
   nir_register *reg;
} merge_set;
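
/* As an illustrative (hypothetical) example of the dominance-order
 * invariant: if ssa_1 dominates ssa_4, which in turn dominates ssa_7, a
 * merge set holding all three stores its nodes in exactly that order:
 *
 *    set->nodes: ssa_1 -> ssa_4 -> ssa_7
 *
 * Because both lists are sorted by dominance, merging two sets is a
 * merge-sort-style walk, and interference checking only ever needs to
 * compare a value against its nearest dominating value on a stack rather
 * than against the whole set.
 */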

#if 0
static void
merge_set_dump(merge_set *set, FILE *fp)
{
   nir_ssa_def *dom[set->size];
   int dom_idx = -1;

   foreach_list_typed(merge_node, node, node, &set->nodes) {
      while (dom_idx >= 0 && !ssa_def_dominates(dom[dom_idx], node->def))
         dom_idx--;

      for (int i = 0; i <= dom_idx; i++)
         fprintf(fp, "  ");

      if (node->def->name)
         fprintf(fp, "ssa_%d /* %s */\n", node->def->index, node->def->name);
      else
         fprintf(fp, "ssa_%d\n", node->def->index);

      dom[++dom_idx] = node->def;
   }
}
#endif

static merge_node *
get_merge_node(nir_ssa_def *def, struct from_ssa_state *state)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry)
      return entry->data;

   merge_set *set = ralloc(state->dead_ctx, merge_set);
   exec_list_make_empty(&set->nodes);
   set->size = 1;
   set->reg = NULL;

   merge_node *node = ralloc(state->dead_ctx, merge_node);
   node->set = set;
   node->def = def;
   exec_list_push_head(&set->nodes, &node->node);

   _mesa_hash_table_insert(state->merge_node_table, def, node);

   return node;
}

static bool
merge_nodes_interfere(merge_node *a, merge_node *b)
{
   return nir_ssa_defs_interfere(a->def, b->def);
}

/* Merges b into a */
static merge_set *
merge_merge_sets(merge_set *a, merge_set *b)
{
   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(bn)) {
      merge_node *a_node = exec_node_data(merge_node, an, node);
      merge_node *b_node = exec_node_data(merge_node, bn, node);

      if (exec_node_is_tail_sentinel(an) ||
          def_after(a_node->def, b_node->def)) {
         struct exec_node *next = bn->next;
         exec_node_remove(bn);
         exec_node_insert_node_before(an, bn);
         exec_node_data(merge_node, bn, node)->set = a;
         bn = next;
      } else {
         an = an->next;
      }
   }

   a->size += b->size;
   b->size = 0;

   return a;
}

/* Checks for any interference between two merge sets
 *
 * This is an implementation of Algorithm 2 in "Revisiting Out-of-SSA
 * Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */
static bool
merge_sets_interfere(merge_set *a, merge_set *b)
{
   NIR_VLA(merge_node *, dom, a->size + b->size);
   int dom_idx = -1;

   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(an) ||
          !exec_node_is_tail_sentinel(bn)) {

      merge_node *current;
      if (exec_node_is_tail_sentinel(an)) {
         current = exec_node_data(merge_node, bn, node);
         bn = bn->next;
      } else if (exec_node_is_tail_sentinel(bn)) {
         current = exec_node_data(merge_node, an, node);
         an = an->next;
      } else {
         merge_node *a_node = exec_node_data(merge_node, an, node);
         merge_node *b_node = exec_node_data(merge_node, bn, node);

         if (def_after(b_node->def, a_node->def)) {
            current = a_node;
            an = an->next;
         } else {
            current = b_node;
            bn = bn->next;
         }
      }

      while (dom_idx >= 0 &&
             !ssa_def_dominates(dom[dom_idx]->def, current->def))
         dom_idx--;

      if (dom_idx >= 0 && merge_nodes_interfere(current, dom[dom_idx]))
         return true;

      dom[++dom_idx] = current;
   }

   return false;
}
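
/* A hypothetical trace of the walk above: with set a = {ssa_1, ssa_7} and
 * set b = {ssa_4}, where ssa_1 dominates ssa_4 dominates ssa_7, the
 * combined dominance-order visit is ssa_1, ssa_4, ssa_7.  Visiting ssa_4
 * checks it against ssa_1 only; visiting ssa_7 pops nothing and checks it
 * against ssa_4 only.  Per the dominance-forest result the papers rely on,
 * two SSA values can only interfere if one dominates the other, which is
 * why checking each value against its nearest dominating value suffices.
 */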

static bool
add_parallel_copy_to_end_of_block(nir_block *block, void *dead_ctx)
{
   bool need_end_copy = false;
   if (block->successors[0]) {
      nir_instr *instr = nir_block_first_instr(block->successors[0]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (block->successors[1]) {
      nir_instr *instr = nir_block_first_instr(block->successors[1]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (need_end_copy) {
      /* If one of our successors has at least one phi node, we need to
       * create a parallel copy at the end of the block but before the jump
       * (if there is one).
       */
      nir_parallel_copy_instr *pcopy =
         nir_parallel_copy_instr_create(dead_ctx);

      nir_instr_insert(nir_after_block_before_jump(block), &pcopy->instr);
   }

   return true;
}

static nir_parallel_copy_instr *
get_parallel_copy_at_end_of_block(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr == NULL)
      return NULL;

   /* The last instruction may be a jump in which case the parallel copy is
    * right before it.
    */
   if (last_instr->type == nir_instr_type_jump)
      last_instr = nir_instr_prev(last_instr);

   if (last_instr && last_instr->type == nir_instr_type_parallel_copy)
      return nir_instr_as_parallel_copy(last_instr);
   else
      return NULL;
}

/** Isolate phi nodes with parallel copies
 *
 * In order to solve the dependency problems with the sources and
 * destinations of phi nodes, we first isolate them by adding parallel
 * copies to the beginnings and ends of basic blocks.  For every block with
 * phi nodes, we add a parallel copy immediately following the last phi
 * node that copies the destinations of all of the phi nodes to new SSA
 * values.  We also add a parallel copy to the end of every block that has
 * a successor with phi nodes; for each phi node in each successor, it
 * copies the corresponding source of the phi node, and we adjust the phi
 * to use the destination of the parallel copy.
 *
 * In SSA form, each value has exactly one definition.  What this does is
 * ensure that each value used in a phi also has exactly one use.  The
 * destinations of phis are only used by the parallel copy immediately
 * following the phi nodes and, thanks to the parallel copy at the end of
 * the predecessor block, the sources of phi nodes are the only use of
 * that value.  This allows us to immediately assign all the sources and
 * destinations of any given phi node to the same register without worrying
 * about interference at all.  We do coalescing to get rid of the parallel
 * copies where possible.
 *
 * Before this pass can be run, we have to iterate over the blocks with
 * add_parallel_copy_to_end_of_block to ensure that the parallel copies at
 * the ends of blocks exist.  We can create the ones at the beginnings as
 * we go, but the ones at the ends of blocks need to be created ahead of
 * time because of potential back-edges in the CFG.
 */
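/* As a hedged illustration, in hypothetical NIR-like pseudo-IR, a phi such
 * as
 *
 *    block b2:
 *       ssa_5 = phi b0: ssa_2, b1: ssa_3
 *
 * becomes, after isolation,
 *
 *    block b0:                        block b1:
 *       ssa_6 = pcopy ssa_2              ssa_7 = pcopy ssa_3
 *
 *    block b2:
 *       ssa_5 = phi b0: ssa_6, b1: ssa_7
 *       ssa_8 = pcopy ssa_5
 *       (all former uses of ssa_5 now use ssa_8)
 *
 * Each phi source is now the only use of a parallel-copy destination, and
 * the phi destination's only use is the parallel copy after the phis.
 */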
static bool
isolate_phi_nodes_block(nir_block *block, void *dead_ctx)
{
   nir_instr *last_phi_instr = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi_instr = instr;
   }

   /* If we don't have any phis, then there's nothing for us to do. */
   if (last_phi_instr == NULL)
      return true;

   /* If we have phi nodes, we need to create a parallel copy at the
    * start of this block but after the phi nodes.
    */
   nir_parallel_copy_instr *block_pcopy =
      nir_parallel_copy_instr_create(dead_ctx);
   nir_instr_insert_after(last_phi_instr, &block_pcopy->instr);

   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);
      nir_foreach_phi_src(src, phi) {
         nir_parallel_copy_instr *pcopy =
            get_parallel_copy_at_end_of_block(src->pred);
         assert(pcopy);

         nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                                  nir_parallel_copy_entry);
         nir_ssa_dest_init(&pcopy->instr, &entry->dest,
                           phi->dest.ssa.num_components,
                           phi->dest.ssa.bit_size, src->src.ssa->name);
         exec_list_push_tail(&pcopy->entries, &entry->node);

         assert(src->src.is_ssa);
         nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);

         nir_instr_rewrite_src(&phi->instr, &src->src,
                               nir_src_for_ssa(&entry->dest.ssa));
      }

      nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                               nir_parallel_copy_entry);
      nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
                        phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
                        phi->dest.ssa.name);
      exec_list_push_tail(&block_pcopy->entries, &entry->node);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&entry->dest.ssa));

      nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
                            nir_src_for_ssa(&phi->dest.ssa));
   }

   return true;
}

static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      assert(phi->dest.is_ssa);
      merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         merge_node *src_node = get_merge_node(src->src.ssa, state);
         if (src_node->set != dest_node->set)
            merge_merge_sets(dest_node->set, src_node->set);
      }
   }

   return true;
}

static void
aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
                                  struct from_ssa_state *state)
{
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      if (!entry->src.is_ssa)
         continue;

      /* Since load_const instructions are SSA only, we can't replace their
       * destinations with registers and, therefore, can't coalesce them.
       */
      if (entry->src.ssa->parent_instr->type == nir_instr_type_load_const)
         continue;

      /* Don't try and coalesce these */
      if (entry->dest.ssa.num_components != entry->src.ssa->num_components)
         continue;

      merge_node *src_node = get_merge_node(entry->src.ssa, state);
      merge_node *dest_node = get_merge_node(&entry->dest.ssa, state);

      if (src_node->set == dest_node->set)
         continue;

      if (!merge_sets_interfere(src_node->set, dest_node->set))
         merge_merge_sets(src_node->set, dest_node->set);
   }
}

static bool
aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
{
   nir_parallel_copy_instr *start_pcopy = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi) {
         if (instr->type != nir_instr_type_parallel_copy)
            break; /* The parallel copy must be right after the phis */

         start_pcopy = nir_instr_as_parallel_copy(instr);

         aggressive_coalesce_parallel_copy(start_pcopy, state);

         break;
      }
   }

   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);

   if (end_pcopy && end_pcopy != start_pcopy)
      aggressive_coalesce_parallel_copy(end_pcopy, state);

   return true;
}

static nir_register *
create_reg_for_ssa_def(nir_ssa_def *def, nir_function_impl *impl)
{
   nir_register *reg = nir_local_reg_create(impl);

   reg->name = def->name;
   reg->num_components = def->num_components;
   reg->bit_size = def->bit_size;
   reg->num_array_elems = 0;

   return reg;
}

static bool
rewrite_ssa_def(nir_ssa_def *def, void *void_state)
{
   struct from_ssa_state *state = void_state;
   nir_register *reg;

   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry) {
      /* In this case, we're part of a phi web.  Use the web's register. */
      merge_node *node = (merge_node *)entry->data;

      /* If it doesn't have a register yet, create one.  Note that all of
       * the things in the merge set should be the same so it doesn't
       * matter which node's definition we use.
       */
      if (node->set->reg == NULL)
         node->set->reg = create_reg_for_ssa_def(def, state->builder.impl);

      reg = node->set->reg;
   } else {
      if (state->phi_webs_only)
         return true;

      /* We leave load_const SSA values alone.  They act as immediates to
       * the backend.  If it got coalesced into a phi, that's ok.
       */
      if (def->parent_instr->type == nir_instr_type_load_const)
         return true;

      reg = create_reg_for_ssa_def(def, state->builder.impl);
   }

   nir_ssa_def_rewrite_uses(def, nir_src_for_reg(reg));
   assert(list_is_empty(&def->uses) && list_is_empty(&def->if_uses));

   if (def->parent_instr->type == nir_instr_type_ssa_undef) {
      /* If it's an ssa_undef instruction, remove it since we know we just got
       * rid of all its uses.
       */
      nir_instr *parent_instr = def->parent_instr;
      nir_instr_remove(parent_instr);
      ralloc_steal(state->dead_ctx, parent_instr);
      state->progress = true;
      return true;
   }

   assert(def->parent_instr->type != nir_instr_type_load_const);

   /* At this point we know a priori that this SSA def is part of a
    * nir_dest.  We can use exec_node_data to get the dest pointer.
    */
   nir_dest *dest = exec_node_data(nir_dest, def, ssa);

   nir_instr_rewrite_dest(state->instr, dest, nir_dest_for_reg(reg));
   state->progress = true;
   return true;
}

/* Resolves ssa definitions to registers.  While we're at it, we also
 * remove phi nodes.
 */
static void
resolve_registers_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      state->instr = instr;
      nir_foreach_ssa_def(instr, rewrite_ssa_def, state);

      if (instr->type == nir_instr_type_phi) {
         nir_instr_remove(instr);
         ralloc_steal(state->dead_ctx, instr);
         state->progress = true;
      }
   }
   state->instr = NULL;
}

static void
emit_copy(nir_builder *b, nir_src src, nir_src dest_src)
{
   assert(!dest_src.is_ssa &&
          dest_src.reg.indirect == NULL &&
          dest_src.reg.base_offset == 0);

   if (src.is_ssa)
      assert(src.ssa->num_components >= dest_src.reg.reg->num_components);
   else
      assert(src.reg.reg->num_components >= dest_src.reg.reg->num_components);

   nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);
   nir_src_copy(&mov->src[0].src, &src, mov);
   mov->dest.dest = nir_dest_for_reg(dest_src.reg.reg);
   mov->dest.write_mask = (1 << dest_src.reg.reg->num_components) - 1;

   nir_builder_instr_insert(b, &mov->instr);
}

/* Resolves a single parallel copy operation into a sequence of movs
 *
 * This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 * However, I never got the algorithm to work as written, so this version
 * is slightly modified.
 *
 * The algorithm works by playing this little shell game with the values.
 * We start by recording where every source value is and which source value
 * each destination value should receive.  We then grab any copy whose
 * destination is "empty", i.e. not used as a source, and do the following:
 *  - Find where its source value currently lives
 *  - Emit the move instruction
 *  - Set the location of the source value to the destination
 *  - Mark the register that held the source value as ready to be filled
 *    if it is itself a destination that still needs a copy
 *  - Mark the destination as no longer needing to be copied
 *
 * When we run out of "empty" destinations, we have a cycle and so we
 * create a temporary register, copy to that register, and mark the value
 * we copied as living in that temporary.  Now, the cycle is broken, so we
 * can continue with the above steps.
 */
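/* A hypothetical worked example: the parallel copy "r0, r1 = r1, r0" is a
 * pure swap, so no destination starts out empty.  We break the cycle by
 * copying r1 into a new temporary and recording that r1's value now lives
 * there.  r1 is then free to be filled, and the result is:
 *
 *    mov copy_temp, r1
 *    mov r1, r0
 *    mov r0, copy_temp
 */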
static void
resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
                      struct from_ssa_state *state)
{
   unsigned num_copies = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      num_copies++;
   }

   if (num_copies == 0) {
      /* Hooray, we don't need any copies! */
      nir_instr_remove(&pcopy->instr);
      return;
   }

   /* The register/source corresponding to the given index */
   NIR_VLA_ZERO(nir_src, values, num_copies * 2);

   /* The current location of a given piece of data.  We will use -1 for
    * "null".
    */
   NIR_VLA_FILL(int, loc, num_copies * 2, -1);

   /* The piece of data that the given piece of data is to be copied from.
    * We will use -1 for "null".
    */
   NIR_VLA_FILL(int, pred, num_copies * 2, -1);

   /* The destinations we have yet to properly fill */
   NIR_VLA(int, to_do, num_copies * 2);
   int to_do_idx = -1;

   state->builder.cursor = nir_before_instr(&pcopy->instr);

   /* Now we set everything up:
    *  - All values get assigned a temporary index
    *  - Current locations are set from sources
    *  - Predecessors are recorded from sources and destinations
    */
   int num_vals = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      int src_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], entry->src))
            src_idx = i;
      }
      if (src_idx < 0) {
         src_idx = num_vals++;
         values[src_idx] = entry->src;
      }

      nir_src dest_src = nir_src_for_reg(entry->dest.reg.reg);

      int dest_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], dest_src)) {
            /* Each destination of a parallel copy instruction should be
             * unique.  A destination may get used as a source, so we still
             * have to walk the list.  However, the predecessor should not,
             * at this point, be set yet, so we should have -1 here.
             */
            assert(pred[i] == -1);
            dest_idx = i;
         }
      }
      if (dest_idx < 0) {
         dest_idx = num_vals++;
         values[dest_idx] = dest_src;
      }

      loc[src_idx] = src_idx;
      pred[dest_idx] = src_idx;

      to_do[++to_do_idx] = dest_idx;
   }

   /* Currently empty destinations we can go ahead and fill */
   NIR_VLA(int, ready, num_copies * 2);
   int ready_idx = -1;

   /* Mark the ones that are ready for copying.  We know an index is a
    * destination if it has a predecessor and it's ready for copying if
    * it's not marked as containing data.
    */
   for (int i = 0; i < num_vals; i++) {
      if (pred[i] != -1 && loc[i] == -1)
         ready[++ready_idx] = i;
   }

   while (to_do_idx >= 0) {
      while (ready_idx >= 0) {
         int b = ready[ready_idx--];
         int a = pred[b];
         emit_copy(&state->builder, values[loc[a]], values[b]);

         /* b has been filled, mark it as not needing to be copied */
         pred[b] = -1;

         /* If a needs to be filled... */
         if (pred[a] != -1) {
            /* If any other copies want a they can find it at b */
            loc[a] = b;

            /* It's ready for copying now */
            ready[++ready_idx] = a;
         }
      }
      int b = to_do[to_do_idx--];
      if (pred[b] == -1)
         continue;

      /* If we got here, then we don't have any more trivial copies that we
       * can do.  We have to break a cycle, so we create a new temporary
       * register for that purpose.  Normally, if going out of SSA after
       * register allocation, you would want to avoid creating temporary
       * registers.  However, we are going out of SSA before register
       * allocation, so we would rather not create extra register
       * dependencies for the backend to deal with.  If it wants, the
       * backend can coalesce the (possibly multiple) temporaries.
       */
      assert(num_vals < num_copies * 2);
      nir_register *reg = nir_local_reg_create(state->builder.impl);
      reg->name = "copy_temp";
      reg->num_array_elems = 0;
      if (values[b].is_ssa) {
         reg->num_components = values[b].ssa->num_components;
         reg->bit_size = values[b].ssa->bit_size;
      } else {
         reg->num_components = values[b].reg.reg->num_components;
         reg->bit_size = values[b].reg.reg->bit_size;
      }
      values[num_vals].is_ssa = false;
      values[num_vals].reg.reg = reg;

      emit_copy(&state->builder, values[b], values[num_vals]);
      loc[b] = num_vals;
      ready[++ready_idx] = b;
      num_vals++;
   }

   nir_instr_remove(&pcopy->instr);
}

/* Resolves the parallel copies in a block.  Each block can have at most
 * two:  One at the beginning, right after all the phi nodes, and one at
 * the end (or right before the final jump if it exists).
 */
static bool
resolve_parallel_copies_block(nir_block *block, struct from_ssa_state *state)
{
   /* At this point, we have removed all of the phi nodes.  If a parallel
    * copy existed right after the phi nodes in this block, it is now the
    * first instruction.
    */
   nir_instr *first_instr = nir_block_first_instr(block);
   if (first_instr == NULL)
      return true; /* Empty, nothing to do. */

   if (first_instr->type == nir_instr_type_parallel_copy) {
      nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(first_instr);

      resolve_parallel_copy(pcopy, state);
   }

   /* It's possible that the above code already cleaned up the end parallel
    * copy.  However, doing so removed it from the instructions list so we
    * won't find it here.  Therefore, it's safe to go ahead and just look
    * for one and clean it up if it exists.
    */
   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);
   if (end_pcopy)
      resolve_parallel_copy(end_pcopy, state);

   return true;
}

static bool
nir_convert_from_ssa_impl(nir_function_impl *impl, bool phi_webs_only)
{
   struct from_ssa_state state;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.phi_webs_only = phi_webs_only;
   state.merge_node_table = _mesa_pointer_hash_table_create(NULL);
   state.progress = false;

   nir_foreach_block(block, impl) {
      add_parallel_copy_to_end_of_block(block, state.dead_ctx);
   }

   nir_foreach_block(block, impl) {
      isolate_phi_nodes_block(block, state.dead_ctx);
   }

   /* Mark metadata as dirty before we ask for liveness analysis */
   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   nir_metadata_require(impl, nir_metadata_instr_index |
                              nir_metadata_live_ssa_defs |
                              nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      coalesce_phi_nodes_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      aggressive_coalesce_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_registers_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_parallel_copies_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   /* Clean up dead instructions and the hash tables */
   _mesa_hash_table_destroy(state.merge_node_table, NULL);
   ralloc_free(state.dead_ctx);
   return state.progress;
}

bool
nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_convert_from_ssa_impl(function->impl, phi_webs_only);
   }

   return progress;
}
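
#if 0
/* A hedged usage sketch, not part of this pass: a backend that consumes
 * registers rather than SSA would typically call nir_convert_from_ssa()
 * near the end of its NIR lowering pipeline.  The function name below is
 * hypothetical.
 */
static void
example_backend_finalize(nir_shader *shader)
{
   /* Lower everything out of SSA; passing true instead would only lower
    * phi webs and leave other SSA values untouched.
    */
   bool progress = nir_convert_from_ssa(shader, false);
   (void)progress;
}
#endif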


static void
place_phi_read(nir_builder *b, nir_register *reg,
               nir_ssa_def *def, nir_block *block, unsigned depth)
{
   if (block != def->parent_instr->block) {
      /* Try to go up the single-successor tree */
      bool all_single_successors = true;
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (pred->successors[0] && pred->successors[1]) {
            all_single_successors = false;
            break;
         }
      }

      if (all_single_successors && depth < 32) {
         /* All predecessors of this block have exactly one successor and it
          * is this block so they must eventually lead here without
          * intersecting each other.  Place the reads in the predecessors
          * instead of this block.
          *
          * We only let this function recurse 32 times because it can recurse
          * indefinitely in the presence of infinite loops.  Because we're
          * crawling a single-successor chain, it doesn't matter where we
          * place it so it's ok to stop at an arbitrary distance.
          *
          * TODO: One day, we could detect back edges and avoid the recursion
          * that way.
          */
         set_foreach(block->predecessors, entry) {
            place_phi_read(b, reg, def, (nir_block *)entry->key, depth + 1);
         }
         return;
      }
   }

   b->cursor = nir_after_block_before_jump(block);
   nir_store_reg(b, reg, def, ~0);
}

/** Lower all of the phi nodes in a block to movs to and from a register
 *
 * This provides a very quick-and-dirty out-of-SSA pass that you can run on a
 * single block to convert all of its phis to a register and some movs.
 * The code that is generated, while not optimal for actual codegen in a
 * back-end, is easy to generate, correct, and will turn into the same set of
 * phis after you call regs_to_ssa and do some copy propagation.
 *
 * The one intelligent thing this pass does is that it places the moves from
 * the phi sources as high up the predecessor tree as possible instead of in
 * the exact predecessor.  This means that, in particular, it will crawl into
 * the deepest nesting of any if-ladders.  In order to ensure that doing so is
 * safe, it stops as soon as one of the predecessors has multiple successors.
 */
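/* A hedged before/after sketch in hypothetical NIR-like pseudo-IR.  A phi
 * such as
 *
 *    ssa_5 = phi b0: ssa_2, b1: ssa_3
 *
 * becomes a fresh register reg_0 plus a load right where the phi was:
 *
 *    ssa_6 = load_reg reg_0      (all former uses of ssa_5 use ssa_6)
 *
 * with "store_reg reg_0, ssa_2" placed in (or above) b0 and
 * "store_reg reg_0, ssa_3" placed in (or above) b1 by place_phi_read().
 */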
bool
nir_lower_phis_to_regs_block(nir_block *block)
{
   nir_builder b;
   nir_builder_init(&b, nir_cf_node_get_function(&block->cf_node));

   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);

      nir_register *reg = create_reg_for_ssa_def(&phi->dest.ssa, b.impl);

      b.cursor = nir_after_instr(&phi->instr);
      nir_ssa_def *def = nir_load_reg(&b, reg);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_src_for_ssa(def));

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         place_phi_read(&b, reg, src->src.ssa, src->pred, 0);
      }

      nir_instr_remove(&phi->instr);

      progress = true;
   }

   return progress;
}

struct ssa_def_to_reg_state {
   nir_function_impl *impl;
   bool progress;
};

static bool
dest_replace_ssa_with_reg(nir_dest *dest, void *void_state)
{
   struct ssa_def_to_reg_state *state = void_state;

   if (!dest->is_ssa)
      return true;

   nir_register *reg = create_reg_for_ssa_def(&dest->ssa, state->impl);

   nir_ssa_def_rewrite_uses(&dest->ssa, nir_src_for_reg(reg));

   nir_instr *instr = dest->ssa.parent_instr;
   *dest = nir_dest_for_reg(reg);
   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &reg->defs);

   state->progress = true;

   return true;
}

static bool
ssa_def_is_local_to_block(nir_ssa_def *def, UNUSED void *state)
{
   nir_block *block = def->parent_instr->block;
   nir_foreach_use(use_src, def) {
      if (use_src->parent_instr->block != block ||
          use_src->parent_instr->type == nir_instr_type_phi) {
         return false;
      }
   }

   if (!list_is_empty(&def->if_uses))
      return false;

   return true;
}

/** Lower all of the SSA defs in a block to registers
 *
 * This performs the very simple operation of blindly replacing all of the SSA
 * defs in the given block with registers.  If not used carefully, this may
 * result in phi nodes with register sources, which is technically invalid.
 * Fortunately, the register-based into-SSA pass handles them anyway.
 */
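/* As a hedged illustration (hypothetical NIR-like pseudo-IR): given
 *
 *    block b0:
 *       ssa_1 = load_const 1.0
 *       ssa_2 = fadd ssa_1, ssa_0     (ssa_2 also used in block b1)
 *       ssa_3 = fmul ssa_2, ssa_2     (only used later in b0)
 *
 * the pass gives ssa_1 a register via an extra mov (load_const must stay
 * SSA), rewrites ssa_2's dest to a register because it escapes the block,
 * and leaves ssa_3 alone since all of its uses are local non-phi uses.
 */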
bool
nir_lower_ssa_defs_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_shader *shader = impl->function->shader;

   struct ssa_def_to_reg_state state = {
      .impl = impl,
      .progress = false,
   };

   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_ssa_undef) {
         /* Undefs are just a read of something never written. */
         nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
         nir_register *reg = create_reg_for_ssa_def(&undef->def, state.impl);
         nir_ssa_def_rewrite_uses(&undef->def, nir_src_for_reg(reg));
      } else if (instr->type == nir_instr_type_load_const) {
         /* Constant loads are SSA-only, we need to insert a move */
         nir_load_const_instr *load = nir_instr_as_load_const(instr);
         nir_register *reg = create_reg_for_ssa_def(&load->def, state.impl);
         nir_ssa_def_rewrite_uses(&load->def, nir_src_for_reg(reg));

         nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
         mov->src[0].src = nir_src_for_ssa(&load->def);
         mov->dest.dest = nir_dest_for_reg(reg);
         mov->dest.write_mask = (1 << reg->num_components) - 1;
         nir_instr_insert(nir_after_instr(&load->instr), &mov->instr);
      } else if (nir_foreach_ssa_def(instr, ssa_def_is_local_to_block, NULL)) {
         /* If the SSA def produced by this instruction is only used in the
          * block in which it is defined and is not used by ifs or phis, then
          * we don't have a reason to convert it to a register.
          */
      } else {
         nir_foreach_dest(instr, dest_replace_ssa_with_reg, &state);
      }
   }

   return state.progress;
}