/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_vla.h"

/*
 * This file implements an out-of-SSA pass as described in "Revisiting
 * Out-of-SSA Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */

struct from_ssa_state {
   nir_builder builder;
   void *dead_ctx;
   bool phi_webs_only;
   struct hash_table *merge_node_table;
   nir_instr *instr;
};

/* Returns true if a dominates b */
static bool
ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
{
   if (a->live_index == 0) {
      /* SSA undefs always dominate */
      return true;
   } else if (b->live_index < a->live_index) {
      return false;
   } else if (a->parent_instr->block == b->parent_instr->block) {
      return a->live_index <= b->live_index;
   } else {
      return nir_block_dominates(a->parent_instr->block,
                                 b->parent_instr->block);
   }
}


/* The following data structure, which I have named merge_set, is a way of
 * representing a set of non-interfering registers.  This is based on the
 * concept of a "dominance forest" presented in "Fast Copy Coalescing and
 * Live-Range Identification" by Budimlic et al., but the implementation
 * concept is taken from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 *
 * Each SSA definition is associated with a merge_node and the association
 * is represented by a combination of a hash table and the "def" parameter
 * in the merge_node structure.  The merge_set stores a linked list of
 * merge_node's in dominance order of the ssa definitions.  (Since the
 * liveness analysis pass indexes the SSA values in dominance order for us,
 * this is an easy invariant to maintain.)  It is assumed that no pair of
 * nodes in a given set interferes.  Merging two sets or checking for
 * interference can be done in a single linear-time merge-sort walk of the
 * two lists of nodes.
 */
struct merge_set;

typedef struct {
   struct exec_node node;
   struct merge_set *set;
   nir_ssa_def *def;
} merge_node;

typedef struct merge_set {
   struct exec_list nodes;
   unsigned size;
   nir_register *reg;
} merge_set;
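
/* A minimal illustrative sketch (not part of the pass): the invariant the
 * comment above describes is that the nodes of a merge_set appear in
 * dominance order which, thanks to the liveness pass, is the same as
 * non-decreasing live_index.  A checker for that invariant could look like
 * this (hypothetical helper, compiled out like merge_set_dump below).
 */
#if 0
static void
merge_set_assert_dominance_order(merge_set *set)
{
   nir_ssa_def *prev = NULL;
   foreach_list_typed(merge_node, node, node, &set->nodes) {
      /* Each def must be indexed no earlier than the one before it */
      if (prev)
         assert(prev->live_index <= node->def->live_index);
      prev = node->def;
   }
}
#endif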

#if 0
static void
merge_set_dump(merge_set *set, FILE *fp)
{
   nir_ssa_def *dom[set->size];
   int dom_idx = -1;

   foreach_list_typed(merge_node, node, node, &set->nodes) {
      while (dom_idx >= 0 && !ssa_def_dominates(dom[dom_idx], node->def))
         dom_idx--;

      for (int i = 0; i <= dom_idx; i++)
         fprintf(fp, "  ");

      if (node->def->name)
         fprintf(fp, "ssa_%d /* %s */\n", node->def->index, node->def->name);
      else
         fprintf(fp, "ssa_%d\n", node->def->index);

      dom[++dom_idx] = node->def;
   }
}
#endif

static merge_node *
get_merge_node(nir_ssa_def *def, struct from_ssa_state *state)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry)
      return entry->data;

   merge_set *set = ralloc(state->dead_ctx, merge_set);
   exec_list_make_empty(&set->nodes);
   set->size = 1;
   set->reg = NULL;

   merge_node *node = ralloc(state->dead_ctx, merge_node);
   node->set = set;
   node->def = def;
   exec_list_push_head(&set->nodes, &node->node);

   _mesa_hash_table_insert(state->merge_node_table, def, node);

   return node;
}

static bool
merge_nodes_interfere(merge_node *a, merge_node *b)
{
   return nir_ssa_defs_interfere(a->def, b->def);
}

/* Merges b into a */
static merge_set *
merge_merge_sets(merge_set *a, merge_set *b)
{
   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(bn)) {
      merge_node *a_node = exec_node_data(merge_node, an, node);
      merge_node *b_node = exec_node_data(merge_node, bn, node);

      if (exec_node_is_tail_sentinel(an) ||
          a_node->def->live_index > b_node->def->live_index) {
         struct exec_node *next = bn->next;
         exec_node_remove(bn);
         exec_node_insert_node_before(an, bn);
         exec_node_data(merge_node, bn, node)->set = a;
         bn = next;
      } else {
         an = an->next;
      }
   }

   a->size += b->size;
   b->size = 0;

   return a;
}
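
/* As a concrete sketch (hypothetical live_index values): merging
 * b = {3, 7} into a = {1, 4, 9} walks both lists in step and splices each
 * b node in front of the first a node with a larger live_index, leaving
 * a = {1, 3, 4, 7, 9} with every moved node's set pointer updated to a and
 * b left empty.  Both lists stay in dominance order throughout, which is
 * what keeps the merge linear time.
 */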

/* Checks for any interference between two merge sets
 *
 * This is an implementation of Algorithm 2 in "Revisiting Out-of-SSA
 * Translation for Correctness, Code Quality, and Efficiency" by
 * Boissinot et al.
 */
static bool
merge_sets_interfere(merge_set *a, merge_set *b)
{
   NIR_VLA(merge_node *, dom, a->size + b->size);
   int dom_idx = -1;

   struct exec_node *an = exec_list_get_head(&a->nodes);
   struct exec_node *bn = exec_list_get_head(&b->nodes);
   while (!exec_node_is_tail_sentinel(an) ||
          !exec_node_is_tail_sentinel(bn)) {

      merge_node *current;
      if (exec_node_is_tail_sentinel(an)) {
         current = exec_node_data(merge_node, bn, node);
         bn = bn->next;
      } else if (exec_node_is_tail_sentinel(bn)) {
         current = exec_node_data(merge_node, an, node);
         an = an->next;
      } else {
         merge_node *a_node = exec_node_data(merge_node, an, node);
         merge_node *b_node = exec_node_data(merge_node, bn, node);

         if (a_node->def->live_index <= b_node->def->live_index) {
            current = a_node;
            an = an->next;
         } else {
            current = b_node;
            bn = bn->next;
         }
      }

      while (dom_idx >= 0 &&
             !ssa_def_dominates(dom[dom_idx]->def, current->def))
         dom_idx--;

      if (dom_idx >= 0 && merge_nodes_interfere(current, dom[dom_idx]))
         return true;

      dom[++dom_idx] = current;
   }

   return false;
}
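
/* A sketch of the walk above (hypothetical live_index values): visiting the
 * union of {1, 4, 9} and {3, 7} in index order, the dom stack always holds
 * the chain of dominators of the current value, so each value only needs an
 * interference check against its nearest dominator on the stack.  That one
 * check per value is what makes the whole query linear time.
 */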

static bool
add_parallel_copy_to_end_of_block(nir_block *block, void *dead_ctx)
{
   bool need_end_copy = false;
   if (block->successors[0]) {
      nir_instr *instr = nir_block_first_instr(block->successors[0]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (block->successors[1]) {
      nir_instr *instr = nir_block_first_instr(block->successors[1]);
      if (instr && instr->type == nir_instr_type_phi)
         need_end_copy = true;
   }

   if (need_end_copy) {
      /* If one of our successors has at least one phi node, we need to
       * create a parallel copy at the end of the block but before the jump
       * (if there is one).
       */
      nir_parallel_copy_instr *pcopy =
         nir_parallel_copy_instr_create(dead_ctx);

      nir_instr_insert(nir_after_block_before_jump(block), &pcopy->instr);
   }

   return true;
}

static nir_parallel_copy_instr *
get_parallel_copy_at_end_of_block(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr == NULL)
      return NULL;

   /* The last instruction may be a jump in which case the parallel copy is
    * right before it.
    */
   if (last_instr->type == nir_instr_type_jump)
      last_instr = nir_instr_prev(last_instr);

   if (last_instr && last_instr->type == nir_instr_type_parallel_copy)
      return nir_instr_as_parallel_copy(last_instr);
   else
      return NULL;
}

/** Isolate phi nodes with parallel copies
 *
 * In order to solve the dependency problems with the sources and
 * destinations of phi nodes, we first isolate them by adding parallel
 * copies to the beginnings and ends of basic blocks.  For every block with
 * phi nodes, we add a parallel copy immediately following the last phi
 * node that copies the destinations of all of the phi nodes to new SSA
 * values.  We also add a parallel copy to the end of every block that has
 * a successor with phi nodes; for each phi node in each successor, it
 * copies the corresponding source of the phi node, and we adjust the phi
 * to use the destination of the parallel copy.
 *
 * In SSA form, each value has exactly one definition.  What this does is
 * ensure that each value used in a phi also has exactly one use.  The
 * destinations of phis are only used by the parallel copy immediately
 * following the phi nodes and, thanks to the parallel copy at the end of
 * the predecessor block, the sources of phi nodes are the only use of
 * that value.  This allows us to immediately assign all the sources and
 * destinations of any given phi node to the same register without worrying
 * about interference at all.  We do coalescing to get rid of the parallel
 * copies where possible.
 *
 * Before this pass can be run, we have to iterate over the blocks with
 * add_parallel_copy_to_end_of_block to ensure that the parallel copies at
 * the ends of blocks exist.  We can create the ones at the beginnings as
 * we go, but the ones at the ends of blocks need to be created ahead of
 * time because of potential back-edges in the CFG.
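 *
 * As an illustrative sketch (hypothetical SSA numbering), a phi
 *
 *    ssa_5 = phi block_1: ssa_2, block_2: ssa_3
 *
 * ends up isolated as
 *
 *    block_1: ...; ssa_6 = pcopy(ssa_2)   <- at the end of block_1
 *    block_2: ...; ssa_7 = pcopy(ssa_3)   <- at the end of block_2
 *    ssa_5 = phi block_1: ssa_6, block_2: ssa_7
 *    ssa_8 = pcopy(ssa_5)                 <- all other uses of ssa_5
 *                                            now use ssa_8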
 */
static bool
isolate_phi_nodes_block(nir_block *block, void *dead_ctx)
{
   nir_instr *last_phi_instr = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi_instr = instr;
   }

   /* If we don't have any phis, then there's nothing for us to do. */
   if (last_phi_instr == NULL)
      return true;

   /* If we have phi nodes, we need to create a parallel copy at the
    * start of this block but after the phi nodes.
    */
   nir_parallel_copy_instr *block_pcopy =
      nir_parallel_copy_instr_create(dead_ctx);
   nir_instr_insert_after(last_phi_instr, &block_pcopy->instr);

   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);
      nir_foreach_phi_src(src, phi) {
         nir_parallel_copy_instr *pcopy =
            get_parallel_copy_at_end_of_block(src->pred);
         assert(pcopy);

         nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                                  nir_parallel_copy_entry);
         nir_ssa_dest_init(&pcopy->instr, &entry->dest,
                           phi->dest.ssa.num_components,
                           phi->dest.ssa.bit_size, src->src.ssa->name);
         exec_list_push_tail(&pcopy->entries, &entry->node);

         assert(src->src.is_ssa);
         nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);

         nir_instr_rewrite_src(&phi->instr, &src->src,
                               nir_src_for_ssa(&entry->dest.ssa));
      }

      nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
                                               nir_parallel_copy_entry);
      nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
                        phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
                        phi->dest.ssa.name);
      exec_list_push_tail(&block_pcopy->entries, &entry->node);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&entry->dest.ssa));

      nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
                            nir_src_for_ssa(&phi->dest.ssa));
   }

   return true;
}

static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      assert(phi->dest.is_ssa);
      merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         merge_node *src_node = get_merge_node(src->src.ssa, state);
         if (src_node->set != dest_node->set)
            merge_merge_sets(dest_node->set, src_node->set);
      }
   }

   return true;
}

static void
aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
                                  struct from_ssa_state *state)
{
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      if (!entry->src.is_ssa)
         continue;

      /* Since load_const instructions are SSA only, we can't replace their
       * destinations with registers and, therefore, can't coalesce them.
       */
      if (entry->src.ssa->parent_instr->type == nir_instr_type_load_const)
         continue;

      /* Don't try and coalesce these */
      if (entry->dest.ssa.num_components != entry->src.ssa->num_components)
         continue;

      merge_node *src_node = get_merge_node(entry->src.ssa, state);
      merge_node *dest_node = get_merge_node(&entry->dest.ssa, state);

      if (src_node->set == dest_node->set)
         continue;

      if (!merge_sets_interfere(src_node->set, dest_node->set))
         merge_merge_sets(src_node->set, dest_node->set);
   }
}

static bool
aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
{
   nir_parallel_copy_instr *start_pcopy = NULL;
   nir_foreach_instr(instr, block) {
      /* Phi nodes only ever come at the start of a block */
      if (instr->type != nir_instr_type_phi) {
         if (instr->type != nir_instr_type_parallel_copy)
            break; /* The parallel copy must be right after the phis */

         start_pcopy = nir_instr_as_parallel_copy(instr);

         aggressive_coalesce_parallel_copy(start_pcopy, state);

         break;
      }
   }

   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);

   if (end_pcopy && end_pcopy != start_pcopy)
      aggressive_coalesce_parallel_copy(end_pcopy, state);

   return true;
}

static nir_register *
create_reg_for_ssa_def(nir_ssa_def *def, nir_function_impl *impl)
{
   nir_register *reg = nir_local_reg_create(impl);

   reg->name = def->name;
   reg->num_components = def->num_components;
   reg->bit_size = def->bit_size;
   reg->num_array_elems = 0;

   return reg;
}

static bool
rewrite_ssa_def(nir_ssa_def *def, void *void_state)
{
   struct from_ssa_state *state = void_state;
   nir_register *reg;

   struct hash_entry *entry =
      _mesa_hash_table_search(state->merge_node_table, def);
   if (entry) {
      /* In this case, we're part of a phi web.  Use the web's register. */
      merge_node *node = (merge_node *)entry->data;

      /* If it doesn't have a register yet, create one.  Note that all of
       * the things in the merge set should be the same so it doesn't
       * matter which node's definition we use.
       */
      if (node->set->reg == NULL)
         node->set->reg = create_reg_for_ssa_def(def, state->builder.impl);

      reg = node->set->reg;
   } else {
      if (state->phi_webs_only)
         return true;

      /* We leave load_const SSA values alone.  They act as immediates to
       * the backend.  If it got coalesced into a phi, that's ok.
       */
      if (def->parent_instr->type == nir_instr_type_load_const)
         return true;

      reg = create_reg_for_ssa_def(def, state->builder.impl);
   }

   nir_ssa_def_rewrite_uses(def, nir_src_for_reg(reg));
   assert(list_empty(&def->uses) && list_empty(&def->if_uses));

   if (def->parent_instr->type == nir_instr_type_ssa_undef) {
      /* If it's an ssa_undef instruction, remove it since we know we just got
       * rid of all its uses.
       */
      nir_instr *parent_instr = def->parent_instr;
      nir_instr_remove(parent_instr);
      ralloc_steal(state->dead_ctx, parent_instr);
      return true;
   }

   assert(def->parent_instr->type != nir_instr_type_load_const);

   /* At this point we know a priori that this SSA def is part of a
    * nir_dest.  We can use exec_node_data to get the dest pointer.
    */
   nir_dest *dest = exec_node_data(nir_dest, def, ssa);

   nir_instr_rewrite_dest(state->instr, dest, nir_dest_for_reg(reg));

   return true;
}

/* Resolves ssa definitions to registers.  While we're at it, we also
 * remove phi nodes.
 */
static bool
resolve_registers_block(nir_block *block, struct from_ssa_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      state->instr = instr;
      nir_foreach_ssa_def(instr, rewrite_ssa_def, state);

      if (instr->type == nir_instr_type_phi) {
         nir_instr_remove(instr);
         ralloc_steal(state->dead_ctx, instr);
      }
   }
   state->instr = NULL;

   return true;
}

static void
emit_copy(nir_builder *b, nir_src src, nir_src dest_src)
{
   assert(!dest_src.is_ssa &&
          dest_src.reg.indirect == NULL &&
          dest_src.reg.base_offset == 0);

   if (src.is_ssa)
      assert(src.ssa->num_components >= dest_src.reg.reg->num_components);
   else
      assert(src.reg.reg->num_components >= dest_src.reg.reg->num_components);

   nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_imov);
   nir_src_copy(&mov->src[0].src, &src, mov);
   mov->dest.dest = nir_dest_for_reg(dest_src.reg.reg);
   mov->dest.write_mask = (1 << dest_src.reg.reg->num_components) - 1;

   nir_builder_instr_insert(b, &mov->instr);
}

/* Resolves a single parallel copy operation into a sequence of mov's
 *
 * This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et al.
 * However, I never got the algorithm to work as written, so this version
 * is slightly modified.
 *
 * The algorithm works by playing this little shell game with the values.
 * We start by recording where every source value is and which source value
 * each destination value should receive.  We then grab any copy whose
 * destination is "empty", i.e. not used as a source, and do the following:
 *  - Find where its source value currently lives
 *  - Emit the move instruction
 *  - Set the location of the source value to the destination
 *  - Mark the location containing the source value
 *  - Mark the destination as no longer needing to be copied
 *
 * When we run out of "empty" destinations, we have a cycle and so we
 * create a temporary register, copy to that register, and mark the value
 * we copied as living in that temporary.  Now, the cycle is broken, so we
 * can continue with the above steps.
 */
static void
resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
                      struct from_ssa_state *state)
{
   unsigned num_copies = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      num_copies++;
   }

   if (num_copies == 0) {
      /* Hooray, we don't need any copies! */
      nir_instr_remove(&pcopy->instr);
      return;
   }

   /* The register/source corresponding to the given index */
   NIR_VLA_ZERO(nir_src, values, num_copies * 2);

   /* The current location of a given piece of data.  We will use -1 for
    * "null"
    */
   NIR_VLA_FILL(int, loc, num_copies * 2, -1);

   /* The piece of data that the given piece of data is to be copied from.
    * We will use -1 for "null"
    */
   NIR_VLA_FILL(int, pred, num_copies * 2, -1);

   /* The destinations we have yet to properly fill */
   NIR_VLA(int, to_do, num_copies * 2);
   int to_do_idx = -1;

   state->builder.cursor = nir_before_instr(&pcopy->instr);

   /* Now we set everything up:
    *  - All values get assigned a temporary index
    *  - Current locations are set from sources
    *  - Predecessors are recorded from sources and destinations
    */
   int num_vals = 0;
   nir_foreach_parallel_copy_entry(entry, pcopy) {
      /* Sources may be SSA */
      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
         continue;

      int src_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], entry->src))
            src_idx = i;
      }
      if (src_idx < 0) {
         src_idx = num_vals++;
         values[src_idx] = entry->src;
      }

      nir_src dest_src = nir_src_for_reg(entry->dest.reg.reg);

      int dest_idx = -1;
      for (int i = 0; i < num_vals; ++i) {
         if (nir_srcs_equal(values[i], dest_src)) {
            /* Each destination of a parallel copy instruction should be
             * unique.  A destination may get used as a source, so we still
             * have to walk the list.  However, the predecessor should not,
             * at this point, be set yet, so we should have -1 here.
             */
            assert(pred[i] == -1);
            dest_idx = i;
         }
      }
      if (dest_idx < 0) {
         dest_idx = num_vals++;
         values[dest_idx] = dest_src;
      }

      loc[src_idx] = src_idx;
      pred[dest_idx] = src_idx;

      to_do[++to_do_idx] = dest_idx;
   }

   /* Currently empty destinations we can go ahead and fill */
   NIR_VLA(int, ready, num_copies * 2);
   int ready_idx = -1;

   /* Mark the ones that are ready for copying.  We know an index is a
    * destination if it has a predecessor and it's ready for copying if
    * it's not marked as containing data.
    */
   for (int i = 0; i < num_vals; i++) {
      if (pred[i] != -1 && loc[i] == -1)
         ready[++ready_idx] = i;
   }

   while (to_do_idx >= 0) {
      while (ready_idx >= 0) {
         int b = ready[ready_idx--];
         int a = pred[b];
         emit_copy(&state->builder, values[loc[a]], values[b]);

         /* If any other copies want a, they can find it at b */
         loc[a] = b;

         /* b has been filled, mark it as not needing to be copied */
         pred[b] = -1;

         /* If a needs to be filled, it's ready for copying now */
         if (pred[a] != -1)
            ready[++ready_idx] = a;
      }
      int b = to_do[to_do_idx--];
      if (pred[b] == -1)
         continue;

      /* If we got here, then we don't have any more trivial copies that we
       * can do.  We have to break a cycle, so we create a new temporary
       * register for that purpose.  Normally, if going out of SSA after
       * register allocation, you would want to avoid creating temporary
       * registers.  However, we are going out of SSA before register
       * allocation, so we would rather not create extra register
       * dependencies for the backend to deal with.  If it wants, the
       * backend can coalesce the (possibly multiple) temporaries.
       */
      assert(num_vals < num_copies * 2);
      nir_register *reg = nir_local_reg_create(state->builder.impl);
      reg->name = "copy_temp";
      reg->num_array_elems = 0;
      if (values[b].is_ssa)
         reg->num_components = values[b].ssa->num_components;
      else
         reg->num_components = values[b].reg.reg->num_components;
      values[num_vals].is_ssa = false;
      values[num_vals].reg.reg = reg;

      emit_copy(&state->builder, values[b], values[num_vals]);
      loc[b] = num_vals;
      ready[++ready_idx] = b;
      num_vals++;
   }

   nir_instr_remove(&pcopy->instr);
}

/* Resolves the parallel copies in a block.  Each block can have at most
 * two:  One at the beginning, right after all the phi nodes, and one at
 * the end (or right before the final jump if it exists).
 */
static bool
resolve_parallel_copies_block(nir_block *block, struct from_ssa_state *state)
{
   /* At this point, we have removed all of the phi nodes.  If a parallel
    * copy existed right after the phi nodes in this block, it is now the
    * first instruction.
    */
   nir_instr *first_instr = nir_block_first_instr(block);
   if (first_instr == NULL)
      return true; /* Empty, nothing to do. */

   if (first_instr->type == nir_instr_type_parallel_copy) {
      nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(first_instr);

      resolve_parallel_copy(pcopy, state);
   }

   /* It's possible that the above code already cleaned up the end parallel
    * copy.  However, doing so removed it from the instruction list so we
    * won't find it here.  Therefore, it's safe to go ahead and just look
    * for one and clean it up if it exists.
    */
   nir_parallel_copy_instr *end_pcopy =
      get_parallel_copy_at_end_of_block(block);
   if (end_pcopy)
      resolve_parallel_copy(end_pcopy, state);

   return true;
}

static void
nir_convert_from_ssa_impl(nir_function_impl *impl, bool phi_webs_only)
{
   struct from_ssa_state state;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.phi_webs_only = phi_webs_only;
   state.merge_node_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);

   nir_foreach_block(block, impl) {
      add_parallel_copy_to_end_of_block(block, state.dead_ctx);
   }

   nir_foreach_block(block, impl) {
      isolate_phi_nodes_block(block, state.dead_ctx);
   }

   /* Mark metadata as dirty before we ask for liveness analysis */
   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   nir_metadata_require(impl, nir_metadata_live_ssa_defs |
                              nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      coalesce_phi_nodes_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      aggressive_coalesce_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_registers_block(block, &state);
   }

   nir_foreach_block(block, impl) {
      resolve_parallel_copies_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   /* Clean up dead instructions and the hash table */
   _mesa_hash_table_destroy(state.merge_node_table, NULL);
   ralloc_free(state.dead_ctx);
}

void
nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only)
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         nir_convert_from_ssa_impl(function->impl, phi_webs_only);
   }
}


static void
place_phi_read(nir_shader *shader, nir_register *reg,
               nir_ssa_def *def, nir_block *block)
{
   if (block != def->parent_instr->block) {
      /* Try to go up the single-successor tree */
      bool all_single_successors = true;
      struct set_entry *entry;
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (pred->successors[0] && pred->successors[1]) {
            all_single_successors = false;
            break;
         }
      }

      if (all_single_successors) {
         /* All predecessors of this block have exactly one successor and it
          * is this block so they must eventually lead here without
          * intersecting each other.  Place the reads in the predecessors
          * instead of this block.
          */
         set_foreach(block->predecessors, entry)
            place_phi_read(shader, reg, def, (nir_block *)entry->key);
         return;
      }
   }

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
   mov->src[0].src = nir_src_for_ssa(def);
   mov->dest.dest = nir_dest_for_reg(reg);
   mov->dest.write_mask = (1 << reg->num_components) - 1;
   nir_instr_insert(nir_after_block_before_jump(block), &mov->instr);
}

/** Lower all of the phi nodes in a block to imov's to and from a register
 *
 * This provides a very quick-and-dirty out-of-SSA pass that you can run on a
 * single block to convert all of its phis to a register and some imov's.
 * The code that is generated, while not optimal for actual codegen in a
 * back-end, is easy to generate, correct, and will turn into the same set of
 * phis after you call regs_to_ssa and do some copy propagation.
 *
 * The one intelligent thing this pass does is that it places the moves from
 * the phi sources as high up the predecessor tree as possible instead of in
 * the exact predecessor.  This means that, in particular, it will crawl into
 * the deepest nesting of any if-ladders.  In order to ensure that doing so is
 * safe, it stops as soon as one of the predecessors has multiple successors.
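 *
 * As an illustrative sketch (hypothetical SSA numbering), the phi
 *
 *    ssa_5 = phi block_1: ssa_2, block_2: ssa_3
 *
 * is lowered to a register r0 with
 *
 *    block_1: ...; r0 = imov ssa_2   <- hoisted as far up as is safe
 *    block_2: ...; r0 = imov ssa_3
 *    ssa_6 = imov r0                 <- replaces all uses of ssa_5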
 */
bool
nir_lower_phis_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_shader *shader = impl->function->shader;

   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      assert(phi->dest.is_ssa);

      nir_register *reg = create_reg_for_ssa_def(&phi->dest.ssa, impl);

      nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
      mov->src[0].src = nir_src_for_reg(reg);
      mov->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;
      nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                        phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
                        phi->dest.ssa.name);
      nir_instr_insert(nir_after_instr(&phi->instr), &mov->instr);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&mov->dest.dest.ssa));

      nir_foreach_phi_src(src, phi) {
         assert(src->src.is_ssa);
         place_phi_read(shader, reg, src->src.ssa, src->pred);
      }

      nir_instr_remove(&phi->instr);

      progress = true;
   }

   return progress;
}

struct ssa_def_to_reg_state {
   nir_function_impl *impl;
   bool progress;
};

static bool
dest_replace_ssa_with_reg(nir_dest *dest, void *void_state)
{
   struct ssa_def_to_reg_state *state = void_state;

   if (!dest->is_ssa)
      return true;

   nir_register *reg = create_reg_for_ssa_def(&dest->ssa, state->impl);

   nir_ssa_def_rewrite_uses(&dest->ssa, nir_src_for_reg(reg));

   nir_instr *instr = dest->ssa.parent_instr;
   *dest = nir_dest_for_reg(reg);
   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &reg->defs);

   state->progress = true;

   return true;
}

/** Lower all of the SSA defs in a block to registers
 *
 * This performs the very simple operation of blindly replacing all of the SSA
 * defs in the given block with registers.  If not used carefully, this may
 * result in phi nodes with register sources, which is technically invalid.
 * Fortunately, the register-based into-SSA pass handles them anyway.
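 *
 * One wrinkle worth a sketch: load_const defs are SSA-only, so instead of
 * rewriting their destination we leave the def in place, emit an
 * "r = imov ssa_const" right after the load, and point all other uses at r
 * (r here is just an illustrative name for the new register).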
 */
bool
nir_lower_ssa_defs_to_regs_block(nir_block *block)
{
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   nir_shader *shader = impl->function->shader;

   struct ssa_def_to_reg_state state = {
      .impl = impl,
      .progress = false,
   };

   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_ssa_undef) {
         /* Undefs are just a read of something never written. */
         nir_ssa_undef_instr *undef = nir_instr_as_ssa_undef(instr);
         nir_register *reg = create_reg_for_ssa_def(&undef->def, state.impl);
         nir_ssa_def_rewrite_uses(&undef->def, nir_src_for_reg(reg));
      } else if (instr->type == nir_instr_type_load_const) {
         /* Constant loads are SSA-only, we need to insert a move */
         nir_load_const_instr *load = nir_instr_as_load_const(instr);
         nir_register *reg = create_reg_for_ssa_def(&load->def, state.impl);
         nir_ssa_def_rewrite_uses(&load->def, nir_src_for_reg(reg));

         nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
         mov->src[0].src = nir_src_for_ssa(&load->def);
         mov->dest.dest = nir_dest_for_reg(reg);
         mov->dest.write_mask = (1 << reg->num_components) - 1;
         nir_instr_insert(nir_after_instr(&load->instr), &mov->instr);
      } else {
         nir_foreach_dest(instr, dest_replace_ssa_with_reg, &state);
      }
   }

   return state.progress;
}