1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "nir.h"
25 #include "nir/nir_builder.h"
26 #include "nir_constant_expressions.h"
27 #include "nir_control_flow.h"
28 #include "nir_loop_analyze.h"
29 
30 static nir_ssa_def *clone_alu_and_replace_src_defs(nir_builder *b,
31                                                    const nir_alu_instr *alu,
32                                                    nir_ssa_def **src_defs);
33 
34 /**
35  * Gets the single block that jumps back to the loop header.  Assumes there
36  * is exactly one such block.
37  */
38 static nir_block*
39 find_continue_block(nir_loop *loop)
40 {
41    nir_block *header_block = nir_loop_first_block(loop);
42    nir_block *prev_block =
43       nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));
44 
45    assert(header_block->predecessors->entries == 2);
46 
47    set_foreach(header_block->predecessors, pred_entry) {
48       if (pred_entry->key != prev_block)
49          return (nir_block*)pred_entry->key;
50    }
51 
52    unreachable("Continue block not found!");
53 }
54 
55 /**
56  * Does a phi have one constant value from outside a loop and one from inside?
57  */
58 static bool
59 phi_has_constant_from_outside_and_one_from_inside_loop(nir_phi_instr *phi,
60                                                        const nir_block *entry_block,
61                                                        bool *entry_val,
62                                                        bool *continue_val)
63 {
64    /* We already know we have exactly one continue */
65    assert(exec_list_length(&phi->srcs) == 2);
66 
67    *entry_val = false;
68    *continue_val = false;
69 
70    nir_foreach_phi_src(src, phi) {
71       if (!nir_src_is_const(src->src))
72          return false;
73 
74       if (src->pred != entry_block) {
75          *continue_val = nir_src_as_bool(src->src);
76       } else {
77          *entry_val = nir_src_as_bool(src->src);
78       }
79    }
80 
81    return true;
82 }
83 
84 /**
85  * This optimization detects if statements at the tops of loops where the
86  * condition is a phi node of two constants and moves half of the if to above
87  * the loop and the other half of the if to the end of the loop.  A simple for
88  * loop "for (int i = 0; i < 4; i++)", when run through the SPIR-V front-end,
89  * ends up looking something like this:
90  *
91  * vec1 32 ssa_0 = load_const (0x00000000)
92  * vec1 32 ssa_1 = load_const (0xffffffff)
93  * loop {
94  *    block block_1:
95  *    vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
96  *    vec1 32 ssa_3 = phi block_0: ssa_0, block_7: ssa_1
97  *    if ssa_3 {
98  *       block block_2:
99  *       vec1 32 ssa_4 = load_const (0x00000001)
100  *       vec1 32 ssa_5 = iadd ssa_2, ssa_4
101  *    } else {
102  *       block block_3:
103  *    }
104  *    block block_4:
105  *    vec1 32 ssa_6 = load_const (0x00000004)
106  *    vec1 32 ssa_7 = ilt ssa_5, ssa_6
107  *    if ssa_7 {
108  *       block block_5:
109  *    } else {
110  *       block block_6:
111  *       break
112  *    }
113  *    block block_7:
114  * }
115  *
116  * This turns it into something like this:
117  *
118  * // Stuff from block 1
119  * // Stuff from block 3
120  * loop {
121  *    block block_1:
122  *    vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
123  *    vec1 32 ssa_6 = load_const (0x00000004)
124  *    vec1 32 ssa_7 = ilt ssa_2, ssa_6
125  *    if ssa_7 {
126  *       block block_5:
127  *    } else {
128  *       block block_6:
129  *       break
130  *    }
131  *    block block_7:
132  *    // Stuff from block 1
133  *    // Stuff from block 2
134  *    vec1 32 ssa_4 = load_const (0x00000001)
135  *    vec1 32 ssa_5 = iadd ssa_2, ssa_4
136  * }
137  */
138 static bool
139 opt_peel_loop_initial_if(nir_loop *loop)
140 {
141    nir_block *header_block = nir_loop_first_block(loop);
142    nir_block *const prev_block =
143       nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));
144 
145    /* It would be insane if this were not true */
146    assert(_mesa_set_search(header_block->predecessors, prev_block));
147 
148    /* The loop must have exactly one continue block which could be a block
149     * ending in a continue instruction or the "natural" continue from the
150     * last block in the loop back to the top.
151     */
152    if (header_block->predecessors->entries != 2)
153       return false;
154 
155    nir_cf_node *if_node = nir_cf_node_next(&header_block->cf_node);
156    if (!if_node || if_node->type != nir_cf_node_if)
157       return false;
158 
159    nir_if *nif = nir_cf_node_as_if(if_node);
160    assert(nif->condition.is_ssa);
161 
162    nir_ssa_def *cond = nif->condition.ssa;
163    if (cond->parent_instr->type != nir_instr_type_phi)
164       return false;
165 
166    nir_phi_instr *cond_phi = nir_instr_as_phi(cond->parent_instr);
167    if (cond->parent_instr->block != header_block)
168       return false;
169 
170    bool entry_val = false, continue_val = false;
171    if (!phi_has_constant_from_outside_and_one_from_inside_loop(cond_phi,
172                                                                prev_block,
173                                                                &entry_val,
174                                                                &continue_val))
175       return false;
176 
177    /* If they both execute or both don't execute, this is a job for
178     * nir_dead_cf, not this pass.
179     */
180    if ((entry_val && continue_val) || (!entry_val && !continue_val))
181       return false;
182 
183    struct exec_list *continue_list, *entry_list;
184    if (continue_val) {
185       continue_list = &nif->then_list;
186       entry_list = &nif->else_list;
187    } else {
188       continue_list = &nif->else_list;
189       entry_list = &nif->then_list;
190    }
191 
192    /* We want to move the contents of entry_list to above the loop, so it
193     * must not contain any break or continue instructions.
194     */
195    foreach_list_typed(nir_cf_node, cf_node, node, entry_list) {
196       nir_foreach_block_in_cf_node(block, cf_node) {
197          nir_instr *last_instr = nir_block_last_instr(block);
198          if (last_instr && last_instr->type == nir_instr_type_jump)
199             return false;
200       }
201    }
202 
203    /* We're about to re-arrange a bunch of blocks so make sure that we don't
204     * have deref uses which cross block boundaries.  We don't want a deref
205     * accidentally ending up in a phi.
206     */
207    nir_rematerialize_derefs_in_use_blocks_impl(
208       nir_cf_node_get_function(&loop->cf_node));
209 
210    /* Before we do anything, convert the loop to LCSSA.  We're about to
211     * replace a bunch of SSA defs with registers and this will prevent any of
212     * it from leaking outside the loop.
213     */
214    nir_convert_loop_to_lcssa(loop);
215 
216    nir_block *after_if_block =
217       nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
218 
219    /* Get rid of phis in the header block since we will be duplicating it */
220    nir_lower_phis_to_regs_block(header_block);
221    /* Get rid of phis after the if since dominance will change */
222    nir_lower_phis_to_regs_block(after_if_block);
223 
224    /* Get rid of SSA defs in the pieces we're about to move around */
225    nir_lower_ssa_defs_to_regs_block(header_block);
226    nir_foreach_block_in_cf_node(block, &nif->cf_node)
227       nir_lower_ssa_defs_to_regs_block(block);
228 
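   /* Peel the header: a clone of the header block plus the entry-side half of
    * the if go above the loop, while the original header is reinserted at the
    * end of the continue block, just before the back-edge.
    */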
229    nir_cf_list header, tmp;
230    nir_cf_extract(&header, nir_before_block(header_block),
231                            nir_after_block(header_block));
232 
233    nir_cf_list_clone(&tmp, &header, &loop->cf_node, NULL);
234    nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));
235    nir_cf_extract(&tmp, nir_before_cf_list(entry_list),
236                         nir_after_cf_list(entry_list));
237    nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));
238 
239    nir_cf_reinsert(&header,
240                    nir_after_block_before_jump(find_continue_block(loop)));
241 
242    bool continue_list_jumps =
243       nir_block_ends_in_jump(exec_node_data(nir_block,
244                                             exec_list_get_tail(continue_list),
245                                             cf_node.node));
246 
247    nir_cf_extract(&tmp, nir_before_cf_list(continue_list),
248                         nir_after_cf_list(continue_list));
249 
250    /* Get the continue block again, as the previous reinsert might have removed
251     * the block.  Also, if both the continue list and the continue block end in
252     * jump instructions, remove the jump from the latter, as it will not be
253     * executed if we insert the continue list before it. */
254 
255    nir_block *continue_block = find_continue_block(loop);
256 
257    if (continue_list_jumps) {
258       nir_instr *last_instr = nir_block_last_instr(continue_block);
259       if (last_instr && last_instr->type == nir_instr_type_jump)
260          nir_instr_remove(last_instr);
261    }
262 
263    nir_cf_reinsert(&tmp,
264                    nir_after_block_before_jump(continue_block));
265 
266    nir_cf_node_remove(&nif->cf_node);
267 
268    return true;
269 }
270 
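/* Like nir_alu_instr_is_comparison(), but also treats the explicitly sized
 * 32-bit comparison opcodes listed below as comparisons.
 */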
271 static bool
272 alu_instr_is_comparison(const nir_alu_instr *alu)
273 {
274    switch (alu->op) {
275    case nir_op_flt32:
276    case nir_op_fge32:
277    case nir_op_feq32:
278    case nir_op_fneu32:
279    case nir_op_ilt32:
280    case nir_op_ult32:
281    case nir_op_ige32:
282    case nir_op_uge32:
283    case nir_op_ieq32:
284    case nir_op_ine32:
285       return true;
286    default:
287       return nir_alu_instr_is_comparison(alu);
288    }
289 }
290 
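/* A one-source ALU op whose output type differs from its input type is
 * treated as a type conversion (e.g. i2f32).
 */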
291 static bool
292 alu_instr_is_type_conversion(const nir_alu_instr *alu)
293 {
294    return nir_op_infos[alu->op].num_inputs == 1 &&
295           nir_op_infos[alu->op].output_type != nir_op_infos[alu->op].input_types[0];
296 }
297 
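/* A bcsel/b32csel/fcsel is "trivial" here if all of its sources are trivial
 * SSA values defined in the same block as the bcsel, the condition (src 0) is
 * a phi that only selects constants, and at most one of the value sources
 * (only when allow_non_phi_src is true) is not itself a phi.
 */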
298 static bool
299 is_trivial_bcsel(const nir_instr *instr, bool allow_non_phi_src)
300 {
301    if (instr->type != nir_instr_type_alu)
302       return false;
303 
304    nir_alu_instr *const bcsel = nir_instr_as_alu(instr);
305    if (bcsel->op != nir_op_bcsel &&
306        bcsel->op != nir_op_b32csel &&
307        bcsel->op != nir_op_fcsel)
308       return false;
309 
310    for (unsigned i = 0; i < 3; i++) {
311       if (!nir_alu_src_is_trivial_ssa(bcsel, i) ||
312           bcsel->src[i].src.ssa->parent_instr->block != instr->block)
313          return false;
314 
315       if (bcsel->src[i].src.ssa->parent_instr->type != nir_instr_type_phi) {
316          /* opt_split_alu_of_phi() is able to peel that src from the loop */
317          if (i == 0 || !allow_non_phi_src)
318             return false;
319          allow_non_phi_src = false;
320       }
321    }
322 
323    nir_foreach_phi_src(src, nir_instr_as_phi(bcsel->src[0].src.ssa->parent_instr)) {
324       if (!nir_src_is_const(src->src))
325          return false;
326    }
327 
328    return true;
329 }
330 
331 /**
332  * Splits ALU instructions that have a source that is a phi node
333  *
334  * ALU instructions in the header block of a loop that meet the following
335  * criteria can be split.
336  *
337  * - The loop has no continue instructions other than the "natural" continue
338  *   at the bottom of the loop.
339  *
340  * - At least one source of the instruction is a phi node from the header block.
341  *
342  * - Any non-phi sources of the ALU instruction come from a block that
343  *   dominates the block before the loop.  The most common failure mode for
344  *   this check is sources that are generated in the loop header block.
345  *
346  * - The phi node selects a constant or undef from the block before the loop,
347  *   or the only ALU user is a trivial bcsel that gets removed by peeling the ALU.
348  *
349  * The split process splits the original ALU instruction into two, one at the
350  * bottom of the loop and one at the block before the loop. The instruction
351  * before the loop computes the value on the first iteration, and the
352  * instruction at the bottom computes the value on the second, third, and so
353  * on. A new phi node is added to the header block that selects either the
354  * instruction before the loop or the one at the end, and uses of the original
355  * instruction are replaced by this phi.
356  *
357  * The splitting transforms a loop like:
358  *
359  *    vec1 32 ssa_8 = load_const (0x00000001)
360  *    vec1 32 ssa_10 = load_const (0x00000000)
361  *    // succs: block_1
362  *    loop {
363  *            block block_1:
364  *            // preds: block_0 block_4
365  *            vec1 32 ssa_11 = phi block_0: ssa_10, block_4: ssa_15
366  *            vec1 32 ssa_12 = phi block_0: ssa_1, block_4: ssa_15
367  *            vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
368  *            vec1 32 ssa_14 = iadd ssa_11, ssa_8
369  *            vec1 32 ssa_15 = b32csel ssa_13, ssa_14, ssa_12
370  *            ...
371  *            // succs: block_1
372  *    }
373  *
374  * into:
375  *
376  *    vec1 32 ssa_8 = load_const (0x00000001)
377  *    vec1 32 ssa_10 = load_const (0x00000000)
378  *    vec1 32 ssa_22 = iadd ssa_10, ssa_8
379  *    // succs: block_1
380  *    loop {
381  *            block block_1:
382  *            // preds: block_0 block_4
383  *            vec1 32 ssa_11 = phi block_0: ssa_10, block_4: ssa_15
384  *            vec1 32 ssa_12 = phi block_0: ssa_1, block_4: ssa_15
385  *            vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
386  *            vec1 32 ssa_21 = phi block_0: ssa_22, block_4: ssa_20
387  *            vec1 32 ssa_15 = b32csel ssa_13, ssa_21, ssa_12
388  *            ...
389  *            vec1 32 ssa_20 = iadd ssa_15, ssa_8
390  *            // succs: block_1
391  *    }
392  */
393 static bool
394 opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
395 {
396    bool progress = false;
397    nir_block *header_block = nir_loop_first_block(loop);
398    nir_block *const prev_block =
399       nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));
400 
401    /* It would be insane if this were not true */
402    assert(_mesa_set_search(header_block->predecessors, prev_block));
403 
404    /* The loop must have exactly one continue block which could be a block
405     * ending in a continue instruction or the "natural" continue from the
406     * last block in the loop back to the top.
407     */
408    if (header_block->predecessors->entries != 2)
409       return false;
410 
411    nir_foreach_instr_safe(instr, header_block) {
412       if (instr->type != nir_instr_type_alu)
413          continue;
414 
415       nir_alu_instr *const alu = nir_instr_as_alu(instr);
416 
417       /* nir_op_vec{2,3,4} and nir_op_mov are excluded because they can easily
418        * lead to infinite optimization loops. Splitting comparisons can lead
419        * to loop unrolling not recognizing loop terminators, and type
420        * conversions also lead to regressions.
421        */
422       if (nir_op_is_vec(alu->op) ||
423           alu_instr_is_comparison(alu) ||
424           alu_instr_is_type_conversion(alu))
425          continue;
426 
427       bool has_phi_src_from_prev_block = false;
428       bool all_non_phi_exist_in_prev_block = true;
429       bool is_prev_result_undef = true;
430       bool is_prev_result_const = true;
431       nir_ssa_def *prev_srcs[8];     // FINISHME: Array size?
432       nir_ssa_def *continue_srcs[8]; // FINISHME: Array size?
433 
434       for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
435          nir_instr *const src_instr = alu->src[i].src.ssa->parent_instr;
436 
437          /* If the source is a phi in the loop header block, then the
438           * prev_srcs and continue_srcs will come from the different sources
439           * of the phi.
440           */
441          if (src_instr->type == nir_instr_type_phi &&
442              src_instr->block == header_block) {
443             nir_phi_instr *const phi = nir_instr_as_phi(src_instr);
444 
445             /* Only strictly need to NULL out the pointers when the assertions
446              * (below) are compiled in.  Debugging a NULL pointer deref in the
447              * wild is easier than debugging a random pointer deref, so set
448              * NULL unconditionally just to be safe.
449              */
450             prev_srcs[i] = NULL;
451             continue_srcs[i] = NULL;
452 
453             nir_foreach_phi_src(src_of_phi, phi) {
454                if (src_of_phi->pred == prev_block) {
455                   if (src_of_phi->src.ssa->parent_instr->type !=
456                       nir_instr_type_ssa_undef) {
457                      is_prev_result_undef = false;
458                   }
459 
460                   if (src_of_phi->src.ssa->parent_instr->type !=
461                       nir_instr_type_load_const) {
462                      is_prev_result_const = false;
463                   }
464 
465                   prev_srcs[i] = src_of_phi->src.ssa;
466                   has_phi_src_from_prev_block = true;
467                } else
468                   continue_srcs[i] = src_of_phi->src.ssa;
469             }
470 
471             assert(prev_srcs[i] != NULL);
472             assert(continue_srcs[i] != NULL);
473          } else {
474             /* If the source is not a phi (or a phi in a block other than the
475              * loop header), then the value must exist in prev_block.
476              */
477             if (!nir_block_dominates(src_instr->block, prev_block)) {
478                all_non_phi_exist_in_prev_block = false;
479                break;
480             }
481 
482             prev_srcs[i] = alu->src[i].src.ssa;
483             continue_srcs[i] = alu->src[i].src.ssa;
484          }
485       }
486 
487       if (!has_phi_src_from_prev_block || !all_non_phi_exist_in_prev_block)
488          continue;
489 
490       if (!is_prev_result_undef && !is_prev_result_const) {
491          /* check if the only user is a trivial bcsel */
492          if (!list_is_empty(&alu->dest.dest.ssa.if_uses) ||
493              !list_is_singular(&alu->dest.dest.ssa.uses))
494             continue;
495 
496          nir_src *use = list_first_entry(&alu->dest.dest.ssa.uses, nir_src, use_link);
497          if (!is_trivial_bcsel(use->parent_instr, true))
498             continue;
499       }
500 
501       /* Split ALU of Phi */
502       nir_block *const continue_block = find_continue_block(loop);
503 
504       b->cursor = nir_after_block(prev_block);
505       nir_ssa_def *prev_value = clone_alu_and_replace_src_defs(b, alu, prev_srcs);
506 
507       /* Make a copy of the original ALU instruction.  Replace the sources
508        * of the new instruction that read a phi with an undef source from
509        * prev_block with the non-undef source of that phi.
510        *
511        * Insert the new instruction at the end of the continue block.
512        */
513       b->cursor = nir_after_block_before_jump(continue_block);
514 
515       nir_ssa_def *const alu_copy =
516          clone_alu_and_replace_src_defs(b, alu, continue_srcs);
517 
518       /* Make a new phi node that selects a value from prev_block and the
519        * result of the new instruction from continue_block.
520        */
521       nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
522       nir_phi_src *phi_src;
523 
524       phi_src = ralloc(phi, nir_phi_src);
525       phi_src->pred = prev_block;
526       phi_src->src = nir_src_for_ssa(prev_value);
527       exec_list_push_tail(&phi->srcs, &phi_src->node);
528 
529       phi_src = ralloc(phi, nir_phi_src);
530       phi_src->pred = continue_block;
531       phi_src->src = nir_src_for_ssa(alu_copy);
532       exec_list_push_tail(&phi->srcs, &phi_src->node);
533 
534       nir_ssa_dest_init(&phi->instr, &phi->dest,
535                         alu_copy->num_components, alu_copy->bit_size, NULL);
536 
537       b->cursor = nir_after_phis(header_block);
538       nir_builder_instr_insert(b, &phi->instr);
539 
540       /* Modify all readers of the original ALU instruction to read the
541        * result of the phi.
542        */
543       nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
544                                nir_src_for_ssa(&phi->dest.ssa));
545 
546       /* Since the original ALU instruction no longer has any readers, just
547        * remove it.
548        */
549       nir_instr_remove_v(&alu->instr);
550       ralloc_free(alu);
551 
552       progress = true;
553    }
554 
555    return progress;
556 }
557 
558 /**
559  * Get the SSA value from a phi node that corresponds to a specific block
560  */
561 static nir_ssa_def *
562 ssa_for_phi_from_block(nir_phi_instr *phi, nir_block *block)
563 {
564    nir_foreach_phi_src(src, phi) {
565       if (src->pred == block)
566          return src->src.ssa;
567    }
568 
569    assert(!"Block is not a predecessor of phi.");
570    return NULL;
571 }
572 
573 /**
574  * Simplify a bcsel whose sources are all phi nodes from the loop header block
575  *
576  * bcsel instructions in a loop that meet the following criteria can be
577  * converted to phi nodes:
578  *
579  * - The loop has no continue instructions other than the "natural" continue
580  *   at the bottom of the loop.
581  *
582  * - All of the sources of the bcsel are phi nodes in the header block of the
583  *   loop.
584  *
585  * - The phi node representing the condition of the bcsel instruction chooses
586  *   only constant values.
587  *
588  * The constant value from the condition will select one of the other sources
589  * when entered from outside the loop and the remaining source when entered
590  * from the continue block.  Since each of these sources is also a phi node in
591  * the header block, the value of the phi node can be "evaluated."  These
592  * evaluated phi nodes provide the sources for a new phi node.  All users of
593  * the bcsel result are updated to use the phi node result.
594  *
595  * The replacement transforms loops like:
596  *
597  *    vec1 32 ssa_7 = undefined
598  *    vec1 32 ssa_8 = load_const (0x00000001)
599  *    vec1 32 ssa_9 = load_const (0x000000c8)
600  *    vec1 32 ssa_10 = load_const (0x00000000)
601  *    // succs: block_1
602  *    loop {
603  *            block block_1:
604  *            // preds: block_0 block_4
605  *            vec1 32 ssa_11 = phi block_0: ssa_1, block_4: ssa_14
606  *            vec1 32 ssa_12 = phi block_0: ssa_10, block_4: ssa_15
607  *            vec1 32 ssa_13 = phi block_0: ssa_7, block_4: ssa_25
608  *            vec1 32 ssa_14 = b32csel ssa_12, ssa_13, ssa_11
609  *            vec1 32 ssa_16 = ige32 ssa_14, ssa_9
610  *            ...
611  *            vec1 32 ssa_15 = load_const (0xffffffff)
612  *            ...
613  *            vec1 32 ssa_25 = iadd ssa_14, ssa_8
614  *            // succs: block_1
615  *    }
616  *
617  * into:
618  *
619  *    vec1 32 ssa_7 = undefined
620  *    vec1 32 ssa_8 = load_const (0x00000001)
621  *    vec1 32 ssa_9 = load_const (0x000000c8)
622  *    vec1 32 ssa_10 = load_const (0x00000000)
623  *    // succs: block_1
624  *    loop {
625  *            block block_1:
626  *            // preds: block_0 block_4
627  *            vec1 32 ssa_11 = phi block_0: ssa_1, block_4: ssa_14
628  *            vec1 32 ssa_12 = phi block_0: ssa_10, block_4: ssa_15
629  *            vec1 32 ssa_13 = phi block_0: ssa_7, block_4: ssa_25
630  *            vec1 32 ssa_26 = phi block_0: ssa_1, block_4: ssa_25
631  *            vec1 32 ssa_16 = ige32 ssa_26, ssa_9
632  *            ...
633  *            vec1 32 ssa_15 = load_const (0xffffffff)
634  *            ...
635  *            vec1 32 ssa_25 = iadd ssa_26, ssa_8
636  *            // succs: block_1
637  *    }
638  *
639  * \note
640  * It may be possible to modify this function to not require a phi node as the
641  * source of the bcsel that is selected when entering from outside the loop.
642  * The only restriction is that the source must be generated outside the loop
643  * (since it will become the source of a phi node in the header block of the
644  * loop).
645  */
646 static bool
647 opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
648 {
649    bool progress = false;
650    nir_block *header_block = nir_loop_first_block(loop);
651    nir_block *const prev_block =
652       nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));
653 
654    /* It would be insane if this were not true */
655    assert(_mesa_set_search(header_block->predecessors, prev_block));
656 
657    /* The loop must have exactly one continue block which could be a block
658     * ending in a continue instruction or the "natural" continue from the
659     * last block in the loop back to the top.
660     */
661    if (header_block->predecessors->entries != 2)
662       return false;
663 
664    /* We can move any bcsel that is guaranteed to execute on every iteration
665     * of a loop.  For now this is accomplished by only taking bcsels from the
666     * header_block.  In the future, this could be expanded to include any
667     * bcsel that must come before any break.
668     *
669     * For more details, see
670     * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/170#note_110305
671     */
672    nir_foreach_instr_safe(instr, header_block) {
673       if (!is_trivial_bcsel(instr, false))
674          continue;
675 
676       nir_alu_instr *const bcsel = nir_instr_as_alu(instr);
677       nir_phi_instr *const cond_phi =
678          nir_instr_as_phi(bcsel->src[0].src.ssa->parent_instr);
679 
680       bool entry_val = false, continue_val = false;
681       if (!phi_has_constant_from_outside_and_one_from_inside_loop(cond_phi,
682                                                                   prev_block,
683                                                                   &entry_val,
684                                                                   &continue_val))
685          continue;
686 
687       /* If they both execute or both don't execute, this is a job for
688        * nir_dead_cf, not this pass.
689        */
690       if ((entry_val && continue_val) || (!entry_val && !continue_val))
691          continue;
692 
693       const unsigned entry_src = entry_val ? 1 : 2;
694       const unsigned continue_src = entry_val ? 2 : 1;
695 
696       /* Create a new phi node that selects the value for prev_block from
697        * the bcsel source that is selected by entry_val and the value for
698        * continue_block from the other bcsel source.  Both sources have
699        * already been verified to be phi nodes.
700        */
701       nir_block *const continue_block = find_continue_block(loop);
702       nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
703       nir_phi_src *phi_src;
704 
705       phi_src = ralloc(phi, nir_phi_src);
706       phi_src->pred = prev_block;
707       phi_src->src =
708          nir_src_for_ssa(ssa_for_phi_from_block(nir_instr_as_phi(bcsel->src[entry_src].src.ssa->parent_instr),
709                                                 prev_block));
710       exec_list_push_tail(&phi->srcs, &phi_src->node);
711 
712       phi_src = ralloc(phi, nir_phi_src);
713       phi_src->pred = continue_block;
714       phi_src->src =
715          nir_src_for_ssa(ssa_for_phi_from_block(nir_instr_as_phi(bcsel->src[continue_src].src.ssa->parent_instr),
716                                                 continue_block));
717       exec_list_push_tail(&phi->srcs, &phi_src->node);
718 
719       nir_ssa_dest_init(&phi->instr,
720                         &phi->dest,
721                         nir_dest_num_components(bcsel->dest.dest),
722                         nir_dest_bit_size(bcsel->dest.dest),
723                         NULL);
724 
725       b->cursor = nir_after_phis(header_block);
726       nir_builder_instr_insert(b, &phi->instr);
727 
728       /* Modify all readers of the bcsel instruction to read the result of
729        * the phi.
730        */
731       nir_ssa_def_rewrite_uses(&bcsel->dest.dest.ssa,
732                                nir_src_for_ssa(&phi->dest.ssa));
733 
734       /* Since the original bcsel instruction no longer has any readers,
735        * just remove it.
736        */
737       nir_instr_remove_v(&bcsel->instr);
738       ralloc_free(bcsel);
739 
740       progress = true;
741    }
742 
743    return progress;
744 }
745 
746 static bool
747 is_block_empty(nir_block *block)
748 {
749    return nir_cf_node_is_last(&block->cf_node) &&
750           exec_list_is_empty(&block->instr_list);
751 }
752 
753 static bool
754 nir_block_ends_in_continue(nir_block *block)
755 {
756    if (exec_list_is_empty(&block->instr_list))
757       return false;
758 
759    nir_instr *instr = nir_block_last_instr(block);
760    return instr->type == nir_instr_type_jump &&
761       nir_instr_as_jump(instr)->type == nir_jump_continue;
762 }
763 
764 /**
765  * This optimization turns:
766  *
767  *     loop {
768  *        ...
769  *        if (cond) {
770  *           do_work_1();
771  *           continue;
772  *        } else {
773  *        }
774  *        do_work_2();
775  *     }
776  *
777  * into:
778  *
779  *     loop {
780  *        ...
781  *        if (cond) {
782  *           do_work_1();
783  *           continue;
784  *        } else {
785  *           do_work_2();
786  *        }
787  *     }
788  *
789  * The continue should then be removed by nir_opt_trivial_continues() and the
790  * loop can potentially be unrolled.
791  *
792  * Note: Unless the function param aggressive_last_continue==true, do_work_2()
793  * is only ever blocks and nested loops. We avoid nesting other if-statements
794  * in the branch as this can result in increased register pressure, and in
795  * the i965 driver it causes a large amount of spilling in shader-db.
796  * For RADV, however, nesting these if-statements allows further continues to
797  * be removed and provides a significant FPS boost in Doom, which is why we
798  * have opted for this special bool to enable more aggressive optimisations.
799  * TODO: The GCM pass solves most of the spilling regressions in i965; if it
800  * is ever enabled we should consider removing the aggressive_last_continue
801  * param.
802  */
803 static bool
804 opt_if_loop_last_continue(nir_loop *loop, bool aggressive_last_continue)
805 {
806    nir_if *nif;
807    bool then_ends_in_continue = false;
808    bool else_ends_in_continue = false;
809 
810    /* Scan the control flow of the loop from the last to the first node
811     * looking for an if-statement we can optimise.
812     */
813    nir_block *last_block = nir_loop_last_block(loop);
814    nir_cf_node *if_node = nir_cf_node_prev(&last_block->cf_node);
815    while (if_node) {
816       if (if_node->type == nir_cf_node_if) {
817          nif = nir_cf_node_as_if(if_node);
818          nir_block *then_block = nir_if_last_then_block(nif);
819          nir_block *else_block = nir_if_last_else_block(nif);
820 
821          then_ends_in_continue = nir_block_ends_in_continue(then_block);
822          else_ends_in_continue = nir_block_ends_in_continue(else_block);
823 
824          /* If both branches end in a jump, do nothing; this should be handled
825           * by nir_opt_dead_cf().
826           */
827          if ((then_ends_in_continue || nir_block_ends_in_break(then_block)) &&
828              (else_ends_in_continue || nir_block_ends_in_break(else_block)))
829             return false;
830 
831          /* If a continue was found, or we are not being aggressive, stop
832           * scanning and attempt the optimisation. */
833          if (then_ends_in_continue || else_ends_in_continue ||
834              !aggressive_last_continue)
835             break;
836       }
837 
838       if_node = nir_cf_node_prev(if_node);
839    }
840 
841    /* If we didn't find an if to optimise return */
842    if (!then_ends_in_continue && !else_ends_in_continue)
843       return false;
844 
845    /* If there is nothing after the if-statement we bail */
846    if (&nif->cf_node == nir_cf_node_prev(&last_block->cf_node) &&
847        exec_list_is_empty(&last_block->instr_list))
848       return false;
849 
850    /* Move the last block of the loop inside the last if-statement */
851    nir_cf_list tmp;
852    nir_cf_extract(&tmp, nir_after_cf_node(if_node),
853                         nir_after_block(last_block));
854    if (then_ends_in_continue)
855       nir_cf_reinsert(&tmp, nir_after_cf_list(&nif->else_list));
856    else
857       nir_cf_reinsert(&tmp, nir_after_cf_list(&nif->then_list));
858 
859    /* In order to avoid running nir_lower_regs_to_ssa_impl() every time an if
860     * opt makes progress, we leave nir_opt_trivial_continues() to remove the
861     * continue now that the end of the loop has been simplified.
862     */
863 
864    return true;
865 }
866 
867 /* Walk all the phis in the block immediately following the if statement and
868  * rewrite their predecessors from the old then/else blocks to the new ones.
869  */
870 static void
871 rewrite_phi_predecessor_blocks(nir_if *nif,
872                                nir_block *old_then_block,
873                                nir_block *old_else_block,
874                                nir_block *new_then_block,
875                                nir_block *new_else_block)
876 {
877    nir_block *after_if_block =
878       nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
879 
880    nir_foreach_instr(instr, after_if_block) {
881       if (instr->type != nir_instr_type_phi)
882          continue;
883 
884       nir_phi_instr *phi = nir_instr_as_phi(instr);
885 
886       foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
887          if (src->pred == old_then_block) {
888             src->pred = new_then_block;
889          } else if (src->pred == old_else_block) {
890             src->pred = new_else_block;
891          }
892       }
893    }
894 }
895 
896 /**
897  * This optimization turns:
898  *
899  *     if (cond) {
900  *     } else {
901  *         do_work();
902  *     }
903  *
904  * into:
905  *
906  *     if (!cond) {
907  *         do_work();
908  *     } else {
909  *     }
910  */
911 static bool
912 opt_if_simplification(nir_builder *b, nir_if *nif)
913 {
914    /* Only simplify if the then block is empty and the else block is not. */
915    if (!is_block_empty(nir_if_first_then_block(nif)) ||
916        is_block_empty(nir_if_first_else_block(nif)))
917       return false;
918 
919    /* Make sure the condition is a comparison operation. */
920    nir_instr *src_instr = nif->condition.ssa->parent_instr;
921    if (src_instr->type != nir_instr_type_alu)
922       return false;
923 
924    nir_alu_instr *alu_instr = nir_instr_as_alu(src_instr);
925    if (!nir_alu_instr_is_comparison(alu_instr))
926       return false;
927 
928    /* Insert the inverted instruction and rewrite the condition. */
929    b->cursor = nir_after_instr(&alu_instr->instr);
930 
931    nir_ssa_def *new_condition =
932       nir_inot(b, &alu_instr->dest.dest.ssa);
933 
934    nir_if_rewrite_condition(nif, nir_src_for_ssa(new_condition));
935 
936    /* Grab pointers to the last then/else blocks for fixing up the phis. */
937    nir_block *then_block = nir_if_last_then_block(nif);
938    nir_block *else_block = nir_if_last_else_block(nif);
939 
940    if (nir_block_ends_in_jump(else_block)) {
941       /* Even though this if statement has a jump on one side, we may still have
942        * phis afterwards.  Single-source phis can be produced by loop unrolling
943        * or dead control-flow passes and are perfectly legal.  Run a quick phi
944        * removal on the block after the if to clean up any such phis.
945        */
946       nir_block *const next_block =
947          nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
948       nir_opt_remove_phis_block(next_block);
949    }
950 
951    rewrite_phi_predecessor_blocks(nif, then_block, else_block, else_block,
952                                   then_block);
953 
954    /* Finally, move the else block to the then block. */
955    nir_cf_list tmp;
956    nir_cf_extract(&tmp, nir_before_cf_list(&nif->else_list),
957                         nir_after_cf_list(&nif->else_list));
958    nir_cf_reinsert(&tmp, nir_before_cf_list(&nif->then_list));
959 
960    return true;
961 }
962 
963 /**
964  * This optimization simplifies potential loop terminators which then allows
965  * other passes such as opt_if_simplification() and loop unrolling to progress
966  * further:
967  *
968  *     if (cond) {
969  *        ... then block instructions ...
970  *     } else {
971  *         ...
972  *        break;
973  *     }
974  *
975  * into:
976  *
977  *     if (cond) {
978  *     } else {
979  *         ...
980  *        break;
981  *     }
982  *     ... then block instructions ...
983  */
984 static bool
985 opt_if_loop_terminator(nir_if *nif)
986 {
987    nir_block *break_blk = NULL;
988    nir_block *continue_from_blk = NULL;
989    bool continue_from_then = true;
990 
991    nir_block *last_then = nir_if_last_then_block(nif);
992    nir_block *last_else = nir_if_last_else_block(nif);
993 
994    if (nir_block_ends_in_break(last_then)) {
995       break_blk = last_then;
996       continue_from_blk = last_else;
997       continue_from_then = false;
998    } else if (nir_block_ends_in_break(last_else)) {
999       break_blk = last_else;
1000       continue_from_blk = last_then;
1001    }
1002 
1003    /* Bail if neither branch of the if-statement ends in a break */
1004    if (!break_blk)
1005       return false;
1006 
1007    /* If the continue from block is empty then return as there is nothing to
1008     * move.
1009     */
1010    nir_block *first_continue_from_blk = continue_from_then ?
1011       nir_if_first_then_block(nif) :
1012       nir_if_first_else_block(nif);
1013    if (is_block_empty(first_continue_from_blk))
1014       return false;
1015 
1016    if (nir_block_ends_in_jump(continue_from_blk))
1017       return false;
1018 
1019    /* Even though this if statement has a jump on one side, we may still have
1020     * phis afterwards.  Single-source phis can be produced by loop unrolling
1021     * or dead control-flow passes and are perfectly legal.  Run a quick phi
1022     * removal on the block after the if to clean up any such phis.
1023     */
1024    nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));
1025 
1026    /* Finally, move the continue from branch after the if-statement. */
1027    nir_cf_list tmp;
1028    nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
1029                         nir_after_block(continue_from_blk));
1030    nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));
1031 
1032    return true;
1033 }
1034 
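/* If the cursor lies in a block dominated by one of the branches of \p nif,
 * the value of the if condition is known there: true in the then branch,
 * false in the else branch.  Returns false when the condition cannot be
 * determined at \p cursor.
 */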
1035 static bool
1036 evaluate_if_condition(nir_if *nif, nir_cursor cursor, bool *value)
1037 {
1038    nir_block *use_block = nir_cursor_current_block(cursor);
1039    if (nir_block_dominates(nir_if_first_then_block(nif), use_block)) {
1040       *value = true;
1041       return true;
1042    } else if (nir_block_dominates(nir_if_first_else_block(nif), use_block)) {
1043       *value = false;
1044       return true;
1045    } else {
1046       return false;
1047    }
1048 }
1049 
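/* Clone \p alu with each source replaced by the corresponding entry of
 * \p src_defs, preserving swizzles, negate/abs modifiers, the write mask and
 * the exact/saturate flags, and insert the clone at the builder's cursor.
 */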
1050 static nir_ssa_def *
1051 clone_alu_and_replace_src_defs(nir_builder *b, const nir_alu_instr *alu,
1052                                nir_ssa_def **src_defs)
1053 {
1054    nir_alu_instr *nalu = nir_alu_instr_create(b->shader, alu->op);
1055    nalu->exact = alu->exact;
1056 
1057    nir_ssa_dest_init(&nalu->instr, &nalu->dest.dest,
1058                      alu->dest.dest.ssa.num_components,
1059                      alu->dest.dest.ssa.bit_size, alu->dest.dest.ssa.name);
1060 
1061    nalu->dest.saturate = alu->dest.saturate;
1062    nalu->dest.write_mask = alu->dest.write_mask;
1063 
1064    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
1065       assert(alu->src[i].src.is_ssa);
1066       nalu->src[i].src = nir_src_for_ssa(src_defs[i]);
1067       nalu->src[i].negate = alu->src[i].negate;
1068       nalu->src[i].abs = alu->src[i].abs;
1069       memcpy(nalu->src[i].swizzle, alu->src[i].swizzle,
1070              sizeof(nalu->src[i].swizzle));
1071    }
1072 
1073    nir_builder_instr_insert(b, &nalu->instr);
1074 
1075    return &nalu->dest.dest.ssa;
1076 }
1077 
1078 /*
1079  * This propagates if condition evaluation down the chain of some alu
1080  * instructions. For example, by checking the uses of some of the following alu
1081  * instructions we can eventually replace ssa_107 with NIR_TRUE.
1082  *
1083  *   loop {
1084  *      block block_1:
1085  *      vec1 32 ssa_85 = load_const (0x00000002)
1086  *      vec1 32 ssa_86 = ieq ssa_48, ssa_85
1087  *      vec1 32 ssa_87 = load_const (0x00000001)
1088  *      vec1 32 ssa_88 = ieq ssa_48, ssa_87
1089  *      vec1 32 ssa_89 = ior ssa_86, ssa_88
1090  *      vec1 32 ssa_90 = ieq ssa_48, ssa_0
1091  *      vec1 32 ssa_91 = ior ssa_89, ssa_90
1092  *      if ssa_86 {
1093  *         block block_2:
1094  *             ...
1095  *            break
1096  *      } else {
1097  *            block block_3:
1098  *      }
1099  *      block block_4:
1100  *      if ssa_88 {
1101  *            block block_5:
1102  *             ...
1103  *            break
1104  *      } else {
1105  *            block block_6:
1106  *      }
1107  *      block block_7:
1108  *      if ssa_90 {
1109  *            block block_8:
1110  *             ...
1111  *            break
1112  *      } else {
1113  *            block block_9:
1114  *      }
1115  *      block block_10:
1116  *      vec1 32 ssa_107 = inot ssa_91
1117  *      if ssa_107 {
1118  *            block block_11:
1119  *            break
1120  *      } else {
1121  *            block block_12:
1122  *      }
1123  *   }
1124  */
1125 static bool
1126 propagate_condition_eval(nir_builder *b, nir_if *nif, nir_src *use_src,
1127                          nir_src *alu_use, nir_alu_instr *alu,
1128                          bool is_if_condition)
1129 {
1130    bool bool_value;
1131    b->cursor = nir_before_src(alu_use, is_if_condition);
1132    if (!evaluate_if_condition(nif, b->cursor, &bool_value))
1133       return false;
1134 
1135    nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS] = {0};
1136    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
1137       if (alu->src[i].src.ssa == use_src->ssa) {
1138          def[i] = nir_imm_bool(b, bool_value);
1139       } else {
1140          def[i] = alu->src[i].src.ssa;
1141       }
1142    }
1143 
1144    nir_ssa_def *nalu = clone_alu_and_replace_src_defs(b, alu, def);
1145 
1146    /* Rewrite use to use new alu instruction */
1147    nir_src new_src = nir_src_for_ssa(nalu);
1148 
1149    if (is_if_condition)
1150       nir_if_rewrite_condition(alu_use->parent_if, new_src);
1151    else
1152       nir_instr_rewrite_src(alu_use->parent_instr, alu_use, new_src);
1153 
1154    return true;
1155 }
1156 
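/* Condition propagation only looks through a small set of boolean-ish ALU
 * ops: ior, iand, inot, b2i32, and the condition source (src 0) of a bcsel.
 */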
1157 static bool
1158 can_propagate_through_alu(nir_src *src)
1159 {
1160    if (src->parent_instr->type != nir_instr_type_alu)
1161       return false;
1162 
1163    nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
1164    switch (alu->op) {
1165       case nir_op_ior:
1166       case nir_op_iand:
1167       case nir_op_inot:
1168       case nir_op_b2i32:
1169          return true;
1170       case nir_op_bcsel:
1171          return src == &alu->src[0].src;
1172       default:
1173          return false;
1174    }
1175 }
1176 
1177 static bool
1178 evaluate_condition_use(nir_builder *b, nir_if *nif, nir_src *use_src,
1179                        bool is_if_condition)
1180 {
1181    bool progress = false;
1182 
1183    b->cursor = nir_before_src(use_src, is_if_condition);
1184 
1185    bool bool_value;
1186    if (evaluate_if_condition(nif, b->cursor, &bool_value)) {
1187       /* Rewrite use to use const */
1188       nir_src imm_src = nir_src_for_ssa(nir_imm_bool(b, bool_value));
1189       if (is_if_condition)
1190          nir_if_rewrite_condition(use_src->parent_if, imm_src);
1191       else
1192          nir_instr_rewrite_src(use_src->parent_instr, use_src, imm_src);
1193 
1194       progress = true;
1195    }
1196 
1197    if (!is_if_condition && can_propagate_through_alu(use_src)) {
1198       nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);
1199 
1200       nir_foreach_use_safe(alu_use, &alu->dest.dest.ssa) {
1201          progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
1202                                               false);
1203       }
1204 
1205       nir_foreach_if_use_safe(alu_use, &alu->dest.dest.ssa) {
1206          progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
1207                                               true);
1208       }
1209    }
1210 
1211    return progress;
1212 }
1213 
1214 static bool
1215 opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
1216 {
1217    bool progress = false;
1218 
1219    /* Evaluate any uses of the if condition inside the if branches */
1220    assert(nif->condition.is_ssa);
1221    nir_foreach_use_safe(use_src, nif->condition.ssa) {
1222       progress |= evaluate_condition_use(b, nif, use_src, false);
1223    }
1224 
1225    nir_foreach_if_use_safe(use_src, nif->condition.ssa) {
1226       if (use_src->parent_if != nif)
1227          progress |= evaluate_condition_use(b, nif, use_src, true);
1228    }
1229 
1230    return progress;
1231 }
1232 
1233 static void
1234 simple_merge_if(nir_if *dest_if, nir_if *src_if, bool dest_if_then,
1235                 bool src_if_then)
1236 {
1237    /* Now merge the if branch */
1238    nir_block *dest_blk = dest_if_then ? nir_if_last_then_block(dest_if)
1239                                       : nir_if_last_else_block(dest_if);
1240 
1241    struct exec_list *list = src_if_then ? &src_if->then_list
1242                                         : &src_if->else_list;
1243 
1244    nir_cf_list if_cf_list;
1245    nir_cf_extract(&if_cf_list, nir_before_cf_list(list),
1246                   nir_after_cf_list(list));
1247    nir_cf_reinsert(&if_cf_list, nir_after_block(dest_blk));
1248 }
1249 
1250 static bool
1251 opt_if_merge(nir_if *nif)
1252 {
1253    bool progress = false;
1254 
1255    nir_block *next_blk = nir_cf_node_cf_tree_next(&nif->cf_node);
1256    if (!next_blk || !nif->condition.is_ssa)
1257       return false;
1258 
1259    nir_if *next_if = nir_block_get_following_if(next_blk);
1260    if (!next_if || !next_if->condition.is_ssa)
1261       return false;
1262 
1263    /* Here we merge two consecutive ifs that have the same condition e.g:
1264     *
1265     *   if ssa_12 {
1266     *      ...
1267     *   } else {
1268     *      ...
1269     *   }
1270     *   if ssa_12 {
1271     *      ...
1272     *   } else {
1273     *      ...
1274     *   }
1275     *
1276     * Note: This only merges if-statements when the block between them is
1277     * empty. The reason we don't try to merge ifs that just have phis between
1278     * them is that this can result in increased register pressure, for
1279     * example when merging if ladders created by indirect indexing.
1280     */
1281    if (nif->condition.ssa == next_if->condition.ssa &&
1282        exec_list_is_empty(&next_blk->instr_list)) {
1283 
1284       /* This optimization isn't made to work in this case and
1285        * opt_if_evaluate_condition_use will optimize it later.
1286        */
1287       if (nir_block_ends_in_jump(nir_if_last_then_block(nif)) ||
1288           nir_block_ends_in_jump(nir_if_last_else_block(nif)))
1289          return false;
1290 
1291       simple_merge_if(nif, next_if, true, true);
1292       simple_merge_if(nif, next_if, false, false);
1293 
1294       nir_block *new_then_block = nir_if_last_then_block(nif);
1295       nir_block *new_else_block = nir_if_last_else_block(nif);
1296 
1297       nir_block *old_then_block = nir_if_last_then_block(next_if);
1298       nir_block *old_else_block = nir_if_last_else_block(next_if);
1299 
1300       /* Rewrite the predecessor block for any phis following the second
1301        * if-statement.
1302        */
1303       rewrite_phi_predecessor_blocks(next_if, old_then_block,
1304                                      old_else_block,
1305                                      new_then_block,
1306                                      new_else_block);
1307 
1308       /* Move phis after merged if to avoid them being deleted when we remove
1309        * the merged if-statement.
1310        */
1311       nir_block *after_next_if_block =
1312          nir_cf_node_as_block(nir_cf_node_next(&next_if->cf_node));
1313 
1314       nir_foreach_instr_safe(instr, after_next_if_block) {
1315          if (instr->type != nir_instr_type_phi)
1316             break;
1317 
1318          exec_node_remove(&instr->node);
1319          exec_list_push_tail(&next_blk->instr_list, &instr->node);
1320          instr->block = next_blk;
1321       }
1322 
1323       nir_cf_node_remove(&next_if->cf_node);
1324 
1325       progress = true;
1326    }
1327 
1328    return progress;
1329 }
1330 
1331 static bool
1332 opt_if_cf_list(nir_builder *b, struct exec_list *cf_list,
1333                bool aggressive_last_continue)
1334 {
1335    bool progress = false;
1336    foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
1337       switch (cf_node->type) {
1338       case nir_cf_node_block:
1339          break;
1340 
1341       case nir_cf_node_if: {
1342          nir_if *nif = nir_cf_node_as_if(cf_node);
1343          progress |= opt_if_cf_list(b, &nif->then_list,
1344                                     aggressive_last_continue);
1345          progress |= opt_if_cf_list(b, &nif->else_list,
1346                                     aggressive_last_continue);
1347          progress |= opt_if_loop_terminator(nif);
1348          progress |= opt_if_merge(nif);
1349          progress |= opt_if_simplification(b, nif);
1350          break;
1351       }
1352 
1353       case nir_cf_node_loop: {
1354          nir_loop *loop = nir_cf_node_as_loop(cf_node);
1355          progress |= opt_if_cf_list(b, &loop->body,
1356                                     aggressive_last_continue);
1357          progress |= opt_simplify_bcsel_of_phi(b, loop);
1358          progress |= opt_if_loop_last_continue(loop,
1359                                                aggressive_last_continue);
1360          break;
1361       }
1362 
1363       case nir_cf_node_function:
1364          unreachable("Invalid cf type");
1365       }
1366    }
1367 
1368    return progress;
1369 }
1370 
1371 static bool
1372 opt_peel_loop_initial_if_cf_list(struct exec_list *cf_list)
1373 {
1374    bool progress = false;
1375    foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
1376       switch (cf_node->type) {
1377       case nir_cf_node_block:
1378          break;
1379 
1380       case nir_cf_node_if: {
1381          nir_if *nif = nir_cf_node_as_if(cf_node);
1382          progress |= opt_peel_loop_initial_if_cf_list(&nif->then_list);
1383          progress |= opt_peel_loop_initial_if_cf_list(&nif->else_list);
1384          break;
1385       }
1386 
1387       case nir_cf_node_loop: {
1388          nir_loop *loop = nir_cf_node_as_loop(cf_node);
1389          progress |= opt_peel_loop_initial_if_cf_list(&loop->body);
1390          progress |= opt_peel_loop_initial_if(loop);
1391          break;
1392       }
1393 
1394       case nir_cf_node_function:
1395          unreachable("Invalid cf type");
1396       }
1397    }
1398 
1399    return progress;
1400 }
1401 
1402 /**
1403  * These optimisations depend on nir_metadata_block_index and therefore must
1404  * not do anything to cause the metadata to become invalid.
1405  */
1406 static bool
1407 opt_if_safe_cf_list(nir_builder *b, struct exec_list *cf_list)
1408 {
1409    bool progress = false;
1410    foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
1411       switch (cf_node->type) {
1412       case nir_cf_node_block:
1413          break;
1414 
1415       case nir_cf_node_if: {
1416          nir_if *nif = nir_cf_node_as_if(cf_node);
1417          progress |= opt_if_safe_cf_list(b, &nif->then_list);
1418          progress |= opt_if_safe_cf_list(b, &nif->else_list);
1419          progress |= opt_if_evaluate_condition_use(b, nif);
1420          break;
1421       }
1422 
1423       case nir_cf_node_loop: {
1424          nir_loop *loop = nir_cf_node_as_loop(cf_node);
1425          progress |= opt_if_safe_cf_list(b, &loop->body);
1426          progress |= opt_split_alu_of_phi(b, loop);
1427          break;
1428       }
1429 
1430       case nir_cf_node_function:
1431          unreachable("Invalid cf type");
1432       }
1433    }
1434 
1435    return progress;
1436 }
1437 
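/* Entry point.  As an illustrative sketch (not taken from any particular
 * driver), callers typically run this pass inside their optimization loop and
 * iterate until no pass reports progress:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_if(shader, false);
 *       progress |= nir_opt_dead_cf(shader);
 *       ...
 *    } while (progress);
 */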
1438 bool
1439 nir_opt_if(nir_shader *shader, bool aggressive_last_continue)
1440 {
1441    bool progress = false;
1442 
1443    nir_foreach_function(function, shader) {
1444       if (function->impl == NULL)
1445          continue;
1446 
1447       nir_builder b;
1448       nir_builder_init(&b, function->impl);
1449 
1450       nir_metadata_require(function->impl, nir_metadata_block_index |
1451                            nir_metadata_dominance);
1452       progress = opt_if_safe_cf_list(&b, &function->impl->body);
1453       nir_metadata_preserve(function->impl, nir_metadata_block_index |
1454                             nir_metadata_dominance);
1455 
1456       bool preserve = true;
1457 
1458       if (opt_if_cf_list(&b, &function->impl->body, aggressive_last_continue)) {
1459          preserve = false;
1460          progress = true;
1461       }
1462 
1463       if (opt_peel_loop_initial_if_cf_list(&function->impl->body)) {
1464          preserve = false;
1465          progress = true;
1466 
1467          /* If that made progress, we're no longer really in SSA form.  We
1468           * need to convert registers back into SSA defs and clean up SSA defs
1469           * that don't dominate their uses.
1470           */
1471          nir_lower_regs_to_ssa_impl(function->impl);
1472       }
1473 
1474       if (preserve) {
1475          nir_metadata_preserve(function->impl, nir_metadata_none);
1476       } else {
1477          nir_metadata_preserve(function->impl, nir_metadata_all);
1478       }
1479    }
1480 
1481    return progress;
1482 }
1483