• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Red Hat
3  * Copyright © 2019 Valve Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  * Authors:
 *    Rob Clark <robdclark@gmail.com>
26  *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
27  *    Rhys Perry (pendingchaos02@gmail.com)
28  *
29  */
30 
31 #include "nir.h"
32 
33 
/*
 * A simple pass that moves some instructions into the least common
 * ancestor of consuming instructions.
 */
38 
39 bool
nir_can_move_instr(nir_instr * instr,nir_move_options options)40 nir_can_move_instr(nir_instr *instr, nir_move_options options)
41 {
42    switch (instr->type) {
43    case nir_instr_type_load_const:
44    case nir_instr_type_ssa_undef: {
45       return options & nir_move_const_undef;
46    }
47    case nir_instr_type_alu: {
48       if (nir_op_is_vec(nir_instr_as_alu(instr)->op) ||
49           nir_instr_as_alu(instr)->op == nir_op_b2i32)
50          return options & nir_move_copies;
51       if (nir_alu_instr_is_comparison(nir_instr_as_alu(instr)))
52          return options & nir_move_comparisons;
53       return false;
54    }
55    case nir_instr_type_intrinsic: {
56       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
57       switch (intrin->intrinsic) {
58       case nir_intrinsic_load_ubo:
59          return options & nir_move_load_ubo;
60       case nir_intrinsic_load_input:
61       case nir_intrinsic_load_interpolated_input:
62       case nir_intrinsic_load_per_vertex_input:
63          return options & nir_move_load_input;
64       default:
65          return false;
66       }
67    }
68    default:
69       return false;
70    }
71 }
72 
73 static nir_loop *
get_innermost_loop(nir_cf_node * node)74 get_innermost_loop(nir_cf_node *node)
75 {
76    for (; node != NULL; node = node->parent) {
77       if (node->type == nir_cf_node_loop)
78          return (nir_loop*)node;
79    }
80    return NULL;
81 }
82 
83 static bool
loop_contains_block(nir_loop * loop,nir_block * block)84 loop_contains_block(nir_loop *loop, nir_block *block)
85 {
86    nir_block *before = nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));
87    nir_block *after = nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node));
88 
89    return block->index > before->index && block->index < after->index;
90 }
91 
92 /* Given the LCA of all uses and the definition, find a block on the path
93  * between them in the dominance tree that is outside of as many loops as
94  * possible. If "sink_out_of_loops" is false, then we disallow sinking the
95  * definition outside of the loop it's defined in (if any).
96  */
97 
static nir_block *
adjust_block_for_loops(nir_block *use_block, nir_block *def_block,
                       bool sink_out_of_loops)
{
   /* Innermost loop containing the definition; only needed when sinking
    * past the def's own loop boundary is disallowed.
    */
   nir_loop *def_loop = NULL;
   if (!sink_out_of_loops)
      def_loop = get_innermost_loop(&def_block->cf_node);

   /* Walk up the dominance tree from the uses' LCA toward def_block
    * (the loop stops once cur_block reaches def_block's immediate
    * dominator), keeping in use_block the best candidate block seen.
    */
   for (nir_block *cur_block = use_block; cur_block != def_block->imm_dom;
        cur_block = cur_block->imm_dom) {
      /* If the current candidate has escaped the loop the def lives in
       * and that isn't allowed, pull the candidate back up to cur_block.
       */
      if (!sink_out_of_loops && def_loop &&
          !loop_contains_block(def_loop, use_block)) {
         use_block = cur_block;
         continue;
      }

      /* If the node right after cur_block is a loop that contains the
       * candidate, hoist the candidate to cur_block (just above the loop)
       * so the instruction isn't re-executed on every iteration.
       */
      nir_cf_node *next = nir_cf_node_next(&cur_block->cf_node);
      if (next && next->type == nir_cf_node_loop) {
         nir_loop *following_loop = nir_cf_node_as_loop(next);
         if (loop_contains_block(following_loop, use_block)) {
             use_block = cur_block;
             continue;
         }
      }
   }

   return use_block;
}
126 
/* iterate a ssa def's uses and try to find a more optimal block to
 * move it to, using the dominance tree.  In short, if all of the uses
 * are contained in a single block, the load will be moved there,
 * otherwise it will be moved to the least common ancestor block of all
 * the uses
 */
static nir_block *
get_preferred_block(nir_ssa_def *def, bool sink_into_loops, bool sink_out_of_loops)
{
   nir_block *lca = NULL;

   /* Fold every consuming block into the least common ancestor (in the
    * dominance tree) of all uses.
    */
   nir_foreach_use(use, def) {
      nir_instr *instr = use->parent_instr;
      nir_block *use_block = instr->block;

      /*
       * Kind of an ugly special-case, but phi instructions
       * need to appear first in the block, so by definition
       * we can't move an instruction into a block where it is
       * consumed by a phi instruction.  We could conceivably
       * move it into a dominator block.
       */
      if (instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(instr);
         nir_block *phi_lca = NULL;
         /* Treat the use as happening in the predecessor block(s) that
          * feed this value into the phi, not in the phi's own block.
          */
         nir_foreach_phi_src(src, phi) {
            if (&src->src == use)
               phi_lca = nir_dominance_lca(phi_lca, src->pred);
         }
         use_block = phi_lca;
      }

      lca = nir_dominance_lca(lca, use_block);
   }

   /* A use as an if-condition happens in the block just before the if. */
   nir_foreach_if_use(use, def) {
      nir_block *use_block =
         nir_cf_node_as_block(nir_cf_node_prev(&use->parent_if->cf_node));

      lca = nir_dominance_lca(lca, use_block);
   }

   /* If we're moving a load_ubo or load_interpolated_input, we don't want to
    * sink it down into loops, which may result in accessing memory or shared
    * functions multiple times.  Sink it just above the start of the loop
    * where it's used.  For load_consts, undefs, and comparisons, we expect
    * the driver to be able to emit them as simple ALU ops, so sinking as far
    * in as we can go is probably worth it for register pressure.
    */
   if (!sink_into_loops) {
      lca = adjust_block_for_loops(lca, def->parent_instr->block,
                                   sink_out_of_loops);
      assert(nir_block_dominates(def->parent_instr->block, lca));
   } else {
      /* sink_into_loops = true and sink_out_of_loops = false isn't
       * implemented yet because it's not used.
       */
      assert(sink_out_of_loops);
   }


   return lca;
}
190 
191 /* insert before first non-phi instruction: */
192 static void
insert_after_phi(nir_instr * instr,nir_block * block)193 insert_after_phi(nir_instr *instr, nir_block *block)
194 {
195    nir_foreach_instr(instr2, block) {
196       if (instr2->type == nir_instr_type_phi)
197          continue;
198 
199       exec_node_insert_node_before(&instr2->node,
200                                    &instr->node);
201 
202       return;
203    }
204 
205    /* if haven't inserted it, push to tail (ie. empty block or possibly
206     * a block only containing phi's?)
207     */
208    exec_list_push_tail(&block->instr_list, &instr->node);
209 }
210 
211 bool
nir_opt_sink(nir_shader * shader,nir_move_options options)212 nir_opt_sink(nir_shader *shader, nir_move_options options)
213 {
214    bool progress = false;
215 
216    nir_foreach_function(function, shader) {
217       if (!function->impl)
218          continue;
219 
220       nir_metadata_require(function->impl,
221                            nir_metadata_block_index | nir_metadata_dominance);
222 
223       nir_foreach_block_reverse(block, function->impl) {
224          nir_foreach_instr_reverse_safe(instr, block) {
225             if (!nir_can_move_instr(instr, options))
226                continue;
227 
228             nir_ssa_def *def = nir_instr_ssa_def(instr);
229 
230             bool sink_into_loops = instr->type != nir_instr_type_intrinsic;
231             /* Don't sink load_ubo out of loops because that can make its
232              * resource divergent and break code like that which is generated
233              * by nir_lower_non_uniform_access.
234              */
235             bool sink_out_of_loops =
236                instr->type != nir_instr_type_intrinsic ||
237                nir_instr_as_intrinsic(instr)->intrinsic != nir_intrinsic_load_ubo;
238             nir_block *use_block =
239                   get_preferred_block(def, sink_into_loops, sink_out_of_loops);
240 
241             if (!use_block || use_block == instr->block)
242                continue;
243 
244             exec_node_remove(&instr->node);
245 
246             insert_after_phi(instr, use_block);
247 
248             instr->block = use_block;
249 
250             progress = true;
251          }
252       }
253 
254       nir_metadata_preserve(function->impl,
255                             nir_metadata_block_index | nir_metadata_dominance);
256    }
257 
258    return progress;
259 }
260