/*
 * Copyright © 2018 Red Hat
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Rob Clark (robdclark@gmail.com)
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *    Rhys Perry (pendingchaos02@gmail.com)
 *
 */

#include "nir.h"

/*
 * A simple pass that moves some instructions into the least common
 * ancestor of consuming instructions.
 */
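
/*
 * Rough illustration (pseudo-IR, not exact NIR syntax): a value defined
 * before an if but only used inside the then-branch, e.g.
 *
 *    x = load_ubo ...
 *    if (cond) {
 *       use(x)
 *    }
 *
 * is sunk into that branch (when the corresponding nir_move_* option is
 * set), shortening its live range:
 *
 *    if (cond) {
 *       x = load_ubo ...
 *       use(x)
 *    }
 */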

/*
 * Detect whether a source is like a constant for the purposes of register
 * pressure calculations (e.g. it can be rematerialized anywhere effectively
 * for free).
 */
static bool
is_constant_like(nir_src *src)
{
   /* Constants are constants */
   if (nir_src_is_const(*src))
      return true;

   /* Otherwise, look for constant-like intrinsics */
   nir_instr *parent = src->ssa->parent_instr;
   if (parent->type != nir_instr_type_intrinsic)
      return false;

   return (nir_instr_as_intrinsic(parent)->intrinsic ==
           nir_intrinsic_load_preamble);
}

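/* Return whether the given instruction may be sunk according to the given
 * move options.
 */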
bool
nir_can_move_instr(nir_instr *instr, nir_move_options options)
{
   switch (instr->type) {
   case nir_instr_type_load_const:
   case nir_instr_type_undef: {
      return options & nir_move_const_undef;
   }
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);

      /* Derivatives cannot be moved into non-uniform control flow, including
       * past a discard_if in the same block. Even if they could, sinking
       * derivatives extends the lifetime of helper invocations, which may be
       * worse than the register pressure decrease. Bail on derivatives.
       */
      if (nir_op_is_derivative(alu->op))
         return false;

      if (nir_op_is_vec_or_mov(alu->op) || alu->op == nir_op_b2i32)
         return options & nir_move_copies;
      if (nir_alu_instr_is_comparison(alu))
         return options & nir_move_comparisons;

      /* Assuming that constants do not contribute to register pressure, it is
       * beneficial to sink ALU instructions where all but one source is
       * constant. Detect that case last.
       */
      if (!(options & nir_move_alu))
         return false;

      unsigned inputs = nir_op_infos[alu->op].num_inputs;
      unsigned constant_inputs = 0;

      for (unsigned i = 0; i < inputs; ++i) {
         if (is_constant_like(&alu->src[i].src))
            constant_inputs++;
      }

      return (constant_inputs + 1 >= inputs);
   }
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ubo_vec4:
         return options & nir_move_load_ubo;
      case nir_intrinsic_load_ssbo:
         return (options & nir_move_load_ssbo) && nir_intrinsic_can_reorder(intrin);
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_interpolated_input:
      case nir_intrinsic_load_per_vertex_input:
      case nir_intrinsic_load_frag_coord:
      case nir_intrinsic_load_frag_coord_zw:
      case nir_intrinsic_load_pixel_coord:
         return options & nir_move_load_input;
      case nir_intrinsic_load_uniform:
         return options & nir_move_load_uniform;
      case nir_intrinsic_load_constant_agx:
      case nir_intrinsic_load_local_pixel_agx:
         return true;
      default:
         return false;
      }
   }
   default:
      return false;
   }
}

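/* Return the innermost loop containing the given CF node, or NULL if the
 * node is not inside any loop.
 */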
static nir_loop *
get_innermost_loop(nir_cf_node *node)
{
   for (; node != NULL; node = node->parent) {
      if (node->type == nir_cf_node_loop)
         return (nir_loop *)node;
   }
   return NULL;
}

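/* Return whether the block lies inside the loop, judged by block indices:
 * the block must come after the block immediately preceding the loop and
 * before the block immediately following it.
 */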
static bool
loop_contains_block(nir_loop *loop, nir_block *block)
{
   assert(!nir_loop_has_continue_construct(loop));
   nir_block *before = nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));
   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node));

   return block->index > before->index && block->index < after->index;
}

/* Given the LCA of all uses and the definition, find a block on the path
 * between them in the dominance tree that is outside of as many loops as
 * possible. If "sink_out_of_loops" is false, then we disallow sinking the
 * definition outside of the loop it's defined in (if any).
 */

static nir_block *
adjust_block_for_loops(nir_block *use_block, nir_block *def_block,
                       bool sink_out_of_loops)
{
   nir_loop *def_loop = NULL;
   if (!sink_out_of_loops)
      def_loop = get_innermost_loop(&def_block->cf_node);

   for (nir_block *cur_block = use_block; cur_block != def_block->imm_dom;
        cur_block = cur_block->imm_dom) {
      if (!sink_out_of_loops && def_loop &&
          !loop_contains_block(def_loop, use_block)) {
         use_block = cur_block;
         continue;
      }

      nir_cf_node *next = nir_cf_node_next(&cur_block->cf_node);
      if (next && next->type == nir_cf_node_loop) {
         nir_loop *following_loop = nir_cf_node_as_loop(next);
         if (loop_contains_block(following_loop, use_block)) {
            use_block = cur_block;
            continue;
         }
      }
   }

   return use_block;
}

/* Iterate an SSA def's uses and try to find a more optimal block to
 * move it to, using the dominance tree. In short, if all of the uses
 * are contained in a single block, the load will be moved there;
 * otherwise it will be moved to the least common ancestor block of all
 * the uses.
 */
static nir_block *
get_preferred_block(nir_def *def, bool sink_out_of_loops)
{
   nir_block *lca = NULL;

   nir_foreach_use_including_if(use, def) {
      nir_block *use_block;

      if (nir_src_is_if(use)) {
         use_block =
            nir_cf_node_as_block(nir_cf_node_prev(&nir_src_parent_if(use)->cf_node));
      } else {
         nir_instr *instr = nir_src_parent_instr(use);
         use_block = instr->block;

         /*
          * Kind of an ugly special-case, but phi instructions
          * need to appear first in the block, so by definition
          * we can't move an instruction into a block where it is
          * consumed by a phi instruction.  We could conceivably
          * move it into a dominator block.
          */
         if (instr->type == nir_instr_type_phi) {
            nir_phi_instr *phi = nir_instr_as_phi(instr);
            nir_block *phi_lca = NULL;
            nir_foreach_phi_src(src, phi) {
               if (&src->src == use)
                  phi_lca = nir_dominance_lca(phi_lca, src->pred);
            }
            use_block = phi_lca;
         }
      }

      lca = nir_dominance_lca(lca, use_block);
   }

   /* Return in case we didn't find a reachable user. */
   if (!lca)
      return NULL;

   /* We don't sink any instructions into loops to avoid repeated executions.
    * This might occasionally increase register pressure, but seems overall
    * the better choice.
    */
   lca = adjust_block_for_loops(lca, def->parent_instr->block,
                                sink_out_of_loops);
   assert(nir_block_dominates(def->parent_instr->block, lca));

   return lca;
}

static bool
can_sink_out_of_loop(nir_intrinsic_instr *intrin)
{
   /* Don't sink buffer loads out of loops because that can make their
    * resource divergent and break code like that which is generated
    * by nir_lower_non_uniform_access.
    */
   return intrin->intrinsic != nir_intrinsic_load_ubo &&
          intrin->intrinsic != nir_intrinsic_load_ssbo;
}

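/* Sink movable instructions towards their uses, as selected by the given
 * move options. Returns true if any instruction was moved.
 */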
bool
nir_opt_sink(nir_shader *shader, nir_move_options options)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      nir_metadata_require(impl,
                           nir_metadata_block_index | nir_metadata_dominance);

      nir_foreach_block_reverse(block, impl) {
         nir_foreach_instr_reverse_safe(instr, block) {
            if (!nir_can_move_instr(instr, options))
               continue;

            nir_def *def = nir_instr_def(instr);

            bool sink_out_of_loops =
               instr->type != nir_instr_type_intrinsic ||
               can_sink_out_of_loop(nir_instr_as_intrinsic(instr));
            nir_block *use_block =
               get_preferred_block(def, sink_out_of_loops);

            if (!use_block || use_block == instr->block)
               continue;

            nir_instr_remove(instr);
            nir_instr_insert(nir_after_phis(use_block), instr);

            progress = true;
         }
      }

      nir_metadata_preserve(impl,
                            nir_metadata_block_index | nir_metadata_dominance);
   }

   return progress;
}