/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_control_flow.h"
#include "nir_worklist.h"

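/* Texture operations that implicitly compute derivatives of their
 * coordinates (to select an LOD) and therefore rely on neighbouring
 * fragment invocations still being live.
 */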
static bool
nir_texop_implies_derivative(nir_texop op)
{
   return op == nir_texop_tex ||
          op == nir_texop_txb ||
          op == nir_texop_lod;
}
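
/* Values used in nir_instr::pass_flags: MOVE_INSTR_FLAG marks a
 * discard/demote (and everything it depends on) as safe to hoist to the
 * top of the shader; STOP_PROCESSING_INSTR_FLAG marks the instruction at
 * which scanning stopped, so the second walk knows where to stop moving.
 */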
#define MOVE_INSTR_FLAG            1
#define STOP_PROCESSING_INSTR_FLAG 2

/** Check recursively if the source can be moved to the top of the shader.
 *  Sets instr->pass_flags to MOVE_INSTR_FLAG and adds the instr
 *  to the given worklist
 */
static bool
can_move_src(nir_src *src, void *worklist)
{
   nir_instr *instr = src->ssa->parent_instr;
   if (instr->pass_flags)
      return true;

   /* Phi instructions can't be moved at all.  Also, if we're dependent on
    * a phi then we are dependent on some other bit of control flow and
    * it's hard to figure out the proper condition.
    */
   if (instr->type == nir_instr_type_phi)
      return false;

   if (instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      if (intrin->intrinsic == nir_intrinsic_load_deref) {
         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (!nir_deref_mode_is_one_of(deref, nir_var_read_only_modes))
            return false;
      } else if (!(nir_intrinsic_infos[intrin->intrinsic].flags &
                   NIR_INTRINSIC_CAN_REORDER)) {
         return false;
      }
   }

   /* set pass_flags and remember the instruction for potential cleanup */
   instr->pass_flags = MOVE_INSTR_FLAG;
   nir_instr_worklist_push_tail(worklist, instr);

   if (!nir_foreach_src(instr, can_move_src, worklist)) {
      return false;
   }
   return true;
}

/** Try to mark a discard or demote instruction for moving
 *
 * This function does two things.  First, it searches through the
 * dependency chain to see whether this discard is an instruction that we
 * can move up to the top.  Second, if the discard is one we can move, it
 * tags the discard and its dependencies with MOVE_INSTR_FLAG.
 * Demotes are handled the same way, except that they can still be moved up
 * when implicit derivatives are used.
 */
static bool
try_move_discard(nir_intrinsic_instr *discard)
{
   /* We require the discard to be in the top level of control flow.  We
    * could, in theory, move discards that are inside ifs or loops but that
    * would be a lot more work.
    */
   if (discard->instr.block->cf_node.parent->type != nir_cf_node_function)
      return false;

   /* Build the set of all instructions the discard depends on so that we
    * can clear the flags if the discard turns out not to be movable.
    */
   nir_instr_worklist *work = nir_instr_worklist_create();
   if (!work)
      return false;
   discard->instr.pass_flags = MOVE_INSTR_FLAG;

   bool can_move_discard = can_move_src(&discard->src[0], work);
   if (!can_move_discard) {
      /* Moving the discard is impossible: clear the flags */
      discard->instr.pass_flags = 0;
      nir_foreach_instr_in_worklist(instr, work)
         instr->pass_flags = 0;
   }

   nir_instr_worklist_destroy(work);

   return can_move_discard;
}

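/** Scan the shader for movable discard/demote intrinsics, tag them and
 * their dependencies, and then hoist everything tagged to the start of
 * the entry block.
 */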
static bool
opt_move_discards_to_top_impl(nir_function_impl *impl)
{
   bool progress = false;
   bool consider_discards = true;
   bool moved = false;

   /* Walk through the instructions and look for a discard that we can move
    * to the top of the program.  If we hit any operation along the way that
    * we cannot safely move a discard above, break out of the loop and stop
    * trying to move any more discards.
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         instr->pass_flags = 0;

         switch (instr->type) {
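         /* Implicit derivatives read values from neighbouring invocations,
          * so hoisting a discard above them could change their result.
          * Demote keeps helper invocations alive, so demotes may still be
          * moved.
          */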
         case nir_instr_type_alu: {
            nir_alu_instr *alu = nir_instr_as_alu(instr);
            if (nir_op_is_derivative(alu->op))
               consider_discards = false;
            continue;
         }

         case nir_instr_type_deref:
         case nir_instr_type_load_const:
         case nir_instr_type_undef:
         case nir_instr_type_phi:
            /* These are all safe */
            continue;

         case nir_instr_type_call:
            instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
            /* We don't know what the function will do */
            goto break_all;

         case nir_instr_type_tex: {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            if (nir_texop_implies_derivative(tex->op))
               consider_discards = false;
            continue;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
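            /* In the original program any such write executes before the
             * discard, so hoisting the discard above it would suppress a
             * write that should have happened.
             */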
            if (nir_intrinsic_writes_external_memory(intrin)) {
               instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
               goto break_all;
            }
            switch (intrin->intrinsic) {
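            /* These quad operations get the same treatment as derivatives:
             * they read other invocations in the quad, so block moving
             * plain discards but still allow demotes to be hoisted.
             */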
            case nir_intrinsic_quad_broadcast:
            case nir_intrinsic_quad_swap_horizontal:
            case nir_intrinsic_quad_swap_vertical:
            case nir_intrinsic_quad_swap_diagonal:
            case nir_intrinsic_quad_vote_all:
            case nir_intrinsic_quad_vote_any:
            case nir_intrinsic_quad_swizzle_amd:
               consider_discards = false;
               break;
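            /* Moving a discard or demote above these subgroup operations
             * could change the set of invocations they observe, so stop
             * processing entirely.
             */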
            case nir_intrinsic_vote_any:
            case nir_intrinsic_vote_all:
            case nir_intrinsic_vote_feq:
            case nir_intrinsic_vote_ieq:
            case nir_intrinsic_ballot:
            case nir_intrinsic_first_invocation:
            case nir_intrinsic_read_invocation:
            case nir_intrinsic_read_first_invocation:
            case nir_intrinsic_elect:
            case nir_intrinsic_reduce:
            case nir_intrinsic_inclusive_scan:
            case nir_intrinsic_exclusive_scan:
            case nir_intrinsic_shuffle:
            case nir_intrinsic_shuffle_xor:
            case nir_intrinsic_shuffle_up:
            case nir_intrinsic_shuffle_down:
            case nir_intrinsic_rotate:
            case nir_intrinsic_masked_swizzle_amd:
               instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
               goto break_all;
            case nir_intrinsic_discard_if:
               if (!consider_discards) {
                  /* assume that a shader either uses discard or demote, but not both */
                  instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
                  goto break_all;
               }
               FALLTHROUGH;
            case nir_intrinsic_demote_if:
               moved = try_move_discard(intrin) || moved;
               break;
            default:
               break;
            }
            continue;
         }

         case nir_instr_type_jump: {
            nir_jump_instr *jump = nir_instr_as_jump(instr);
            /* A return would cause the discard to not get executed */
            if (jump->type == nir_jump_return) {
               instr->pass_flags = STOP_PROCESSING_INSTR_FLAG;
               goto break_all;
            }
            continue;
         }

         case nir_instr_type_parallel_copy:
            unreachable("Unhandled instruction type");
         }
      }
   }
break_all:

   if (moved) {
      /* Walk the list of instructions and move the discard/demote and
       * everything it depends on to the top.  We walk the instruction list
       * here because it ensures that everything stays in its original order.
       * This provides stability for the algorithm and ensures that we don't
       * accidentally get dependencies out-of-order.
       */
      nir_cursor cursor = nir_before_impl(impl);
      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->pass_flags == STOP_PROCESSING_INSTR_FLAG)
               return progress;
            if (instr->pass_flags == MOVE_INSTR_FLAG) {
               progress |= nir_instr_move(cursor, instr);
               cursor = nir_after_instr(instr);
            }
         }
      }
   }

   return progress;
}

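/** Move discard_if/demote_if intrinsics, together with the instructions
 * they depend on, to the top of the entry block so that the kill happens
 * as early as possible.
 *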
 * This optimization only operates on discard_if/demote_if, so
 * nir_opt_conditional_discard and nir_lower_discard_or_demote should have
 * been called beforehand.
 */
bool
nir_opt_move_discards_to_top(nir_shader *shader)
{
   assert(shader->info.stage == MESA_SHADER_FRAGMENT);

   bool progress = false;

   if (!shader->info.fs.uses_discard)
      return false;

   nir_foreach_function_impl(impl, shader) {
      if (opt_move_discards_to_top_impl(impl)) {
         nir_metadata_preserve(impl, nir_metadata_block_index |
                                     nir_metadata_dominance);
         progress = true;
      }
   }

   return progress;
}