/*
 * Copyright © 2019 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/nir/nir_builder.h"
#include "ir3_nir.h"

/**
 * This pass moves varying fetches (and the instructions they depend on)
 * into the start block.
 *
 * We need to set the (ei) "end input" flag on the last varying fetch.
 * And we want to ensure that all threads execute the instruction that
 * sets (ei).  The easiest way to ensure this is to move all varying
 * fetches into the start block, which is something we used to get for
 * free by using lower_all_io_to_temps=true.
 *
 * This may come at the cost of additional register usage.  OTOH setting
 * the (ei) flag earlier probably frees up more VS invocations to run.
 *
 * Not all varying fetches can be pulled into the start block.  If there
 * are fetches we can't pull, such as a load_interpolated_input whose
 * offset depends on a non-reorderable ssbo load or on a phi node, this
 * pass is skipped entirely, since it would be hard to find a place to
 * set the (ei) flag (besides at the very end).
 * a5xx and a6xx do automatically release varying storage at the end.
 */

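/* State for the precondition check: records whether any varying fetch has a
 * dependency that cannot be moved into the start block.
 */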
typedef struct {
   nir_block *start_block;
   bool precondition_failed;
} precond_state;

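/* State for the move pass itself: the shader being processed and the start
 * block that instructions get moved into.
 */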
typedef struct {
   nir_shader *shader;
   nir_block *start_block;
} state;

static void check_precondition_instr(precond_state *state, nir_instr *instr);
static void move_instruction_to_start_block(state *state, nir_instr *instr);

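/* nir_foreach_src() callback: recurse into the instruction that produces
 * each source.
 */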
static bool
check_precondition_src(nir_src *src, void *state)
{
   check_precondition_instr(state, src->ssa->parent_instr);
   return true;
}

/* Recursively check if there is even a single dependency which
 * cannot be moved.
 */
static void
check_precondition_instr(precond_state *state, nir_instr *instr)
{
   if (instr->block == state->start_block)
      return;

   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
      /* These could be safely moved around */
      break;
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      if (!nir_intrinsic_can_reorder(intr)) {
         state->precondition_failed = true;
         return;
      }
      break;
   }
   default:
      state->precondition_failed = true;
      return;
   }

   nir_foreach_src(instr, check_precondition_src, state);
}

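/* Check every varying fetch in the block; set precondition_failed if any of
 * them (or any instruction they depend on) cannot be moved to the start
 * block.
 */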
static void
check_precondition_block(precond_state *state, nir_block *block)
{
   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_interpolated_input:
      case nir_intrinsic_load_input:
         break;
      default:
         continue;
      }

      check_precondition_instr(state, instr);

      if (state->precondition_failed)
         return;
   }
}

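/* nir_foreach_src() callback: move the instruction that produces each source. */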
static bool
move_src(nir_src *src, void *state)
{
   /* At this point we shouldn't have any non-ssa src: */
   debug_assert(src->is_ssa);
   move_instruction_to_start_block(state, src->ssa->parent_instr);
   return true;
}

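/* Move an instruction (and, recursively, everything it depends on) to the
 * end of the start block, keeping each source ahead of its uses.
 */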
static void
move_instruction_to_start_block(state *state, nir_instr *instr)
{
   /* nothing to do if the instruction is already in the start block */
   if (instr->block == state->start_block)
      return;

   /* first move (recursively) all src's to ensure they appear before
    * load*_input that we are trying to move:
    */
   nir_foreach_src(instr, move_src, state);

   /* and then move the instruction itself:
    */
   exec_node_remove(&instr->node);
   exec_list_push_tail(&state->start_block->instr_list, &instr->node);
   instr->block = state->start_block;
}

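/* Move every load_input/load_interpolated_input in the block (plus its
 * dependencies) into the start block.  Returns true if anything was moved.
 */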
static bool
move_varying_inputs_block(state *state, nir_block *block)
{
   bool progress = false;

   nir_foreach_instr_safe (instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_interpolated_input:
      case nir_intrinsic_load_input:
         /* TODO any others to handle? */
         break;
      default:
         continue;
      }

      debug_assert(intr->dest.is_ssa);

      move_instruction_to_start_block(state, instr);

      progress = true;
   }

   return progress;
}

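/* First verify that every varying fetch (and everything it depends on) can
 * legally be moved into the start block; only if that precondition holds do
 * we perform the actual move.
 */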
bool
ir3_nir_move_varying_inputs(nir_shader *shader)
{
   bool progress = false;

   debug_assert(shader->info.stage == MESA_SHADER_FRAGMENT);

   nir_foreach_function (function, shader) {
      precond_state state;

      if (!function->impl)
         continue;

      state.precondition_failed = false;
      state.start_block = nir_start_block(function->impl);

      nir_foreach_block (block, function->impl) {
         if (block == state.start_block)
            continue;

         check_precondition_block(&state, block);

         if (state.precondition_failed)
            return false;
      }
   }

   nir_foreach_function (function, shader) {
      state state;

      if (!function->impl)
         continue;

      state.shader = shader;
      state.start_block = nir_start_block(function->impl);

      bool func_progress = false;
      nir_foreach_block (block, function->impl) {
         /* don't need to move anything that is already in the first block */
         if (block == state.start_block)
            continue;
         func_progress |= move_varying_inputs_block(&state, block);
      }

      if (func_progress) {
         progress = true;
         nir_metadata_preserve(
            function->impl, nir_metadata_block_index | nir_metadata_dominance);
      }
   }

   return progress;
}