• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2019 Red Hat
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "compiler/nir/nir_builder.h"
25 #include "ir3_nir.h"
26 
27 /**
 * This pass moves varying fetches (and the instructions they depend on)
 * into the start block.
30  *
31  * We need to set the (ei) "end input" flag on the last varying fetch.
32  * And we want to ensure that all threads execute the instruction that
33  * sets (ei).  The easiest way to ensure this is to move all varying
34  * fetches into the start block.  Which is something we used to get for
35  * free by using lower_all_io_to_temps=true.
36  *
37  * This may come at the cost of additional register usage.  OTOH setting
38  * the (ei) flag earlier probably frees up more VS to run.
39  *
40  * Not all varying fetches could be pulled into the start block.
41  * If there are fetches we couldn't pull, like load_interpolated_input
42  * with offset which depends on a non-reorderable ssbo load or on a
 * phi node, this pass is skipped since it would be hard to find a place
 * to set the (ei) flag (besides at the very end).
 * a5xx and a6xx automatically release varying storage at the end.
46  */
47 
48 typedef struct {
49    nir_block *start_block;
50    bool precondition_failed;
51 } precond_state;
52 
53 typedef struct {
54    nir_shader *shader;
55    nir_block *start_block;
56 } state;
57 
58 static void check_precondition_instr(precond_state *state, nir_instr *instr);
59 static void move_instruction_to_start_block(state *state, nir_instr *instr);
60 
61 static bool
check_precondition_src(nir_src * src,void * state)62 check_precondition_src(nir_src *src, void *state)
63 {
64    check_precondition_instr(state, src->ssa->parent_instr);
65    return true;
66 }
67 
68 /* Recursively check if there is even a single dependency which
69  * cannot be moved.
70  */
71 static void
check_precondition_instr(precond_state * state,nir_instr * instr)72 check_precondition_instr(precond_state *state, nir_instr *instr)
73 {
74    if (instr->block == state->start_block)
75       return;
76 
77    switch (instr->type) {
78    case nir_instr_type_alu:
79    case nir_instr_type_deref:
80    case nir_instr_type_load_const:
81    case nir_instr_type_undef:
82       /* These could be safely moved around */
83       break;
84    case nir_instr_type_intrinsic: {
85       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
86       if (!nir_intrinsic_can_reorder(intr)) {
87          state->precondition_failed = true;
88          return;
89       }
90       break;
91    }
92    default:
93       state->precondition_failed = true;
94       return;
95    }
96 
97    nir_foreach_src(instr, check_precondition_src, state);
98 }
99 
100 static void
check_precondition_block(precond_state * state,nir_block * block)101 check_precondition_block(precond_state *state, nir_block *block)
102 {
103    nir_foreach_instr_safe (instr, block) {
104       if (instr->type != nir_instr_type_intrinsic)
105          continue;
106 
107       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
108 
109       switch (intr->intrinsic) {
110       case nir_intrinsic_load_interpolated_input:
111       case nir_intrinsic_load_input:
112          break;
113       default:
114          continue;
115       }
116 
117       check_precondition_instr(state, instr);
118 
119       if (state->precondition_failed)
120          return;
121    }
122 }
123 
124 static bool
move_src(nir_src * src,void * state)125 move_src(nir_src *src, void *state)
126 {
127    move_instruction_to_start_block(state, src->ssa->parent_instr);
128    return true;
129 }
130 
131 static void
move_instruction_to_start_block(state * state,nir_instr * instr)132 move_instruction_to_start_block(state *state, nir_instr *instr)
133 {
134    /* nothing to do if the instruction is already in the start block */
135    if (instr->block == state->start_block)
136       return;
137 
138    /* first move (recursively) all src's to ensure they appear before
139     * load*_input that we are trying to move:
140     */
141    nir_foreach_src(instr, move_src, state);
142 
143    /* and then move the instruction itself:
144     */
145    exec_node_remove(&instr->node);
146    exec_list_push_tail(&state->start_block->instr_list, &instr->node);
147    instr->block = state->start_block;
148 }
149 
150 static bool
move_varying_inputs_block(state * state,nir_block * block)151 move_varying_inputs_block(state *state, nir_block *block)
152 {
153    bool progress = false;
154 
155    nir_foreach_instr_safe (instr, block) {
156       if (instr->type != nir_instr_type_intrinsic)
157          continue;
158 
159       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
160 
161       switch (intr->intrinsic) {
162       case nir_intrinsic_load_interpolated_input:
163       case nir_intrinsic_load_input:
164          /* TODO any others to handle? */
165          break;
166       default:
167          continue;
168       }
169 
170       move_instruction_to_start_block(state, instr);
171 
172       progress = true;
173    }
174 
175    return progress;
176 }
177 
178 bool
ir3_nir_move_varying_inputs(nir_shader * shader)179 ir3_nir_move_varying_inputs(nir_shader *shader)
180 {
181    bool progress = false;
182 
183    assert(shader->info.stage == MESA_SHADER_FRAGMENT);
184 
185    nir_foreach_function (function, shader) {
186       precond_state state;
187 
188       if (!function->impl)
189          continue;
190 
191       state.precondition_failed = false;
192       state.start_block = nir_start_block(function->impl);
193 
194       nir_foreach_block (block, function->impl) {
195          if (block == state.start_block)
196             continue;
197 
198          check_precondition_block(&state, block);
199 
200          if (state.precondition_failed)
201             return false;
202       }
203    }
204 
205    nir_foreach_function (function, shader) {
206       state state;
207 
208       if (!function->impl)
209          continue;
210 
211       state.shader = shader;
212       state.start_block = nir_start_block(function->impl);
213 
214       bool progress = false;
215       nir_foreach_block (block, function->impl) {
216          /* don't need to move anything that is already in the first block */
217          if (block == state.start_block)
218             continue;
219          progress |= move_varying_inputs_block(&state, block);
220       }
221 
222       if (progress) {
223          nir_metadata_preserve(
224             function->impl, nir_metadata_block_index | nir_metadata_dominance);
225       }
226    }
227 
228    return progress;
229 }
230