/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"

/*
 * Implements a pass that lowers vector phi nodes to scalar phi nodes when
 * we don't expect the lowering to hurt code quality.
 */
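
/*
 * For illustration (pseudo-NIR; the printed syntax and SSA numbers here are
 * made up, not exact nir_print output), a vector phi such as
 *
 *    vec2 ssa_5 = phi block_1: ssa_1, block_2: ssa_3
 *
 * becomes one scalar phi per component, fed by single-component movs
 * inserted at the end of each predecessor, plus a vecN after the phis that
 * recombines the scalars for the original uses:
 *
 *    ssa_6 = phi block_1: mov ssa_1.x, block_2: mov ssa_3.x
 *    ssa_7 = phi block_1: mov ssa_1.y, block_2: mov ssa_3.y
 *    vec2 ssa_8 = vec2 ssa_6, ssa_7
 */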

struct lower_phis_to_scalar_state {
   nir_shader *shader;
   void *mem_ctx;
   struct exec_list dead_instrs;

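   /* If true, lower every vector phi without checking whether its sources
    * are scalarizable.
    */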
   bool lower_all;

   /* Hash table marking which phi nodes are scalarizable.  The key is a
    * pointer to the phi instruction and the data is non-NULL if the phi
    * is scalarizable and NULL if it is not.
    */
   struct hash_table *phi_table;
};

static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state);

static bool
is_phi_src_scalarizable(nir_phi_src *src,
                        struct lower_phis_to_scalar_state *state)
{
   nir_instr *src_instr = src->src.ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are OK too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             nir_op_is_vec_or_mov(src_alu->op);
   }

   case nir_instr_type_phi:
      /* A phi is scalarizable if we're going to lower it */
      return should_lower_phi(nir_instr_as_phi(src_instr), state);

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_undef:
      /* The caller of this function is going to OR the results and we don't
       * want undefs to count so we return false.
       */
      return false;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         /* Don't scalarize if we see a load of a local variable because it
          * might turn into one of the things we can't scalarize.
          */
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return !nir_deref_mode_may_be(deref, nir_var_function_temp |
                                                 nir_var_shader_temp);
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_global_constant:
      case nir_intrinsic_load_input:
         return true;
      default:
         break;
      }
   }
      FALLTHROUGH;

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

/**
 * Determines if the given phi node should be lowered.  The only phi nodes
 * we will scalarize at the moment are those where all of the sources are
 * scalarizable, unless lower_all is set.
 *
 * The reason for this comes down to coalescing.  Since phi sources can't
 * swizzle, swizzles on phis have to be resolved by inserting a mov right
 * before the phi.  The choice then becomes between movs to pick off
 * components for a scalar phi or potentially movs to recombine components
 * for a vector phi.  The problem is that the movs generated to pick off
 * the components are almost uncoalescable.  We can't coalesce them in NIR
 * because we need them to pick off components and we can't coalesce them
 * in the backend because the source register is a vector and the
 * destination is a scalar that may be used at other places in the program.
 * On the other hand, if we have a bunch of scalars going into a vector
 * phi, the situation is much better.  In this case, if the SSA def is
 * generated in the predecessor block to the corresponding phi source, the
 * backend code will be an ALU op into a temporary and then a mov into the
 * given vector component; this move can almost certainly be coalesced
 * away.
 */
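
/* Concretely (an illustrative sketch, not real backend output): scalarizing
 * a vec2 phi whose source 'v' is a full vector requires, in the predecessor,
 *
 *    x = mov v.x
 *    y = mov v.y
 *
 * and neither mov can be coalesced away because 'v' must remain live as a
 * vector.  By contrast, a vector phi fed by scalar defs only needs movs
 * into distinct components of a fresh temporary, which the backend's
 * register coalescing can usually eliminate.
 */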
static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
{
   /* Already scalar */
   if (phi->def.num_components == 1)
      return false;

   if (state->lower_all)
      return true;

   struct hash_entry *entry = _mesa_hash_table_search(state->phi_table, phi);
   if (entry)
      return entry->data != NULL;

   /* Insert an entry and mark it as scalarizable for now.  That way
    * we don't recurse forever and a cycle in the dependence graph (e.g. a
    * loop-header phi that feeds itself through the back edge) won't
    * automatically make us fail to scalarize.
    */
   entry = _mesa_hash_table_insert(state->phi_table, phi, (void *)(intptr_t)1);

   bool scalarizable = false;

   nir_foreach_phi_src(src, phi) {
      /* This loop ignores srcs that are not scalarizable because it's likely
       * still worth copying to temps if another phi source is scalarizable.
       * This reduces register spilling by a huge amount in the i965 driver for
       * Deus Ex: MD.
       */
      scalarizable = is_phi_src_scalarizable(src, state);
      if (scalarizable)
         break;
   }

   /* The hash table entry for 'phi' may have changed while recursing the
    * dependence graph, so we need to reset it.
    */
   entry = _mesa_hash_table_search(state->phi_table, phi);
   assert(entry);

   entry->data = (void *)(intptr_t)scalarizable;

   return scalarizable;
}

static bool
lower_phis_to_scalar_block(nir_block *block,
                           struct lower_phis_to_scalar_state *state)
{
   bool progress = false;
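   /* Capture the last phi up front: the recombining vec instructions are
    * inserted after it, and it also tells the loop below when to stop (see
    * the comment at the bottom of the loop).
    */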
   nir_phi_instr *last_phi = nir_block_last_phi_instr(block);

   /* We have to handle the phi nodes in their own pass due to the way
    * we're modifying the linked list of instructions.
    */
   nir_foreach_phi_safe(phi, block) {
      if (!should_lower_phi(phi, state))
         continue;

      unsigned bit_size = phi->def.bit_size;

      /* Create a vecN operation to combine the results.  Most of these
       * will be redundant, but copy propagation should clean them up for
       * us.  No need to add the complexity here.
       */
      nir_op vec_op = nir_op_vec(phi->def.num_components);

      nir_alu_instr *vec = nir_alu_instr_create(state->shader, vec_op);
      nir_def_init(&vec->instr, &vec->def,
                   phi->def.num_components, bit_size);

      for (unsigned i = 0; i < phi->def.num_components; i++) {
         nir_phi_instr *new_phi = nir_phi_instr_create(state->shader);
         nir_def_init(&new_phi->instr, &new_phi->def, 1,
                      phi->def.bit_size);

         vec->src[i].src = nir_src_for_ssa(&new_phi->def);

         nir_foreach_phi_src(src, phi) {
            /* We need to insert a mov to grab the i'th component of src */
            nir_alu_instr *mov = nir_alu_instr_create(state->shader,
                                                      nir_op_mov);
            nir_def_init(&mov->instr, &mov->def, 1, bit_size);
            mov->src[0].src = nir_src_for_ssa(src->src.ssa);
            mov->src[0].swizzle[0] = i;

            /* Insert at the end of the predecessor but before the jump */
            nir_instr *pred_last_instr = nir_block_last_instr(src->pred);
            if (pred_last_instr && pred_last_instr->type == nir_instr_type_jump)
               nir_instr_insert_before(pred_last_instr, &mov->instr);
            else
               nir_instr_insert_after_block(src->pred, &mov->instr);

            nir_phi_instr_add_src(new_phi, src->pred, &mov->def);
         }

         nir_instr_insert_before(&phi->instr, &new_phi->instr);
      }

      nir_instr_insert_after(&last_phi->instr, &vec->instr);

      nir_def_rewrite_uses(&phi->def, &vec->def);

      nir_instr_remove(&phi->instr);
      exec_list_push_tail(&state->dead_instrs, &phi->instr.node);

      progress = true;

      /* We're using the safe iterator and inserting all the newly
       * scalarized phi nodes before their non-scalarized version so that's
       * OK.  However, we are also inserting vec operations after the last
       * phi node so once we get here, we can't trust even the safe
       * iterator to stop properly.  We have to break manually.
       */
      if (phi == last_phi)
         break;
   }

   return progress;
}

static bool
lower_phis_to_scalar_impl(nir_function_impl *impl, bool lower_all)
{
   struct lower_phis_to_scalar_state state;
   bool progress = false;

   state.shader = impl->function->shader;
   state.mem_ctx = ralloc_parent(impl);
   exec_list_make_empty(&state.dead_instrs);
   state.phi_table = _mesa_pointer_hash_table_create(NULL);
   state.lower_all = lower_all;

   nir_foreach_block(block, impl) {
      progress = lower_phis_to_scalar_block(block, &state) || progress;
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);

   nir_instr_free_list(&state.dead_instrs);

   ralloc_free(state.phi_table);

   return progress;
}

/** A pass that lowers vector phi nodes to scalar
 *
 * This pass loops through the blocks and looks for vector phi nodes it
 * can lower to scalar phi nodes.  Not all phi nodes are lowered.  For
 * instance, if one of the sources is a non-scalarizable vector, then we
 * don't bother lowering because that would generate hard-to-coalesce movs.
 */
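/* Typical driver usage (an illustrative sketch; the pass ordering shown is
 * just one plausible arrangement):
 *
 *    bool progress = false;
 *    NIR_PASS(progress, shader, nir_lower_phis_to_scalar, false);
 *    NIR_PASS(progress, shader, nir_copy_prop);
 *    NIR_PASS(progress, shader, nir_opt_dce);
 */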
bool
nir_lower_phis_to_scalar(nir_shader *shader, bool lower_all)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      progress = lower_phis_to_scalar_impl(impl, lower_all) || progress;
   }

   return progress;
}