/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_builder.h"

struct locals_to_regs_state {
   nir_builder builder;

   /* A hash table mapping derefs to registers */
   struct hash_table *regs_table;

   bool progress;
};

/* The following two functions implement a hash and equality check for
 * variable dereferences.  When the hash or equality function encounters an
 * array deref, it entirely ignores the array index, whether direct or
 * indirect, so all elements of an array map to the same entry.
 */
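/* For example (illustrative): with "struct S { float f; } v[4]", the derefs
 * v[0].f and v[i].f hash and compare as equal, so both accesses end up
 * using the same array register.
 */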
static uint32_t
hash_deref(const void *void_deref)
{
   uint32_t hash = 0;

   for (const nir_deref_instr *deref = void_deref; deref;
        deref = nir_deref_instr_parent(deref)) {
      switch (deref->deref_type) {
      case nir_deref_type_var:
         return XXH32(&deref->var, sizeof(deref->var), hash);

      case nir_deref_type_array:
         continue; /* Do nothing */

      case nir_deref_type_struct:
         hash = XXH32(&deref->strct.index, sizeof(deref->strct.index), hash);
         continue;

      default:
         unreachable("Invalid deref type");
      }
   }

   unreachable("We should have hit a variable dereference");
}

static bool
derefs_equal(const void *void_a, const void *void_b)
{
   for (const nir_deref_instr *a = void_a, *b = void_b; a || b;
        a = nir_deref_instr_parent(a), b = nir_deref_instr_parent(b)) {
      if (a->deref_type != b->deref_type)
         return false;

      switch (a->deref_type) {
      case nir_deref_type_var:
         return a->var == b->var;

      case nir_deref_type_array:
         continue; /* Do nothing */

      case nir_deref_type_struct:
         if (a->strct.index != b->strct.index)
            return false;
         continue;

      default:
         unreachable("Invalid deref type");
      }
   }

   unreachable("We should have hit a variable dereference");
}

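/* Returns the nir_register backing the given deref, creating it on first
 * use.  Because hash_deref()/derefs_equal() ignore array indices, every
 * element of a local array shares one array register; the per-access offset
 * is computed later by get_deref_reg_src().
 */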
static nir_register *
get_reg_for_deref(nir_deref_instr *deref, struct locals_to_regs_state *state)
{
   uint32_t hash = hash_deref(deref);

   assert(nir_deref_instr_get_variable(deref)->constant_initializer == NULL &&
          nir_deref_instr_get_variable(deref)->pointer_initializer == NULL);

   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(state->regs_table, hash, deref);
   if (entry)
      return entry->data;

   unsigned array_size = 1;
   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array)
         array_size *= glsl_get_length(nir_deref_instr_parent(d)->type);
   }

   assert(glsl_type_is_vector_or_scalar(deref->type));

   nir_register *reg = nir_local_reg_create(state->builder.impl);
   reg->num_components = glsl_get_vector_elements(deref->type);
   reg->num_array_elems = array_size > 1 ? array_size : 0;
   reg->bit_size = glsl_get_bit_size(deref->type);

   _mesa_hash_table_insert_pre_hashed(state->regs_table, hash, deref, reg);

   return reg;
}

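/* Builds a nir_src referencing the register for this deref.  Constant array
 * indices are folded into reg.base_offset; any non-constant indices are
 * accumulated into a single 32-bit reg.indirect value.
 */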
static nir_src
get_deref_reg_src(nir_deref_instr *deref, struct locals_to_regs_state *state)
{
   nir_builder *b = &state->builder;

   nir_src src;

   src.is_ssa = false;
   src.reg.reg = get_reg_for_deref(deref, state);
   src.reg.base_offset = 0;
   src.reg.indirect = NULL;

   /* It is possible for a user to create a shader that has an array with a
    * single element and then proceed to access it indirectly.  Indirectly
    * accessing a non-array register is not allowed in NIR.  In order to
    * handle this case we just convert it to a direct reference.
    */
   if (src.reg.reg->num_array_elems == 0)
      return src;

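   /* Illustrative example: for "float v[4][2]", the deref v[i][1] walks the
    * chain from the innermost array deref outwards and produces
    * base_offset = 0 and indirect = 1 + 2*i, i.e. the flattened row-major
    * element index into the 8-element array register.
    */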
   unsigned inner_array_size = 1;
   for (const nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type != nir_deref_type_array)
         continue;

      if (nir_src_is_const(d->arr.index) && !src.reg.indirect) {
         src.reg.base_offset += nir_src_as_uint(d->arr.index) *
                                inner_array_size;
      } else {
         if (src.reg.indirect) {
            assert(src.reg.base_offset == 0);
         } else {
            src.reg.indirect = malloc(sizeof(nir_src));
            *src.reg.indirect =
               nir_src_for_ssa(nir_imm_int(b, src.reg.base_offset));
            src.reg.base_offset = 0;
         }

         assert(src.reg.indirect->is_ssa);
         nir_ssa_def *index = nir_i2i(b, nir_ssa_for_src(b, d->arr.index, 1), 32);
         src.reg.indirect->ssa =
            nir_iadd(b, src.reg.indirect->ssa,
                     nir_imul_imm(b, index, inner_array_size));
      }

      inner_array_size *= glsl_get_length(nir_deref_instr_parent(d)->type);
   }

   return src;
}

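/* Rewrites every load_deref/store_deref of a nir_var_function_temp variable
 * in this block into a mov from or to the corresponding register, handling
 * statically out-of-bounds accesses along the way.
 */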
static bool
lower_locals_to_regs_block(nir_block *block,
                           struct locals_to_regs_state *state)
{
   nir_builder *b = &state->builder;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (!nir_deref_mode_is(deref, nir_var_function_temp))
            continue;

         b->cursor = nir_before_instr(&intrin->instr);

         nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);
         mov->src[0].src = get_deref_reg_src(deref, state);

         if (mov->src[0].src.reg.reg->num_array_elems != 0 &&
             mov->src[0].src.reg.base_offset >= mov->src[0].src.reg.reg->num_array_elems) {
            /* Out-of-bounds read, return 0 instead. */
            mov->src[0].src = nir_src_for_ssa(nir_imm_intN_t(b, 0, mov->src[0].src.reg.reg->bit_size));
            for (int i = 0; i < intrin->num_components; i++)
               mov->src[0].swizzle[i] = 0;
         }

         mov->dest.write_mask = (1 << intrin->num_components) - 1;

         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
                              intrin->num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     &mov->dest.dest.ssa);
         } else {
            nir_dest_copy(&mov->dest.dest, &intrin->dest);
         }
         nir_builder_instr_insert(b, &mov->instr);

         nir_instr_remove(&intrin->instr);
         state->progress = true;
         break;
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (!nir_deref_mode_is(deref, nir_var_function_temp))
            continue;

         b->cursor = nir_before_instr(&intrin->instr);

         nir_src reg_src = get_deref_reg_src(deref, state);

         if (reg_src.reg.reg->num_array_elems != 0 &&
             reg_src.reg.base_offset >= reg_src.reg.reg->num_array_elems) {
            /* Out-of-bounds write, just eliminate it. */
            nir_instr_remove(&intrin->instr);
            state->progress = true;
            break;
         }

         nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);

         nir_src_copy(&mov->src[0].src, &intrin->src[1]);

         /* The normal NIR SSA copy-propagation pass can't run after this
          * pass, so do an ad-hoc copy propagation here, since this ALU op
          * can do swizzles while the deref couldn't.
          */
         if (mov->src[0].src.is_ssa) {
            nir_instr *parent = mov->src[0].src.ssa->parent_instr;
            if (parent->type == nir_instr_type_alu) {
               nir_alu_instr *parent_alu = nir_instr_as_alu(parent);
               if (parent_alu->op == nir_op_mov && parent_alu->src[0].src.is_ssa) {
                  for (unsigned i = 0; i < intrin->num_components; i++)
                     mov->src[0].swizzle[i] = parent_alu->src[0].swizzle[mov->src[0].swizzle[i]];
                  mov->src[0].abs = parent_alu->src[0].abs;
                  mov->src[0].negate = parent_alu->src[0].negate;
                  mov->src[0].src = parent_alu->src[0].src;
               }
            }
         }

         mov->dest.write_mask = nir_intrinsic_write_mask(intrin);
         mov->dest.dest.is_ssa = false;
         mov->dest.dest.reg.reg = reg_src.reg.reg;
         mov->dest.dest.reg.base_offset = reg_src.reg.base_offset;
         mov->dest.dest.reg.indirect = reg_src.reg.indirect;

         nir_builder_instr_insert(b, &mov->instr);

         nir_instr_remove(&intrin->instr);
         state->progress = true;
         break;
      }

      case nir_intrinsic_copy_deref:
         unreachable("There should be no copies whatsoever at this point");
         break;

      default:
         continue;
      }
   }

   return true;
}

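/* Lowers function_temp access in a single function implementation and
 * reports whether anything changed.
 */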
static bool
nir_lower_locals_to_regs_impl(nir_function_impl *impl)
{
   struct locals_to_regs_state state;

   nir_builder_init(&state.builder, impl);
   state.progress = false;
   state.regs_table = _mesa_hash_table_create(NULL, hash_deref, derefs_equal);

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      lower_locals_to_regs_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   _mesa_hash_table_destroy(state.regs_table, NULL);

   return state.progress;
}

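/* Public entry point: lowers all nir_var_function_temp load/store_deref
 * access in the shader to register access.  A driver would typically run
 * this late in its NIR pipeline, after variable copies have been lowered
 * away; an illustrative invocation:
 *
 *    NIR_PASS(progress, shader, nir_lower_locals_to_regs);
 */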
bool
nir_lower_locals_to_regs(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = nir_lower_locals_to_regs_impl(function->impl) || progress;
   }

   return progress;
}