/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

/** @file nir_lower_io_to_scalar.c
 *
 * Replaces nir_load_input/nir_store_output operations that have
 * num_components != 1 with individual per-channel operations.
 */

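/* For example, a driver that wants scalar UBO loads might run
 * (hypothetical call site; the filter callback is optional):
 *
 *    nir_lower_io_to_scalar(shader, nir_var_mem_ubo, NULL, NULL);
 *
 * which rewrites a vector load printed roughly as
 *
 *    vec4 32 ssa_2 = intrinsic load_ubo (ssa_0, ssa_1) ...
 *
 * into four single-component load_ubo intrinsics at byte offsets
 * ssa_1 + 0, 4, 8 and 12, followed by a vec4 gathering the results.
 */
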
static void
set_io_semantics(nir_intrinsic_instr *scalar_intr,
                 nir_intrinsic_instr *vec_intr, unsigned component)
{
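   /* gs_streams packs one 2-bit stream index per vec4 component; keep only
    * the bits for the channel this scalar instruction covers.
    */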
   nir_io_semantics sem = nir_intrinsic_io_semantics(vec_intr);
   sem.gs_streams = (sem.gs_streams >> (component * 2)) & 0x3;
   nir_intrinsic_set_io_semantics(scalar_intr, sem);
}

static void
lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *loads[NIR_MAX_VEC_COMPONENTS];

   for (unsigned i = 0; i < intr->num_components; i++) {
      bool is_64bit = (nir_intrinsic_dest_type(intr) & NIR_ALU_TYPE_SIZE_MASK) == 64;
      unsigned newi = is_64bit ? i * 2 : i;
      unsigned newc = nir_intrinsic_component(intr);
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
                   intr->def.bit_size);
      chan_intr->num_components = 1;

      nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
      nir_intrinsic_set_component(chan_intr, (newc + newi) % 4);
      nir_intrinsic_set_dest_type(chan_intr, nir_intrinsic_dest_type(intr));
      set_io_semantics(chan_intr, intr, i);
      /* offset and vertex (if needed) */
      for (unsigned j = 0; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; ++j)
         chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);
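      /* 64-bit channels occupy two 32-bit components, so a channel can run
       * past the end of this vec4 slot; carry the overflow into the next
       * slot by bumping the I/O offset source.
       */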
      if (newc + newi > 3) {
         nir_src *src = nir_get_io_offset_src(chan_intr);
         nir_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
         *src = nir_src_for_ssa(offset);
      }

      nir_builder_instr_insert(b, &chan_intr->instr);

      loads[i] = &chan_intr->def;
   }

   nir_def_rewrite_uses(&intr->def,
                        nir_vec(b, loads, intr->num_components));
   nir_instr_remove(&intr->instr);
}

static void
lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *loads[NIR_MAX_VEC_COMPONENTS];
   nir_def *base_offset = nir_get_io_offset_src(intr)->ssa;

   for (unsigned i = 0; i < intr->num_components; i++) {
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
                   intr->def.bit_size);
      chan_intr->num_components = 1;

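      /* The channel's byte offset advances by bit_size / 8 each iteration,
       * so fold that into align_offset modulo align_mul to keep the
       * alignment information exact.
       */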
      nir_intrinsic_set_align_offset(chan_intr,
                                     (nir_intrinsic_align_offset(intr) +
                                      i * (intr->def.bit_size / 8)) %
                                        nir_intrinsic_align_mul(intr));
      nir_intrinsic_set_align_mul(chan_intr, nir_intrinsic_align_mul(intr));
      if (nir_intrinsic_has_access(intr))
         nir_intrinsic_set_access(chan_intr, nir_intrinsic_access(intr));
      if (nir_intrinsic_has_range(intr))
         nir_intrinsic_set_range(chan_intr, nir_intrinsic_range(intr));
      if (nir_intrinsic_has_range_base(intr))
         nir_intrinsic_set_range_base(chan_intr, nir_intrinsic_range_base(intr));
      if (nir_intrinsic_has_base(intr))
         nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
      for (unsigned j = 0; j < nir_intrinsic_infos[intr->intrinsic].num_srcs - 1; j++)
         chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);

      /* increment offset per component */
      nir_def *offset = nir_iadd_imm(b, base_offset, i * (intr->def.bit_size / 8));
      *nir_get_io_offset_src(chan_intr) = nir_src_for_ssa(offset);

      nir_builder_instr_insert(b, &chan_intr->instr);

      loads[i] = &chan_intr->def;
   }

   nir_def_rewrite_uses(&intr->def,
                        nir_vec(b, loads, intr->num_components));
   nir_instr_remove(&intr->instr);
}

static void
lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *value = intr->src[0].ssa;

   for (unsigned i = 0; i < intr->num_components; i++) {
      if (!(nir_intrinsic_write_mask(intr) & (1 << i)))
         continue;

      bool is_64bit = (nir_intrinsic_src_type(intr) & NIR_ALU_TYPE_SIZE_MASK) == 64;
      unsigned newi = is_64bit ? i * 2 : i;
      unsigned newc = nir_intrinsic_component(intr);
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      chan_intr->num_components = 1;

      nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
      nir_intrinsic_set_write_mask(chan_intr, 0x1);
      nir_intrinsic_set_component(chan_intr, (newc + newi) % 4);
      nir_intrinsic_set_src_type(chan_intr, nir_intrinsic_src_type(intr));
      set_io_semantics(chan_intr, intr, i);

      if (nir_intrinsic_has_io_xfb(intr)) {
         /* Scalarize transform feedback info. */
         unsigned component = nir_intrinsic_component(chan_intr);

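         /* Walk the vector's packed xfb declarations (io_xfb covers
          * components 0-1, io_xfb2 components 2-3) to find the one that
          * covers this channel, then emit a single-channel declaration at
          * the corresponding buffer offset.
          */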
         for (unsigned c = 0; c <= component; c++) {
            nir_io_xfb xfb = c < 2 ? nir_intrinsic_io_xfb(intr) : nir_intrinsic_io_xfb2(intr);

            if (component < c + xfb.out[c % 2].num_components) {
               nir_io_xfb scalar_xfb;

               memset(&scalar_xfb, 0, sizeof(scalar_xfb));
               scalar_xfb.out[component % 2].num_components = is_64bit ? 2 : 1;
               scalar_xfb.out[component % 2].buffer = xfb.out[c % 2].buffer;
               scalar_xfb.out[component % 2].offset = xfb.out[c % 2].offset +
                                                      component - c;
               if (component < 2)
                  nir_intrinsic_set_io_xfb(chan_intr, scalar_xfb);
               else
                  nir_intrinsic_set_io_xfb2(chan_intr, scalar_xfb);
               break;
            }
         }
      }

      /* value */
      chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      /* offset and vertex (if needed) */
      for (unsigned j = 1; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; ++j)
         chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);
      if (newc + newi > 3) {
         nir_src *src = nir_get_io_offset_src(chan_intr);
         nir_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
         *src = nir_src_for_ssa(offset);
      }

      nir_builder_instr_insert(b, &chan_intr->instr);
   }

   nir_instr_remove(&intr->instr);
}

static void
lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *value = intr->src[0].ssa;
   nir_def *base_offset = nir_get_io_offset_src(intr)->ssa;

   /* iterate wrmask instead of num_components to handle split components */
   u_foreach_bit(i, nir_intrinsic_write_mask(intr)) {
      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      chan_intr->num_components = 1;

      nir_intrinsic_set_write_mask(chan_intr, 0x1);
      nir_intrinsic_set_align_offset(chan_intr,
                                     (nir_intrinsic_align_offset(intr) +
                                      i * (value->bit_size / 8)) %
                                        nir_intrinsic_align_mul(intr));
      nir_intrinsic_set_align_mul(chan_intr, nir_intrinsic_align_mul(intr));
      if (nir_intrinsic_has_access(intr))
         nir_intrinsic_set_access(chan_intr, nir_intrinsic_access(intr));
      if (nir_intrinsic_has_base(intr))
         nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));

      /* value */
      chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      for (unsigned j = 1; j < nir_intrinsic_infos[intr->intrinsic].num_srcs - 1; j++)
         chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);

      /* increment offset per component */
      nir_def *offset = nir_iadd_imm(b, base_offset, i * (value->bit_size / 8));
      *nir_get_io_offset_src(chan_intr) = nir_src_for_ssa(offset);

      nir_builder_instr_insert(b, &chan_intr->instr);
   }

   nir_instr_remove(&intr->instr);
}

struct scalarize_state {
   nir_variable_mode mask;
   nir_instr_filter_cb filter;
   void *filter_data;
};

static bool
nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
{
   struct scalarize_state *state = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->num_components == 1)
      return false;

   if ((intr->intrinsic == nir_intrinsic_load_input ||
        intr->intrinsic == nir_intrinsic_load_per_vertex_input ||
        intr->intrinsic == nir_intrinsic_load_interpolated_input) &&
       (state->mask & nir_var_shader_in) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_load_input_to_scalar(b, intr);
      return true;
   }

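   /* Output loads use the same slot/component layout as input loads, so
    * they are scalarized by the same helper.
    */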
   if ((intr->intrinsic == nir_intrinsic_load_output ||
        intr->intrinsic == nir_intrinsic_load_per_vertex_output) &&
       (state->mask & nir_var_shader_out) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_load_input_to_scalar(b, intr);
      return true;
   }

   if (((intr->intrinsic == nir_intrinsic_load_ubo && (state->mask & nir_var_mem_ubo)) ||
        (intr->intrinsic == nir_intrinsic_load_ssbo && (state->mask & nir_var_mem_ssbo)) ||
        (intr->intrinsic == nir_intrinsic_load_global && (state->mask & nir_var_mem_global)) ||
        (intr->intrinsic == nir_intrinsic_load_shared && (state->mask & nir_var_mem_shared))) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_load_to_scalar(b, intr);
      return true;
   }

   if ((intr->intrinsic == nir_intrinsic_store_output ||
        intr->intrinsic == nir_intrinsic_store_per_vertex_output) &&
       (state->mask & nir_var_shader_out) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_store_output_to_scalar(b, intr);
      return true;
   }

   if (((intr->intrinsic == nir_intrinsic_store_ssbo && (state->mask & nir_var_mem_ssbo)) ||
        (intr->intrinsic == nir_intrinsic_store_global && (state->mask & nir_var_mem_global)) ||
        (intr->intrinsic == nir_intrinsic_store_shared && (state->mask & nir_var_mem_shared))) &&
       (!state->filter || state->filter(instr, state->filter_data))) {
      lower_store_to_scalar(b, intr);
      return true;
   }

   return false;
}

bool
nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask, nir_instr_filter_cb filter, void *filter_data)
{
   struct scalarize_state state = {
      mask,
      filter,
      filter_data
   };
   return nir_shader_instructions_pass(shader,
                                       nir_lower_io_to_scalar_instr,
                                       nir_metadata_block_index |
                                          nir_metadata_dominance,
                                       &state);
}

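/* Return the lazily-created array that maps a vector variable's channel
 * index (location_frac) to its split scalar replacement variable.
 */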
static nir_variable **
get_channel_variables(struct hash_table *ht, nir_variable *var)
{
   nir_variable **chan_vars;
   struct hash_entry *entry = _mesa_hash_table_search(ht, var);
   if (!entry) {
      chan_vars = (nir_variable **)calloc(4, sizeof(nir_variable *));
      _mesa_hash_table_insert(ht, var, chan_vars);
   } else {
      chan_vars = (nir_variable **)entry->data;
   }

   return chan_vars;
}

/*
 * Note that the src deref that we are cloning is the head of the
 * chain of deref instructions from the original intrinsic, but
 * the dst we are cloning to is the tail (because chains of deref
 * instructions are created back to front).
 */
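/* For example, cloning var->array[i]->array[j] onto a fresh deref of the
 * scalar channel variable recurses to the root first, then rebuilds
 * array[i] and array[j] on top of dst_tail in source order.
 */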

static nir_deref_instr *
clone_deref_array(nir_builder *b, nir_deref_instr *dst_tail,
                  const nir_deref_instr *src_head)
{
   const nir_deref_instr *parent = nir_deref_instr_parent(src_head);

   if (!parent)
      return dst_tail;

   assert(src_head->deref_type == nir_deref_type_array);

   dst_tail = clone_deref_array(b, dst_tail, parent);

   return nir_build_deref_array(b, dst_tail,
                                src_head->arr.index.ssa);
}

static void
lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
                           nir_variable *var, struct hash_table *split_inputs,
                           struct hash_table *split_outputs)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *loads[NIR_MAX_VEC_COMPONENTS];

   nir_variable **chan_vars;
   if (var->data.mode == nir_var_shader_in) {
      chan_vars = get_channel_variables(split_inputs, var);
   } else {
      chan_vars = get_channel_variables(split_outputs, var);
   }

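   /* Create (or reuse) one scalar variable per channel; each clone keeps
    * the vector variable's properties but gets a single-channel type and
    * its own location_frac.
    */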
   for (unsigned i = 0; i < intr->num_components; i++) {
      nir_variable *chan_var = chan_vars[var->data.location_frac + i];
      if (!chan_vars[var->data.location_frac + i]) {
         chan_var = nir_variable_clone(var, b->shader);
         chan_var->data.location_frac = var->data.location_frac + i;
         chan_var->type = glsl_channel_type(chan_var->type);

         chan_vars[var->data.location_frac + i] = chan_var;

         nir_shader_add_variable(b->shader, chan_var);
      }

      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      nir_def_init(&chan_intr->instr, &chan_intr->def, 1,
                   intr->def.bit_size);
      chan_intr->num_components = 1;

      nir_deref_instr *deref = nir_build_deref_var(b, chan_var);

      deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));

      chan_intr->src[0] = nir_src_for_ssa(&deref->def);

      if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_vertex)
         chan_intr->src[1] = nir_src_for_ssa(intr->src[1].ssa);

      nir_builder_instr_insert(b, &chan_intr->instr);

      loads[i] = &chan_intr->def;
   }

   nir_def_rewrite_uses(&intr->def,
                        nir_vec(b, loads, intr->num_components));

   /* Remove the old load intrinsic */
   nir_instr_remove(&intr->instr);
}

static void
lower_store_output_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
                                   nir_variable *var,
                                   struct hash_table *split_outputs)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_def *value = intr->src[1].ssa;

   nir_variable **chan_vars = get_channel_variables(split_outputs, var);
   for (unsigned i = 0; i < intr->num_components; i++) {
      if (!(nir_intrinsic_write_mask(intr) & (1 << i)))
         continue;

      nir_variable *chan_var = chan_vars[var->data.location_frac + i];
      if (!chan_vars[var->data.location_frac + i]) {
         chan_var = nir_variable_clone(var, b->shader);
         chan_var->data.location_frac = var->data.location_frac + i;
         chan_var->type = glsl_channel_type(chan_var->type);

         chan_vars[var->data.location_frac + i] = chan_var;

         nir_shader_add_variable(b->shader, chan_var);
      }

      nir_intrinsic_instr *chan_intr =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      chan_intr->num_components = 1;

      nir_intrinsic_set_write_mask(chan_intr, 0x1);

      nir_deref_instr *deref = nir_build_deref_var(b, chan_var);

      deref = clone_deref_array(b, deref, nir_src_as_deref(intr->src[0]));

      chan_intr->src[0] = nir_src_for_ssa(&deref->def);
      chan_intr->src[1] = nir_src_for_ssa(nir_channel(b, value, i));

      nir_builder_instr_insert(b, &chan_intr->instr);
   }

   /* Remove the old store intrinsic */
   nir_instr_remove(&intr->instr);
}

struct io_to_scalar_early_state {
   struct hash_table *split_inputs, *split_outputs;
   nir_variable_mode mask;
};

static bool
nir_lower_io_to_scalar_early_instr(nir_builder *b, nir_instr *instr, void *data)
{
   struct io_to_scalar_early_state *state = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   if (intr->num_components == 1)
      return false;

   if (intr->intrinsic != nir_intrinsic_load_deref &&
       intr->intrinsic != nir_intrinsic_store_deref &&
       intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
       intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
       intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
       intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
      return false;

   nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
   if (!nir_deref_mode_is_one_of(deref, state->mask))
      return false;

   nir_variable *var = nir_deref_instr_get_variable(deref);
   nir_variable_mode mode = var->data.mode;

   /* TODO: add patch support */
   if (var->data.patch)
      return false;

   /* TODO: add doubles support */
   if (glsl_type_is_64bit(glsl_without_array(var->type)))
      return false;

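   /* Only user-defined varyings (VARYING_SLOT_VAR0 and up) are split;
    * built-in slots are skipped, except in vertex shaders, whose input
    * locations are vertex attributes rather than varying slots.
    */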
   if (!(b->shader->info.stage == MESA_SHADER_VERTEX &&
         mode == nir_var_shader_in) &&
       var->data.location < VARYING_SLOT_VAR0 &&
       var->data.location >= 0)
      return false;

   /* Don't bother splitting if we can't opt away any unused
    * components.
    */
   if (var->data.always_active_io)
      return false;

   if (var->data.must_be_shader_input)
      return false;

   /* Skip types we cannot split */
   if (glsl_type_is_matrix(glsl_without_array(var->type)) ||
       glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
      return false;

   switch (intr->intrinsic) {
   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_vertex:
   case nir_intrinsic_load_deref:
      if ((state->mask & nir_var_shader_in && mode == nir_var_shader_in) ||
          (state->mask & nir_var_shader_out && mode == nir_var_shader_out)) {
         lower_load_to_scalar_early(b, intr, var, state->split_inputs,
                                    state->split_outputs);
         return true;
      }
      break;
   case nir_intrinsic_store_deref:
      if (state->mask & nir_var_shader_out &&
          mode == nir_var_shader_out) {
         lower_store_output_to_scalar_early(b, intr, var, state->split_outputs);
         return true;
      }
      break;
   default:
      break;
   }

   return false;
}

/*
 * This function is intended to be called earlier than nir_lower_io_to_scalar(),
 * i.e. before nir_lower_io() has lowered variable derefs to explicit I/O
 * intrinsics.
 */
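/* Example (hypothetical call site): splitting all user-defined varyings
 * before nir_lower_io() runs:
 *
 *    nir_lower_io_to_scalar_early(shader, nir_var_shader_in |
 *                                         nir_var_shader_out);
 */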
bool
nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask)
{
   struct io_to_scalar_early_state state = {
      .split_inputs = _mesa_pointer_hash_table_create(NULL),
      .split_outputs = _mesa_pointer_hash_table_create(NULL),
      .mask = mask
   };

   bool progress = nir_shader_instructions_pass(shader,
                                                nir_lower_io_to_scalar_early_instr,
                                                nir_metadata_block_index |
                                                   nir_metadata_dominance,
                                                &state);

   /* Remove the old vector inputs from the shader's inputs list */
   hash_table_foreach(state.split_inputs, entry) {
      nir_variable *var = (nir_variable *)entry->key;
      exec_node_remove(&var->node);

      free(entry->data);
   }

   /* Remove the old vector outputs from the shader's outputs list */
   hash_table_foreach(state.split_outputs, entry) {
      nir_variable *var = (nir_variable *)entry->key;
      exec_node_remove(&var->node);

      free(entry->data);
   }

   _mesa_hash_table_destroy(state.split_inputs, NULL);
   _mesa_hash_table_destroy(state.split_outputs, NULL);

   nir_remove_dead_derefs(shader);

   return progress;
}