/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

/** @file nir_lower_io_to_vector.c
 *
 * Merges compatible input/output variables residing in different components
 * of the same location. It's expected that further passes such as
 * nir_lower_io_to_temporaries will combine loads and stores of the merged
 * variables, producing vector nir_load_input/nir_store_output instructions
 * when all is said and done.
 */

/* FRAG_RESULT_MAX+1 instead of just FRAG_RESULT_MAX because of how this pass
 * handles dual-source blending */
#define MAX_SLOTS MAX2(VARYING_SLOT_TESS_MAX, FRAG_RESULT_MAX + 1)

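/* Returns the slot at which a variable's components are merged. Adding
 * data.index gives the second dual-source blend output its own slot.
 */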
static unsigned
get_slot(const nir_variable *var)
{
   /* This handling of dual-source blending might not be correct when more
    * than one render target is supported, but it seems no driver supports
    * more than one. */
   return var->data.location + var->data.index;
}

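/* Returns the type of an I/O variable with the outer per-vertex array
 * stripped off for arrayed I/O (e.g. TCS/TES/GS inputs), and optionally
 * the number of vertices that array covers (0 if not arrayed).
 */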
static const struct glsl_type *
get_per_vertex_type(const nir_shader *shader, const nir_variable *var,
                    unsigned *num_vertices)
{
   if (nir_is_arrayed_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(var->type));
      if (num_vertices)
         *num_vertices = glsl_get_length(var->type);
      return glsl_get_array_element(var->type);
   } else {
      if (num_vertices)
         *num_vertices = 0;
      return var->type;
   }
}

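/* Recursively rebuilds a (possibly nested array of) vector/scalar type with
 * its tail resized to num_components, preserving array lengths and the base
 * type.
 */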
static const struct glsl_type *
resize_array_vec_type(const struct glsl_type *type, unsigned num_components)
{
   if (glsl_type_is_array(type)) {
      const struct glsl_type *arr_elem =
         resize_array_vec_type(glsl_get_array_element(type), num_components);
      return glsl_array_type(arr_elem, glsl_get_length(type), 0);
   } else {
      assert(glsl_type_is_vector_or_scalar(type));
      return glsl_vector_type(glsl_get_base_type(type), num_components);
   }
}

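/* Checks whether two variables may be merged into one vector. With
 * same_array_structure, every array dimension must match exactly; otherwise
 * only the vector/scalar tails (with arrays stripped) are compared.
 */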
static bool
variables_can_merge(const nir_shader *shader,
                    const nir_variable *a, const nir_variable *b,
                    bool same_array_structure)
{
   if (a->data.compact || b->data.compact)
      return false;

   if (a->data.per_view || b->data.per_view)
      return false;

   const struct glsl_type *a_type_tail = a->type;
   const struct glsl_type *b_type_tail = b->type;

   if (nir_is_arrayed_io(a, shader->info.stage) !=
       nir_is_arrayed_io(b, shader->info.stage))
      return false;

   /* They must have the same array structure */
   if (same_array_structure) {
      while (glsl_type_is_array(a_type_tail)) {
         if (!glsl_type_is_array(b_type_tail))
            return false;

         if (glsl_get_length(a_type_tail) != glsl_get_length(b_type_tail))
            return false;

         a_type_tail = glsl_get_array_element(a_type_tail);
         b_type_tail = glsl_get_array_element(b_type_tail);
      }
      if (glsl_type_is_array(b_type_tail))
         return false;
   } else {
      a_type_tail = glsl_without_array(a_type_tail);
      b_type_tail = glsl_without_array(b_type_tail);
   }

   if (!glsl_type_is_vector_or_scalar(a_type_tail) ||
       !glsl_type_is_vector_or_scalar(b_type_tail))
      return false;

   if (glsl_get_base_type(a_type_tail) != glsl_get_base_type(b_type_tail))
      return false;

   /* TODO: add 64/16-bit support? */
   if (glsl_get_bit_size(a_type_tail) != 32)
      return false;

   assert(a->data.mode == b->data.mode);
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_in &&
       (a->data.interpolation != b->data.interpolation ||
        a->data.centroid != b->data.centroid ||
        a->data.sample != b->data.sample))
      return false;

   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_out &&
       a->data.index != b->data.index)
      return false;

   /* It's tricky to merge XFB outputs correctly, because we need there to
    * not be any overlaps when we get to nir_gather_xfb_info_with_varyings
    * later on. We'll end up triggering an assert there if we merge here.
    */
   if ((shader->info.stage == MESA_SHADER_VERTEX ||
        shader->info.stage == MESA_SHADER_TESS_EVAL ||
        shader->info.stage == MESA_SHADER_GEOMETRY) &&
       a->data.mode == nir_var_shader_out &&
       (a->data.explicit_xfb_buffer || b->data.explicit_xfb_buffer))
      return false;

   return true;
}

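/* Scans consecutive slots starting at *loc and, if two or more loosely
 * mergeable variables are found, returns a vec4 (or array-of-vec4) type
 * covering all the scanned slots; returns NULL otherwise. On return, *loc
 * points past the consumed slots and *first_var holds the first variable
 * encountered.
 */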
static const struct glsl_type *
get_flat_type(const nir_shader *shader, nir_variable *old_vars[MAX_SLOTS][4],
              unsigned *loc, nir_variable **first_var, unsigned *num_vertices)
{
   unsigned todo = 1;
   unsigned slots = 0;
   unsigned num_vars = 0;
   enum glsl_base_type base;
   *num_vertices = 0;
   *first_var = NULL;

   while (todo) {
      assert(*loc < MAX_SLOTS);
      for (unsigned frac = 0; frac < 4; frac++) {
         nir_variable *var = old_vars[*loc][frac];
         if (!var)
            continue;
         if ((*first_var &&
              !variables_can_merge(shader, var, *first_var, false)) ||
             var->data.compact) {
            (*loc)++;
            return NULL;
         }

         if (!*first_var) {
            if (!glsl_type_is_vector_or_scalar(glsl_without_array(var->type))) {
               (*loc)++;
               return NULL;
            }
            *first_var = var;
            base = glsl_get_base_type(
               glsl_without_array(get_per_vertex_type(shader, var, NULL)));
         }

         bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                      var->data.mode == nir_var_shader_in;
         unsigned var_slots = glsl_count_attribute_slots(
            get_per_vertex_type(shader, var, num_vertices), vs_in);
         todo = MAX2(todo, var_slots);
         num_vars++;
      }
      todo--;
      slots++;
      (*loc)++;
   }

   if (num_vars <= 1)
      return NULL;

   if (slots == 1)
      return glsl_vector_type(base, 4);
   else
      return glsl_array_type(glsl_vector_type(base, 4), slots, 0);
}

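/* Builds the merged variables for one mode. First does a conservative merge
 * of vector/scalar variables sharing a location (requiring identical array
 * structure), then falls back to the "flat" vec4 merge for whole slots.
 * Returns true if anything was merged.
 */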
static bool
create_new_io_vars(nir_shader *shader, nir_variable_mode mode,
                   nir_variable *new_vars[MAX_SLOTS][4],
                   bool flat_vars[MAX_SLOTS])
{
   nir_variable *old_vars[MAX_SLOTS][4] = {{0}};

   bool has_io_var = false;
   nir_foreach_variable_with_modes(var, shader, mode) {
      unsigned frac = var->data.location_frac;
      old_vars[get_slot(var)][frac] = var;
      has_io_var = true;
   }

   if (!has_io_var)
      return false;

   bool merged_any_vars = false;

   for (unsigned loc = 0; loc < MAX_SLOTS; loc++) {
      unsigned frac = 0;
      while (frac < 4) {
         nir_variable *first_var = old_vars[loc][frac];
         if (!first_var) {
            frac++;
            continue;
         }

         int first = frac;
         bool found_merge = false;

         while (frac < 4) {
            nir_variable *var = old_vars[loc][frac];
            if (!var)
               break;

            if (var != first_var) {
               if (!variables_can_merge(shader, first_var, var, true))
                  break;

               found_merge = true;
            }

            const unsigned num_components =
               glsl_get_components(glsl_without_array(var->type));
            if (!num_components) {
               assert(frac == 0);
               frac++;
               break; /* The type was a struct. */
            }

            /* We had better not have any overlapping vars */
            for (unsigned i = 1; i < num_components; i++)
               assert(old_vars[loc][frac + i] == NULL);

            frac += num_components;
         }

         if (!found_merge)
            continue;

         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(old_vars[loc][first], shader);
         var->data.location_frac = first;
         var->type = resize_array_vec_type(var->type, frac - first);

         nir_shader_add_variable(shader, var);
         for (unsigned i = first; i < frac; i++) {
            new_vars[loc][i] = var;
            old_vars[loc][i] = NULL;
         }

         old_vars[loc][first] = var;
      }
   }

   /* "flat" mode: tries to ensure there is at most one variable per slot by
    * merging variables into vec4s
    */
   for (unsigned loc = 0; loc < MAX_SLOTS;) {
      nir_variable *first_var;
      unsigned num_vertices;
      unsigned new_loc = loc;
      const struct glsl_type *flat_type =
         get_flat_type(shader, old_vars, &new_loc, &first_var, &num_vertices);
      if (flat_type) {
         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(first_var, shader);
         var->data.location_frac = 0;
         if (num_vertices)
            var->type = glsl_array_type(flat_type, num_vertices, 0);
         else
            var->type = flat_type;

         nir_shader_add_variable(shader, var);
         for (unsigned i = 0; i < glsl_get_length(flat_type); i++) {
            for (unsigned j = 0; j < 4; j++)
               new_vars[loc + i][j] = var;
            flat_vars[loc + i] = true;
         }
      }
      loc = new_loc;
   }

   return merged_any_vars;
}

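/* Recursively rebuilds the deref chain of 'leader' on top of the merged
 * variable.
 */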
static nir_deref_instr *
build_array_deref_of_new_var(nir_builder *b, nir_variable *new_var,
                             nir_deref_instr *leader)
{
   if (leader->deref_type == nir_deref_type_var)
      return nir_build_deref_var(b, new_var);

   nir_deref_instr *parent =
      build_array_deref_of_new_var(b, new_var, nir_deref_instr_parent(leader));

   return nir_build_deref_follower(b, parent, leader);
}

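/* Flattens the array indices of a deref chain into a single slot offset
 * added to 'base', scaling each index by the number of attribute slots its
 * element type occupies. The outermost vertex index of per-vertex I/O is
 * skipped, since it is handled separately.
 */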
static nir_ssa_def *
build_array_index(nir_builder *b, nir_deref_instr *deref, nir_ssa_def *base,
                  bool vs_in, bool per_vertex)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return base;
   case nir_deref_type_array: {
      nir_ssa_def *index = nir_i2i(b, deref->arr.index.ssa,
                                   deref->dest.ssa.bit_size);

      if (nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var &&
          per_vertex)
         return base;

      return nir_iadd(
         b, build_array_index(b, nir_deref_instr_parent(deref), base, vs_in, per_vertex),
         nir_amul_imm(b, index, glsl_count_attribute_slots(deref->type, vs_in)));
   }
   default:
      unreachable("Invalid deref instruction type");
   }
}

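/* Builds a deref into a flat (vec4 or array-of-vec4) replacement variable,
 * keeping the per-vertex index where present and converting the remaining
 * array indices of 'leader' into a single slot offset relative to 'base'.
 */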
static nir_deref_instr *
build_array_deref_of_new_var_flat(nir_shader *shader,
                                  nir_builder *b, nir_variable *new_var,
                                  nir_deref_instr *leader, unsigned base)
{
   nir_deref_instr *deref = nir_build_deref_var(b, new_var);

   bool per_vertex = nir_is_arrayed_io(new_var, shader->info.stage);
   if (per_vertex) {
      nir_deref_path path;
      nir_deref_path_init(&path, leader, NULL);

      assert(path.path[0]->deref_type == nir_deref_type_var);
      nir_deref_instr *p = path.path[1];
      nir_deref_path_finish(&path);

      nir_ssa_def *index = p->arr.index.ssa;
      deref = nir_build_deref_array(b, deref, index);
   }

   if (!glsl_type_is_array(deref->type))
      return deref;

   bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                new_var->data.mode == nir_var_shader_in;
   return nir_build_deref_array(b, deref,
      build_array_index(b, leader, nir_imm_int(b, base), vs_in, per_vertex));
}

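/* Returns whether a stage is allowed to read back its own outputs; only used
 * to assert that output loads are legal.
 */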
ASSERTED static bool
nir_shader_can_read_output(const shader_info *info)
{
   switch (info->stage) {
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_FRAGMENT:
      return true;

   case MESA_SHADER_TASK:
   case MESA_SHADER_MESH:
      /* TODO(mesh): This will not be allowed on EXT. */
      return true;

   default:
      return false;
   }
}

static bool
nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
{
   assert(!(modes & ~(nir_var_shader_in | nir_var_shader_out)));

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_shader *shader = impl->function->shader;
   nir_variable *new_inputs[MAX_SLOTS][4] = {{0}};
   nir_variable *new_outputs[MAX_SLOTS][4] = {{0}};
   bool flat_inputs[MAX_SLOTS] = {0};
   bool flat_outputs[MAX_SLOTS] = {0};

   if (modes & nir_var_shader_in) {
      /* Vertex shaders support overlapping inputs. We don't do those */
      assert(b.shader->info.stage != MESA_SHADER_VERTEX);

      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, nir_var_shader_in,
                              new_inputs, flat_inputs))
         modes &= ~nir_var_shader_in;
   }

   if (modes & nir_var_shader_out) {
      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, nir_var_shader_out,
                              new_outputs, flat_outputs))
         modes &= ~nir_var_shader_out;
   }

   if (!modes)
      return false;

   bool progress = false;

   /* Actually lower all the IO load/store intrinsics. Load instructions are
    * lowered to a vector load and an ALU instruction to grab the channels we
    * want. Outputs are lowered to a write-masked store of the vector output.
    * For non-TCS outputs, we then run nir_lower_io_to_temporaries at the end
    * to clean up the partial writes.
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref:
         case nir_intrinsic_interp_deref_at_centroid:
         case nir_intrinsic_interp_deref_at_sample:
         case nir_intrinsic_interp_deref_at_offset:
         case nir_intrinsic_interp_deref_at_vertex: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!nir_deref_mode_is_one_of(old_deref, modes))
               break;

            if (nir_deref_mode_is(old_deref, nir_var_shader_out))
               assert(nir_shader_can_read_output(&b.shader->info));

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = old_var->data.mode == nir_var_shader_in ?
                                    new_inputs[loc][old_frac] :
                                    new_outputs[loc][old_frac];
            bool flat = old_var->data.mode == nir_var_shader_in ?
                        flat_inputs[loc] : flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            nir_component_mask_t vec4_comp_mask =
               ((1 << intrin->num_components) - 1) << old_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the load to use the new variable and only select a
             * portion of the result.
             */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);
            intrin->dest.ssa.num_components = intrin->num_components;

            b.cursor = nir_after_instr(&intrin->instr);

            nir_ssa_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
                                                vec4_comp_mask >> new_frac);
            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                           new_vec,
                                           new_vec->parent_instr);

            progress = true;
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!nir_deref_mode_is(old_deref, nir_var_shader_out))
               break;

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = new_outputs[loc][old_frac];
            bool flat = flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the store to be a masked store to the new variable */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);

            nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);

            assert(intrin->src[1].is_ssa);
            nir_ssa_def *old_value = intrin->src[1].ssa;
            nir_ssa_def *comps[4];
            for (unsigned c = 0; c < intrin->num_components; c++) {
               if (new_frac + c >= old_frac &&
                   (old_wrmask & 1 << (new_frac + c - old_frac))) {
                  comps[c] = nir_channel(&b, old_value,
                                         new_frac + c - old_frac);
               } else {
                  comps[c] = nir_ssa_undef(&b, old_value->num_components,
                                           old_value->bit_size);
               }
            }
            nir_ssa_def *new_value = nir_vec(&b, comps, intrin->num_components);
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
                                  nir_src_for_ssa(new_value));

            nir_intrinsic_set_write_mask(intrin,
                                         old_wrmask << (old_frac - new_frac));

            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   return progress;
}

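/* Entry point: merges compatible scalar/vector I/O variables in the given
 * modes and rewrites their loads and stores to use the merged vectors.
 */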
bool
nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_lower_io_to_vector_impl(function->impl, modes);
   }

   return progress;
}

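/* Rewrites constant-indexed accesses of gl_TessLevel{Outer,Inner}[i] into
 * whole-vector accesses, using a write mask for stores and a channel select
 * for loads.
 */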
static bool
nir_vectorize_tess_levels_impl(nir_function_impl *impl)
{
   bool progress = false;
   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic != nir_intrinsic_load_deref &&
             intrin->intrinsic != nir_intrinsic_store_deref)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (!nir_deref_mode_is(deref, nir_var_shader_out))
            continue;

         nir_variable *var = nir_deref_instr_get_variable(deref);
         if (var->data.location != VARYING_SLOT_TESS_LEVEL_OUTER &&
             var->data.location != VARYING_SLOT_TESS_LEVEL_INNER)
            continue;

         assert(deref->deref_type == nir_deref_type_array);
         assert(nir_src_is_const(deref->arr.index));
         unsigned index = nir_src_as_uint(deref->arr.index);

         b.cursor = nir_before_instr(instr);
         nir_ssa_def *new_deref = &nir_build_deref_var(&b, var)->dest.ssa;
         nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(new_deref));

         nir_deref_instr_remove_if_unused(deref);

         intrin->num_components = glsl_get_vector_elements(var->type);

         if (intrin->intrinsic == nir_intrinsic_store_deref) {
            nir_intrinsic_set_write_mask(intrin, 1 << index);
            nir_ssa_def *new_val = nir_ssa_undef(&b, intrin->num_components, 32);
            new_val = nir_vector_insert_imm(&b, new_val, intrin->src[1].ssa, index);
            nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(new_val));
         } else {
            b.cursor = nir_after_instr(instr);
            nir_ssa_def *val = &intrin->dest.ssa;
            val->num_components = intrin->num_components;
            nir_ssa_def *comp = nir_channel(&b, val, index);
            nir_ssa_def_rewrite_uses_after(val, comp, comp->parent_instr);
         }

         progress = true;
      }
   }

   return progress;
}

/* Make the tess factor variables vectors instead of compact arrays, so
 * accesses can be combined by nir_opt_cse()/nir_opt_combine_stores().
 */
bool
nir_vectorize_tess_levels(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER ||
          var->data.location == VARYING_SLOT_TESS_LEVEL_INNER) {
         var->type = glsl_vector_type(GLSL_TYPE_FLOAT, glsl_get_length(var->type));
         var->data.compact = false;
         progress = true;
      }
   }

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_vectorize_tess_levels_impl(function->impl);
   }

   return progress;
}