/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

/** @file nir_lower_io_to_vector.c
 *
 * Merges compatible input/output variables residing in different components
 * of the same location. It's expected that further passes such as
 * nir_lower_io_to_temporaries will combine loads and stores of the merged
 * variables, producing vector nir_load_input/nir_store_output instructions
 * when all is said and done.
 */
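/* As a rough GLSL-level sketch (illustrative only) of the kind of merge this
 * pass performs: two inputs packed into the same location,
 *
 *    layout(location = 1, component = 0) in vec2 a;
 *    layout(location = 1, component = 2) in vec2 b;
 *
 * can be replaced by a single vec4 variable at location 1, with each original
 * load rewritten to select only the relevant components of the wider load.
 */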

/* FRAG_RESULT_MAX+1 instead of just FRAG_RESULT_MAX because of how this pass
 * handles dual source blending */
#define MAX_SLOTS MAX2(VARYING_SLOT_TESS_MAX, FRAG_RESULT_MAX+1)

static unsigned
get_slot(const nir_variable *var)
{
   /* This handling of dual-source blending might not be correct when more than
    * one render target is supported, but it seems no driver supports more than
    * one. */
   return var->data.location + var->data.index;
}

static const struct glsl_type *
get_per_vertex_type(const nir_shader *shader, const nir_variable *var,
                    unsigned *num_vertices)
{
   if (nir_is_arrayed_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(var->type));
      if (num_vertices)
         *num_vertices = glsl_get_length(var->type);
      return glsl_get_array_element(var->type);
   } else {
      if (num_vertices)
         *num_vertices = 0;
      return var->type;
   }
}

static const struct glsl_type *
resize_array_vec_type(const struct glsl_type *type, unsigned num_components)
{
   if (glsl_type_is_array(type)) {
      const struct glsl_type *arr_elem =
         resize_array_vec_type(glsl_get_array_element(type), num_components);
      return glsl_array_type(arr_elem, glsl_get_length(type), 0);
   } else {
      assert(glsl_type_is_vector_or_scalar(type));
      return glsl_vector_type(glsl_get_base_type(type), num_components);
   }
}

static bool
variables_can_merge(const nir_shader *shader,
                    const nir_variable *a, const nir_variable *b,
                    bool same_array_structure)
{
   if (a->data.compact || b->data.compact)
      return false;

   if (a->data.per_view || b->data.per_view)
      return false;

   const struct glsl_type *a_type_tail = a->type;
   const struct glsl_type *b_type_tail = b->type;

   if (nir_is_arrayed_io(a, shader->info.stage) !=
       nir_is_arrayed_io(b, shader->info.stage))
      return false;

   /* They must have the same array structure */
   if (same_array_structure) {
      while (glsl_type_is_array(a_type_tail)) {
         if (!glsl_type_is_array(b_type_tail))
            return false;

         if (glsl_get_length(a_type_tail) != glsl_get_length(b_type_tail))
            return false;

         a_type_tail = glsl_get_array_element(a_type_tail);
         b_type_tail = glsl_get_array_element(b_type_tail);
      }
      if (glsl_type_is_array(b_type_tail))
         return false;
   } else {
      a_type_tail = glsl_without_array(a_type_tail);
      b_type_tail = glsl_without_array(b_type_tail);
   }

   if (!glsl_type_is_vector_or_scalar(a_type_tail) ||
       !glsl_type_is_vector_or_scalar(b_type_tail))
      return false;

   if (glsl_get_base_type(a_type_tail) != glsl_get_base_type(b_type_tail))
      return false;

   /* TODO: add 64/16-bit support? */
   if (glsl_get_bit_size(a_type_tail) != 32)
      return false;

   assert(a->data.mode == b->data.mode);
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_in &&
       (a->data.interpolation != b->data.interpolation ||
        a->data.centroid != b->data.centroid ||
        a->data.sample != b->data.sample))
      return false;

   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_out &&
       a->data.index != b->data.index)
      return false;

   /* It's tricky to merge XFB outputs correctly, because we need there
    * to not be any overlaps when we get to
    * nir_gather_xfb_info_with_varyings later on. We'll end up
    * triggering an assert there if we merge here.
    */
   if ((shader->info.stage == MESA_SHADER_VERTEX ||
        shader->info.stage == MESA_SHADER_TESS_EVAL ||
        shader->info.stage == MESA_SHADER_GEOMETRY) &&
       a->data.mode == nir_var_shader_out &&
       (a->data.explicit_xfb_buffer || b->data.explicit_xfb_buffer))
      return false;

   return true;
}

static const struct glsl_type *
get_flat_type(const nir_shader *shader, nir_variable *old_vars[MAX_SLOTS][4],
              unsigned *loc, nir_variable **first_var, unsigned *num_vertices)
{
   unsigned todo = 1;
   unsigned slots = 0;
   unsigned num_vars = 0;
   enum glsl_base_type base;
   *num_vertices = 0;
   *first_var = NULL;

   while (todo) {
      assert(*loc < MAX_SLOTS);
      for (unsigned frac = 0; frac < 4; frac++) {
         nir_variable *var = old_vars[*loc][frac];
         if (!var)
            continue;
         if ((*first_var &&
              !variables_can_merge(shader, var, *first_var, false)) ||
             var->data.compact) {
            (*loc)++;
            return NULL;
         }

         if (!*first_var) {
            if (!glsl_type_is_vector_or_scalar(glsl_without_array(var->type))) {
               (*loc)++;
               return NULL;
            }
            *first_var = var;
            base = glsl_get_base_type(
               glsl_without_array(get_per_vertex_type(shader, var, NULL)));
         }

         bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                      var->data.mode == nir_var_shader_in;
         unsigned var_slots = glsl_count_attribute_slots(
            get_per_vertex_type(shader, var, num_vertices), vs_in);
         todo = MAX2(todo, var_slots);
         num_vars++;
      }
      todo--;
      slots++;
      (*loc)++;
   }

   if (num_vars <= 1)
      return NULL;

   if (slots == 1)
      return glsl_vector_type(base, 4);
   else
      return glsl_array_type(glsl_vector_type(base, 4), slots, 0);
}

static bool
create_new_io_vars(nir_shader *shader, nir_variable_mode mode,
                   nir_variable *new_vars[MAX_SLOTS][4],
                   bool flat_vars[MAX_SLOTS])
{
   nir_variable *old_vars[MAX_SLOTS][4] = {{0}};

   bool has_io_var = false;
   nir_foreach_variable_with_modes(var, shader, mode) {
      unsigned frac = var->data.location_frac;
      old_vars[get_slot(var)][frac] = var;
      has_io_var = true;
   }

   if (!has_io_var)
      return false;

   bool merged_any_vars = false;

   for (unsigned loc = 0; loc < MAX_SLOTS; loc++) {
      unsigned frac = 0;
      while (frac < 4) {
         nir_variable *first_var = old_vars[loc][frac];
         if (!first_var) {
            frac++;
            continue;
         }

         int first = frac;
         bool found_merge = false;

         while (frac < 4) {
            nir_variable *var = old_vars[loc][frac];
            if (!var)
               break;

            if (var != first_var) {
               if (!variables_can_merge(shader, first_var, var, true))
                  break;

               found_merge = true;
            }

            const unsigned num_components =
               glsl_get_components(glsl_without_array(var->type));
            if (!num_components) {
               assert(frac == 0);
               frac++;
               break; /* The type was a struct. */
            }

            /* We had better not have any overlapping vars */
            for (unsigned i = 1; i < num_components; i++)
               assert(old_vars[loc][frac + i] == NULL);

            frac += num_components;
         }

         if (!found_merge)
            continue;

         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(old_vars[loc][first], shader);
         var->data.location_frac = first;
         var->type = resize_array_vec_type(var->type, frac - first);

         nir_shader_add_variable(shader, var);
         for (unsigned i = first; i < frac; i++) {
            new_vars[loc][i] = var;
            old_vars[loc][i] = NULL;
         }

         old_vars[loc][first] = var;
      }
   }

   /* "flat" mode: tries to ensure there is at most one variable per slot by
    * merging variables into vec4s
    */
   for (unsigned loc = 0; loc < MAX_SLOTS;) {
      nir_variable *first_var;
      unsigned num_vertices;
      unsigned new_loc = loc;
      const struct glsl_type *flat_type =
         get_flat_type(shader, old_vars, &new_loc, &first_var, &num_vertices);
      if (flat_type) {
         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(first_var, shader);
         var->data.location_frac = 0;
         if (num_vertices)
            var->type = glsl_array_type(flat_type, num_vertices, 0);
         else
            var->type = flat_type;

         nir_shader_add_variable(shader, var);
         unsigned num_slots = MAX2(glsl_get_length(flat_type), 1);
         for (unsigned i = 0; i < num_slots; i++) {
            for (unsigned j = 0; j < 4; j++)
               new_vars[loc + i][j] = var;
            flat_vars[loc + i] = true;
         }
      }
      loc = new_loc;
   }

   return merged_any_vars;
}

static nir_deref_instr *
build_array_deref_of_new_var(nir_builder *b, nir_variable *new_var,
                             nir_deref_instr *leader)
{
   if (leader->deref_type == nir_deref_type_var)
      return nir_build_deref_var(b, new_var);

   nir_deref_instr *parent =
      build_array_deref_of_new_var(b, new_var, nir_deref_instr_parent(leader));

   return nir_build_deref_follower(b, parent, leader);
}

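/* Computes the flat slot offset selected by an array deref chain: walking from
 * the leaf back to the variable, it sums index * attribute-slots-per-element
 * for each array level and adds the result to `base`.  When per_vertex is set,
 * the outermost (vertex) index is skipped since it does not contribute to the
 * location offset.
 */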
static nir_ssa_def *
build_array_index(nir_builder *b, nir_deref_instr *deref, nir_ssa_def *base,
                  bool vs_in, bool per_vertex)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return base;
   case nir_deref_type_array: {
      nir_ssa_def *index = nir_i2i(b, deref->arr.index.ssa,
                                   deref->dest.ssa.bit_size);

      if (nir_deref_instr_parent(deref)->deref_type == nir_deref_type_var &&
          per_vertex)
         return base;

      return nir_iadd(
         b, build_array_index(b, nir_deref_instr_parent(deref), base, vs_in, per_vertex),
         nir_amul_imm(b, index, glsl_count_attribute_slots(deref->type, vs_in)));
   }
   default:
      unreachable("Invalid deref instruction type");
   }
}

static nir_deref_instr *
build_array_deref_of_new_var_flat(nir_shader *shader,
                                  nir_builder *b, nir_variable *new_var,
                                  nir_deref_instr *leader, unsigned base)
{
   nir_deref_instr *deref = nir_build_deref_var(b, new_var);

   bool per_vertex = nir_is_arrayed_io(new_var, shader->info.stage);
   if (per_vertex) {
      nir_deref_path path;
      nir_deref_path_init(&path, leader, NULL);

      assert(path.path[0]->deref_type == nir_deref_type_var);
      nir_deref_instr *p = path.path[1];
      nir_deref_path_finish(&path);

      nir_ssa_def *index = p->arr.index.ssa;
      deref = nir_build_deref_array(b, deref, index);
   }

   if (!glsl_type_is_array(deref->type))
      return deref;

   bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                new_var->data.mode == nir_var_shader_in;
   return nir_build_deref_array(b, deref,
      build_array_index(b, leader, nir_imm_int(b, base), vs_in, per_vertex));
}

ASSERTED static bool
nir_shader_can_read_output(const shader_info *info)
{
   switch (info->stage) {
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_FRAGMENT:
      return true;

   case MESA_SHADER_TASK:
   case MESA_SHADER_MESH:
      /* TODO(mesh): This will not be allowed on EXT. */
      return true;

   default:
      return false;
   }
}

static bool
nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
{
   assert(!(modes & ~(nir_var_shader_in | nir_var_shader_out)));

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_shader *shader = impl->function->shader;
   nir_variable *new_inputs[MAX_SLOTS][4] = {{0}};
   nir_variable *new_outputs[MAX_SLOTS][4] = {{0}};
   bool flat_inputs[MAX_SLOTS] = {0};
   bool flat_outputs[MAX_SLOTS] = {0};

   if (modes & nir_var_shader_in) {
      /* Vertex shaders support overlapping inputs.  We don't do those */
      assert(b.shader->info.stage != MESA_SHADER_VERTEX);

      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, nir_var_shader_in,
                              new_inputs, flat_inputs))
         modes &= ~nir_var_shader_in;
   }

   if (modes & nir_var_shader_out) {
      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, nir_var_shader_out,
                              new_outputs, flat_outputs))
         modes &= ~nir_var_shader_out;
   }

   if (!modes)
      return false;

   bool progress = false;

   /* Actually lower all the IO load/store intrinsics.  Load instructions are
    * lowered to a vector load and an ALU instruction to grab the channels we
    * want.  Outputs are lowered to a write-masked store of the vector output.
    * For non-TCS outputs, we then run nir_lower_io_to_temporaries at the end
    * to clean up the partial writes.
    */
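   /* For instance (an illustrative sketch, not specific to any driver): a load
    * of a vec2 variable that was merged in at component 2 of its location
    * becomes a 4-wide load of the merged vec4 variable followed by a channel
    * select of components 2-3, and the matching store becomes a 4-wide store
    * with write mask 0xc.
    */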
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref:
         case nir_intrinsic_interp_deref_at_centroid:
         case nir_intrinsic_interp_deref_at_sample:
         case nir_intrinsic_interp_deref_at_offset:
         case nir_intrinsic_interp_deref_at_vertex: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!nir_deref_mode_is_one_of(old_deref, modes))
               break;

            if (nir_deref_mode_is(old_deref, nir_var_shader_out))
               assert(nir_shader_can_read_output(&b.shader->info));

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = old_var->data.mode == nir_var_shader_in ?
                                    new_inputs[loc][old_frac] :
                                    new_outputs[loc][old_frac];
            bool flat = old_var->data.mode == nir_var_shader_in ?
                        flat_inputs[loc] : flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            nir_component_mask_t vec4_comp_mask =
               ((1 << intrin->num_components) - 1) << old_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the load to use the new variable and only select a
             * portion of the result.
             */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);
            intrin->dest.ssa.num_components = intrin->num_components;

            b.cursor = nir_after_instr(&intrin->instr);

            nir_ssa_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
                                                vec4_comp_mask >> new_frac);
            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                           new_vec,
                                           new_vec->parent_instr);

            progress = true;
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!nir_deref_mode_is(old_deref, nir_var_shader_out))
               break;

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = new_outputs[loc][old_frac];
            bool flat = flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the store to be a masked store to the new variable */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);

            nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);

            assert(intrin->src[1].is_ssa);
            nir_ssa_def *old_value = intrin->src[1].ssa;
            nir_ssa_scalar comps[4];
            for (unsigned c = 0; c < intrin->num_components; c++) {
               if (new_frac + c >= old_frac &&
                   (old_wrmask & 1 << (new_frac + c - old_frac))) {
                  comps[c] = nir_get_ssa_scalar(old_value,
                                                new_frac + c - old_frac);
               } else {
                  comps[c] = nir_get_ssa_scalar(nir_ssa_undef(&b, old_value->num_components,
                                                              old_value->bit_size), 0);
               }
            }
            nir_ssa_def *new_value = nir_vec_scalars(&b, comps, intrin->num_components);
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
                                  nir_src_for_ssa(new_value));

            nir_intrinsic_set_write_mask(intrin,
                                         old_wrmask << (old_frac - new_frac));

            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   return progress;
}

bool
nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_lower_io_to_vector_impl(function->impl, modes);
   }

   return progress;
}

static bool
nir_vectorize_tess_levels_impl(nir_function_impl *impl)
{
   bool progress = false;
   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic != nir_intrinsic_load_deref &&
             intrin->intrinsic != nir_intrinsic_store_deref)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (!nir_deref_mode_is(deref, nir_var_shader_out))
            continue;

         nir_variable *var = nir_deref_instr_get_variable(deref);
         if (var->data.location != VARYING_SLOT_TESS_LEVEL_OUTER &&
             var->data.location != VARYING_SLOT_TESS_LEVEL_INNER)
            continue;

         assert(deref->deref_type == nir_deref_type_array);
         assert(nir_src_is_const(deref->arr.index));
         unsigned index = nir_src_as_uint(deref->arr.index);
         unsigned vec_size = glsl_get_vector_elements(var->type);

         b.cursor = nir_before_instr(instr);
         nir_ssa_def *new_deref = &nir_build_deref_var(&b, var)->dest.ssa;
         nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(new_deref));

         nir_deref_instr_remove_if_unused(deref);

         intrin->num_components = vec_size;

         /* Handle out of bounds access. */
         if (index >= vec_size) {
            if (intrin->intrinsic == nir_intrinsic_load_deref) {
               /* Return undef from out of bounds loads. */
               b.cursor = nir_after_instr(instr);
               nir_ssa_def *val = &intrin->dest.ssa;
               nir_ssa_def *u = nir_ssa_undef(&b, val->num_components, val->bit_size);
               nir_ssa_def_rewrite_uses(val, u);
            }

            /* Finally, remove the out of bounds access. */
            nir_instr_remove(instr);
            progress = true;
            continue;
         }

         if (intrin->intrinsic == nir_intrinsic_store_deref) {
            nir_intrinsic_set_write_mask(intrin, 1 << index);
            nir_ssa_def *new_val = nir_ssa_undef(&b, intrin->num_components, 32);
            new_val = nir_vector_insert_imm(&b, new_val, intrin->src[1].ssa, index);
            nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(new_val));
         } else {
            b.cursor = nir_after_instr(instr);
            nir_ssa_def *val = &intrin->dest.ssa;
            val->num_components = intrin->num_components;
            nir_ssa_def *comp = nir_channel(&b, val, index);
            nir_ssa_def_rewrite_uses_after(val, comp, comp->parent_instr);
         }

         progress = true;
      }
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_block_index | nir_metadata_dominance);
   else
      nir_metadata_preserve(impl, nir_metadata_all);

   return progress;
}

/* Make the tess factor variables vectors instead of compact arrays, so accesses
 * can be combined by nir_opt_cse()/nir_opt_combine_stores().
 */
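/* For example, VARYING_SLOT_TESS_LEVEL_OUTER, declared as a compact float[4],
 * becomes a vec4 variable; each original scalar access is then rewritten into
 * a write-masked vector store or a channel select on the vector load.
 */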
bool
nir_vectorize_tess_levels(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER ||
          var->data.location == VARYING_SLOT_TESS_LEVEL_INNER) {
         var->type = glsl_vector_type(GLSL_TYPE_FLOAT, glsl_get_length(var->type));
         var->data.compact = false;
         progress = true;
      }
   }

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_vectorize_tess_levels_impl(function->impl);
   }

   return progress;
}