1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "util/hash_table.h"
25 #include "util/set.h"
26 #include "nir.h"
27 #include "nir_builder.h"
28 
29 /* This file contains various little helpers for doing simple linking in
30  * NIR.  Eventually, we'll probably want a full-blown varying packing
31  * implementation in here.  Right now, it just deletes unused things.
32  */
33 
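/* Illustrative only: one plausible way a driver's linker might call the
 * helpers in this file for a producer/consumer pair.  The exact set of calls
 * and their ordering are driver-specific; this is a sketch, not a required
 * sequence.
 *
 *    nir_link_xfb_varyings(producer, consumer);
 *    nir_remove_unused_varyings(producer, consumer);
 *    nir_compact_varyings(producer, consumer, true);
 *    nir_link_opt_varyings(producer, consumer);
 *    nir_link_varying_precision(producer, consumer);
 */
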
34 /**
35  * Returns the bits in the inputs_read or outputs_written
36  * bitfield corresponding to this variable.
37  */
38 static uint64_t
39 get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
40 {
41    if (var->data.location < 0)
42       return 0;
43 
44    unsigned location = var->data.patch ? var->data.location - VARYING_SLOT_PATCH0 : var->data.location;
45 
46    assert(var->data.mode == nir_var_shader_in ||
47           var->data.mode == nir_var_shader_out);
48    assert(var->data.location >= 0);
49    assert(location < 64);
50 
51    const struct glsl_type *type = var->type;
52    if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
53       assert(glsl_type_is_array(type));
54       type = glsl_get_array_element(type);
55    }
56 
57    unsigned slots = glsl_count_attribute_slots(type, false);
58    return BITFIELD64_MASK(slots) << location;
59 }
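
/* Worked example (illustrative): for a non-arrayed `float foo[3]` output at
 * VARYING_SLOT_VAR2, glsl_count_attribute_slots() reports 3 slots, so the
 * function above returns BITFIELD64_MASK(3) << VARYING_SLOT_VAR2, i.e. bits
 * for slots VAR2, VAR3 and VAR4.  For per-patch variables the shift is
 * relative to VARYING_SLOT_PATCH0 instead.
 */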
60 
61 static bool
62 is_non_generic_patch_var(nir_variable *var)
63 {
64    return var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
65           var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER ||
66           var->data.location == VARYING_SLOT_BOUNDING_BOX0 ||
67           var->data.location == VARYING_SLOT_BOUNDING_BOX1;
68 }
69 
70 static uint8_t
71 get_num_components(nir_variable *var)
72 {
73    if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
74       return 4;
75 
76    return glsl_get_vector_elements(glsl_without_array(var->type));
77 }
78 
79 static void
80 tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
81 {
82    nir_foreach_function_impl(impl, shader) {
83       nir_foreach_block(block, impl) {
84          nir_foreach_instr(instr, block) {
85             if (instr->type != nir_instr_type_intrinsic)
86                continue;
87 
88             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
89             if (intrin->intrinsic != nir_intrinsic_load_deref)
90                continue;
91 
92             nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
93             if (!nir_deref_mode_is(deref, nir_var_shader_out))
94                continue;
95 
96             nir_variable *var = nir_deref_instr_get_variable(deref);
97             for (unsigned i = 0; i < get_num_components(var); i++) {
98                if (var->data.patch) {
99                   if (is_non_generic_patch_var(var))
100                      continue;
101 
102                   patches_read[var->data.location_frac + i] |=
103                      get_variable_io_mask(var, shader->info.stage);
104                } else {
105                   read[var->data.location_frac + i] |=
106                      get_variable_io_mask(var, shader->info.stage);
107                }
108             }
109          }
110       }
111    }
112 }
113 
114 /**
115  * Helper for removing unused shader I/O variables, by demoting them to global
116  * variables (which may then be removed by dead code elimination).
117  *
118  * Example usage is:
119  *
120  * progress = nir_remove_unused_io_vars(producer, nir_var_shader_out,
121  *                                      read, patches_read) ||
122  *                                      progress;
123  *
124  * Each "used" argument should be an array of 4 uint64_t bitfields (built
125  * from VARYING_BIT_* values), one per .location_frac.  Note that for vector
126  * variables, only the first channel (.location_frac) is examined when
127  * deciding if the variable is used!
128  */
129 bool
130 nir_remove_unused_io_vars(nir_shader *shader,
131                           nir_variable_mode mode,
132                           uint64_t *used_by_other_stage,
133                           uint64_t *used_by_other_stage_patches)
134 {
135    bool progress = false;
136    uint64_t *used;
137 
138    assert(mode == nir_var_shader_in || mode == nir_var_shader_out);
139 
140    nir_foreach_variable_with_modes_safe(var, shader, mode) {
141       if (var->data.patch)
142          used = used_by_other_stage_patches;
143       else
144          used = used_by_other_stage;
145 
146       if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
147          if (shader->info.stage != MESA_SHADER_MESH || var->data.location != VARYING_SLOT_PRIMITIVE_ID)
148             continue;
149 
150       if (var->data.always_active_io)
151          continue;
152 
153       if (var->data.explicit_xfb_buffer)
154          continue;
155 
156       uint64_t other_stage = used[var->data.location_frac];
157 
158       if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
159          /* This one is invalid, make it a global variable instead */
160          if (shader->info.stage == MESA_SHADER_MESH &&
161              (shader->info.outputs_read & BITFIELD64_BIT(var->data.location)))
162             var->data.mode = nir_var_mem_shared;
163          else
164             var->data.mode = nir_var_shader_temp;
165          var->data.location = 0;
166 
167          progress = true;
168       }
169    }
170 
171    nir_function_impl *impl = nir_shader_get_entrypoint(shader);
172    if (progress) {
173       nir_metadata_preserve(impl, nir_metadata_dominance |
174                                      nir_metadata_block_index);
175       nir_fixup_deref_modes(shader);
176    } else {
177       nir_metadata_preserve(impl, nir_metadata_all);
178    }
179 
180    return progress;
181 }
182 
183 bool
184 nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
185 {
186    assert(producer->info.stage != MESA_SHADER_FRAGMENT);
187    assert(consumer->info.stage != MESA_SHADER_VERTEX);
188 
189    uint64_t read[4] = { 0 }, written[4] = { 0 };
190    uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };
191 
192    nir_foreach_shader_out_variable(var, producer) {
193       for (unsigned i = 0; i < get_num_components(var); i++) {
194          if (var->data.patch) {
195             if (is_non_generic_patch_var(var))
196                continue;
197 
198             patches_written[var->data.location_frac + i] |=
199                get_variable_io_mask(var, producer->info.stage);
200          } else {
201             written[var->data.location_frac + i] |=
202                get_variable_io_mask(var, producer->info.stage);
203          }
204       }
205    }
206 
207    nir_foreach_shader_in_variable(var, consumer) {
208       for (unsigned i = 0; i < get_num_components(var); i++) {
209          if (var->data.patch) {
210             if (is_non_generic_patch_var(var))
211                continue;
212 
213             patches_read[var->data.location_frac + i] |=
214                get_variable_io_mask(var, consumer->info.stage);
215          } else {
216             read[var->data.location_frac + i] |=
217                get_variable_io_mask(var, consumer->info.stage);
218          }
219       }
220    }
221 
222    /* Each TCS invocation can read data written by other TCS invocations,
223     * so even if the outputs are not used by the TES we must also make
224     * sure they are not read by the TCS before demoting them to globals.
225     */
226    if (producer->info.stage == MESA_SHADER_TESS_CTRL)
227       tcs_add_output_reads(producer, read, patches_read);
228 
229    bool progress = false;
230    progress = nir_remove_unused_io_vars(producer, nir_var_shader_out, read,
231                                         patches_read);
232 
233    progress = nir_remove_unused_io_vars(consumer, nir_var_shader_in, written,
234                                         patches_written) ||
235               progress;
236 
237    return progress;
238 }
239 
240 static uint8_t
241 get_interp_type(nir_variable *var, const struct glsl_type *type,
242                 bool default_to_smooth_interp)
243 {
244    if (var->data.per_primitive)
245       return INTERP_MODE_NONE;
246    if (glsl_type_is_integer(type))
247       return INTERP_MODE_FLAT;
248    else if (var->data.interpolation != INTERP_MODE_NONE)
249       return var->data.interpolation;
250    else if (default_to_smooth_interp)
251       return INTERP_MODE_SMOOTH;
252    else
253       return INTERP_MODE_NONE;
254 }
255 
256 #define INTERPOLATE_LOC_SAMPLE   0
257 #define INTERPOLATE_LOC_CENTROID 1
258 #define INTERPOLATE_LOC_CENTER   2
259 
260 static uint8_t
261 get_interp_loc(nir_variable *var)
262 {
263    if (var->data.sample)
264       return INTERPOLATE_LOC_SAMPLE;
265    else if (var->data.centroid)
266       return INTERPOLATE_LOC_CENTROID;
267    else
268       return INTERPOLATE_LOC_CENTER;
269 }
270 
271 static bool
272 is_packing_supported_for_type(const struct glsl_type *type)
273 {
274    /* We ignore complex types such as arrays, matrices, structs and bit sizes
275     * other than 32-bit. All other vector types should have been split into
276     * scalar variables by the lower_io_to_scalar pass. The only exception
277     * should be OpenGL xfb varyings.
278     * TODO: add support for more complex types?
279     */
280    return glsl_type_is_scalar(type) && glsl_type_is_32bit(type);
281 }
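
/* Illustrative examples of the rule above: `float` and `int` (scalar, 32-bit)
 * qualify for packing, while `vec2` (not scalar), `double` (not 32-bit) and
 * `float[4]` (an array) do not and are left at their existing locations by
 * the packing passes below.
 */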
282 
283 struct assigned_comps {
284    uint8_t comps;
285    uint8_t interp_type;
286    uint8_t interp_loc;
287    bool is_32bit;
288    bool is_mediump;
289    bool is_per_primitive;
290 };
291 
292 /* Packing arrays and dual-slot varyings is difficult, so to avoid complex
293  * algorithms this function just assigns them their existing location for now.
294  * TODO: allow better packing of complex types.
295  */
296 static void
297 get_unmoveable_components_masks(nir_shader *shader,
298                                 nir_variable_mode mode,
299                                 struct assigned_comps *comps,
300                                 gl_shader_stage stage,
301                                 bool default_to_smooth_interp)
302 {
303    nir_foreach_variable_with_modes_safe(var, shader, mode) {
304       assert(var->data.location >= 0);
305 
306       /* Only remap things that aren't built-ins. */
307       if (var->data.location >= VARYING_SLOT_VAR0 &&
308           var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
309 
310          const struct glsl_type *type = var->type;
311          if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
312             assert(glsl_type_is_array(type));
313             type = glsl_get_array_element(type);
314          }
315 
316          /* If we can pack this varying then don't mark the components as
317           * used.
318           */
319          if (is_packing_supported_for_type(type) &&
320              !var->data.always_active_io)
321             continue;
322 
323          unsigned location = var->data.location - VARYING_SLOT_VAR0;
324 
325          unsigned elements =
326             glsl_type_is_vector_or_scalar(glsl_without_array(type)) ? glsl_get_vector_elements(glsl_without_array(type)) : 4;
327 
328          bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
329          unsigned slots = glsl_count_attribute_slots(type, false);
330          unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
331          unsigned comps_slot2 = 0;
332          for (unsigned i = 0; i < slots; i++) {
333             if (dual_slot) {
334                if (i & 1) {
335                   comps[location + i].comps |= ((1 << comps_slot2) - 1);
336                } else {
337                   unsigned num_comps = 4 - var->data.location_frac;
338                   comps_slot2 = (elements * dmul) - num_comps;
339 
340                   /* Assume ARB_enhanced_layouts packing rules for doubles */
341                   assert(var->data.location_frac == 0 ||
342                          var->data.location_frac == 2);
343                   assert(comps_slot2 <= 4);
344 
345                   comps[location + i].comps |=
346                      ((1 << num_comps) - 1) << var->data.location_frac;
347                }
348             } else {
349                comps[location + i].comps |=
350                   ((1 << (elements * dmul)) - 1) << var->data.location_frac;
351             }
352 
353             comps[location + i].interp_type =
354                get_interp_type(var, type, default_to_smooth_interp);
355             comps[location + i].interp_loc = get_interp_loc(var);
356             comps[location + i].is_32bit =
357                glsl_type_is_32bit(glsl_without_array(type));
358             comps[location + i].is_mediump =
359                var->data.precision == GLSL_PRECISION_MEDIUM ||
360                var->data.precision == GLSL_PRECISION_LOW;
361             comps[location + i].is_per_primitive = var->data.per_primitive;
362          }
363       }
364    }
365 }
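
/* Worked example (illustrative) of the dual-slot handling above: a `dvec3`
 * with location_frac == 0 has elements == 3 and dmul == 2, so the first slot
 * is marked with num_comps == 4 components (0xf), comps_slot2 becomes
 * 3 * 2 - 4 == 2, and the second slot is marked with components 0-1 (0x3).
 */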
366 
367 struct varying_loc {
368    uint8_t component;
369    uint32_t location;
370 };
371 
372 static void
373 mark_all_used_slots(nir_variable *var, uint64_t *slots_used,
374                     uint64_t slots_used_mask, unsigned num_slots)
375 {
376    unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
377 
378    slots_used[var->data.patch ? 1 : 0] |= slots_used_mask &
379                                           BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
380 }
381 
382 static void
383 mark_used_slot(nir_variable *var, uint64_t *slots_used, unsigned offset)
384 {
385    unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
386 
387    slots_used[var->data.patch ? 1 : 0] |=
388       BITFIELD64_BIT(var->data.location - loc_offset + offset);
389 }
390 
391 static void
392 remap_slots_and_components(nir_shader *shader, nir_variable_mode mode,
393                            struct varying_loc (*remap)[4],
394                            uint64_t *slots_used, uint64_t *out_slots_read,
395                            uint32_t *p_slots_used, uint32_t *p_out_slots_read)
396 {
397    const gl_shader_stage stage = shader->info.stage;
398    uint64_t out_slots_read_tmp[2] = { 0 };
399    uint64_t slots_used_tmp[2] = { 0 };
400 
401    /* We don't touch builtins so just copy the bitmask */
402    slots_used_tmp[0] = *slots_used & BITFIELD64_RANGE(0, VARYING_SLOT_VAR0);
403 
404    nir_foreach_variable_with_modes(var, shader, mode) {
405       assert(var->data.location >= 0);
406 
407       /* Only remap things that aren't built-ins */
408       if (var->data.location >= VARYING_SLOT_VAR0 &&
409           var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
410 
411          const struct glsl_type *type = var->type;
412          if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
413             assert(glsl_type_is_array(type));
414             type = glsl_get_array_element(type);
415          }
416 
417          unsigned num_slots = glsl_count_attribute_slots(type, false);
418          bool used_across_stages = false;
419          bool outputs_read = false;
420 
421          unsigned location = var->data.location - VARYING_SLOT_VAR0;
422          struct varying_loc *new_loc = &remap[location][var->data.location_frac];
423 
424          unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
425          uint64_t used = var->data.patch ? *p_slots_used : *slots_used;
426          uint64_t outs_used =
427             var->data.patch ? *p_out_slots_read : *out_slots_read;
428          uint64_t slots =
429             BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
430 
431          if (slots & used)
432             used_across_stages = true;
433 
434          if (slots & outs_used)
435             outputs_read = true;
436 
437          if (new_loc->location) {
438             var->data.location = new_loc->location;
439             var->data.location_frac = new_loc->component;
440          }
441 
442          if (var->data.always_active_io) {
443             /* We can't apply link-time optimisations (specifically array
444              * splitting) to these, so we need to copy the existing mask;
445              * otherwise we will mess up the mask for things like partially
446              * marked arrays.
447              */
448             if (used_across_stages)
449                mark_all_used_slots(var, slots_used_tmp, used, num_slots);
450 
451             if (outputs_read) {
452                mark_all_used_slots(var, out_slots_read_tmp, outs_used,
453                                    num_slots);
454             }
455          } else {
456             for (unsigned i = 0; i < num_slots; i++) {
457                if (used_across_stages)
458                   mark_used_slot(var, slots_used_tmp, i);
459 
460                if (outputs_read)
461                   mark_used_slot(var, out_slots_read_tmp, i);
462             }
463          }
464       }
465    }
466 
467    *slots_used = slots_used_tmp[0];
468    *out_slots_read = out_slots_read_tmp[0];
469    *p_slots_used = slots_used_tmp[1];
470    *p_out_slots_read = out_slots_read_tmp[1];
471 }
472 
473 struct varying_component {
474    nir_variable *var;
475    uint8_t interp_type;
476    uint8_t interp_loc;
477    bool is_32bit;
478    bool is_patch;
479    bool is_per_primitive;
480    bool is_mediump;
481    bool is_intra_stage_only;
482    bool initialised;
483 };
484 
485 static int
486 cmp_varying_component(const void *comp1_v, const void *comp2_v)
487 {
488    struct varying_component *comp1 = (struct varying_component *)comp1_v;
489    struct varying_component *comp2 = (struct varying_component *)comp2_v;
490 
491    /* We want patches to be ordered at the end of the array */
492    if (comp1->is_patch != comp2->is_patch)
493       return comp1->is_patch ? 1 : -1;
494 
495    /* Sort per-primitive outputs after per-vertex ones to allow
496     * better compaction when they are mixed in the shader's source.
497     */
498    if (comp1->is_per_primitive != comp2->is_per_primitive)
499       return comp1->is_per_primitive ? 1 : -1;
500 
501    /* We want to try to group together TCS outputs that are only read by other
502     * TCS invocations and not consumed by the following stage.
503     */
504    if (comp1->is_intra_stage_only != comp2->is_intra_stage_only)
505       return comp1->is_intra_stage_only ? 1 : -1;
506 
507    /* Group mediump varyings together. */
508    if (comp1->is_mediump != comp2->is_mediump)
509       return comp1->is_mediump ? 1 : -1;
510 
511    /* We can only pack varyings with matching interpolation types so group
512     * them together.
513     */
514    if (comp1->interp_type != comp2->interp_type)
515       return comp1->interp_type - comp2->interp_type;
516 
517    /* Interpolation loc must match also. */
518    if (comp1->interp_loc != comp2->interp_loc)
519       return comp1->interp_loc - comp2->interp_loc;
520 
521    /* If everything else matches just use the original location to sort */
522    const struct nir_variable_data *const data1 = &comp1->var->data;
523    const struct nir_variable_data *const data2 = &comp2->var->data;
524    if (data1->location != data2->location)
525       return data1->location - data2->location;
526    return (int)data1->location_frac - (int)data2->location_frac;
527 }
528 
529 static void
530 gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
531                               struct varying_component **varying_comp_info,
532                               unsigned *varying_comp_info_size,
533                               bool default_to_smooth_interp)
534 {
535    unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = { { 0 } };
536    unsigned num_of_comps_to_pack = 0;
537 
538    /* Count the number of varyings that can be packed and create a mapping
539     * of those varyings to the array we will pass to qsort.
540     */
541    nir_foreach_shader_out_variable(var, producer) {
542 
543       /* Only remap things that aren't builtins. */
544       if (var->data.location >= VARYING_SLOT_VAR0 &&
545           var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
546 
547          /* We can't repack xfb varyings. */
548          if (var->data.always_active_io)
549             continue;
550 
551          const struct glsl_type *type = var->type;
552          if (nir_is_arrayed_io(var, producer->info.stage) || var->data.per_view) {
553             assert(glsl_type_is_array(type));
554             type = glsl_get_array_element(type);
555          }
556 
557          if (!is_packing_supported_for_type(type))
558             continue;
559 
560          unsigned loc = var->data.location - VARYING_SLOT_VAR0;
561          store_varying_info_idx[loc][var->data.location_frac] =
562             ++num_of_comps_to_pack;
563       }
564    }
565 
566    *varying_comp_info_size = num_of_comps_to_pack;
567    *varying_comp_info = rzalloc_array(NULL, struct varying_component,
568                                       num_of_comps_to_pack);
569 
570    nir_function_impl *impl = nir_shader_get_entrypoint(consumer);
571 
572    /* Walk over the shader and populate the varying component info array */
573    nir_foreach_block(block, impl) {
574       nir_foreach_instr(instr, block) {
575          if (instr->type != nir_instr_type_intrinsic)
576             continue;
577 
578          nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
579          if (intr->intrinsic != nir_intrinsic_load_deref &&
580              intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
581              intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
582              intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
583              intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
584             continue;
585 
586          nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
587          if (!nir_deref_mode_is(deref, nir_var_shader_in))
588             continue;
589 
590          /* We only remap things that aren't builtins. */
591          nir_variable *in_var = nir_deref_instr_get_variable(deref);
592          if (in_var->data.location < VARYING_SLOT_VAR0)
593             continue;
594 
595          /* Do not remap per-vertex shader inputs because they are 3-element
596           * arrays and this isn't supported.
597           */
598          if (in_var->data.per_vertex)
599             continue;
600 
601          unsigned location = in_var->data.location - VARYING_SLOT_VAR0;
602          if (location >= MAX_VARYINGS_INCL_PATCH)
603             continue;
604 
605          unsigned var_info_idx =
606             store_varying_info_idx[location][in_var->data.location_frac];
607          if (!var_info_idx)
608             continue;
609 
610          struct varying_component *vc_info =
611             &(*varying_comp_info)[var_info_idx - 1];
612 
613          if (!vc_info->initialised) {
614             const struct glsl_type *type = in_var->type;
615             if (nir_is_arrayed_io(in_var, consumer->info.stage) ||
616                 in_var->data.per_view) {
617                assert(glsl_type_is_array(type));
618                type = glsl_get_array_element(type);
619             }
620 
621             vc_info->var = in_var;
622             vc_info->interp_type =
623                get_interp_type(in_var, type, default_to_smooth_interp);
624             vc_info->interp_loc = get_interp_loc(in_var);
625             vc_info->is_32bit = glsl_type_is_32bit(type);
626             vc_info->is_patch = in_var->data.patch;
627             vc_info->is_per_primitive = in_var->data.per_primitive;
628             vc_info->is_mediump = !producer->options->linker_ignore_precision &&
629                                   (in_var->data.precision == GLSL_PRECISION_MEDIUM ||
630                                    in_var->data.precision == GLSL_PRECISION_LOW);
631             vc_info->is_intra_stage_only = false;
632             vc_info->initialised = true;
633          }
634       }
635    }
636 
637    /* Walk over the shader and populate the varying component info array
638     * for varyings which are read by other TCS instances but are not consumed
639     * by the TES.
640     */
641    if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
642       impl = nir_shader_get_entrypoint(producer);
643 
644       nir_foreach_block(block, impl) {
645          nir_foreach_instr(instr, block) {
646             if (instr->type != nir_instr_type_intrinsic)
647                continue;
648 
649             nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
650             if (intr->intrinsic != nir_intrinsic_load_deref)
651                continue;
652 
653             nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
654             if (!nir_deref_mode_is(deref, nir_var_shader_out))
655                continue;
656 
657             /* We only remap things that aren't builtins. */
658             nir_variable *out_var = nir_deref_instr_get_variable(deref);
659             if (out_var->data.location < VARYING_SLOT_VAR0)
660                continue;
661 
662             unsigned location = out_var->data.location - VARYING_SLOT_VAR0;
663             if (location >= MAX_VARYINGS_INCL_PATCH)
664                continue;
665 
666             unsigned var_info_idx =
667                store_varying_info_idx[location][out_var->data.location_frac];
668             if (!var_info_idx) {
669                /* Something went wrong, the shader interfaces didn't match, so
670                 * abandon packing. This can happen for example when the
671                 * inputs are scalars but the outputs are struct members.
672                 */
673                *varying_comp_info_size = 0;
674                break;
675             }
676 
677             struct varying_component *vc_info =
678                &(*varying_comp_info)[var_info_idx - 1];
679 
680             if (!vc_info->initialised) {
681                const struct glsl_type *type = out_var->type;
682                if (nir_is_arrayed_io(out_var, producer->info.stage)) {
683                   assert(glsl_type_is_array(type));
684                   type = glsl_get_array_element(type);
685                }
686 
687                vc_info->var = out_var;
688                vc_info->interp_type =
689                   get_interp_type(out_var, type, default_to_smooth_interp);
690                vc_info->interp_loc = get_interp_loc(out_var);
691                vc_info->is_32bit = glsl_type_is_32bit(type);
692                vc_info->is_patch = out_var->data.patch;
693                vc_info->is_per_primitive = out_var->data.per_primitive;
694                vc_info->is_mediump = !producer->options->linker_ignore_precision &&
695                                      (out_var->data.precision == GLSL_PRECISION_MEDIUM ||
696                                       out_var->data.precision == GLSL_PRECISION_LOW);
697                vc_info->is_intra_stage_only = true;
698                vc_info->initialised = true;
699             }
700          }
701       }
702    }
703 
704    for (unsigned i = 0; i < *varying_comp_info_size; i++) {
705       struct varying_component *vc_info = &(*varying_comp_info)[i];
706       if (!vc_info->initialised) {
707          /* Something went wrong, the shader interfaces didn't match, so
708           * abandon packing. This can happen for example when the outputs are
709           * scalars but the inputs are struct members.
710           */
711          *varying_comp_info_size = 0;
712          break;
713       }
714    }
715 }
716 
717 static bool
718 allow_pack_interp_type(nir_io_options options, int type)
719 {
720    switch (type) {
721    case INTERP_MODE_NONE:
722    case INTERP_MODE_SMOOTH:
723    case INTERP_MODE_NOPERSPECTIVE:
724       return options & nir_io_has_flexible_input_interpolation_except_flat;
725    default:
726       return false;
727    }
728 }
729 
730 static void
731    assign_remap_locations(struct varying_loc (*remap)[4],
732                           struct assigned_comps *assigned_comps,
733                           struct varying_component *info,
734                           unsigned *cursor, unsigned *comp,
735                           unsigned max_location,
736                           nir_io_options options)
737 {
738    unsigned tmp_cursor = *cursor;
739    unsigned tmp_comp = *comp;
740 
741    for (; tmp_cursor < max_location; tmp_cursor++) {
742 
743       if (assigned_comps[tmp_cursor].comps) {
744          /* Don't pack per-primitive and per-vertex varyings together. */
745          if (assigned_comps[tmp_cursor].is_per_primitive != info->is_per_primitive) {
746             tmp_comp = 0;
747             continue;
748          }
749 
750          /* We can only pack varyings with matching precision. */
751          if (assigned_comps[tmp_cursor].is_mediump != info->is_mediump) {
752             tmp_comp = 0;
753             continue;
754          }
755 
756          /* Varyings with mismatched interpolation types can only be packed
757           * together if the driver supports flexible interpolation for both.
758           */
759          if (assigned_comps[tmp_cursor].interp_type != info->interp_type &&
760              (!allow_pack_interp_type(options, assigned_comps[tmp_cursor].interp_type) ||
761               !allow_pack_interp_type(options, info->interp_type))) {
762             tmp_comp = 0;
763             continue;
764          }
765 
766          /* Varyings with mismatched interpolation locations can only be
767           * packed together if the driver supports flexible interpolation.
768           */
769          if (assigned_comps[tmp_cursor].interp_loc != info->interp_loc &&
770              !(options & nir_io_has_flexible_input_interpolation_except_flat)) {
771             tmp_comp = 0;
772             continue;
773          }
774 
775          /* We can only pack varyings with matching types, and the current
776           * algorithm only supports packing 32-bit.
777           */
778          if (!assigned_comps[tmp_cursor].is_32bit) {
779             tmp_comp = 0;
780             continue;
781          }
782 
783          while (tmp_comp < 4 &&
784                 (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) {
785             tmp_comp++;
786          }
787       }
788 
789       if (tmp_comp == 4) {
790          tmp_comp = 0;
791          continue;
792       }
793 
794       unsigned location = info->var->data.location - VARYING_SLOT_VAR0;
795 
796       /* Once we have assigned a location mark it as used */
797       assigned_comps[tmp_cursor].comps |= (1 << tmp_comp);
798       assigned_comps[tmp_cursor].interp_type = info->interp_type;
799       assigned_comps[tmp_cursor].interp_loc = info->interp_loc;
800       assigned_comps[tmp_cursor].is_32bit = info->is_32bit;
801       assigned_comps[tmp_cursor].is_mediump = info->is_mediump;
802       assigned_comps[tmp_cursor].is_per_primitive = info->is_per_primitive;
803 
804       /* Assign remap location */
805       remap[location][info->var->data.location_frac].component = tmp_comp++;
806       remap[location][info->var->data.location_frac].location =
807          tmp_cursor + VARYING_SLOT_VAR0;
808 
809       break;
810    }
811 
812    *cursor = tmp_cursor;
813    *comp = tmp_comp;
814 }
815 
816 /* If there are empty components in the slot compact the remaining components
817  * as close to component 0 as possible. This will make it easier to fill the
818  * empty components with components from a different slot in a following pass.
819  */
820 static void
821 compact_components(nir_shader *producer, nir_shader *consumer,
822                    struct assigned_comps *assigned_comps,
823                    bool default_to_smooth_interp)
824 {
825    struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = { { { 0 }, { 0 } } };
826    struct varying_component *varying_comp_info;
827    unsigned varying_comp_info_size;
828 
829    /* Gather varying component info */
830    gather_varying_component_info(producer, consumer, &varying_comp_info,
831                                  &varying_comp_info_size,
832                                  default_to_smooth_interp);
833 
834    /* Sort varying components. */
835    qsort(varying_comp_info, varying_comp_info_size,
836          sizeof(struct varying_component), cmp_varying_component);
837 
838    unsigned cursor = 0;
839    unsigned comp = 0;
840 
841    /* Set the remap array based on the sorted components */
842    for (unsigned i = 0; i < varying_comp_info_size; i++) {
843       struct varying_component *info = &varying_comp_info[i];
844 
845       assert(info->is_patch || cursor < MAX_VARYING);
846       if (info->is_patch) {
847          /* The list should be sorted with all non-patch inputs first followed
848           * by patch inputs.  When we hit our first patch input, we need to
849           * reset the cursor to MAX_VARYING so we put them in the right slot.
850           */
851          if (cursor < MAX_VARYING) {
852             cursor = MAX_VARYING;
853             comp = 0;
854          }
855 
856          assign_remap_locations(remap, assigned_comps, info,
857                                 &cursor, &comp, MAX_VARYINGS_INCL_PATCH,
858                                 consumer->options->io_options);
859       } else {
860          assign_remap_locations(remap, assigned_comps, info,
861                                 &cursor, &comp, MAX_VARYING,
862                                 consumer->options->io_options);
863 
864          /* Check if we failed to assign a remap location. This can happen if
865           * for example there are a bunch of unmovable components with
866           * mismatching interpolation types causing us to skip over locations
867           * that would have been useful for packing later components.
868           * The solution is to iterate over the locations again (this should
869           * happen very rarely in practice).
870           */
871          if (cursor == MAX_VARYING) {
872             cursor = 0;
873             comp = 0;
874             assign_remap_locations(remap, assigned_comps, info,
875                                    &cursor, &comp, MAX_VARYING,
876                                    consumer->options->io_options);
877          }
878       }
879    }
880 
881    ralloc_free(varying_comp_info);
882 
883    uint64_t zero = 0;
884    uint32_t zero32 = 0;
885    remap_slots_and_components(consumer, nir_var_shader_in, remap,
886                               &consumer->info.inputs_read, &zero,
887                               &consumer->info.patch_inputs_read, &zero32);
888    remap_slots_and_components(producer, nir_var_shader_out, remap,
889                               &producer->info.outputs_written,
890                               &producer->info.outputs_read,
891                               &producer->info.patch_outputs_written,
892                               &producer->info.patch_outputs_read);
893 }
894 
895 /* We assume that this has been called more-or-less directly after
896  * remove_unused_varyings.  At this point, all of the varyings that we
897  * aren't going to be using have been completely removed and the
898  * inputs_read and outputs_written fields in nir_shader_info reflect
899  * this.  Therefore, the total set of valid slots is the OR of the two
900  * sets of varyings;  this accounts for varyings which one side may need
901  * to read/write even if the other doesn't.  This can happen if, for
902  * instance, an array is used indirectly from one side causing it to be
903  * unsplittable but directly from the other.
904  */
905 void
906 nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
907                      bool default_to_smooth_interp)
908 {
909    assert(producer->info.stage != MESA_SHADER_FRAGMENT);
910    assert(consumer->info.stage != MESA_SHADER_VERTEX);
911 
912    struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = { { 0 } };
913 
914    get_unmoveable_components_masks(producer, nir_var_shader_out,
915                                    assigned_comps,
916                                    producer->info.stage,
917                                    default_to_smooth_interp);
918    get_unmoveable_components_masks(consumer, nir_var_shader_in,
919                                    assigned_comps,
920                                    consumer->info.stage,
921                                    default_to_smooth_interp);
922 
923    compact_components(producer, consumer, assigned_comps,
924                       default_to_smooth_interp);
925 }
926 
927 /*
928  * Mark XFB varyings as always_active_io in the consumer so the linking opts
929  * don't touch them.
930  */
931 void
932 nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
933 {
934    nir_variable *input_vars[MAX_VARYING][4] = { 0 };
935 
936    nir_foreach_shader_in_variable(var, consumer) {
937       if (var->data.location >= VARYING_SLOT_VAR0 &&
938           var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {
939 
940          unsigned location = var->data.location - VARYING_SLOT_VAR0;
941          input_vars[location][var->data.location_frac] = var;
942       }
943    }
944 
945    nir_foreach_shader_out_variable(var, producer) {
946       if (var->data.location >= VARYING_SLOT_VAR0 &&
947           var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {
948 
949          if (!var->data.always_active_io)
950             continue;
951 
952          unsigned location = var->data.location - VARYING_SLOT_VAR0;
953          if (input_vars[location][var->data.location_frac]) {
954             input_vars[location][var->data.location_frac]->data.always_active_io = true;
955          }
956       }
957    }
958 }
959 
960 static bool
961 does_varying_match(nir_variable *out_var, nir_variable *in_var)
962 {
963    return in_var->data.location == out_var->data.location &&
964           in_var->data.location_frac == out_var->data.location_frac &&
965           in_var->type == out_var->type;
966 }
967 
968 static nir_variable *
969 get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
970 {
971    nir_foreach_shader_in_variable(var, consumer) {
972       if (does_varying_match(out_var, var))
973          return var;
974    }
975 
976    return NULL;
977 }
978 
979 static bool
980 can_replace_varying(nir_variable *out_var)
981 {
982    /* Skip types that require more complex handling.
983     * TODO: add support for these types.
984     */
985    if (glsl_type_is_array(out_var->type) ||
986        glsl_type_is_dual_slot(out_var->type) ||
987        glsl_type_is_matrix(out_var->type) ||
988        glsl_type_is_struct_or_ifc(out_var->type))
989       return false;
990 
991    /* Limit this pass to scalars for now to keep things simple. Most varyings
992     * should have been lowered to scalars at this point anyway.
993     */
994    if (!glsl_type_is_scalar(out_var->type))
995       return false;
996 
997    if (out_var->data.location < VARYING_SLOT_VAR0 ||
998        out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
999       return false;
1000 
1001    return true;
1002 }
1003 
1004 static bool
1005 replace_varying_input_by_constant_load(nir_shader *shader,
1006                                        nir_intrinsic_instr *store_intr)
1007 {
1008    nir_function_impl *impl = nir_shader_get_entrypoint(shader);
1009 
1010    nir_builder b = nir_builder_create(impl);
1011 
1012    nir_variable *out_var = nir_intrinsic_get_var(store_intr, 0);
1013 
1014    bool progress = false;
1015    nir_foreach_block(block, impl) {
1016       nir_foreach_instr(instr, block) {
1017          if (instr->type != nir_instr_type_intrinsic)
1018             continue;
1019 
1020          nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1021          if (intr->intrinsic != nir_intrinsic_load_deref)
1022             continue;
1023 
1024          nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
1025          if (!nir_deref_mode_is(in_deref, nir_var_shader_in))
1026             continue;
1027 
1028          nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
1029 
1030          if (!does_varying_match(out_var, in_var))
1031             continue;
1032 
1033          b.cursor = nir_before_instr(instr);
1034 
1035          nir_load_const_instr *out_const =
1036             nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);
1037 
1038          /* Add new const to replace the input */
1039          nir_def *nconst = nir_build_imm(&b, store_intr->num_components,
1040                                          intr->def.bit_size,
1041                                          out_const->value);
1042 
1043          nir_def_rewrite_uses(&intr->def, nconst);
1044 
1045          progress = true;
1046       }
1047    }
1048 
1049    return progress;
1050 }
1051 
1052 static bool
1053 replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
1054                         nir_intrinsic_instr *dup_store_intr)
1055 {
1056    assert(input_var);
1057 
1058    nir_function_impl *impl = nir_shader_get_entrypoint(shader);
1059 
1060    nir_builder b = nir_builder_create(impl);
1061 
1062    nir_variable *dup_out_var = nir_intrinsic_get_var(dup_store_intr, 0);
1063 
1064    bool progress = false;
1065    nir_foreach_block(block, impl) {
1066       nir_foreach_instr(instr, block) {
1067          if (instr->type != nir_instr_type_intrinsic)
1068             continue;
1069 
1070          nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1071          if (intr->intrinsic != nir_intrinsic_load_deref)
1072             continue;
1073 
1074          nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
1075          if (!nir_deref_mode_is(in_deref, nir_var_shader_in))
1076             continue;
1077 
1078          nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
1079 
1080          if (!does_varying_match(dup_out_var, in_var) ||
1081              in_var->data.interpolation != input_var->data.interpolation ||
1082              get_interp_loc(in_var) != get_interp_loc(input_var) ||
1083              in_var->data.per_vertex)
1084             continue;
1085 
1086          b.cursor = nir_before_instr(instr);
1087 
1088          nir_def *load = nir_load_var(&b, input_var);
1089          nir_def_rewrite_uses(&intr->def, load);
1090 
1091          progress = true;
1092       }
1093    }
1094 
1095    return progress;
1096 }
1097 
1098 static bool
1099 is_direct_uniform_load(nir_def *def, nir_scalar *s)
1100 {
1101    /* def is guaranteed to be scalar because can_replace_varying() filters out the vector case. */
1102    assert(def->num_components == 1);
1103 
1104    /* The uniform load may hide behind a move instruction that converts a
1105     * vector to a scalar:
1106     *
1107     *     vec1 32 ssa_1 = deref_var &color (uniform vec3)
1108     *     vec3 32 ssa_2 = intrinsic load_deref (ssa_1) (0)
1109     *     vec1 32 ssa_3 = mov ssa_2.x
1110     *     vec1 32 ssa_4 = deref_var &color_out (shader_out float)
1111     *     intrinsic store_deref (ssa_4, ssa_3) (1, 0)
1112     */
1113    *s = nir_scalar_resolved(def, 0);
1114 
1115    nir_def *ssa = s->def;
1116    if (ssa->parent_instr->type != nir_instr_type_intrinsic)
1117       return false;
1118 
1119    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(ssa->parent_instr);
1120    if (intr->intrinsic != nir_intrinsic_load_deref)
1121       return false;
1122 
1123    nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
1124    /* TODO: support nir_var_mem_ubo. */
1125    if (!nir_deref_mode_is(deref, nir_var_uniform))
1126       return false;
1127 
1128    /* Does not support indirect uniform load. */
1129    return !nir_deref_instr_has_indirect(deref);
1130 }
1131 
1132 /**
1133  * Add a uniform variable from one shader to a different shader.
1134  *
1135  * \param nir     The shader where to add the uniform
1136  * \param uniform The uniform that's declared in another shader.
1137  */
1138 nir_variable *
1139 nir_clone_uniform_variable(nir_shader *nir, nir_variable *uniform, bool spirv)
1140 {
1141    /* Check whether the uniform already exists in the consumer. */
1142    nir_variable *new_var = NULL;
1143    nir_foreach_variable_with_modes(v, nir, uniform->data.mode) {
1144       if ((spirv && uniform->data.mode & nir_var_mem_ubo &&
1145            v->data.binding == uniform->data.binding) ||
1146           (!spirv && !strcmp(uniform->name, v->name))) {
1147          new_var = v;
1148          break;
1149       }
1150    }
1151 
1152    /* Create the variable if it does not exist. */
1153    if (!new_var) {
1154       new_var = nir_variable_clone(uniform, nir);
1155       nir_shader_add_variable(nir, new_var);
1156    }
1157 
1158    return new_var;
1159 }
1160 
1161 nir_deref_instr *
1162 nir_clone_deref_instr(nir_builder *b, nir_variable *var,
1163                       nir_deref_instr *deref)
1164 {
1165    if (deref->deref_type == nir_deref_type_var)
1166       return nir_build_deref_var(b, var);
1167 
1168    nir_deref_instr *parent_deref = nir_deref_instr_parent(deref);
1169    nir_deref_instr *parent = nir_clone_deref_instr(b, var, parent_deref);
1170 
1171    /* Build the array or struct deref instruction.
1172     * The "deref" instruction is guaranteed to be direct (see is_direct_uniform_load()).
1173     */
1174    switch (deref->deref_type) {
1175    case nir_deref_type_array: {
1176       nir_load_const_instr *index =
1177          nir_instr_as_load_const(deref->arr.index.ssa->parent_instr);
1178       return nir_build_deref_array_imm(b, parent, index->value->i64);
1179    }
1180    case nir_deref_type_ptr_as_array: {
1181       nir_load_const_instr *index =
1182          nir_instr_as_load_const(deref->arr.index.ssa->parent_instr);
1183       nir_def *ssa = nir_imm_intN_t(b, index->value->i64,
1184                                     parent->def.bit_size);
1185       return nir_build_deref_ptr_as_array(b, parent, ssa);
1186    }
1187    case nir_deref_type_struct:
1188       return nir_build_deref_struct(b, parent, deref->strct.index);
1189    default:
1190       unreachable("invalid type");
1191       return NULL;
1192    }
1193 }
1194 
1195 static bool
1196 replace_varying_input_by_uniform_load(nir_shader *shader,
1197                                       nir_intrinsic_instr *store_intr,
1198                                       nir_scalar *scalar)
1199 {
1200    nir_function_impl *impl = nir_shader_get_entrypoint(shader);
1201 
1202    nir_builder b = nir_builder_create(impl);
1203 
1204    nir_variable *out_var = nir_intrinsic_get_var(store_intr, 0);
1205 
1206    nir_intrinsic_instr *load = nir_instr_as_intrinsic(scalar->def->parent_instr);
1207    nir_deref_instr *deref = nir_src_as_deref(load->src[0]);
1208    nir_variable *uni_var = nir_deref_instr_get_variable(deref);
1209    uni_var = nir_clone_uniform_variable(shader, uni_var, false);
1210 
1211    bool progress = false;
1212    nir_foreach_block(block, impl) {
1213       nir_foreach_instr(instr, block) {
1214          if (instr->type != nir_instr_type_intrinsic)
1215             continue;
1216 
1217          nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1218          if (intr->intrinsic != nir_intrinsic_load_deref)
1219             continue;
1220 
1221          nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
1222          if (!nir_deref_mode_is(in_deref, nir_var_shader_in))
1223             continue;
1224 
1225          nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
1226 
1227          if (!does_varying_match(out_var, in_var))
1228             continue;
1229 
1230          b.cursor = nir_before_instr(instr);
1231 
1232          /* Clone the deref chain, from the loaded deref back to the variable deref. */
1233          nir_deref_instr *uni_deref = nir_clone_deref_instr(&b, uni_var, deref);
1234          nir_def *uni_def = nir_load_deref(&b, uni_deref);
1235 
1236          /* Add a vector-to-scalar move if the uniform is a vector. */
1237          if (uni_def->num_components > 1) {
1238             nir_alu_src src = { 0 };
1239             src.src = nir_src_for_ssa(uni_def);
1240             src.swizzle[0] = scalar->comp;
1241             uni_def = nir_mov_alu(&b, src, 1);
1242          }
1243 
1244          /* Replace load input with load uniform. */
1245          nir_def_rewrite_uses(&intr->def, uni_def);
1246 
1247          progress = true;
1248       }
1249    }
1250 
1251    return progress;
1252 }
1253 
1254 /* The GLSL ES 3.20 spec says:
1255  *
1256  * "The precision of a vertex output does not need to match the precision of
1257  * the corresponding fragment input. The minimum precision at which vertex
1258  * outputs are interpolated is the minimum of the vertex output precision and
1259  * the fragment input precision, with the exception that for highp,
1260  * implementations do not have to support full IEEE 754 precision." (9.1 "Input
1261  * Output Matching by Name in Linked Programs")
1262  *
1263  * To implement this, when linking shaders we will take the minimum precision
1264  * qualifier (allowing drivers to interpolate at lower precision). For
1265  * input/output between non-fragment stages (e.g. VERTEX to GEOMETRY), the spec
1266  * requires we use the *last* specified precision if there is a conflict.
1267  *
1268  * Precisions are ordered as (NONE, HIGH, MEDIUM, LOW). If either precision is
1269  * NONE, we'll return the other precision, since there is no conflict.
1270  * Otherwise for fragment interpolation, we'll pick the smallest of (HIGH,
1271  * MEDIUM, LOW) by picking the maximum of the raw values - note the ordering is
1272  * "backwards". For non-fragment stages, we'll pick the latter precision to
1273  * comply with the spec. (Note that the order matters.)
1274  *
1275  * For streamout, "Variables declared with lowp or mediump precision are
1276  * promoted to highp before being written." (12.2 "Transform Feedback", p. 341
1277  * of the OpenGL ES 3.2 specification). So drivers should promote them for
1278  * the transform feedback memory store, but not for the output store.
1279  */
1280 
1281 static unsigned
1282 nir_link_precision(unsigned producer, unsigned consumer, bool fs)
1283 {
1284    if (producer == GLSL_PRECISION_NONE)
1285       return consumer;
1286    else if (consumer == GLSL_PRECISION_NONE)
1287       return producer;
1288    else
1289       return fs ? MAX2(producer, consumer) : consumer;
1290 }
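
/* Worked example (illustrative): with the enum ordering NONE < HIGH < MEDIUM
 * < LOW described above, linking a mediump vertex output with a highp
 * fragment input yields MAX2(GLSL_PRECISION_MEDIUM, GLSL_PRECISION_HIGH),
 * i.e. mediump, the lower of the two precisions.  For a non-fragment
 * consumer the consumer's precision would simply be used.
 */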
1291 
1292 static nir_variable *
1293 find_consumer_variable(const nir_shader *consumer,
1294                        const nir_variable *producer_var)
1295 {
1296    nir_foreach_variable_with_modes(var, consumer, nir_var_shader_in) {
1297       if (var->data.location == producer_var->data.location &&
1298           var->data.location_frac == producer_var->data.location_frac)
1299          return var;
1300    }
1301    return NULL;
1302 }
1303 
1304 void
1305 nir_link_varying_precision(nir_shader *producer, nir_shader *consumer)
1306 {
1307    bool frag = consumer->info.stage == MESA_SHADER_FRAGMENT;
1308 
1309    nir_foreach_shader_out_variable(producer_var, producer) {
1310       /* Skip if the slot is not assigned */
1311       if (producer_var->data.location < 0)
1312          continue;
1313 
1314       nir_variable *consumer_var = find_consumer_variable(consumer,
1315                                                           producer_var);
1316 
1317       /* Skip if the variable will be eliminated */
1318       if (!consumer_var)
1319          continue;
1320 
1321       /* Now we have a pair of variables. Let's pick the smaller precision. */
1322       unsigned precision_1 = producer_var->data.precision;
1323       unsigned precision_2 = consumer_var->data.precision;
1324       unsigned minimum = nir_link_precision(precision_1, precision_2, frag);
1325 
1326       /* Propagate the new precision */
1327       producer_var->data.precision = consumer_var->data.precision = minimum;
1328    }
1329 }
1330 
1331 bool
1332 nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
1333 {
1334    /* TODO: Add support for more shader stage combinations */
1335    if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
1336        (producer->info.stage != MESA_SHADER_VERTEX &&
1337         producer->info.stage != MESA_SHADER_TESS_EVAL))
1338       return false;
1339 
1340    bool progress = false;
1341 
1342    nir_function_impl *impl = nir_shader_get_entrypoint(producer);
1343 
1344    struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);
1345 
1346    /* If we find a store in the last block of the producer we can be sure this
1347     * is the only possible value for this output.
1348     */
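   /* For example (a sketch; a hypothetical VS/FS pair, GLSL shown only for
    * illustration):
    *
    *    VS:  out_val = 1.0;         // store_deref whose value is a load_const
    *    FS:  color = vec4(in_val);  // load of the matching input
    *
    * Because the store's source is a load_const, the FS load can be rewritten
    * to use the same constant, making the input unused so later dead-variable
    * cleanup can drop the varying in both stages.
    */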
1349    nir_block *last_block = nir_impl_last_block(impl);
1350    nir_foreach_instr_reverse(instr, last_block) {
1351       if (instr->type != nir_instr_type_intrinsic)
1352          continue;
1353 
1354       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1355 
1356       if (intr->intrinsic != nir_intrinsic_store_deref)
1357          continue;
1358 
1359       nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
1360       if (!nir_deref_mode_is(out_deref, nir_var_shader_out))
1361          continue;
1362 
1363       nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
1364       if (!can_replace_varying(out_var))
1365          continue;
1366 
1367       nir_def *ssa = intr->src[1].ssa;
1368       if (ssa->parent_instr->type == nir_instr_type_load_const) {
1369          progress |= replace_varying_input_by_constant_load(consumer, intr);
1370          continue;
1371       }
1372 
1373       nir_scalar uni_scalar;
1374       if (is_direct_uniform_load(ssa, &uni_scalar)) {
1375          if (consumer->options->lower_varying_from_uniform) {
1376             progress |= replace_varying_input_by_uniform_load(consumer, intr,
1377                                                               &uni_scalar);
1378             continue;
1379          } else {
1380             nir_variable *in_var = get_matching_input_var(consumer, out_var);
1381             /* The varying is loaded from the same uniform, so there is
1382              * no need to do any interpolation. Mark it as flat explicitly.
1383              */
1384             if (!consumer->options->no_integers &&
1385                 in_var && in_var->data.interpolation <= INTERP_MODE_NOPERSPECTIVE) {
1386                in_var->data.interpolation = INTERP_MODE_FLAT;
1387                out_var->data.interpolation = INTERP_MODE_FLAT;
1388             }
1389          }
1390       }
1391 
1392       struct hash_entry *entry = _mesa_hash_table_search(varying_values, ssa);
1393       if (entry) {
1394          progress |= replace_duplicate_input(consumer,
1395                                              (nir_variable *)entry->data,
1396                                              intr);
1397       } else {
1398          nir_variable *in_var = get_matching_input_var(consumer, out_var);
1399          if (in_var) {
1400             _mesa_hash_table_insert(varying_values, ssa, in_var);
1401          }
1402       }
1403    }
1404 
1405    _mesa_hash_table_destroy(varying_values, NULL);
1406 
1407    return progress;
1408 }
1409 
1410 /* TODO any better helper somewhere to sort a list? */
1411 
1412 static void
1413 insert_sorted(struct exec_list *var_list, nir_variable *new_var)
1414 {
1415    nir_foreach_variable_in_list(var, var_list) {
1416       /* Use the `per_primitive` bool to sort per-primitive variables
1417        * to the end of the list, so that nir_assign_io_var_locations
1418        * assigns them the last driver locations.
1419        *
1420        * This is done because AMD HW requires that per-primitive outputs
1421        * are the last params.
1422        * In the future we can add an option for this, if needed by other HW.
1423        */
1424       if (new_var->data.per_primitive < var->data.per_primitive ||
1425           (new_var->data.per_primitive == var->data.per_primitive &&
1426            (var->data.location > new_var->data.location ||
1427             (var->data.location == new_var->data.location &&
1428              var->data.location_frac > new_var->data.location_frac)))) {
1429          exec_node_insert_node_before(&var->node, &new_var->node);
1430          return;
1431       }
1432    }
1433    exec_list_push_tail(var_list, &new_var->node);
1434 }
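
/* Example of the resulting order (a sketch): given variables
 *    A (per_primitive = 0, location = 7),
 *    B (per_primitive = 0, location = 3),
 *    C (per_primitive = 1, location = 5),
 * the sorted list is B, A, C: ordinary variables in ascending
 * (location, location_frac) order, then per-primitive variables last.
 */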
1435 
1436 static void
1437 sort_varyings(nir_shader *shader, nir_variable_mode mode,
1438               struct exec_list *sorted_list)
1439 {
1440    exec_list_make_empty(sorted_list);
1441    nir_foreach_variable_with_modes_safe(var, shader, mode) {
1442       exec_node_remove(&var->node);
1443       insert_sorted(sorted_list, var);
1444    }
1445 }
1446 
1447 void
1448 nir_sort_variables_by_location(nir_shader *shader, nir_variable_mode mode)
1449 {
1450    struct exec_list vars;
1451 
1452    sort_varyings(shader, mode, &vars);
1453    exec_list_append(&shader->variables, &vars);
1454 }
1455 
1456 void
1457 nir_assign_io_var_locations(nir_shader *shader, nir_variable_mode mode,
1458                             unsigned *size, gl_shader_stage stage)
1459 {
1460    unsigned location = 0;
1461    unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
1462    uint64_t processed_locs[2] = { 0 };
1463 
1464    struct exec_list io_vars;
1465    sort_varyings(shader, mode, &io_vars);
1466 
1467    int ASSERTED last_loc = 0;
1468    bool ASSERTED last_per_prim = false;
1469    bool last_partial = false;
1470    nir_foreach_variable_in_list(var, &io_vars) {
1471       const struct glsl_type *type = var->type;
1472       if (nir_is_arrayed_io(var, stage)) {
1473          assert(glsl_type_is_array(type));
1474          type = glsl_get_array_element(type);
1475       }
1476 
1477       int base;
1478       if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX)
1479          base = VERT_ATTRIB_GENERIC0;
1480       else if (var->data.mode == nir_var_shader_out &&
1481                stage == MESA_SHADER_FRAGMENT)
1482          base = FRAG_RESULT_DATA0;
1483       else
1484          base = VARYING_SLOT_VAR0;
1485 
1486       unsigned var_size, driver_size;
1487       if (var->data.compact) {
1488          /* If we are inside a partial compact,
1489           * don't allow another compact to be in this slot
1490           * if it starts at component 0.
1491           */
1492          if (last_partial && var->data.location_frac == 0) {
1493             location++;
1494          }
1495 
1496          /* compact variables must be arrays of scalars */
1497          assert(!var->data.per_view);
1498          assert(glsl_type_is_array(type));
1499          assert(glsl_type_is_scalar(glsl_get_array_element(type)));
1500          unsigned start = 4 * location + var->data.location_frac;
1501          unsigned end = start + glsl_get_length(type);
1502          var_size = driver_size = end / 4 - location;
1503          last_partial = end % 4 != 0;
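         /* Worked example (a sketch, assuming a compact float[5] array such
          * as a 5-element gl_ClipDistance at the current location with
          * location_frac == 0):
          *    start = 4 * location, end = 4 * location + 5,
          *    var_size = driver_size = end / 4 - location = 1,
          *    last_partial = true, because one component spills into the next
          *    slot; that slot is only counted when a later variable (or the
          *    final "if (last_partial) location++") closes it.
          */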
1504       } else {
1505          /* Compact variables bypass the normal varying compacting pass,
1506           * which means they cannot be in the same vec4 slot as a normal
1507           * variable. If part of the current slot is taken up by a compact
1508           * variable, we need to go to the next one.
1509           */
1510          if (last_partial) {
1511             location++;
1512             last_partial = false;
1513          }
1514 
1515          /* per-view variables have an extra array dimension, which is ignored
1516           * when counting user-facing slots (var->data.location), but *not*
1517           * with driver slots (var->data.driver_location). That is, each user
1518           * slot maps to multiple driver slots.
1519           */
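         /* For example (a sketch): a per-view vec4 varying compiled for two
          * views has type vec4[2], so var_size = 1 user-facing slot while
          * driver_size = 2 driver slots.
          */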
1520          driver_size = glsl_count_attribute_slots(type, false);
1521          if (var->data.per_view) {
1522             assert(glsl_type_is_array(type));
1523             var_size =
1524                glsl_count_attribute_slots(glsl_get_array_element(type), false);
1525          } else {
1526             var_size = driver_size;
1527          }
1528       }
1529 
1530       /* Builtins don't allow component packing, so we only need to worry
1531        * about user-defined varyings sharing the same location.
1532        */
1533       bool processed = false;
1534       if (var->data.location >= base) {
1535          unsigned glsl_location = var->data.location - base;
1536 
1537          for (unsigned i = 0; i < var_size; i++) {
1538             if (processed_locs[var->data.index] &
1539                 ((uint64_t)1 << (glsl_location + i)))
1540                processed = true;
1541             else
1542                processed_locs[var->data.index] |=
1543                   ((uint64_t)1 << (glsl_location + i));
1544          }
1545       }
1546 
1547       /* Because component packing allows varyings to share the same location,
1548        * we may have already processed this location.
1549        */
1550       if (processed) {
1551          /* TODO handle overlapping per-view variables */
1552          assert(!var->data.per_view);
1553          unsigned driver_location = assigned_locations[var->data.location];
1554          var->data.driver_location = driver_location;
1555 
1556          /* An array may be packed such that it crosses multiple other arrays
1557           * or variables, so we need to make sure we have allocated the elements
1558           * consecutively if the previously processed var was shorter than
1559           * the current array we are processing.
1560           *
1561           * NOTE: The code below assumes the var list is ordered in ascending
1562           * location order, but per-vertex/per-primitive outputs may be
1563           * grouped separately.
1564           */
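         /* For example (a sketch): if a vec3 packed at location 4, component 0
          * was processed first, slot 4 already has a driver location, but a
          * float[3] array packed at location 4, component 3 spans slots 4-6,
          * so the loop below still allocates fresh driver locations for
          * slots 5 and 6.
          */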
1565          assert(last_loc <= var->data.location ||
1566                 last_per_prim != var->data.per_primitive);
1567          last_loc = var->data.location;
1568          last_per_prim = var->data.per_primitive;
1569          unsigned last_slot_location = driver_location + var_size;
1570          if (last_slot_location > location) {
1571             unsigned num_unallocated_slots = last_slot_location - location;
1572             unsigned first_unallocated_slot = var_size - num_unallocated_slots;
1573             for (unsigned i = first_unallocated_slot; i < var_size; i++) {
1574                assigned_locations[var->data.location + i] = location;
1575                location++;
1576             }
1577          }
1578          continue;
1579       }
1580 
1581       for (unsigned i = 0; i < var_size; i++) {
1582          assigned_locations[var->data.location + i] = location + i;
1583       }
1584 
1585       var->data.driver_location = location;
1586       location += driver_size;
1587    }
1588 
1589    if (last_partial)
1590       location++;
1591 
1592    exec_list_append(&shader->variables, &io_vars);
1593    *size = location;
1594 }
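
/* A minimal usage sketch (hypothetical driver code; "nir" and
 * "num_input_slots" are illustrative names, not part of this API):
 *
 *    unsigned num_input_slots;
 *    nir_assign_io_var_locations(nir, nir_var_shader_in, &num_input_slots,
 *                                nir->info.stage);
 *    // num_input_slots now holds the number of driver slots used by inputs.
 */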
1595 
1596 static uint64_t
1597 get_linked_variable_location(unsigned location, bool patch)
1598 {
1599    if (!patch)
1600       return location;
1601 
1602    /* Reserve locations 0...3 for special patch variables
1603     * like tess factors and bounding boxes, and the generic patch
1604     * variables will come after them.
1605     */
1606    if (location >= VARYING_SLOT_PATCH0)
1607       return location - VARYING_SLOT_PATCH0 + 4;
1608    else if (location >= VARYING_SLOT_TESS_LEVEL_OUTER &&
1609             location <= VARYING_SLOT_BOUNDING_BOX1)
1610       return location - VARYING_SLOT_TESS_LEVEL_OUTER;
1611    else
1612       unreachable("Unsupported variable in get_linked_variable_location.");
1613 }
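
/* For example (following the rules above): VARYING_SLOT_TESS_LEVEL_OUTER maps
 * to 0 and VARYING_SLOT_PATCH0 maps to 4, so the generic patch varyings start
 * right after the reserved range.
 */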
1614 
1615 static uint64_t
1616 get_linked_variable_io_mask(nir_variable *variable, gl_shader_stage stage)
1617 {
1618    const struct glsl_type *type = variable->type;
1619 
1620    if (nir_is_arrayed_io(variable, stage)) {
1621       assert(glsl_type_is_array(type));
1622       type = glsl_get_array_element(type);
1623    }
1624 
1625    unsigned slots = glsl_count_attribute_slots(type, false);
1626    if (variable->data.compact) {
1627       unsigned component_count = variable->data.location_frac + glsl_get_length(type);
1628       slots = DIV_ROUND_UP(component_count, 4);
1629    }
1630 
1631    uint64_t mask = u_bit_consecutive64(0, slots);
1632    return mask;
1633 }
1634 
1635 nir_linked_io_var_info
1636 nir_assign_linked_io_var_locations(nir_shader *producer, nir_shader *consumer)
1637 {
1638    assert(producer);
1639    assert(consumer);
1640 
1641    uint64_t producer_output_mask = 0;
1642    uint64_t producer_patch_output_mask = 0;
1643 
1644    nir_foreach_shader_out_variable(variable, producer) {
1645       uint64_t mask = get_linked_variable_io_mask(variable, producer->info.stage);
1646       uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);
1647 
1648       if (variable->data.patch)
1649          producer_patch_output_mask |= mask << loc;
1650       else
1651          producer_output_mask |= mask << loc;
1652    }
1653 
1654    uint64_t consumer_input_mask = 0;
1655    uint64_t consumer_patch_input_mask = 0;
1656 
1657    nir_foreach_shader_in_variable(variable, consumer) {
1658       uint64_t mask = get_linked_variable_io_mask(variable, consumer->info.stage);
1659       uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);
1660 
1661       if (variable->data.patch)
1662          consumer_patch_input_mask |= mask << loc;
1663       else
1664          consumer_input_mask |= mask << loc;
1665    }
1666 
1667    uint64_t io_mask = producer_output_mask | consumer_input_mask;
1668    uint64_t patch_io_mask = producer_patch_output_mask | consumer_patch_input_mask;
1669 
1670    nir_foreach_shader_out_variable(variable, producer) {
1671       uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);
1672 
1673       if (variable->data.patch)
1674          variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc));
1675       else
1676          variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc));
1677    }
1678 
1679    nir_foreach_shader_in_variable(variable, consumer) {
1680       uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);
1681 
1682       if (variable->data.patch)
1683          variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc));
1684       else
1685          variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc));
1686    }
1687 
1688    nir_linked_io_var_info result = {
1689       .num_linked_io_vars = util_bitcount64(io_mask),
1690       .num_linked_patch_io_vars = util_bitcount64(patch_io_mask),
1691    };
1692 
1693    return result;
1694 }
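
/* A minimal usage sketch (hypothetical driver code; "tcs_nir", "tes_nir" and
 * the 16-byte-per-slot assumption are illustrative, not part of this API):
 *
 *    nir_linked_io_var_info info =
 *       nir_assign_linked_io_var_locations(tcs_nir, tes_nir);
 *    unsigned per_vertex_bytes = info.num_linked_io_vars * 16;
 *    unsigned per_patch_bytes = info.num_linked_patch_io_vars * 16;
 */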
1695