• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2018 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "nir.h"
25 #include "nir_deref.h"
26 #include "gl_nir_linker.h"
27 #include "linker_util.h"
28 #include "util/u_dynarray.h"
29 #include "util/u_math.h"
30 #include "main/consts_exts.h"
31 #include "main/shader_types.h"
32 
33 /**
 * This file does the common linking of GLSL uniforms using NIR, instead of
 * IR as its counterpart glsl/link_uniforms.cpp does.
36  */
37 
38 #define UNMAPPED_UNIFORM_LOC ~0u
39 
/* Per-variable book-keeping for a uniform array: which elements are actually
 * accessed, and the deref instructions that reference the array (kept so
 * their types can be patched if the array is later resized).
 */
struct uniform_array_info {
   /** List of dereferences of the uniform array. */
   struct util_dynarray *deref_list;

   /** Set of bit-flags to note which array elements have been accessed. */
   BITSET_WORD *indices;
};
47 
48 static unsigned
uniform_storage_size(const struct glsl_type * type)49 uniform_storage_size(const struct glsl_type *type)
50 {
51    switch (glsl_get_base_type(type)) {
52    case GLSL_TYPE_STRUCT:
53    case GLSL_TYPE_INTERFACE: {
54       unsigned size = 0;
55       for (unsigned i = 0; i < glsl_get_length(type); i++)
56          size += uniform_storage_size(glsl_get_struct_field(type, i));
57       return size;
58    }
59    case GLSL_TYPE_ARRAY: {
60       const struct glsl_type *e_type = glsl_get_array_element(type);
61       enum glsl_base_type e_base_type = glsl_get_base_type(e_type);
62       if (e_base_type == GLSL_TYPE_STRUCT ||
63           e_base_type == GLSL_TYPE_INTERFACE ||
64           e_base_type == GLSL_TYPE_ARRAY) {
65          unsigned length = !glsl_type_is_unsized_array(type) ?
66             glsl_get_length(type) : 1;
67          return length * uniform_storage_size(e_type);
68       } else
69          return 1;
70    }
71    default:
72       return 1;
73    }
74 }
75 
/**
 * Update the sizes of linked shader uniform arrays to the maximum
 * array index used.
 *
 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
 *
 *     If one or more elements of an array are active,
 *     GetActiveUniform will return the name of the array in name,
 *     subject to the restrictions listed above. The type of the array
 *     is returned in type. The size parameter contains the highest
 *     array element index used, plus one. The compiler or linker
 *     determines the highest index used.  There will be only one
 *     active uniform reported by the GL per uniform array.
 */
static void
update_array_sizes(struct gl_shader_program *prog, nir_variable *var,
                   struct hash_table **referenced_uniforms,
                   unsigned current_var_stage)
{
   /* For now we only resize 1D arrays.
    * TODO: add support for resizing more complex array types ??
    */
   if (!glsl_type_is_array(var->type) ||
       glsl_type_is_array(glsl_get_array_element(var->type)))
      return;

   /* GL_ARB_uniform_buffer_object says that std140 uniforms
    * will not be eliminated.  Since we always do std140, just
    * don't resize arrays in UBOs.
    *
    * Atomic counters are supposed to get deterministic
    * locations assigned based on the declaration ordering and
    * sizes, array compaction would mess that up.
    *
    * Subroutine uniforms are not removed.
    */
   if (nir_variable_is_in_block(var) || glsl_contains_atomic(var->type) ||
       glsl_get_base_type(glsl_without_array(var->type)) == GLSL_TYPE_SUBROUTINE ||
       var->constant_initializer)
      return;

   /* Scan every linked stage that references this uniform and compute the
    * highest array element used anywhere in the program.
    */
   struct uniform_array_info *ainfo = NULL;
   int words = BITSET_WORDS(glsl_array_size(var->type));
   int max_array_size = 0;
   for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
      if (!sh)
         continue;

      struct hash_entry *entry =
         _mesa_hash_table_search(referenced_uniforms[stage], var->name);
      if (entry) {
         ainfo = (struct uniform_array_info *)  entry->data;
         /* BITSET_LAST_BIT_SIZED gives "highest set bit + 1", i.e. the
          * array length this stage requires.
          */
         max_array_size = MAX2(BITSET_LAST_BIT_SIZED(ainfo->indices, words),
                               max_array_size);
      }

      /* The declared size is fully used; no shrinking is possible. */
      if (max_array_size == glsl_array_size(var->type))
         return;
   }

   if (max_array_size != glsl_array_size(var->type)) {
      /* If this is a built-in uniform (i.e., it's backed by some
       * fixed-function state), adjust the number of state slots to
       * match the new array size.  The number of slots per array entry
       * is not known.  It seems safe to assume that the total number of
       * slots is an integer multiple of the number of array elements.
       * Determine the number of slots per array element by dividing by
       * the old (total) size.
       */
      const unsigned num_slots = var->num_state_slots;
      if (num_slots > 0) {
         var->num_state_slots =
            (max_array_size * (num_slots / glsl_array_size(var->type)));
      }

      var->type = glsl_array_type(glsl_get_array_element(var->type),
                                  max_array_size, 0);

      /* Update the types of dereferences in case we changed any. */
      struct hash_entry *entry =
         _mesa_hash_table_search(referenced_uniforms[current_var_stage], var->name);
      if (entry) {
         struct uniform_array_info *ainfo =
            (struct uniform_array_info *) entry->data;
         util_dynarray_foreach(ainfo->deref_list, nir_deref_instr *, deref) {
            (*deref)->type = var->type;
         }
      }
   }
}
167 
168 static void
nir_setup_uniform_remap_tables(const struct gl_constants * consts,struct gl_shader_program * prog)169 nir_setup_uniform_remap_tables(const struct gl_constants *consts,
170                                struct gl_shader_program *prog)
171 {
172    unsigned total_entries = prog->NumExplicitUniformLocations;
173 
174    /* For glsl this may have been allocated by reserve_explicit_locations() so
175     * that we can keep track of unused uniforms with explicit locations.
176     */
177    assert(!prog->data->spirv ||
178           (prog->data->spirv && !prog->UniformRemapTable));
179    if (!prog->UniformRemapTable) {
180       prog->UniformRemapTable = rzalloc_array(prog,
181                                               struct gl_uniform_storage *,
182                                               prog->NumUniformRemapTable);
183    }
184 
185    union gl_constant_value *data =
186       rzalloc_array(prog->data,
187                     union gl_constant_value, prog->data->NumUniformDataSlots);
188    if (!prog->UniformRemapTable || !data) {
189       linker_error(prog, "Out of memory during linking.\n");
190       return;
191    }
192    prog->data->UniformDataSlots = data;
193 
194    prog->data->UniformDataDefaults =
195          rzalloc_array(prog->data->UniformDataSlots,
196                        union gl_constant_value, prog->data->NumUniformDataSlots);
197 
198    unsigned data_pos = 0;
199 
200    /* Reserve all the explicit locations of the active uniforms. */
201    for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
202       struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
203 
204       if (uniform->hidden)
205          continue;
206 
207       if (uniform->is_shader_storage ||
208           glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
209          continue;
210 
211       if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
212          continue;
213 
214       /* How many new entries for this uniform? */
215       const unsigned entries = MAX2(1, uniform->array_elements);
216       unsigned num_slots = glsl_get_component_slots(uniform->type);
217 
218       uniform->storage = &data[data_pos];
219 
220       /* Set remap table entries point to correct gl_uniform_storage. */
221       for (unsigned j = 0; j < entries; j++) {
222          unsigned element_loc = uniform->remap_location + j;
223          prog->UniformRemapTable[element_loc] = uniform;
224 
225          data_pos += num_slots;
226       }
227    }
228 
229    /* Reserve locations for rest of the uniforms. */
230    if (prog->data->spirv)
231       link_util_update_empty_uniform_locations(prog);
232 
233    for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
234       struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
235 
236       if (uniform->hidden)
237          continue;
238 
239       if (uniform->is_shader_storage ||
240           glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
241          continue;
242 
243       /* Built-in uniforms should not get any location. */
244       if (uniform->builtin)
245          continue;
246 
247       /* Explicit ones have been set already. */
248       if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
249          continue;
250 
251       /* How many entries for this uniform? */
252       const unsigned entries = MAX2(1, uniform->array_elements);
253 
254       /* Add new entries to the total amount for checking against MAX_UNIFORM-
255        * _LOCATIONS. This only applies to the default uniform block (-1),
256        * because locations of uniform block entries are not assignable.
257        */
258       if (prog->data->UniformStorage[i].block_index == -1)
259          total_entries += entries;
260 
261       unsigned location =
262          link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);
263 
264       if (location == -1) {
265          location = prog->NumUniformRemapTable;
266 
267          /* resize remap table to fit new entries */
268          prog->UniformRemapTable =
269             reralloc(prog,
270                      prog->UniformRemapTable,
271                      struct gl_uniform_storage *,
272                      prog->NumUniformRemapTable + entries);
273          prog->NumUniformRemapTable += entries;
274       }
275 
276       /* set the base location in remap table for the uniform */
277       uniform->remap_location = location;
278 
279       unsigned num_slots = glsl_get_component_slots(uniform->type);
280 
281       if (uniform->block_index == -1)
282          uniform->storage = &data[data_pos];
283 
284       /* Set remap table entries point to correct gl_uniform_storage. */
285       for (unsigned j = 0; j < entries; j++) {
286          unsigned element_loc = uniform->remap_location + j;
287          prog->UniformRemapTable[element_loc] = uniform;
288 
289          if (uniform->block_index == -1)
290             data_pos += num_slots;
291       }
292    }
293 
294    /* Verify that total amount of entries for explicit and implicit locations
295     * is less than MAX_UNIFORM_LOCATIONS.
296     */
297    if (total_entries > consts->MaxUserAssignableUniformLocations) {
298       linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS"
299                    "(%u > %u)", total_entries,
300                    consts->MaxUserAssignableUniformLocations);
301    }
302 
303    /* Reserve all the explicit locations of the active subroutine uniforms. */
304    for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
305       struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
306 
307       if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
308          continue;
309 
310       if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
311          continue;
312 
313       /* How many new entries for this uniform? */
314       const unsigned entries =
315          MAX2(1, prog->data->UniformStorage[i].array_elements);
316 
317       uniform->storage = &data[data_pos];
318 
319       unsigned num_slots = glsl_get_component_slots(uniform->type);
320       unsigned mask = prog->data->linked_stages;
321       while (mask) {
322          const int j = u_bit_scan(&mask);
323          struct gl_program *p = prog->_LinkedShaders[j]->Program;
324 
325          if (!prog->data->UniformStorage[i].opaque[j].active)
326             continue;
327 
328          /* Set remap table entries point to correct gl_uniform_storage. */
329          for (unsigned k = 0; k < entries; k++) {
330             unsigned element_loc =
331                prog->data->UniformStorage[i].remap_location + k;
332             p->sh.SubroutineUniformRemapTable[element_loc] =
333                &prog->data->UniformStorage[i];
334 
335             data_pos += num_slots;
336          }
337       }
338    }
339 
340    /* reserve subroutine locations */
341    for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
342       struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
343 
344       if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
345          continue;
346 
347       if (prog->data->UniformStorage[i].remap_location !=
348           UNMAPPED_UNIFORM_LOC)
349          continue;
350 
351       const unsigned entries =
352          MAX2(1, prog->data->UniformStorage[i].array_elements);
353 
354       uniform->storage = &data[data_pos];
355 
356       unsigned num_slots = glsl_get_component_slots(uniform->type);
357       unsigned mask = prog->data->linked_stages;
358       while (mask) {
359          const int j = u_bit_scan(&mask);
360          struct gl_program *p = prog->_LinkedShaders[j]->Program;
361 
362          if (!prog->data->UniformStorage[i].opaque[j].active)
363             continue;
364 
365          p->sh.SubroutineUniformRemapTable =
366             reralloc(p,
367                      p->sh.SubroutineUniformRemapTable,
368                      struct gl_uniform_storage *,
369                      p->sh.NumSubroutineUniformRemapTable + entries);
370 
371          for (unsigned k = 0; k < entries; k++) {
372             p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
373                &prog->data->UniformStorage[i];
374 
375             data_pos += num_slots;
376          }
377          prog->data->UniformStorage[i].remap_location =
378             p->sh.NumSubroutineUniformRemapTable;
379          p->sh.NumSubroutineUniformRemapTable += entries;
380       }
381    }
382 
383    /* assign storage to hidden uniforms */
384    for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
385       struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
386 
387       if (!uniform->hidden ||
388           glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
389          continue;
390 
391       const unsigned entries =
392          MAX2(1, prog->data->UniformStorage[i].array_elements);
393 
394       uniform->storage = &data[data_pos];
395 
396       unsigned num_slots = glsl_get_component_slots(uniform->type);
397       for (unsigned k = 0; k < entries; k++)
398          data_pos += num_slots;
399    }
400 }
401 
/* Record one use of a uniform/UBO/SSBO/image variable found while walking a
 * shader.  Walks the deref chain rooted at the variable, collects the array
 * indices touched into the caller-owned scratch buffer (*derefs, grown on
 * demand), and files the result in the "live" table keyed by variable name.
 */
static void
add_var_use_deref(nir_deref_instr *deref, struct hash_table *live,
                  struct array_deref_range **derefs, unsigned *derefs_size)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   /* Only variable-rooted derefs of uniform-like storage are of interest. */
   deref = path.path[0];
   if (deref->deref_type != nir_deref_type_var ||
       !nir_deref_mode_is_one_of(deref, nir_var_uniform |
                                        nir_var_mem_ubo |
                                        nir_var_mem_ssbo |
                                        nir_var_image)) {
      nir_deref_path_finish(&path);
      return;
   }

   /* Number of derefs used in current processing. */
   unsigned num_derefs = 0;

   const struct glsl_type *deref_type = deref->var->type;
   nir_deref_instr **p = &path.path[1];
   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {

         /* Skip matrix derefences */
         if (!glsl_type_is_array(deref_type))
            break;

         /* Grow the scratch buffer in 4 KiB steps as needed. */
         if ((num_derefs + 1) * sizeof(struct array_deref_range) > *derefs_size) {
            void *ptr = reralloc_size(NULL, *derefs, *derefs_size + 4096);

            if (ptr == NULL) {
               nir_deref_path_finish(&path);
               return;
            }

            *derefs_size += 4096;
            *derefs = (struct array_deref_range *)ptr;
         }

         struct array_deref_range *dr = &(*derefs)[num_derefs];
         num_derefs++;

         dr->size = glsl_get_length(deref_type);

         if (nir_src_is_const((*p)->arr.index)) {
            dr->index = nir_src_as_uint((*p)->arr.index);
         } else {
            /* An unsized array can occur at the end of an SSBO.  We can't track
             * accesses to such an array, so bail.
             */
            if (dr->size == 0) {
               nir_deref_path_finish(&path);
               return;
            }

            /* Dynamic index: store the out-of-range value "size" to flag
             * that any element may be accessed.
             */
            dr->index = dr->size;
         }

         deref_type = glsl_get_array_element(deref_type);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* We have reached the end of the array. */
         break;
      }
   }

   nir_deref_path_finish(&path);


   struct uniform_array_info *ainfo = NULL;

   struct hash_entry *entry =
      _mesa_hash_table_search(live, deref->var->name);
   if (!entry && glsl_type_is_array(deref->var->type)) {
      /* First sighting of this array variable: set up access tracking. */
      ainfo = ralloc(live, struct uniform_array_info);

      unsigned num_bits = MAX2(1, glsl_get_aoa_size(deref->var->type));
      ainfo->indices = rzalloc_array(live, BITSET_WORD, BITSET_WORDS(num_bits));

      ainfo->deref_list = ralloc(live, struct util_dynarray);
      util_dynarray_init(ainfo->deref_list, live);
   }

   if (entry)
      ainfo = (struct uniform_array_info *) entry->data;

   /* Non-array variables are inserted with NULL data; only arrays need
    * element tracking.
    */
   if (glsl_type_is_array(deref->var->type)) {
      /* Count the "depth" of the arrays-of-arrays. */
      unsigned array_depth = 0;
      for (const struct glsl_type *type = deref->var->type;
           glsl_type_is_array(type);
           type = glsl_get_array_element(type)) {
         array_depth++;
      }

      link_util_mark_array_elements_referenced(*derefs, num_derefs, array_depth,
                                               ainfo->indices);

      util_dynarray_append(ainfo->deref_list, nir_deref_instr *, deref);
   }

   assert(deref->modes == deref->var->data.mode);
   _mesa_hash_table_insert(live, deref->var->name, ainfo);
}
507 
/* Iterate over the shader and collect information about uniform use */
static void
add_var_use_shader(nir_shader *shader, struct hash_table *live)
{
   /* Currently allocated buffer block of derefs. */
   struct array_deref_range *derefs = NULL;

   /* Size of the derefs buffer in bytes. */
   unsigned derefs_size = 0;

   nir_foreach_function_impl(impl, shader) {
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type == nir_instr_type_intrinsic) {
               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
               /* All of these intrinsics carry the accessed deref as their
                * first source.
                */
               switch (intr->intrinsic) {
               case nir_intrinsic_atomic_counter_read_deref:
               case nir_intrinsic_atomic_counter_inc_deref:
               case nir_intrinsic_atomic_counter_pre_dec_deref:
               case nir_intrinsic_atomic_counter_post_dec_deref:
               case nir_intrinsic_atomic_counter_add_deref:
               case nir_intrinsic_atomic_counter_min_deref:
               case nir_intrinsic_atomic_counter_max_deref:
               case nir_intrinsic_atomic_counter_and_deref:
               case nir_intrinsic_atomic_counter_or_deref:
               case nir_intrinsic_atomic_counter_xor_deref:
               case nir_intrinsic_atomic_counter_exchange_deref:
               case nir_intrinsic_atomic_counter_comp_swap_deref:
               case nir_intrinsic_image_deref_load:
               case nir_intrinsic_image_deref_store:
               case nir_intrinsic_image_deref_atomic:
               case nir_intrinsic_image_deref_atomic_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_image_deref_samples:
               case nir_intrinsic_load_deref:
               case nir_intrinsic_store_deref:
                  add_var_use_deref(nir_src_as_deref(intr->src[0]), live,
                                    &derefs, &derefs_size);
                  break;

               default:
                  /* Nothing to do */
                  break;
               }
            } else if (instr->type == nir_instr_type_tex) {
               /* Texture instructions reference samplers and textures through
                * dedicated deref sources; record both when present.
                */
               nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
               int sampler_idx =
                  nir_tex_instr_src_index(tex_instr,
                                          nir_tex_src_sampler_deref);
               int texture_idx =
                  nir_tex_instr_src_index(tex_instr,
                                          nir_tex_src_texture_deref);

               if (sampler_idx >= 0) {
                  nir_deref_instr *deref =
                     nir_src_as_deref(tex_instr->src[sampler_idx].src);
                  add_var_use_deref(deref, live, &derefs, &derefs_size);
               }

               if (texture_idx >= 0) {
                  nir_deref_instr *deref =
                     nir_src_as_deref(tex_instr->src[texture_idx].src);
                  add_var_use_deref(deref, live, &derefs, &derefs_size);
               }
            }
         }
      }
   }

   ralloc_free(derefs);
}
579 
580 static void
mark_stage_as_active(struct gl_uniform_storage * uniform,unsigned stage)581 mark_stage_as_active(struct gl_uniform_storage *uniform,
582                      unsigned stage)
583 {
584    uniform->active_shader_mask |= 1 << stage;
585 }
586 
/* Used to build a tree representing the glsl_type so that we can have a place
 * to store the next index for opaque types. Array types are expanded so that
 * they have a single child which is used for all elements of the array.
 * Struct types have a child for each member. The tree is walked while
 * processing a uniform so that we can recognise when an opaque type is
 * encountered a second time in order to reuse the same range of indices that
 * was reserved the first time. That way the sampler indices can be arranged
 * so that members of an array are placed sequentially even if the array is an
 * array of structs containing other opaque members.
 */
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven't
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   /* Element count of the array node (expanded arrays share one child). */
   unsigned array_size;
   struct type_tree_entry *parent;
   struct type_tree_entry *next_sibling;
   struct type_tree_entry *children;
};
607 
/* Mutable state threaded through the NIR uniform-linking passes.  Fields are
 * grouped by lifetime: valid for the whole program, reset per shader stage,
 * or valid only while a single variable is being processed.
 */
struct nir_link_uniforms_state {
   /* per-whole program */
   unsigned num_hidden_uniforms;
   unsigned num_values;
   unsigned max_uniform_location;

   /* per-shader stage */
   unsigned next_bindless_image_index;
   unsigned next_bindless_sampler_index;
   unsigned next_image_index;
   unsigned next_sampler_index;
   unsigned next_subroutine;
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;
   unsigned shader_shadow_samplers;
   unsigned shader_storage_blocks_write_access;
   struct gl_program_parameter_list *params;

   /* per-variable */
   nir_variable *current_var;
   const struct glsl_type *current_ifc_type;
   int offset;
   bool var_is_in_block;
   bool set_top_level_array;
   int top_level_array_size;
   int top_level_array_stride;

   /* Current position in the type tree of the uniform being processed. */
   struct type_tree_entry *current_type;
   /* Per-stage tables mapping variable name -> struct uniform_array_info. */
   struct hash_table *referenced_uniforms[MESA_SHADER_STAGES];
   struct hash_table *uniform_hash;
};
641 
/* Mirror a uniform into the driver's gl_program_parameter_list, adding one
 * parameter per flattened array element and matrix column, and record on
 * each parameter the index of the gl_uniform_storage that backs it.
 */
static void
add_parameter(struct gl_uniform_storage *uniform,
              const struct gl_constants *consts,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   /* Builtin uniforms are backed by PROGRAM_STATE_VAR, so don't add them as
    * uniforms.
    */
   if (uniform->builtin)
      return;

   /* Skip shader storage and non-bindless opaque types entirely. */
   if (!state->params || uniform->is_shader_storage ||
       (glsl_contains_opaque(type) && !state->current_var->data.bindless))
      return;

   /* One parameter per array-of-arrays element and per matrix column;
    * dual-slot (64-bit vec3/vec4) types need two parameters each.
    */
   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;

   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params, num_params);

   if (consts->PackedDriverUniformStorage) {
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         /* A dual-slot value splits into a full 4-component parameter
          * followed by one holding the remainder.
          */
         if (is_dual_slot) {
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         /* TODO: This will waste space with 1 and 3 16-bit components. */
         if (glsl_type_is_16bit(glsl_without_array(type)))
            comps = DIV_ROUND_UP(comps, 2);

         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name.string, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      /* Unpacked storage: every parameter occupies a full vec4 slot. */
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name.string, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each Parameter will hold the index to the backing uniform storage.
    * This avoids relying on names to match parameters and uniform
    * storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->current_var->data.location;
   }
}
706 
/* Return the first index for the uniform currently being processed and
 * advance the node's counter past its elements.  The first time the current
 * type-tree node is seen, a fresh range of *next_index values is reserved
 * for every element of the enclosing arrays; later visits reuse that range.
 * *initialised reports whether this call performed the reservation.
 */
static unsigned
get_next_index(struct nir_link_uniforms_state *state,
               const struct gl_uniform_storage *uniform,
               unsigned *next_index, bool *initialised)
{
   /* UINT_MAX marks a type-tree node that has not been given indices yet;
    * otherwise we can just offset from the recorded value.
    */
   if (state->current_type->next_index == UINT_MAX) {
      /* Reserve enough indices for all of the arrays enclosing this member. */

      unsigned array_size = 1;

      for (const struct type_tree_entry *p = state->current_type;
           p;
           p = p->parent) {
         array_size *= p->array_size;
      }

      state->current_type->next_index = *next_index;
      *next_index += array_size;
      *initialised = true;
   } else
      *initialised = false;

   unsigned index = state->current_type->next_index;

   /* Consume one index per array element of this uniform. */
   state->current_type->next_index += MAX2(1, uniform->array_elements);

   return index;
}
740 
741 static gl_texture_index
texture_index_for_type(const struct glsl_type * type)742 texture_index_for_type(const struct glsl_type *type)
743 {
744    const bool sampler_array = glsl_sampler_type_is_array(type);
745    switch (glsl_get_sampler_dim(type)) {
746    case GLSL_SAMPLER_DIM_1D:
747       return sampler_array ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX;
748    case GLSL_SAMPLER_DIM_2D:
749       return sampler_array ? TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX;
750    case GLSL_SAMPLER_DIM_3D:
751       return TEXTURE_3D_INDEX;
752    case GLSL_SAMPLER_DIM_CUBE:
753       return sampler_array ? TEXTURE_CUBE_ARRAY_INDEX : TEXTURE_CUBE_INDEX;
754    case GLSL_SAMPLER_DIM_RECT:
755       return TEXTURE_RECT_INDEX;
756    case GLSL_SAMPLER_DIM_BUF:
757       return TEXTURE_BUFFER_INDEX;
758    case GLSL_SAMPLER_DIM_EXTERNAL:
759       return TEXTURE_EXTERNAL_INDEX;
760    case GLSL_SAMPLER_DIM_MS:
761       return sampler_array ? TEXTURE_2D_MULTISAMPLE_ARRAY_INDEX :
762                              TEXTURE_2D_MULTISAMPLE_INDEX;
763    default:
764       assert(!"Should not get here.");
765       return TEXTURE_BUFFER_INDEX;
766    }
767 }
768 
/* Update the uniforms info for the current shader stage.
 *
 * Depending on the (array-stripped) type of the uniform this:
 *  - assigns a sampler / bindless-sampler / image / bindless-image /
 *    subroutine index and records it in uniform->opaque[stage],
 *  - grows the per-program bindless tables as needed,
 *  - accumulates the per-shader resource counters held in *state
 *    (num_shader_samplers, num_shader_images,
 *     num_shader_uniform_components, ...).
 */
static void
update_uniforms_shader_info(struct gl_shader_program *prog,
                            struct nir_link_uniforms_state *state,
                            struct gl_uniform_storage *uniform,
                            const struct glsl_type *type,
                            unsigned stage)
{
   /* Number of components this uniform occupies (whole array, all slots). */
   unsigned values = glsl_get_component_slots(type);
   const struct glsl_type *type_no_array = glsl_without_array(type);

   if (glsl_type_is_sampler(type_no_array)) {
      /* Set by get_next_index() when this uniform had no index yet and one
       * was just allocated for it.
       */
      bool init_idx;
      /* ARB_bindless_texture spec says:
       *
       *    "When used as shader inputs, outputs, uniform block members,
       *     or temporaries, the value of the sampler is a 64-bit unsigned
       *     integer handle and never refers to a texture image unit."
       */
      bool is_bindless = state->current_var->data.bindless || state->var_is_in_block;
      unsigned *next_index = is_bindless ?
         &state->next_bindless_sampler_index :
         &state->next_sampler_index;
      int sampler_index = get_next_index(state, uniform, next_index, &init_idx);
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      if (is_bindless) {
         if (init_idx) {
            /* Grow BindlessSamplers (zero-initialized) to cover the newly
             * allocated indices, then record the texture target for each.
             */
            sh->Program->sh.BindlessSamplers =
               rerzalloc(sh->Program, sh->Program->sh.BindlessSamplers,
                         struct gl_bindless_sampler,
                         sh->Program->sh.NumBindlessSamplers,
                         state->next_bindless_sampler_index);

            for (unsigned j = sh->Program->sh.NumBindlessSamplers;
                 j < state->next_bindless_sampler_index; j++) {
               sh->Program->sh.BindlessSamplers[j].target =
                  texture_index_for_type(type_no_array);
            }

            sh->Program->sh.NumBindlessSamplers =
               state->next_bindless_sampler_index;
         }

         if (!state->var_is_in_block)
            state->num_shader_uniform_components += values;
      } else {
         /* Samplers (bound or bindless) are counted as two components
          * as specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         if (init_idx) {
            const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);
            /* Record target/used/shadow bits for every texture unit this
             * uniform (or array of them) occupies, clamped to MAX_SAMPLERS.
             */
            for (unsigned i = sampler_index;
                 i < MIN2(state->next_sampler_index, MAX_SAMPLERS); i++) {
               sh->Program->sh.SamplerTargets[i] =
                  texture_index_for_type(type_no_array);
               state->shader_samplers_used |= 1U << i;
               state->shader_shadow_samplers |= shadow << i;
            }
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = sampler_index;
   } else if (glsl_type_is_image(type_no_array)) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      /* Set image access qualifiers */
      enum gl_access_qualifier image_access =
         state->current_var->data.access;

      int image_index;
      if (state->current_var->data.bindless) {
         image_index = state->next_bindless_image_index;
         state->next_bindless_image_index += MAX2(1, uniform->array_elements);

         /* Grow BindlessImages and store the access qualifier for each new
          * entry.
          */
         sh->Program->sh.BindlessImages =
            rerzalloc(sh->Program, sh->Program->sh.BindlessImages,
                      struct gl_bindless_image,
                      sh->Program->sh.NumBindlessImages,
                      state->next_bindless_image_index);

         for (unsigned j = sh->Program->sh.NumBindlessImages;
              j < state->next_bindless_image_index; j++) {
            sh->Program->sh.BindlessImages[j].image_access = image_access;
         }

         sh->Program->sh.NumBindlessImages = state->next_bindless_image_index;

      } else {
         image_index = state->next_image_index;
         state->next_image_index += MAX2(1, uniform->array_elements);

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS); i++) {
            sh->Program->sh.image_access[i] = image_access;
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = image_index;

      if (!uniform->is_shader_storage)
         state->num_shader_uniform_components += values;
   } else {
      if (glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

         uniform->opaque[stage].index = state->next_subroutine;
         uniform->opaque[stage].active = true;

         sh->Program->sh.NumSubroutineUniforms++;

         /* Increment the subroutine index by 1 for non-arrays and by the
          * number of array elements for arrays.
          */
         state->next_subroutine += MAX2(1, uniform->array_elements);
      }

      if (!state->var_is_in_block)
         state->num_shader_uniform_components += values;
   }
}
899 
900 static bool
find_and_update_named_uniform_storage(const struct gl_constants * consts,struct gl_shader_program * prog,struct nir_link_uniforms_state * state,nir_variable * var,char ** name,size_t name_length,const struct glsl_type * type,unsigned stage,bool * first_element)901 find_and_update_named_uniform_storage(const struct gl_constants *consts,
902                                       struct gl_shader_program *prog,
903                                       struct nir_link_uniforms_state *state,
904                                       nir_variable *var, char **name,
905                                       size_t name_length,
906                                       const struct glsl_type *type,
907                                       unsigned stage, bool *first_element)
908 {
909    /* gl_uniform_storage can cope with one level of array, so if the type is a
910     * composite type or an array where each element occupies more than one
911     * location than we need to recursively process it.
912     */
913    if (glsl_type_is_struct_or_ifc(type) ||
914        (glsl_type_is_array(type) &&
915         (glsl_type_is_array(glsl_get_array_element(type)) ||
916          glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
917 
918       struct type_tree_entry *old_type = state->current_type;
919       state->current_type = old_type->children;
920 
921       /* Shader storage block unsized arrays: add subscript [0] to variable
922        * names.
923        */
924       unsigned length = glsl_get_length(type);
925       if (glsl_type_is_unsized_array(type))
926          length = 1;
927 
928       bool result = false;
929       for (unsigned i = 0; i < length; i++) {
930          const struct glsl_type *field_type;
931          size_t new_length = name_length;
932 
933          if (glsl_type_is_struct_or_ifc(type)) {
934             field_type = glsl_get_struct_field(type, i);
935 
936             /* Append '.field' to the current variable name. */
937             if (name) {
938                ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
939                                             glsl_get_struct_elem_name(type, i));
940             }
941          } else {
942             field_type = glsl_get_array_element(type);
943 
944             /* Append the subscript to the current variable name */
945             if (name)
946                ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
947          }
948 
949          result = find_and_update_named_uniform_storage(consts, prog, state,
950                                                         var, name, new_length,
951                                                         field_type, stage,
952                                                         first_element);
953 
954          if (glsl_type_is_struct_or_ifc(type))
955             state->current_type = state->current_type->next_sibling;
956 
957          if (!result) {
958             state->current_type = old_type;
959             return false;
960          }
961       }
962 
963       state->current_type = old_type;
964 
965       return result;
966    } else {
967       struct hash_entry *entry =
968          _mesa_hash_table_search(state->uniform_hash, *name);
969       if (entry) {
970          unsigned i = (unsigned) (intptr_t) entry->data;
971          struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
972 
973          if (*first_element && !state->var_is_in_block) {
974             *first_element = false;
975             var->data.location = uniform - prog->data->UniformStorage;
976          }
977 
978          update_uniforms_shader_info(prog, state, uniform, type, stage);
979 
980          const struct glsl_type *type_no_array = glsl_without_array(type);
981          struct hash_entry *entry = prog->data->spirv ? NULL :
982             _mesa_hash_table_search(state->referenced_uniforms[stage],
983                                     state->current_var->name);
984          if (entry != NULL ||
985              glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE ||
986              prog->data->spirv)
987             uniform->active_shader_mask |= 1 << stage;
988 
989          if (!state->var_is_in_block)
990             add_parameter(uniform, consts, prog, type, state);
991 
992          return true;
993       }
994    }
995 
996    return false;
997 }
998 
999 /**
1000  * Finds, returns, and updates the stage info for any uniform in UniformStorage
1001  * defined by @var. For GLSL this is done using the name, for SPIR-V in general
1002  * is this done using the explicit location, except:
1003  *
1004  * * UBOs/SSBOs: as they lack explicit location, binding is used to locate
1005  *   them. That means that more that one entry at the uniform storage can be
1006  *   found. In that case all of them are updated, and the first entry is
1007  *   returned, in order to update the location of the nir variable.
1008  *
1009  * * Special uniforms: like atomic counters. They lack a explicit location,
1010  *   so they are skipped. They will be handled and assigned a location later.
1011  *
1012  */
1013 static bool
find_and_update_previous_uniform_storage(const struct gl_constants * consts,struct gl_shader_program * prog,struct nir_link_uniforms_state * state,nir_variable * var,char * name,const struct glsl_type * type,unsigned stage)1014 find_and_update_previous_uniform_storage(const struct gl_constants *consts,
1015                                          struct gl_shader_program *prog,
1016                                          struct nir_link_uniforms_state *state,
1017                                          nir_variable *var, char *name,
1018                                          const struct glsl_type *type,
1019                                          unsigned stage)
1020 {
1021    if (!prog->data->spirv) {
1022       bool first_element = true;
1023       char *name_tmp = ralloc_strdup(NULL, name);
1024       bool r = find_and_update_named_uniform_storage(consts, prog, state, var,
1025                                                      &name_tmp,
1026                                                      strlen(name_tmp), type,
1027                                                      stage, &first_element);
1028       ralloc_free(name_tmp);
1029 
1030       return r;
1031    }
1032 
1033    if (nir_variable_is_in_block(var)) {
1034       struct gl_uniform_storage *uniform = NULL;
1035 
1036       ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
1037          prog->data->NumUniformBlocks :
1038          prog->data->NumShaderStorageBlocks;
1039 
1040       struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
1041          prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;
1042 
1043       bool result = false;
1044       for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1045          /* UniformStorage contains both variables from ubos and ssbos */
1046          if ( prog->data->UniformStorage[i].is_shader_storage !=
1047               nir_variable_is_in_ssbo(var))
1048             continue;
1049 
1050          int block_index = prog->data->UniformStorage[i].block_index;
1051          if (block_index != -1) {
1052             assert(block_index < num_blks);
1053 
1054             if (var->data.binding == blks[block_index].Binding) {
1055                if (!uniform)
1056                   uniform = &prog->data->UniformStorage[i];
1057                mark_stage_as_active(&prog->data->UniformStorage[i],
1058                                       stage);
1059                result = true;
1060             }
1061          }
1062       }
1063 
1064       if (result)
1065          var->data.location = uniform - prog->data->UniformStorage;
1066       return result;
1067    }
1068 
1069    /* Beyond blocks, there are still some corner cases of uniforms without
1070     * location (ie: atomic counters) that would have a initial location equal
1071     * to -1. We just return on that case. Those uniforms will be handled
1072     * later.
1073     */
1074    if (var->data.location == -1)
1075       return false;
1076 
1077    /* TODO: following search can be problematic with shaders with a lot of
1078     * uniforms. Would it be better to use some type of hash
1079     */
1080    for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
1081       if (prog->data->UniformStorage[i].remap_location == var->data.location) {
1082          mark_stage_as_active(&prog->data->UniformStorage[i], stage);
1083 
1084          struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
1085          var->data.location = uniform - prog->data->UniformStorage;
1086          add_parameter(uniform, consts, prog, var->type, state);
1087          return true;
1088       }
1089    }
1090 
1091    return false;
1092 }
1093 
1094 static struct type_tree_entry *
build_type_tree_for_type(const struct glsl_type * type)1095 build_type_tree_for_type(const struct glsl_type *type)
1096 {
1097    struct type_tree_entry *entry = malloc(sizeof *entry);
1098 
1099    entry->array_size = 1;
1100    entry->next_index = UINT_MAX;
1101    entry->children = NULL;
1102    entry->next_sibling = NULL;
1103    entry->parent = NULL;
1104 
1105    if (glsl_type_is_array(type)) {
1106       entry->array_size = glsl_get_length(type);
1107       entry->children = build_type_tree_for_type(glsl_get_array_element(type));
1108       entry->children->parent = entry;
1109    } else if (glsl_type_is_struct_or_ifc(type)) {
1110       struct type_tree_entry *last = NULL;
1111 
1112       for (unsigned i = 0; i < glsl_get_length(type); i++) {
1113          const struct glsl_type *field_type = glsl_get_struct_field(type, i);
1114          struct type_tree_entry *field_entry =
1115             build_type_tree_for_type(field_type);
1116 
1117          if (last == NULL)
1118             entry->children = field_entry;
1119          else
1120             last->next_sibling = field_entry;
1121 
1122          field_entry->parent = entry;
1123 
1124          last = field_entry;
1125       }
1126    }
1127 
1128    return entry;
1129 }
1130 
1131 static void
free_type_tree(struct type_tree_entry * entry)1132 free_type_tree(struct type_tree_entry *entry)
1133 {
1134    struct type_tree_entry *p, *next;
1135 
1136    for (p = entry->children; p; p = next) {
1137       next = p->next_sibling;
1138       free_type_tree(p);
1139    }
1140 
1141    free(entry);
1142 }
1143 
/* Hash-table destructor callback: the uniform_hash keys are strdup'd copies
 * of the uniform names (see the insert in nir_link_uniform), so free them.
 */
static void
hash_free_uniform_name(struct hash_entry *entry)
{
   free((void*)entry->key);
}
1149 
1150 static void
enter_record(struct nir_link_uniforms_state * state,const struct gl_constants * consts,const struct glsl_type * type,bool row_major)1151 enter_record(struct nir_link_uniforms_state *state,
1152              const struct gl_constants *consts,
1153              const struct glsl_type *type,
1154              bool row_major)
1155 {
1156    assert(glsl_type_is_struct(type));
1157    if (!state->var_is_in_block)
1158       return;
1159 
1160    bool use_std430 = consts->UseSTD430AsDefaultPacking;
1161    const enum glsl_interface_packing packing =
1162       glsl_get_internal_ifc_packing(state->current_var->interface_type,
1163                                     use_std430);
1164 
1165    if (packing == GLSL_INTERFACE_PACKING_STD430)
1166       state->offset = align(
1167          state->offset, glsl_get_std430_base_alignment(type, row_major));
1168    else
1169       state->offset = align(
1170          state->offset, glsl_get_std140_base_alignment(type, row_major));
1171 }
1172 
1173 static void
leave_record(struct nir_link_uniforms_state * state,const struct gl_constants * consts,const struct glsl_type * type,bool row_major)1174 leave_record(struct nir_link_uniforms_state *state,
1175              const struct gl_constants *consts,
1176              const struct glsl_type *type,
1177              bool row_major)
1178 {
1179    assert(glsl_type_is_struct(type));
1180    if (!state->var_is_in_block)
1181       return;
1182 
1183    bool use_std430 = consts->UseSTD430AsDefaultPacking;
1184    const enum glsl_interface_packing packing =
1185       glsl_get_internal_ifc_packing(state->current_var->interface_type,
1186                                     use_std430);
1187 
1188    if (packing == GLSL_INTERFACE_PACKING_STD430)
1189       state->offset = align(
1190          state->offset, glsl_get_std430_base_alignment(type, row_major));
1191    else
1192       state->offset = align(
1193          state->offset, glsl_get_std140_base_alignment(type, row_major));
1194 }
1195 
1196 /**
1197  * Creates the neccessary entries in UniformStorage for the uniform. Returns
1198  * the number of locations used or -1 on failure.
1199  */
1200 static int
nir_link_uniform(const struct gl_constants * consts,struct gl_shader_program * prog,struct gl_program * stage_program,gl_shader_stage stage,const struct glsl_type * type,unsigned index_in_parent,int location,struct nir_link_uniforms_state * state,char ** name,size_t name_length,bool row_major)1201 nir_link_uniform(const struct gl_constants *consts,
1202                  struct gl_shader_program *prog,
1203                  struct gl_program *stage_program,
1204                  gl_shader_stage stage,
1205                  const struct glsl_type *type,
1206                  unsigned index_in_parent,
1207                  int location,
1208                  struct nir_link_uniforms_state *state,
1209                  char **name, size_t name_length, bool row_major)
1210 {
1211    struct gl_uniform_storage *uniform = NULL;
1212 
1213    if (state->set_top_level_array &&
1214        nir_variable_is_in_ssbo(state->current_var)) {
1215       /* Type is the top level SSBO member */
1216       if (glsl_type_is_array(type) &&
1217           (glsl_type_is_array(glsl_get_array_element(type)) ||
1218            glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
1219          /* Type is a top-level array (array of aggregate types) */
1220          state->top_level_array_size = glsl_get_length(type);
1221          state->top_level_array_stride = glsl_get_explicit_stride(type);
1222       } else {
1223          state->top_level_array_size = 1;
1224          state->top_level_array_stride = 0;
1225       }
1226 
1227       state->set_top_level_array = false;
1228    }
1229 
1230    /* gl_uniform_storage can cope with one level of array, so if the type is a
1231     * composite type or an array where each element occupies more than one
1232     * location than we need to recursively process it.
1233     */
1234    if (glsl_type_is_struct_or_ifc(type) ||
1235        (glsl_type_is_array(type) &&
1236         (glsl_type_is_array(glsl_get_array_element(type)) ||
1237          glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
1238       int location_count = 0;
1239       struct type_tree_entry *old_type = state->current_type;
1240       unsigned int struct_base_offset = state->offset;
1241 
1242       state->current_type = old_type->children;
1243 
1244       /* Shader storage block unsized arrays: add subscript [0] to variable
1245        * names.
1246        */
1247       unsigned length = glsl_get_length(type);
1248       if (glsl_type_is_unsized_array(type))
1249          length = 1;
1250 
1251       if (glsl_type_is_struct(type) && !prog->data->spirv)
1252          enter_record(state, consts, type, row_major);
1253 
1254       for (unsigned i = 0; i < length; i++) {
1255          const struct glsl_type *field_type;
1256          size_t new_length = name_length;
1257          bool field_row_major = row_major;
1258 
1259          if (glsl_type_is_struct_or_ifc(type)) {
1260             field_type = glsl_get_struct_field(type, i);
1261             /* Use the offset inside the struct only for variables backed by
1262              * a buffer object. For variables not backed by a buffer object,
1263              * offset is -1.
1264              */
1265             if (state->var_is_in_block) {
1266                if (prog->data->spirv) {
1267                   state->offset =
1268                      struct_base_offset + glsl_get_struct_field_offset(type, i);
1269                } else if (glsl_get_struct_field_offset(type, i) != -1 &&
1270                           type == state->current_ifc_type) {
1271                   state->offset = glsl_get_struct_field_offset(type, i);
1272                }
1273 
1274                if (glsl_type_is_interface(type))
1275                   state->set_top_level_array = true;
1276             }
1277 
1278             /* Append '.field' to the current variable name. */
1279             if (name) {
1280                ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
1281                                             glsl_get_struct_elem_name(type, i));
1282             }
1283 
1284 
1285             /* The layout of structures at the top level of the block is set
1286              * during parsing.  For matrices contained in multiple levels of
1287              * structures in the block, the inner structures have no layout.
1288              * These cases must potentially inherit the layout from the outer
1289              * levels.
1290              */
1291             const enum glsl_matrix_layout matrix_layout =
1292                glsl_get_struct_field_data(type, i)->matrix_layout;
1293             if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
1294                field_row_major = true;
1295             } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
1296                field_row_major = false;
1297             }
1298          } else {
1299             field_type = glsl_get_array_element(type);
1300 
1301             /* Append the subscript to the current variable name */
1302             if (name)
1303                ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
1304          }
1305 
1306          int entries = nir_link_uniform(consts, prog, stage_program, stage,
1307                                         field_type, i, location,
1308                                         state, name, new_length,
1309                                         field_row_major);
1310 
1311          if (entries == -1)
1312             return -1;
1313 
1314          if (location != -1)
1315             location += entries;
1316          location_count += entries;
1317 
1318          if (glsl_type_is_struct_or_ifc(type))
1319             state->current_type = state->current_type->next_sibling;
1320       }
1321 
1322       if (glsl_type_is_struct(type) && !prog->data->spirv)
1323          leave_record(state, consts, type, row_major);
1324 
1325       state->current_type = old_type;
1326 
1327       return location_count;
1328    } else {
1329       /* TODO: reallocating storage is slow, we should figure out a way to
1330        * allocate storage up front for spirv like we do for GLSL.
1331        */
1332       if (prog->data->spirv) {
1333          /* Create a new uniform storage entry */
1334          prog->data->UniformStorage =
1335             reralloc(prog->data,
1336                      prog->data->UniformStorage,
1337                      struct gl_uniform_storage,
1338                      prog->data->NumUniformStorage + 1);
1339          if (!prog->data->UniformStorage) {
1340             linker_error(prog, "Out of memory during linking.\n");
1341             return -1;
1342          }
1343       }
1344 
1345       uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
1346       prog->data->NumUniformStorage++;
1347 
1348       /* Initialize its members */
1349       memset(uniform, 0x00, sizeof(struct gl_uniform_storage));
1350 
1351       uniform->name.string =
1352          name ? ralloc_strdup(prog->data->UniformStorage, *name) : NULL;
1353       resource_name_updated(&uniform->name);
1354 
1355       const struct glsl_type *type_no_array = glsl_without_array(type);
1356       if (glsl_type_is_array(type)) {
1357          uniform->type = type_no_array;
1358          uniform->array_elements = glsl_get_length(type);
1359       } else {
1360          uniform->type = type;
1361          uniform->array_elements = 0;
1362       }
1363       uniform->top_level_array_size = state->top_level_array_size;
1364       uniform->top_level_array_stride = state->top_level_array_stride;
1365 
1366       struct hash_entry *entry = prog->data->spirv ? NULL :
1367          _mesa_hash_table_search(state->referenced_uniforms[stage],
1368                                  state->current_var->name);
1369       if (entry != NULL ||
1370           glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE ||
1371           prog->data->spirv)
1372          uniform->active_shader_mask |= 1 << stage;
1373 
1374       if (location >= 0) {
1375          /* Uniform has an explicit location */
1376          uniform->remap_location = location;
1377       } else {
1378          uniform->remap_location = UNMAPPED_UNIFORM_LOC;
1379       }
1380 
1381       uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
1382       if (uniform->hidden)
1383          state->num_hidden_uniforms++;
1384 
1385       uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);
1386       uniform->is_bindless = state->current_var->data.bindless;
1387 
1388       /* Set fields whose default value depend on the variable being inside a
1389        * block.
1390        *
1391        * From the OpenGL 4.6 spec, 7.3 Program objects:
1392        *
1393        * "For the property ARRAY_STRIDE, ... For active variables not declared
1394        * as an array of basic types, zero is written to params. For active
1395        * variables not backed by a buffer object, -1 is written to params,
1396        * regardless of the variable type."
1397        *
1398        * "For the property MATRIX_STRIDE, ... For active variables not declared
1399        * as a matrix or array of matrices, zero is written to params. For active
1400        * variables not backed by a buffer object, -1 is written to params,
1401        * regardless of the variable type."
1402        *
1403        * For the property IS_ROW_MAJOR, ... For active variables backed by a
1404        * buffer object, declared as a single matrix or array of matrices, and
1405        * stored in row-major order, one is written to params. For all other
1406        * active variables, zero is written to params.
1407        */
1408       uniform->array_stride = -1;
1409       uniform->matrix_stride = -1;
1410       uniform->row_major = false;
1411 
1412       if (state->var_is_in_block) {
1413          uniform->array_stride = glsl_type_is_array(type) ?
1414             glsl_get_explicit_stride(type) : 0;
1415 
1416          if (glsl_type_is_matrix(uniform->type)) {
1417             uniform->matrix_stride = glsl_get_explicit_stride(uniform->type);
1418             uniform->row_major = glsl_matrix_type_is_row_major(uniform->type);
1419          } else {
1420             uniform->matrix_stride = 0;
1421          }
1422 
1423          if (!prog->data->spirv) {
1424             bool use_std430 = consts->UseSTD430AsDefaultPacking;
1425             const enum glsl_interface_packing packing =
1426                glsl_get_internal_ifc_packing(state->current_var->interface_type,
1427                                              use_std430);
1428 
1429             unsigned alignment =
1430                glsl_get_std140_base_alignment(type, uniform->row_major);
1431             if (packing == GLSL_INTERFACE_PACKING_STD430) {
1432                alignment =
1433                   glsl_get_std430_base_alignment(type, uniform->row_major);
1434             }
1435             state->offset = align(state->offset, alignment);
1436          }
1437       }
1438 
1439       uniform->offset = state->var_is_in_block ? state->offset : -1;
1440 
1441       int buffer_block_index = -1;
1442       /* If the uniform is inside a uniform block determine its block index by
1443        * comparing the bindings, we can not use names.
1444        */
1445       if (state->var_is_in_block) {
1446          struct gl_uniform_block *blocks = nir_variable_is_in_ssbo(state->current_var) ?
1447             prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
1448 
1449          int num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
1450             prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;
1451 
1452          if (!prog->data->spirv) {
1453             bool is_interface_array =
1454                glsl_without_array(state->current_var->type) == state->current_var->interface_type &&
1455                glsl_type_is_array(state->current_var->type);
1456 
1457             const char *ifc_name =
1458                glsl_get_type_name(state->current_var->interface_type);
1459             if (is_interface_array) {
1460                unsigned l = strlen(ifc_name);
1461                for (unsigned i = 0; i < num_blocks; i++) {
1462                   if (strncmp(ifc_name, blocks[i].name.string, l) == 0 &&
1463                       blocks[i].name.string[l] == '[') {
1464                      buffer_block_index = i;
1465                      break;
1466                   }
1467                }
1468             } else {
1469                for (unsigned i = 0; i < num_blocks; i++) {
1470                   if (strcmp(ifc_name, blocks[i].name.string) == 0) {
1471                      buffer_block_index = i;
1472                      break;
1473                   }
1474                }
1475             }
1476 
1477             /* Compute the next offset. */
1478             bool use_std430 = consts->UseSTD430AsDefaultPacking;
1479             const enum glsl_interface_packing packing =
1480                glsl_get_internal_ifc_packing(state->current_var->interface_type,
1481                                              use_std430);
1482             if (packing == GLSL_INTERFACE_PACKING_STD430)
1483                state->offset += glsl_get_std430_size(type, uniform->row_major);
1484             else
1485                state->offset += glsl_get_std140_size(type, uniform->row_major);
1486          } else {
1487             for (unsigned i = 0; i < num_blocks; i++) {
1488                if (state->current_var->data.binding == blocks[i].Binding) {
1489                   buffer_block_index = i;
1490                   break;
1491                }
1492             }
1493 
1494             /* Compute the next offset. */
1495             state->offset += glsl_get_explicit_size(type, true);
1496          }
1497          assert(buffer_block_index >= 0);
1498       }
1499 
1500       uniform->block_index = buffer_block_index;
1501       uniform->builtin = is_gl_identifier(uniform->name.string);
1502       uniform->atomic_buffer_index = -1;
1503 
1504       /* The following are not for features not supported by ARB_gl_spirv */
1505       uniform->num_compatible_subroutines = 0;
1506 
1507       unsigned entries = MAX2(1, uniform->array_elements);
1508       unsigned values = glsl_get_component_slots(type);
1509 
1510       update_uniforms_shader_info(prog, state, uniform, type, stage);
1511 
1512       if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
1513           state->max_uniform_location < uniform->remap_location + entries)
1514          state->max_uniform_location = uniform->remap_location + entries;
1515 
1516       if (!state->var_is_in_block)
1517          add_parameter(uniform, consts, prog, type, state);
1518 
1519       if (name) {
1520          _mesa_hash_table_insert(state->uniform_hash, strdup(*name),
1521                                  (void *) (intptr_t)
1522                                     (prog->data->NumUniformStorage - 1));
1523       }
1524 
1525       if (!is_gl_identifier(uniform->name.string) && !uniform->is_shader_storage &&
1526           !state->var_is_in_block)
1527          state->num_values += values;
1528 
1529       return MAX2(uniform->array_elements, 1);
1530    }
1531 }
1532 
/**
 * Link the uniforms of all linked shader stages of \p prog, (re)building
 * prog->data->UniformStorage and the per-stage sampler/image/uniform
 * bookkeeping.
 *
 * Handles both compilation paths: GLSL (!prog->data->spirv), where blocks
 * and members are matched by name, and SPIR-V (ARB_gl_spirv), where blocks
 * are matched by binding.
 *
 * \param consts          Driver constants (packing defaults, per-stage
 *                        sampler/image limits, DisableUniformArrayResize).
 * \param prog            The shader program being linked; its data->
 *                        UniformStorage is freed and rebuilt.
 * \param fill_parameters When true, each stage's gl_program Parameters list
 *                        is populated while uniforms are processed (via
 *                        state.params).
 *
 * \return false on out-of-memory or when nir_link_uniform() fails for a
 *         variable; true otherwise.  Note that exceeding the sampler/image
 *         limits records a linker_error() but still returns true here.
 */
bool
gl_nir_link_uniforms(const struct gl_constants *consts,
                     struct gl_shader_program *prog,
                     bool fill_parameters)
{
   /* First free up any previous UniformStorage items */
   ralloc_free(prog->data->UniformStorage);
   prog->data->UniformStorage = NULL;
   prog->data->NumUniformStorage = 0;

   /* Iterate through all linked shaders */
   struct nir_link_uniforms_state state = {0,};

   if (!prog->data->spirv) {
      /* Gather information on uniform use: one hash table per stage mapping
       * uniform name -> uniform_array_info, filled by scanning the stage's
       * NIR for dereferences.
       */
      for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
         if (!sh)
            continue;

         state.referenced_uniforms[stage] =
            _mesa_hash_table_create(NULL, _mesa_hash_string,
                                    _mesa_key_string_equal);

         nir_shader *nir = sh->Program->nir;
         add_var_use_shader(nir, state.referenced_uniforms[stage]);
      }

      if(!consts->DisableUniformArrayResize) {
         /* Resize uniform arrays based on the maximum array index */
         for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
            struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
            if (!sh)
               continue;

            nir_foreach_gl_uniform_variable(var, sh->Program->nir)
               update_array_sizes(prog, var, state.referenced_uniforms, stage);
         }
      }
   }

   /* Count total number of uniforms and allocate storage.  GLSL path only;
    * the SPIR-V path grows storage elsewhere.  A name set deduplicates
    * uniforms that appear in several stages so each is counted once.
    */
   unsigned storage_size = 0;
   if (!prog->data->spirv) {
      struct set *storage_counted =
         _mesa_set_create(NULL, _mesa_hash_string, _mesa_key_string_equal);
      for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
         if (!sh)
            continue;

         nir_foreach_gl_uniform_variable(var, sh->Program->nir) {
            const struct glsl_type *type = var->type;
            const char *name = var->name;
            /* An interface-block instance (or array of instances) is counted
             * once under its block type name, matching the enumeration rules
             * applied in the main loop below.
             */
            if (nir_variable_is_in_block(var) &&
                glsl_without_array(type) == var->interface_type) {
               type = glsl_without_array(var->type);
               name = glsl_get_type_name(type);
            }

            struct set_entry *entry = _mesa_set_search(storage_counted, name);
            if (!entry) {
               storage_size += uniform_storage_size(type);
               _mesa_set_add(storage_counted, name);
            }
         }
      }
      _mesa_set_destroy(storage_counted, NULL);

      prog->data->UniformStorage = rzalloc_array(prog->data,
                                                 struct gl_uniform_storage,
                                                 storage_size);
      if (!prog->data->UniformStorage) {
         linker_error(prog, "Out of memory while linking uniforms.\n");
         return false;
      }
   }

   /* Iterate through all linked shaders.  uniform_hash maps a processed
    * uniform's name to its index in UniformStorage, so a uniform seen again
    * in a later stage reuses the same storage slot.
    */
   state.uniform_hash = _mesa_hash_table_create(NULL, _mesa_hash_string,
                                                _mesa_key_string_equal);

   for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
      if (!sh)
         continue;

      nir_shader *nir = sh->Program->nir;
      assert(nir);

      /* Reset the per-stage counters; the per-program fields of state
       * (uniform_hash, num_values, max_uniform_location, ...) accumulate
       * across stages.
       */
      state.next_bindless_image_index = 0;
      state.next_bindless_sampler_index = 0;
      state.next_image_index = 0;
      state.next_sampler_index = 0;
      state.num_shader_samplers = 0;
      state.num_shader_images = 0;
      state.num_shader_uniform_components = 0;
      state.shader_storage_blocks_write_access = 0;
      state.shader_samplers_used = 0;
      state.shader_shadow_samplers = 0;
      state.params = fill_parameters ? sh->Program->Parameters : NULL;

      nir_foreach_gl_uniform_variable(var, nir) {
         state.current_var = var;
         state.current_ifc_type = NULL;
         state.offset = 0;
         state.var_is_in_block = nir_variable_is_in_block(var);
         state.set_top_level_array = false;
         state.top_level_array_size = 0;
         state.top_level_array_stride = 0;

         /*
          * From ARB_program_interface spec, issue (16):
          *
          * "RESOLVED: We will follow the default rule for enumerating block
          *  members in the OpenGL API, which is:
          *
          *  * If a variable is a member of an interface block without an
          *    instance name, it is enumerated using just the variable name.
          *
          *  * If a variable is a member of an interface block with an
          *    instance name, it is enumerated as "BlockName.Member", where
          *    "BlockName" is the name of the interface block (not the
          *    instance name) and "Member" is the name of the variable.
          *
          * For example, in the following code:
          *
          * uniform Block1 {
          *   int member1;
          * };
          * uniform Block2 {
          *   int member2;
          * } instance2;
          * uniform Block3 {
          *  int member3;
          * } instance3[2];  // uses two separate buffer bindings
          *
          * the three uniforms (if active) are enumerated as "member1",
          * "Block2.member2", and "Block3.member3"."
          *
          * Note that in the last example, with an array of ubo, only one
          * uniform is generated. For that reason, while unrolling the
          * uniforms of a ubo, or the variables of a ssbo, we need to treat
          * arrays of instance as a single block.
          */
         char *name;
         const struct glsl_type *type = var->type;
         if (state.var_is_in_block &&
             ((!prog->data->spirv && glsl_without_array(type) == var->interface_type) ||
              (prog->data->spirv && type == var->interface_type))) {
            /* Block instance (possibly an array of instances): process it as
             * a single block named after the interface type.
             */
            type = glsl_without_array(var->type);
            state.current_ifc_type = type;
            name = ralloc_strdup(NULL, glsl_get_type_name(type));
         } else {
            state.set_top_level_array = true;
            name = ralloc_strdup(NULL, var->name);
         }

         struct type_tree_entry *type_tree =
            build_type_tree_for_type(type);
         state.current_type = type_tree;

         int location = var->data.location;

         struct gl_uniform_block *blocks = NULL;
         int num_blocks = 0;
         int buffer_block_index = -1;
         bool is_interface_array = false;
         if (state.var_is_in_block) {
            /* If the uniform is inside a uniform block determine its block index by
             * comparing the bindings, we can not use names.
             */
            blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
            num_blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

            is_interface_array =
               glsl_without_array(state.current_var->type) == state.current_var->interface_type &&
               glsl_type_is_array(state.current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state.current_var->interface_type);

            if (is_interface_array && !prog->data->spirv) {
               /* GLSL array of block instances: program-level blocks are
                * named "Block[0]", "Block[1]", ... so match on the
                * "Block" prefix followed by '['.
                */
               unsigned l = strlen(ifc_name);

               /* Even when a match is found, do not "break" here.  As this is
                * an array of instances, all elements of the array need to be
                * marked as referenced.
                */
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].name.string, l) == 0 &&
                      blocks[i].name.string[l] == '[') {
                     if (buffer_block_index == -1)
                        buffer_block_index = i;

                     /* Only array elements actually dereferenced in this
                      * stage (per the bitset gathered earlier) get the
                      * stage reference bit.
                      */
                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms[shader_type],
                                                var->name);
                     if (entry) {
                        struct uniform_array_info *ainfo =
                           (struct uniform_array_info *) entry->data;
                        if (BITSET_TEST(ainfo->indices, blocks[i].linearized_array_index))
                           blocks[i].stageref |= 1U << shader_type;
                     }
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  /* GLSL matches blocks by name, SPIR-V by binding. */
                  bool match = false;
                  if (!prog->data->spirv) {
                     match = strcmp(ifc_name, blocks[i].name.string) == 0;
                  } else {
                     match = var->data.binding == blocks[i].Binding;
                  }
                  if (match) {
                     buffer_block_index = i;

                     if (!prog->data->spirv) {
                        struct hash_entry *entry =
                           _mesa_hash_table_search(state.referenced_uniforms[shader_type],
                                                   var->name);
                        if (entry)
                           blocks[i].stageref |= 1U << shader_type;
                     }

                     break;
                  }
               }
            }
         }

         /* Track which SSBO bindings this stage may write, for
          * ShaderStorageBlocksWriteAccess below.
          */
         if (nir_variable_is_in_ssbo(var) &&
             !(var->data.access & ACCESS_NON_WRITEABLE)) {
            unsigned array_size = is_interface_array ?
               glsl_get_length(var->type) : 1;

            STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);

            /* Buffers from each stage are pointers to the one stored in the program. We need
             * to account for this before computing the mask below otherwise the mask will be
             * incorrect.
             *    sh->Program->sh.SSBlocks: [a][b][c][d][e][f]
             *    VS prog->data->SSBlocks : [a][b][c]
             *    FS prog->data->SSBlocks : [d][e][f]
             * eg for FS buffer 1, buffer_block_index will be 4 but sh_block_index will be 1.
             */
            int base = 0;
            base = sh->Program->sh.ShaderStorageBlocks[0] - prog->data->ShaderStorageBlocks;

            assert(base >= 0);

            int sh_block_index = buffer_block_index - base;
            /* Shaders that use too many SSBOs will fail to compile, which
             * we don't care about.
             *
             * This is true for shaders that do not use too many SSBOs:
             */
            if (sh_block_index + array_size <= 32) {
               state.shader_storage_blocks_write_access |=
                  u_bit_consecutive(sh_block_index, array_size);
            }
         }

         /* GLSL path: translate the variable into (block index, member
          * index) and seed state.offset with the member's byte offset
          * inside the block.
          */
         if (blocks && !prog->data->spirv && state.var_is_in_block) {
            if (glsl_without_array(state.current_var->type) != state.current_var->interface_type) {
               /* this is nested at some offset inside the block */
               bool found = false;
               char sentinel = '\0';

               /* Block member names are stored fully qualified
                * ("var.field", "var[0]...").  Pick the separator that
                * ends the top-level variable name so we can compare just
                * that prefix.
                */
               if (glsl_type_is_struct(state.current_var->type)) {
                  sentinel = '.';
               } else if (glsl_type_is_array(state.current_var->type) &&
                          (glsl_type_is_array(glsl_get_array_element(state.current_var->type))
                           || glsl_type_is_struct(glsl_without_array(state.current_var->type)))) {
                 sentinel = '[';
               }

               const unsigned l = strlen(state.current_var->name);
               for (unsigned i = 0; i < num_blocks; i++) {
                  for (unsigned j = 0; j < blocks[i].NumUniforms; j++) {
                    if (sentinel) {
                        const char *begin = blocks[i].Uniforms[j].Name;
                        const char *end = strchr(begin, sentinel);

                        if (end == NULL)
                           continue;

                        /* Prefix lengths must match exactly so "foo" does
                         * not match "foobar.x".
                         */
                        if ((ptrdiff_t) l != (end - begin))
                           continue;
                        found = strncmp(state.current_var->name, begin, l) == 0;
                     } else {
                        found = strcmp(state.current_var->name, blocks[i].Uniforms[j].Name) == 0;
                     }

                     if (found) {
                        location = j;

                        struct hash_entry *entry =
                           _mesa_hash_table_search(state.referenced_uniforms[shader_type], var->name);
                        if (entry)
                           blocks[i].stageref |= 1U << shader_type;

                        break;
                     }
                  }

                  if (found)
                     break;
               }
               assert(found);
               var->data.location = location;
            } else {
               /* this is the base block offset */
               var->data.location = buffer_block_index;
               location = 0;
            }
            assert(buffer_block_index >= 0);
            const struct gl_uniform_block *const block =
               &blocks[buffer_block_index];
            assert(location >= 0 && location < block->NumUniforms);

            const struct gl_uniform_buffer_variable *const ubo_var =
               &block->Uniforms[location];

            state.offset = ubo_var->Offset;
         }

         /* Check if the uniform has been processed already for
          * other stage. If so, validate they are compatible and update
          * the active stage mask.
          */
         if (find_and_update_previous_uniform_storage(consts, prog, &state, var,
                                                      name, type, shader_type)) {
            ralloc_free(name);
            free_type_tree(type_tree);
            continue;
         }

         /* From now on the variable’s location will be its uniform index */
         if (!state.var_is_in_block)
            var->data.location = prog->data->NumUniformStorage;
         else
            location = -1;

         bool row_major =
            var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
         /* Recursively create the UniformStorage entries for this variable.
          * The name pointer is only threaded through (and mutated) on the
          * GLSL path; SPIR-V uniforms are nameless here.
          */
         int res = nir_link_uniform(consts, prog, sh->Program, shader_type, type,
                                    0, location,
                                    &state,
                                    !prog->data->spirv ? &name : NULL,
                                    !prog->data->spirv ? strlen(name) : 0,
                                    row_major);

         free_type_tree(type_tree);
         ralloc_free(name);

         if (res == -1)
            return false;
      }

      if (!prog->data->spirv) {
         _mesa_hash_table_destroy(state.referenced_uniforms[shader_type],
                                  NULL);
      }

      /* Validate per-stage resource limits; on failure record the error and
       * move on to the next stage without publishing this stage's counts.
       */
      if (state.num_shader_samplers >
          consts->Program[shader_type].MaxTextureImageUnits) {
         linker_error(prog, "Too many %s shader texture samplers\n",
                      _mesa_shader_stage_to_string(shader_type));
         continue;
      }

      if (state.num_shader_images >
          consts->Program[shader_type].MaxImageUniforms) {
         linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
                      _mesa_shader_stage_to_string(shader_type),
                      state.num_shader_images,
                      consts->Program[shader_type].MaxImageUniforms);
         continue;
      }

      /* Publish the per-stage results gathered above. */
      sh->Program->SamplersUsed = state.shader_samplers_used;
      sh->Program->sh.ShaderStorageBlocksWriteAccess =
         state.shader_storage_blocks_write_access;
      sh->shadow_samplers = state.shader_shadow_samplers;
      sh->Program->info.num_textures = state.num_shader_samplers;
      sh->Program->info.num_images = state.num_shader_images;
      sh->num_uniform_components = state.num_shader_uniform_components;
      sh->num_combined_uniform_components = sh->num_uniform_components;
   }

   prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
   prog->data->NumUniformDataSlots = state.num_values;

   /* On the GLSL path the pre-pass count must agree with what was created. */
   assert(prog->data->spirv || prog->data->NumUniformStorage == storage_size);

   if (prog->data->spirv)
      prog->NumUniformRemapTable = state.max_uniform_location;

   nir_setup_uniform_remap_tables(consts, prog);
   gl_nir_set_uniform_initializers(consts, prog);

   _mesa_hash_table_destroy(state.uniform_hash, hash_free_uniform_name);

   return true;
}
1941