1 /*
2  * Copyright © 2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * \file linker.cpp
26  * GLSL linker implementation
27  *
28  * Given a set of shaders that are to be linked to generate a final program,
29  * there are three distinct stages.
30  *
31  * In the first stage shaders are partitioned into groups based on the shader
32  * type.  All shaders of a particular type (e.g., vertex shaders) are linked
33  * together.
34  *
35  *   - Undefined references in each shader are resolved to definitions in
36  *     another shader.
37  *   - Types and qualifiers of uniforms, outputs, and global variables defined
38  *     in multiple shaders with the same name are verified to be the same.
39  *   - Initializers for uniforms and global variables defined
40  *     in multiple shaders with the same name are verified to be the same.
41  *
42  * The result, in the terminology of the GLSL spec, is a set of shader
43  * executables for each processing unit.
44  *
45  * After the first stage is complete, a series of semantic checks are performed
46  * on each of the shader executables.
47  *
48  *   - Each shader executable must define a \c main function.
49  *   - Each vertex shader executable must write to \c gl_Position.
50  *   - Each fragment shader executable must write to either \c gl_FragData or
51  *     \c gl_FragColor.
52  *
53  * In the final stage individual shader executables are linked to create a
54  * complete executable.
55  *
56  *   - Types of uniforms defined in multiple shader stages with the same name
57  *     are verified to be the same.
58  *   - Initializers for uniforms defined in multiple shader stages with the
59  *     same name are verified to be the same.
60  *   - Types and qualifiers of outputs defined in one stage are verified to
61  *     be the same as the types and qualifiers of inputs defined with the same
62  *     name in a later stage.
63  *
64  * \author Ian Romanick <ian.d.romanick@intel.com>
65  */
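/* Illustrative example (not part of the original source): given two vertex
 * shader compilation units such as
 *
 *    // a.vert                        // b.vert
 *    uniform mat4 mvp;                uniform mat4 mvp;
 *    vec3 xform(vec3 p);              vec3 xform(vec3 p) { ... }
 *    void main() { ... }
 *
 * the first stage links a.vert and b.vert into a single vertex shader
 * executable, resolving the call to xform() and checking that both
 * declarations of `mvp' agree.  The second stage verifies that the
 * executable defines main() and writes gl_Position, and the final stage
 * matches its outputs against the inputs of the next stage.
 */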
66 
67 #include <ctype.h>
68 #include "util/strndup.h"
69 #include "main/core.h"
70 #include "glsl_symbol_table.h"
71 #include "glsl_parser_extras.h"
72 #include "ir.h"
73 #include "program.h"
74 #include "program/prog_instruction.h"
75 #include "program/program.h"
76 #include "util/set.h"
77 #include "util/string_to_uint_map.h"
78 #include "linker.h"
79 #include "link_varyings.h"
80 #include "ir_optimization.h"
81 #include "ir_rvalue_visitor.h"
82 #include "ir_uniform.h"
83 
84 #include "main/shaderobj.h"
85 #include "main/enums.h"
86 
87 
88 namespace {
89 
90 /**
91  * Visitor that determines whether or not a variable is ever written.
92  */
93 class find_assignment_visitor : public ir_hierarchical_visitor {
94 public:
95    find_assignment_visitor(const char *name)
96       : name(name), found(false)
97    {
98       /* empty */
99    }
100 
101    virtual ir_visitor_status visit_enter(ir_assignment *ir)
102    {
103       ir_variable *const var = ir->lhs->variable_referenced();
104 
105       if (strcmp(name, var->name) == 0) {
106          found = true;
107          return visit_stop;
108       }
109 
110       return visit_continue_with_parent;
111    }
112 
113    virtual ir_visitor_status visit_enter(ir_call *ir)
114    {
115       foreach_two_lists(formal_node, &ir->callee->parameters,
116                         actual_node, &ir->actual_parameters) {
117          ir_rvalue *param_rval = (ir_rvalue *) actual_node;
118          ir_variable *sig_param = (ir_variable *) formal_node;
119 
120          if (sig_param->data.mode == ir_var_function_out ||
121              sig_param->data.mode == ir_var_function_inout) {
122             ir_variable *var = param_rval->variable_referenced();
123             if (var && strcmp(name, var->name) == 0) {
124                found = true;
125                return visit_stop;
126             }
127          }
128       }
129 
130       if (ir->return_deref != NULL) {
131          ir_variable *const var = ir->return_deref->variable_referenced();
132 
133          if (strcmp(name, var->name) == 0) {
134             found = true;
135             return visit_stop;
136          }
137       }
138 
139       return visit_continue_with_parent;
140    }
141 
142    bool variable_found()
143    {
144       return found;
145    }
146 
147 private:
148    const char *name;       /**< Find writes to a variable with this name. */
149    bool found;             /**< Was a write to the variable found? */
150 };
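/* Usage sketch (illustrative only; it mirrors how this visitor is used by
 * validate_vertex_shader_executable() later in this file).  The #if 0 block
 * is not compiled.
 */
#if 0
find_assignment_visitor find("gl_Position");
find.run(shader->ir);                  /* `shader' is a gl_linked_shader */
if (!find.variable_found())
   linker_error(prog, "vertex shader does not write to `gl_Position'\n");
#endif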
151 
152 
153 /**
154  * Visitor that determines whether or not a variable is ever read.
155  */
156 class find_deref_visitor : public ir_hierarchical_visitor {
157 public:
158    find_deref_visitor(const char *name)
159       : name(name), found(false)
160    {
161       /* empty */
162    }
163 
164    virtual ir_visitor_status visit(ir_dereference_variable *ir)
165    {
166       if (strcmp(this->name, ir->var->name) == 0) {
167          this->found = true;
168          return visit_stop;
169       }
170 
171       return visit_continue;
172    }
173 
174    bool variable_found() const
175    {
176       return this->found;
177    }
178 
179 private:
180    const char *name;       /**< Find reads of a variable with this name. */
181    bool found;             /**< Was a read of the variable found? */
182 };
183 
184 
185 /**
186  * A visitor helper that provides methods for updating the types of
187  * ir_dereferences.  Classes that update variable types (say, updating
188  * array sizes) will want to use this so that dereference types stay in sync.
189  */
190 class deref_type_updater : public ir_hierarchical_visitor {
191 public:
192    virtual ir_visitor_status visit(ir_dereference_variable *ir)
193    {
194       ir->type = ir->var->type;
195       return visit_continue;
196    }
197 
198    virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
199    {
200       const glsl_type *const vt = ir->array->type;
201       if (vt->is_array())
202          ir->type = vt->fields.array;
203       return visit_continue;
204    }
205 
206    virtual ir_visitor_status visit_leave(ir_dereference_record *ir)
207    {
208       for (unsigned i = 0; i < ir->record->type->length; i++) {
209          const struct glsl_struct_field *field =
210             &ir->record->type->fields.structure[i];
211          if (strcmp(field->name, ir->field) == 0) {
212             ir->type = field->type;
213             break;
214          }
215       }
216       return visit_continue;
217    }
218 };
219 
220 
221 class array_resize_visitor : public deref_type_updater {
222 public:
223    unsigned num_vertices;
224    gl_shader_program *prog;
225    gl_shader_stage stage;
226 
227    array_resize_visitor(unsigned num_vertices,
228                         gl_shader_program *prog,
229                         gl_shader_stage stage)
230    {
231       this->num_vertices = num_vertices;
232       this->prog = prog;
233       this->stage = stage;
234    }
235 
236    virtual ~array_resize_visitor()
237    {
238       /* empty */
239    }
240 
241    virtual ir_visitor_status visit(ir_variable *var)
242    {
243       if (!var->type->is_array() || var->data.mode != ir_var_shader_in ||
244           var->data.patch)
245          return visit_continue;
246 
247       unsigned size = var->type->length;
248 
249       if (stage == MESA_SHADER_GEOMETRY) {
250          /* Generate a link error if the shader has declared this array with
251           * an incorrect size.
252           */
253          if (!var->data.implicit_sized_array &&
254              size && size != this->num_vertices) {
255             linker_error(this->prog, "size of array %s declared as %u, "
256                          "but number of input vertices is %u\n",
257                          var->name, size, this->num_vertices);
258             return visit_continue;
259          }
260 
261          /* Generate a link error if the shader attempts to access an input
262           * array using an index too large for its actual size assigned at
263           * link time.
264           */
265          if (var->data.max_array_access >= (int)this->num_vertices) {
266             linker_error(this->prog, "%s shader accesses element %i of "
267                          "%s, but only %i input vertices\n",
268                          _mesa_shader_stage_to_string(this->stage),
269                          var->data.max_array_access, var->name, this->num_vertices);
270             return visit_continue;
271          }
272       }
273 
274       var->type = glsl_type::get_array_instance(var->type->fields.array,
275                                                 this->num_vertices);
276       var->data.max_array_access = this->num_vertices - 1;
277 
278       return visit_continue;
279    }
280 };
281 
282 /**
283  * Visitor that determines the highest stream id to which a (geometry) shader
284  * emits vertices. It also checks whether End{Stream}Primitive is ever called.
285  */
286 class find_emit_vertex_visitor : public ir_hierarchical_visitor {
287 public:
288    find_emit_vertex_visitor(int max_allowed)
289       : max_stream_allowed(max_allowed),
290         invalid_stream_id(0),
291         invalid_stream_id_from_emit_vertex(false),
292         end_primitive_found(false),
293         uses_non_zero_stream(false)
294    {
295       /* empty */
296    }
297 
298    virtual ir_visitor_status visit_leave(ir_emit_vertex *ir)
299    {
300       int stream_id = ir->stream_id();
301 
302       if (stream_id < 0) {
303          invalid_stream_id = stream_id;
304          invalid_stream_id_from_emit_vertex = true;
305          return visit_stop;
306       }
307 
308       if (stream_id > max_stream_allowed) {
309          invalid_stream_id = stream_id;
310          invalid_stream_id_from_emit_vertex = true;
311          return visit_stop;
312       }
313 
314       if (stream_id != 0)
315          uses_non_zero_stream = true;
316 
317       return visit_continue;
318    }
319 
320    virtual ir_visitor_status visit_leave(ir_end_primitive *ir)
321    {
322       end_primitive_found = true;
323 
324       int stream_id = ir->stream_id();
325 
326       if (stream_id < 0) {
327          invalid_stream_id = stream_id;
328          invalid_stream_id_from_emit_vertex = false;
329          return visit_stop;
330       }
331 
332       if (stream_id > max_stream_allowed) {
333          invalid_stream_id = stream_id;
334          invalid_stream_id_from_emit_vertex = false;
335          return visit_stop;
336       }
337 
338       if (stream_id != 0)
339          uses_non_zero_stream = true;
340 
341       return visit_continue;
342    }
343 
344    bool error()
345    {
346       return invalid_stream_id != 0;
347    }
348 
349    const char *error_func()
350    {
351       return invalid_stream_id_from_emit_vertex ?
352          "EmitStreamVertex" : "EndStreamPrimitive";
353    }
354 
355    int error_stream()
356    {
357       return invalid_stream_id;
358    }
359 
360    bool uses_streams()
361    {
362       return uses_non_zero_stream;
363    }
364 
365    bool uses_end_primitive()
366    {
367       return end_primitive_found;
368    }
369 
370 private:
371    int max_stream_allowed;
372    int invalid_stream_id;
373    bool invalid_stream_id_from_emit_vertex;
374    bool end_primitive_found;
375    bool uses_non_zero_stream;
376 };
377 
378 /* Class that finds array derefs and checks whether their indexes are dynamic. */
379 class dynamic_sampler_array_indexing_visitor : public ir_hierarchical_visitor
380 {
381 public:
382    dynamic_sampler_array_indexing_visitor() :
383       dynamic_sampler_array_indexing(false)
384    {
385    }
386 
387    ir_visitor_status visit_enter(ir_dereference_array *ir)
388    {
389       if (!ir->variable_referenced())
390          return visit_continue;
391 
392       if (!ir->variable_referenced()->type->contains_sampler())
393          return visit_continue;
394 
395       if (!ir->array_index->constant_expression_value()) {
396          dynamic_sampler_array_indexing = true;
397          return visit_stop;
398       }
399       return visit_continue;
400    }
401 
402    bool uses_dynamic_sampler_array_indexing()
403    {
404       return dynamic_sampler_array_indexing;
405    }
406 
407 private:
408    bool dynamic_sampler_array_indexing;
409 };
410 
411 } /* anonymous namespace */
412 
413 void
414 linker_error(gl_shader_program *prog, const char *fmt, ...)
415 {
416    va_list ap;
417 
418    ralloc_strcat(&prog->data->InfoLog, "error: ");
419    va_start(ap, fmt);
420    ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
421    va_end(ap);
422 
423    prog->data->LinkStatus = false;
424 }
425 
426 
427 void
428 linker_warning(gl_shader_program *prog, const char *fmt, ...)
429 {
430    va_list ap;
431 
432    ralloc_strcat(&prog->data->InfoLog, "warning: ");
433    va_start(ap, fmt);
434    ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
435    va_end(ap);
436 
437 }
438 
439 
440 /**
441  * Given a string identifying a program resource, break it into a base name
442  * and an optional array index in square brackets.
443  *
444  * If an array index is present, \c out_base_name_end is set to point to the
445  * "[" that precedes the array index, and the array index itself is returned
446  * as a long.
447  *
448  * If no array index is present (or if the array index is negative or
449  * malformed), \c out_base_name_end is set to point to the null terminator
450  * at the end of the input string, and -1 is returned.
451  *
452  * Only the final array index is parsed; if the string contains other array
453  * indices (or structure field accesses), they are left in the base name.
454  *
455  * No attempt is made to check that the base name is properly formed;
456  * typically the caller will look up the base name in a hash table, so
457  * ill-formed base names simply turn into hash table lookup failures.
458  */
459 long
460 parse_program_resource_name(const GLchar *name,
461                             const GLchar **out_base_name_end)
462 {
463    /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
464     *
465     *     "When an integer array element or block instance number is part of
466     *     the name string, it will be specified in decimal form without a "+"
467     *     or "-" sign or any extra leading zeroes. Additionally, the name
468     *     string will not include white space anywhere in the string."
469     */
470 
471    const size_t len = strlen(name);
472    *out_base_name_end = name + len;
473 
474    if (len == 0 || name[len-1] != ']')
475       return -1;
476 
477    /* Walk backwards over the string looking for a non-digit character.  This
478     * had better be the opening bracket for an array index.
479     *
480     * Initially, i specifies the location of the ']'.  Since the string may
481     * contain only the ']' character, walk backwards very carefully.
482     */
483    unsigned i;
484    for (i = len - 1; (i > 0) && isdigit(name[i-1]); --i)
485       /* empty */ ;
486 
487    if ((i == 0) || name[i-1] != '[')
488       return -1;
489 
490    long array_index = strtol(&name[i], NULL, 10);
491    if (array_index < 0)
492       return -1;
493 
494    /* Check for leading zero */
495    if (name[i] == '0' && name[i+1] != ']')
496       return -1;
497 
498    *out_base_name_end = name + (i - 1);
499    return array_index;
500 }
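/* Usage sketch (illustrative only; the #if 0 block is not compiled).  For
 * "lights[2]" the function returns 2 and the base-name end points at the
 * '['; for "lights" it returns -1 and the end points at the '\0'.
 */
#if 0
const GLchar *end;
long idx = parse_program_resource_name("lights[2]", &end);
/* idx == 2, end points at the '[' */
idx = parse_program_resource_name("lights", &end);
/* idx == -1, end points at the terminating '\0' */
#endif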
501 
502 
503 void
504 link_invalidate_variable_locations(exec_list *ir)
505 {
506    foreach_in_list(ir_instruction, node, ir) {
507       ir_variable *const var = node->as_variable();
508 
509       if (var == NULL)
510          continue;
511 
512       /* Only assign locations for variables that lack an explicit location.
513        * Explicit locations are set for all built-in variables, generic vertex
514        * shader inputs (via layout(location=...)), and generic fragment shader
515        * outputs (also via layout(location=...)).
516        */
517       if (!var->data.explicit_location) {
518          var->data.location = -1;
519          var->data.location_frac = 0;
520       }
521 
522       /* ir_variable::is_unmatched_generic_inout is used by the linker while
523        * connecting outputs from one stage to inputs of the next stage.
524        */
525       if (var->data.explicit_location &&
526           var->data.location < VARYING_SLOT_VAR0) {
527          var->data.is_unmatched_generic_inout = 0;
528       } else {
529          var->data.is_unmatched_generic_inout = 1;
530       }
531    }
532 }
533 
534 
535 /**
536  * Set clip_distance_array_size and cull_distance_array_size based on the given
537  * shader.
538  *
539  * Also check for errors based on incorrect usage of gl_ClipVertex,
540  * gl_ClipDistance, and gl_CullDistance.
541  * Additionally test whether the arrays gl_ClipDistance and gl_CullDistance
542  * exceed the maximum size defined by gl_MaxCombinedClipAndCullDistances.
543  *
544  * Reports a link error via linker_error() if incorrect usage is found.
545  */
546 static void
547 analyze_clip_cull_usage(struct gl_shader_program *prog,
548                         struct gl_linked_shader *shader,
549                         struct gl_context *ctx,
550                         GLuint *clip_distance_array_size,
551                         GLuint *cull_distance_array_size)
552 {
553    *clip_distance_array_size = 0;
554    *cull_distance_array_size = 0;
555 
556    if (prog->data->Version >= (prog->IsES ? 300 : 130)) {
557       /* From section 7.1 (Vertex Shader Special Variables) of the
558        * GLSL 1.30 spec:
559        *
560        *   "It is an error for a shader to statically write both
561        *   gl_ClipVertex and gl_ClipDistance."
562        *
563        * This does not apply to GLSL ES shaders, since GLSL ES defines neither
564        * gl_ClipVertex nor gl_ClipDistance. However with
565        * GL_EXT_clip_cull_distance, this functionality is exposed in ES 3.0.
566        */
567       find_assignment_visitor clip_distance("gl_ClipDistance");
568       find_assignment_visitor cull_distance("gl_CullDistance");
569 
570       clip_distance.run(shader->ir);
571       cull_distance.run(shader->ir);
572 
573       /* From the ARB_cull_distance spec:
574        *
575        * It is a compile-time or link-time error for the set of shaders forming
576        * a program to statically read or write both gl_ClipVertex and either
577        * gl_ClipDistance or gl_CullDistance.
578        *
579        * This does not apply to GLSL ES shaders, since GLSL ES doesn't define
580        * gl_ClipVertex.
581        */
582       if (!prog->IsES) {
583          find_assignment_visitor clip_vertex("gl_ClipVertex");
584 
585          clip_vertex.run(shader->ir);
586 
587          if (clip_vertex.variable_found() && clip_distance.variable_found()) {
588             linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
589                          "and `gl_ClipDistance'\n",
590                          _mesa_shader_stage_to_string(shader->Stage));
591             return;
592          }
593          if (clip_vertex.variable_found() && cull_distance.variable_found()) {
594             linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
595                          "and `gl_CullDistance'\n",
596                          _mesa_shader_stage_to_string(shader->Stage));
597             return;
598          }
599       }
600 
601       if (clip_distance.variable_found()) {
602          ir_variable *clip_distance_var =
603                 shader->symbols->get_variable("gl_ClipDistance");
604          assert(clip_distance_var);
605          *clip_distance_array_size = clip_distance_var->type->length;
606       }
607       if (cull_distance.variable_found()) {
608          ir_variable *cull_distance_var =
609                 shader->symbols->get_variable("gl_CullDistance");
610          assert(cull_distance_var);
611          *cull_distance_array_size = cull_distance_var->type->length;
612       }
613       /* From the ARB_cull_distance spec:
614        *
615        * It is a compile-time or link-time error for the set of shaders forming
616        * a program to have the sum of the sizes of the gl_ClipDistance and
617        * gl_CullDistance arrays to be larger than
618        * gl_MaxCombinedClipAndCullDistances.
619        */
620       if ((*clip_distance_array_size + *cull_distance_array_size) >
621           ctx->Const.MaxClipPlanes) {
622           linker_error(prog, "%s shader: the combined size of "
623                        "'gl_ClipDistance' and 'gl_CullDistance' cannot "
624                        "be larger than "
625                        "gl_MaxCombinedClipAndCullDistances (%u)",
626                        _mesa_shader_stage_to_string(shader->Stage),
627                        ctx->Const.MaxClipPlanes);
628       }
629    }
630 }
631 
632 
633 /**
634  * Verify that a vertex shader executable meets all semantic requirements.
635  *
636  * Also sets prog->Vert.ClipDistanceArraySize and
637  * prog->Vert.CullDistanceArraySize as a side effect.
638  *
639  * \param shader  Vertex shader executable to be verified
640  */
641 void
642 validate_vertex_shader_executable(struct gl_shader_program *prog,
643                                   struct gl_linked_shader *shader,
644                                   struct gl_context *ctx)
645 {
646    if (shader == NULL)
647       return;
648 
649    /* From the GLSL 1.10 spec, page 48:
650     *
651     *     "The variable gl_Position is available only in the vertex
652     *      language and is intended for writing the homogeneous vertex
653     *      position. All executions of a well-formed vertex shader
654     *      executable must write a value into this variable. [...] The
655     *      variable gl_Position is available only in the vertex
656     *      language and is intended for writing the homogeneous vertex
657     *      position. All executions of a well-formed vertex shader
658     *      executable must write a value into this variable."
659     *
660     * while in GLSL 1.40 this text is changed to:
661     *
662     *     "The variable gl_Position is available only in the vertex
663     *      language and is intended for writing the homogeneous vertex
664     *      position. It can be written at any time during shader
665     *      execution. It may also be read back by a vertex shader
666     *      after being written. This value will be used by primitive
667     *      assembly, clipping, culling, and other fixed functionality
668     *      operations, if present, that operate on primitives after
669     *      vertex processing has occurred. Its value is undefined if
670     *      the vertex shader executable does not write gl_Position."
671     *
672     * All GLSL ES Versions are similar to GLSL 1.40--failing to write to
673     * gl_Position is not an error.
674     */
675    if (prog->data->Version < (prog->IsES ? 300 : 140)) {
676       find_assignment_visitor find("gl_Position");
677       find.run(shader->ir);
678       if (!find.variable_found()) {
679         if (prog->IsES) {
680           linker_warning(prog,
681                          "vertex shader does not write to `gl_Position'. "
682                          "Its value is undefined. \n");
683         } else {
684           linker_error(prog,
685                        "vertex shader does not write to `gl_Position'. \n");
686         }
687          return;
688       }
689    }
690 
691    analyze_clip_cull_usage(prog, shader, ctx,
692                            &prog->Vert.ClipDistanceArraySize,
693                            &prog->Vert.CullDistanceArraySize);
694 }
695 
696 void
697 validate_tess_eval_shader_executable(struct gl_shader_program *prog,
698                                      struct gl_linked_shader *shader,
699                                      struct gl_context *ctx)
700 {
701    if (shader == NULL)
702       return;
703 
704    analyze_clip_cull_usage(prog, shader, ctx,
705                            &prog->TessEval.ClipDistanceArraySize,
706                            &prog->TessEval.CullDistanceArraySize);
707 }
708 
709 
710 /**
711  * Verify that a fragment shader executable meets all semantic requirements
712  *
713  * \param shader  Fragment shader executable to be verified
714  */
715 void
716 validate_fragment_shader_executable(struct gl_shader_program *prog,
717                                     struct gl_linked_shader *shader)
718 {
719    if (shader == NULL)
720       return;
721 
722    find_assignment_visitor frag_color("gl_FragColor");
723    find_assignment_visitor frag_data("gl_FragData");
724 
725    frag_color.run(shader->ir);
726    frag_data.run(shader->ir);
727 
728    if (frag_color.variable_found() && frag_data.variable_found()) {
729       linker_error(prog,  "fragment shader writes to both "
730                    "`gl_FragColor' and `gl_FragData'\n");
731    }
732 }
733 
734 /**
735  * Verify that a geometry shader executable meets all semantic requirements
736  *
737  * Also sets prog->Geom.VerticesIn, and prog->Geom.ClipDistanceArraySize and
738  * prog->Geom.CullDistanceArraySize as a side effect.
739  *
740  * \param shader Geometry shader executable to be verified
741  */
742 void
743 validate_geometry_shader_executable(struct gl_shader_program *prog,
744                                     struct gl_linked_shader *shader,
745                                     struct gl_context *ctx)
746 {
747    if (shader == NULL)
748       return;
749 
750    unsigned num_vertices = vertices_per_prim(shader->info.Geom.InputType);
751    prog->Geom.VerticesIn = num_vertices;
752 
753    analyze_clip_cull_usage(prog, shader, ctx,
754                            &prog->Geom.ClipDistanceArraySize,
755                            &prog->Geom.CullDistanceArraySize);
756 }
757 
758 /**
759  * Check if geometry shaders emit to non-zero streams and do corresponding
760  * validations.
761  */
762 static void
763 validate_geometry_shader_emissions(struct gl_context *ctx,
764                                    struct gl_shader_program *prog)
765 {
766    struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
767 
768    if (sh != NULL) {
769       find_emit_vertex_visitor emit_vertex(ctx->Const.MaxVertexStreams - 1);
770       emit_vertex.run(sh->ir);
771       if (emit_vertex.error()) {
772          linker_error(prog, "Invalid call %s(%d). Accepted values for the "
773                       "stream parameter are in the range [0, %d].\n",
774                       emit_vertex.error_func(),
775                       emit_vertex.error_stream(),
776                       ctx->Const.MaxVertexStreams - 1);
777       }
778       prog->Geom.UsesStreams = emit_vertex.uses_streams();
779       prog->Geom.UsesEndPrimitive = emit_vertex.uses_end_primitive();
780 
781       /* From the ARB_gpu_shader5 spec:
782        *
783        *   "Multiple vertex streams are supported only if the output primitive
784        *    type is declared to be "points".  A program will fail to link if it
785        *    contains a geometry shader calling EmitStreamVertex() or
786        *    EndStreamPrimitive() if its output primitive type is not "points".
787        *
788        * However, in the same spec:
789        *
790        *   "The function EmitVertex() is equivalent to calling EmitStreamVertex()
791        *    with <stream> set to zero."
792        *
793        * And:
794        *
795        *   "The function EndPrimitive() is equivalent to calling
796        *    EndStreamPrimitive() with <stream> set to zero."
797        *
798        * Since we can call EmitVertex() and EndPrimitive() when we output
799        * primitives other than points, calling EmitStreamVertex(0) or
800        * EndStreamPrimitive(0) should not produce errors. This is also what Nvidia
801        * does. Currently we only set prog->Geom.UsesStreams to TRUE when
802        * EmitStreamVertex() or EndStreamPrimitive() are called with a non-zero
803        * stream.
804        */
805       if (prog->Geom.UsesStreams && sh->info.Geom.OutputType != GL_POINTS) {
806          linker_error(prog, "EmitStreamVertex(n) and EndStreamPrimitive(n) "
807                       "with n>0 requires point output\n");
808       }
809    }
810 }
811 
812 bool
813 validate_intrastage_arrays(struct gl_shader_program *prog,
814                            ir_variable *const var,
815                            ir_variable *const existing)
816 {
817    /* Consider the types to be "the same" if both types are arrays
818     * of the same type and one of the arrays is implicitly sized.
819     * In addition, set the type of the linked variable to the
820     * explicitly sized array.
821     */
822    if (var->type->is_array() && existing->type->is_array()) {
823       if ((var->type->fields.array == existing->type->fields.array) &&
824           ((var->type->length == 0) || (existing->type->length == 0))) {
825          if (var->type->length != 0) {
826             if ((int)var->type->length <= existing->data.max_array_access) {
827                linker_error(prog, "%s `%s' declared as type "
828                            "`%s' but outermost dimension has an index"
829                            " of `%i'\n",
830                            mode_string(var),
831                            var->name, var->type->name,
832                            existing->data.max_array_access);
833             }
834             existing->type = var->type;
835             return true;
836          } else if (existing->type->length != 0) {
837             if ((int)existing->type->length <= var->data.max_array_access &&
838                !existing->data.from_ssbo_unsized_array) {
839                linker_error(prog, "%s `%s' declared as type "
840                            "`%s' but outermost dimension has an index"
841                            " of `%i'\n",
842                            mode_string(var),
843                            var->name, existing->type->name,
844                            var->data.max_array_access);
845             }
846             return true;
847          }
848       } else {
849          /* The arrays of structs could have different glsl_type pointers but
850           * they are actually the same type. Use record_compare() to check that.
851           */
852          if (existing->type->fields.array->is_record() &&
853              var->type->fields.array->is_record() &&
854              existing->type->fields.array->record_compare(var->type->fields.array))
855             return true;
856       }
857    }
858    return false;
859 }
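/* Illustrative example (not part of the original source): if one compilation
 * unit declares
 *
 *    vec4 colors[];     // implicitly sized, highest index accessed is 3
 *
 * and another declares
 *
 *    vec4 colors[4];    // explicitly sized
 *
 * the two are considered the same variable and the linked variable takes the
 * explicit size.  Declaring `vec4 colors[2]' instead would be rejected above,
 * since the other unit accesses index 3.
 */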
860 
861 
862 /**
863  * Perform validation of global variables used across multiple shaders
864  */
865 void
866 cross_validate_globals(struct gl_shader_program *prog,
867                        struct exec_list *ir, glsl_symbol_table *variables,
868                        bool uniforms_only)
869 {
870    foreach_in_list(ir_instruction, node, ir) {
871       ir_variable *const var = node->as_variable();
872 
873       if (var == NULL)
874          continue;
875 
876       if (uniforms_only && (var->data.mode != ir_var_uniform && var->data.mode != ir_var_shader_storage))
877          continue;
878 
879       /* don't cross validate subroutine uniforms */
880       if (var->type->contains_subroutine())
881          continue;
882 
883       /* Don't cross validate temporaries that are at global scope.  These
884        * will eventually get pulled into the shader's 'main'.
885        */
886       if (var->data.mode == ir_var_temporary)
887          continue;
888 
889       /* If a global with this name has already been seen, verify that the
890        * new instance has the same type.  In addition, if the globals have
891        * initializers, the values of the initializers must be the same.
892        */
893       ir_variable *const existing = variables->get_variable(var->name);
894       if (existing != NULL) {
895          /* Check if types match. Interface blocks have some special
896           * rules so we handle those elsewhere.
897           */
898          if (var->type != existing->type &&
899              !var->is_interface_instance()) {
900             if (!validate_intrastage_arrays(prog, var, existing)) {
901                if (var->type->is_record() && existing->type->is_record()
902                    && existing->type->record_compare(var->type)) {
903                    existing->type = var->type;
904                } else {
905                   /* If it is an unsized array in a Shader Storage Block,
906                    * two different shaders can access different elements.
907                    * Because of that, they might have been converted to
908                    * differently sized arrays; check that they are compatible,
909                    * ignoring the array size.
910                    */
911                   if (!(var->data.mode == ir_var_shader_storage &&
912                         var->data.from_ssbo_unsized_array &&
913                         existing->data.mode == ir_var_shader_storage &&
914                         existing->data.from_ssbo_unsized_array &&
915                         var->type->gl_type == existing->type->gl_type)) {
916                      linker_error(prog, "%s `%s' declared as type "
917                                   "`%s' and type `%s'\n",
918                                   mode_string(var),
919                                   var->name, var->type->name,
920                                   existing->type->name);
921                      return;
922                   }
923                }
924             }
925          }
926 
927          if (var->data.explicit_location) {
928             if (existing->data.explicit_location
929                 && (var->data.location != existing->data.location)) {
930                linker_error(prog, "explicit locations for %s "
931                             "`%s' have differing values\n",
932                             mode_string(var), var->name);
933                return;
934             }
935 
936             if (var->data.location_frac != existing->data.location_frac) {
937                linker_error(prog, "explicit components for %s `%s' have "
938                             "differing values\n", mode_string(var), var->name);
939                return;
940             }
941 
942             existing->data.location = var->data.location;
943             existing->data.explicit_location = true;
944          } else {
945             /* Check if a uniform with an implicit location was marked explicit
946              * by an earlier shader stage. If so, mark it explicit in this stage
947              * too to make sure later processing does not treat it as an
948              * implicit one.
949              */
950             if (existing->data.explicit_location) {
951                var->data.location = existing->data.location;
952                var->data.explicit_location = true;
953             }
954          }
955 
956          /* From the GLSL 4.20 specification:
957           * "A link error will result if two compilation units in a program
958           *  specify different integer-constant bindings for the same
959           *  opaque-uniform name.  However, it is not an error to specify a
960           *  binding on some but not all declarations for the same name"
961           */
962          if (var->data.explicit_binding) {
963             if (existing->data.explicit_binding &&
964                 var->data.binding != existing->data.binding) {
965                linker_error(prog, "explicit bindings for %s "
966                             "`%s' have differing values\n",
967                             mode_string(var), var->name);
968                return;
969             }
970 
971             existing->data.binding = var->data.binding;
972             existing->data.explicit_binding = true;
973          }
974 
975          if (var->type->contains_atomic() &&
976              var->data.offset != existing->data.offset) {
977             linker_error(prog, "offset specifications for %s "
978                          "`%s' have differing values\n",
979                          mode_string(var), var->name);
980             return;
981          }
982 
983          /* Validate layout qualifiers for gl_FragDepth.
984           *
985           * From the AMD/ARB_conservative_depth specs:
986           *
987           *    "If gl_FragDepth is redeclared in any fragment shader in a
988           *    program, it must be redeclared in all fragment shaders in
989           *    that program that have static assignments to
990           *    gl_FragDepth. All redeclarations of gl_FragDepth in all
991           *    fragment shaders in a single program must have the same set
992           *    of qualifiers."
993           */
994          if (strcmp(var->name, "gl_FragDepth") == 0) {
995             bool layout_declared = var->data.depth_layout != ir_depth_layout_none;
996             bool layout_differs =
997                var->data.depth_layout != existing->data.depth_layout;
998 
999             if (layout_declared && layout_differs) {
1000                linker_error(prog,
1001                             "All redeclarations of gl_FragDepth in all "
1002                             "fragment shaders in a single program must have "
1003                             "the same set of qualifiers.\n");
1004             }
1005 
1006             if (var->data.used && layout_differs) {
1007                linker_error(prog,
1008                             "If gl_FragDepth is redeclared with a layout "
1009                             "qualifier in any fragment shader, it must be "
1010                             "redeclared with the same layout qualifier in "
1011                             "all fragment shaders that have assignments to "
1012                             "gl_FragDepth\n");
1013             }
1014          }
1015 
1016          /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
1017           *
1018           *     "If a shared global has multiple initializers, the
1019           *     initializers must all be constant expressions, and they
1020           *     must all have the same value. Otherwise, a link error will
1021           *     result. (A shared global having only one initializer does
1022           *     not require that initializer to be a constant expression.)"
1023           *
1024           * Previous to 4.20 the GLSL spec simply said that initializers
1025           * must have the same value.  In this case of non-constant
1026           * initializers, this was impossible to determine.  As a result,
1027           * no vendor actually implemented that behavior.  The 4.20
1028           * behavior matches the implemented behavior of at least one other
1029           * vendor, so we'll implement that for all GLSL versions.
1030           */
1031          if (var->constant_initializer != NULL) {
1032             if (existing->constant_initializer != NULL) {
1033                if (!var->constant_initializer->has_value(existing->constant_initializer)) {
1034                   linker_error(prog, "initializers for %s "
1035                                "`%s' have differing values\n",
1036                                mode_string(var), var->name);
1037                   return;
1038                }
1039             } else {
1040                /* If the first-seen instance of a particular uniform did
1041                 * not have an initializer but a later instance does,
1042                 * replace the former with the later.
1043                 */
1044                variables->replace_variable(existing->name, var);
1045             }
1046          }
1047 
1048          if (var->data.has_initializer) {
1049             if (existing->data.has_initializer
1050                 && (var->constant_initializer == NULL
1051                     || existing->constant_initializer == NULL)) {
1052                linker_error(prog,
1053                             "shared global variable `%s' has multiple "
1054                             "non-constant initializers.\n",
1055                             var->name);
1056                return;
1057             }
1058          }
1059 
1060          if (existing->data.invariant != var->data.invariant) {
1061             linker_error(prog, "declarations for %s `%s' have "
1062                          "mismatching invariant qualifiers\n",
1063                          mode_string(var), var->name);
1064             return;
1065          }
1066          if (existing->data.centroid != var->data.centroid) {
1067             linker_error(prog, "declarations for %s `%s' have "
1068                          "mismatching centroid qualifiers\n",
1069                          mode_string(var), var->name);
1070             return;
1071          }
1072          if (existing->data.sample != var->data.sample) {
1073             linker_error(prog, "declarations for %s `%s` have "
1074                          "mismatching sample qualifiers\n",
1075                          mode_string(var), var->name);
1076             return;
1077          }
1078          if (existing->data.image_format != var->data.image_format) {
1079             linker_error(prog, "declarations for %s `%s` have "
1080                          "mismatching image format qualifiers\n",
1081                          mode_string(var), var->name);
1082             return;
1083          }
1084 
1085          /* In GLSL ES 3.10 only, the precision qualifier is not required to
1086           * match between block members defined in matched block names within
1087           * a shader interface.
1088           *
1089           * In GLSL ES 3.00 and ES 3.20, the precision qualifier of each block
1090           * member must match.
1091           */
1092          if (prog->IsES && (prog->data->Version != 310 ||
1093                             !var->get_interface_type()) &&
1094              existing->data.precision != var->data.precision) {
1095             linker_error(prog, "declarations for %s `%s` have "
1096                          "mismatching precision qualifiers\n",
1097                          mode_string(var), var->name);
1098             return;
1099          }
1100       } else
1101          variables->add_variable(var);
1102    }
1103 }
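/* Illustrative example (not part of the original source): two compilation
 * units that declare
 *
 *    uniform vec4 tint;     // unit A
 *    uniform vec3 tint;     // unit B
 *
 * fail above with "uniform `tint' declared as type ... and type ...", while
 * matching redeclarations are merged into a single symbol table entry.
 */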
1104 
1105 
1106 /**
1107  * Perform validation of uniforms used across multiple shader stages
1108  */
1109 void
1110 cross_validate_uniforms(struct gl_shader_program *prog)
1111 {
1112    glsl_symbol_table variables;
1113    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1114       if (prog->_LinkedShaders[i] == NULL)
1115          continue;
1116 
1117       cross_validate_globals(prog, prog->_LinkedShaders[i]->ir, &variables,
1118                              true);
1119    }
1120 }
1121 
1122 /**
1123  * Accumulates the array of buffer blocks and checks that all definitions of
1124  * blocks agree on their contents.
1125  */
1126 static bool
1127 interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog,
1128                                          bool validate_ssbo)
1129 {
1130    int *InterfaceBlockStageIndex[MESA_SHADER_STAGES];
1131    struct gl_uniform_block *blks = NULL;
1132    unsigned *num_blks = validate_ssbo ? &prog->data->NumShaderStorageBlocks :
1133       &prog->data->NumUniformBlocks;
1134 
1135    unsigned max_num_buffer_blocks = 0;
1136    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1137       if (prog->_LinkedShaders[i]) {
1138          if (validate_ssbo) {
1139             max_num_buffer_blocks +=
1140                prog->_LinkedShaders[i]->Program->info.num_ssbos;
1141          } else {
1142             max_num_buffer_blocks +=
1143                prog->_LinkedShaders[i]->Program->info.num_ubos;
1144          }
1145       }
1146    }
1147 
1148    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1149       struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1150 
1151       InterfaceBlockStageIndex[i] = new int[max_num_buffer_blocks];
1152       for (unsigned int j = 0; j < max_num_buffer_blocks; j++)
1153          InterfaceBlockStageIndex[i][j] = -1;
1154 
1155       if (sh == NULL)
1156          continue;
1157 
1158       unsigned sh_num_blocks;
1159       struct gl_uniform_block **sh_blks;
1160       if (validate_ssbo) {
1161          sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ssbos;
1162          sh_blks = sh->Program->sh.ShaderStorageBlocks;
1163       } else {
1164          sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ubos;
1165          sh_blks = sh->Program->sh.UniformBlocks;
1166       }
1167 
1168       for (unsigned int j = 0; j < sh_num_blocks; j++) {
1169          int index = link_cross_validate_uniform_block(prog, &blks, num_blks,
1170                                                        sh_blks[j]);
1171 
1172          if (index == -1) {
1173             linker_error(prog, "buffer block `%s' has mismatching "
1174                          "definitions\n", sh_blks[j]->Name);
1175 
1176             for (unsigned k = 0; k <= i; k++) {
1177                delete[] InterfaceBlockStageIndex[k];
1178             }
1179 
1180             /* Reset the block count. This will help avoid various segfaults
1181              * from api calls that assume the array exists due to the count
1182              * being non-zero.
1183              */
1184             *num_blks = 0;
1185             return false;
1186          }
1187 
1188          InterfaceBlockStageIndex[i][index] = j;
1189       }
1190    }
1191 
1192    /* Update per stage block pointers to point to the program list.
1193     * FIXME: We should be able to free the per stage blocks here.
1194     */
1195    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1196       for (unsigned j = 0; j < *num_blks; j++) {
1197          int stage_index = InterfaceBlockStageIndex[i][j];
1198 
1199          if (stage_index != -1) {
1200             struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1201 
1202             struct gl_uniform_block **sh_blks = validate_ssbo ?
1203                sh->Program->sh.ShaderStorageBlocks :
1204                sh->Program->sh.UniformBlocks;
1205 
1206             blks[j].stageref |= sh_blks[stage_index]->stageref;
1207             sh_blks[stage_index] = &blks[j];
1208          }
1209       }
1210    }
1211 
1212    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1213       delete[] InterfaceBlockStageIndex[i];
1214    }
1215 
1216    if (validate_ssbo)
1217       prog->data->ShaderStorageBlocks = blks;
1218    else
1219       prog->data->UniformBlocks = blks;
1220 
1221    return true;
1222 }
1223 
1224 
1225 /**
1226  * Populates a shader's symbol table with all global declarations
1227  */
1228 static void
1229 populate_symbol_table(gl_linked_shader *sh)
1230 {
1231    sh->symbols = new(sh) glsl_symbol_table;
1232 
1233    foreach_in_list(ir_instruction, inst, sh->ir) {
1234       ir_variable *var;
1235       ir_function *func;
1236 
1237       if ((func = inst->as_function()) != NULL) {
1238          sh->symbols->add_function(func);
1239       } else if ((var = inst->as_variable()) != NULL) {
1240          if (var->data.mode != ir_var_temporary)
1241             sh->symbols->add_variable(var);
1242       }
1243    }
1244 }
1245 
1246 
1247 /**
1248  * Remap variables referenced in an instruction tree
1249  *
1250  * This is used when instruction trees are cloned from one shader and placed in
1251  * another.  These trees will contain references to \c ir_variable nodes that
1252  * do not exist in the target shader.  This function finds these \c ir_variable
1253  * references and replaces the references with matching variables in the target
1254  * shader.
1255  *
1256  * If there is no matching variable in the target shader, a clone of the
1257  * \c ir_variable is made and added to the target shader.  The new variable is
1258  * added to \b both the instruction stream and the symbol table.
1259  *
1260  * \param inst         IR tree that is to be processed.
1261  * \param target       Linked shader whose symbol table and instruction
1262  *                     stream receive any newly created variables.
1263  * \param temps        Hash table mapping temporary variables in the source
1264  *                     shader to their clones in the target shader.
1265  */
1266 void
1267 remap_variables(ir_instruction *inst, struct gl_linked_shader *target,
1268                 hash_table *temps)
1269 {
1270    class remap_visitor : public ir_hierarchical_visitor {
1271    public:
1272          remap_visitor(struct gl_linked_shader *target, hash_table *temps)
1273       {
1274          this->target = target;
1275          this->symbols = target->symbols;
1276          this->instructions = target->ir;
1277          this->temps = temps;
1278       }
1279 
1280       virtual ir_visitor_status visit(ir_dereference_variable *ir)
1281       {
1282          if (ir->var->data.mode == ir_var_temporary) {
1283             hash_entry *entry = _mesa_hash_table_search(temps, ir->var);
1284             ir_variable *var = entry ? (ir_variable *) entry->data : NULL;
1285 
1286             assert(var != NULL);
1287             ir->var = var;
1288             return visit_continue;
1289          }
1290 
1291          ir_variable *const existing =
1292             this->symbols->get_variable(ir->var->name);
1293          if (existing != NULL)
1294             ir->var = existing;
1295          else {
1296             ir_variable *copy = ir->var->clone(this->target, NULL);
1297 
1298             this->symbols->add_variable(copy);
1299             this->instructions->push_head(copy);
1300             ir->var = copy;
1301          }
1302 
1303          return visit_continue;
1304       }
1305 
1306    private:
1307       struct gl_linked_shader *target;
1308       glsl_symbol_table *symbols;
1309       exec_list *instructions;
1310       hash_table *temps;
1311    };
1312 
1313    remap_visitor v(target, temps);
1314 
1315    inst->accept(&v);
1316 }
1317 
1318 
1319 /**
1320  * Move non-declarations from one instruction stream to another
1321  *
1322  * The intended usage pattern of this function is to pass the pointer to the
1323  * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
1324  * pointer) for \c last and \c false for \c make_copies on the first
1325  * call.  Successive calls pass the return value of the previous call for
1326  * \c last and \c true for \c make_copies.
1327  *
1328  * \param instructions Source instruction stream
1329  * \param last         Instruction after which new instructions should be
1330  *                     inserted in the target instruction stream
1331  * \param make_copies  Flag selecting whether instructions in \c instructions
1332  *                     should be copied (via \c ir_instruction::clone) into the
1333  *                     target list or moved.
1334  *
1335  * \return
1336  * The new "last" instruction in the target instruction stream.  This pointer
1337  * is suitable for use as the \c last parameter of a later call to this
1338  * function.
1339  */
1340 exec_node *
1341 move_non_declarations(exec_list *instructions, exec_node *last,
1342                       bool make_copies, gl_linked_shader *target)
1343 {
1344    hash_table *temps = NULL;
1345 
1346    if (make_copies)
1347       temps = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1348                                       _mesa_key_pointer_equal);
1349 
1350    foreach_in_list_safe(ir_instruction, inst, instructions) {
1351       if (inst->as_function())
1352          continue;
1353 
1354       ir_variable *var = inst->as_variable();
1355       if ((var != NULL) && (var->data.mode != ir_var_temporary))
1356          continue;
1357 
1358       assert(inst->as_assignment()
1359              || inst->as_call()
1360              || inst->as_if() /* for initializers with the ?: operator */
1361              || ((var != NULL) && (var->data.mode == ir_var_temporary)));
1362 
1363       if (make_copies) {
1364          inst = inst->clone(target, NULL);
1365 
1366          if (var != NULL)
1367             _mesa_hash_table_insert(temps, var, inst);
1368          else
1369             remap_variables(inst, target, temps);
1370       } else {
1371          inst->remove();
1372       }
1373 
1374       last->insert_after(inst);
1375       last = inst;
1376    }
1377 
1378    if (make_copies)
1379       _mesa_hash_table_destroy(temps, NULL);
1380 
1381    return last;
1382 }
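/* Usage sketch (illustrative only; the #if 0 block is not compiled and the
 * names `linked', `main_shader', `shader_list' and `num_shaders' are assumed
 * to exist in the caller).  This is the calling pattern described above.
 */
#if 0
exec_node *insertion_point = (exec_node *) linked->ir;
insertion_point = move_non_declarations(main_shader->ir, insertion_point,
                                        false, linked);
for (unsigned i = 0; i < num_shaders; i++) {
   if (shader_list[i] == main_shader)
      continue;
   insertion_point = move_non_declarations(shader_list[i]->ir,
                                           insertion_point, true, linked);
}
#endif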
1383 
1384 
1385 /**
1386  * This class is only used in link_intrastage_shaders() below but declaring
1387  * it inside that function leads to compiler warnings with some versions of
1388  * gcc.
1389  */
1390 class array_sizing_visitor : public deref_type_updater {
1391 public:
1392    array_sizing_visitor()
1393       : mem_ctx(ralloc_context(NULL)),
1394         unnamed_interfaces(_mesa_hash_table_create(NULL, _mesa_hash_pointer,
1395                                                    _mesa_key_pointer_equal))
1396    {
1397    }
1398 
1399    ~array_sizing_visitor()
1400    {
1401       _mesa_hash_table_destroy(this->unnamed_interfaces, NULL);
1402       ralloc_free(this->mem_ctx);
1403    }
1404 
1405    virtual ir_visitor_status visit(ir_variable *var)
1406    {
1407       const glsl_type *type_without_array;
1408       bool implicit_sized_array = var->data.implicit_sized_array;
1409       fixup_type(&var->type, var->data.max_array_access,
1410                  var->data.from_ssbo_unsized_array,
1411                  &implicit_sized_array);
1412       var->data.implicit_sized_array = implicit_sized_array;
1413       type_without_array = var->type->without_array();
1414       if (var->type->is_interface()) {
1415          if (interface_contains_unsized_arrays(var->type)) {
1416             const glsl_type *new_type =
1417                resize_interface_members(var->type,
1418                                         var->get_max_ifc_array_access(),
1419                                         var->is_in_shader_storage_block());
1420             var->type = new_type;
1421             var->change_interface_type(new_type);
1422          }
1423       } else if (type_without_array->is_interface()) {
1424          if (interface_contains_unsized_arrays(type_without_array)) {
1425             const glsl_type *new_type =
1426                resize_interface_members(type_without_array,
1427                                         var->get_max_ifc_array_access(),
1428                                         var->is_in_shader_storage_block());
1429             var->change_interface_type(new_type);
1430             var->type = update_interface_members_array(var->type, new_type);
1431          }
1432       } else if (const glsl_type *ifc_type = var->get_interface_type()) {
1433          /* Store a pointer to the variable in the unnamed_interfaces
1434           * hashtable.
1435           */
1436          hash_entry *entry =
1437                _mesa_hash_table_search(this->unnamed_interfaces,
1438                                        ifc_type);
1439 
1440          ir_variable **interface_vars = entry ? (ir_variable **) entry->data : NULL;
1441 
1442          if (interface_vars == NULL) {
1443             interface_vars = rzalloc_array(mem_ctx, ir_variable *,
1444                                            ifc_type->length);
1445             _mesa_hash_table_insert(this->unnamed_interfaces, ifc_type,
1446                                     interface_vars);
1447          }
1448          unsigned index = ifc_type->field_index(var->name);
1449          assert(index < ifc_type->length);
1450          assert(interface_vars[index] == NULL);
1451          interface_vars[index] = var;
1452       }
1453       return visit_continue;
1454    }
1455 
1456    /**
1457     * For each unnamed interface block that was discovered while running the
1458     * visitor, adjust the interface type to reflect the newly assigned array
1459     * sizes, and fix up the ir_variable nodes to point to the new interface
1460     * type.
1461     */
1462    void fixup_unnamed_interface_types()
1463    {
1464       hash_table_call_foreach(this->unnamed_interfaces,
1465                               fixup_unnamed_interface_type, NULL);
1466    }
1467 
1468 private:
1469    /**
1470     * If the type pointed to by \c type represents an unsized array, replace
1471     * it with a sized array whose size is determined by max_array_access.
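    *
    * For example, an unsized \c float[] whose highest accessed element is
    * index 3 (max_array_access == 3) becomes \c float[4].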
1472     */
1473    static void fixup_type(const glsl_type **type, unsigned max_array_access,
1474                           bool from_ssbo_unsized_array, bool *implicit_sized)
1475    {
1476       if (!from_ssbo_unsized_array && (*type)->is_unsized_array()) {
1477          *type = glsl_type::get_array_instance((*type)->fields.array,
1478                                                max_array_access + 1);
1479          *implicit_sized = true;
1480          assert(*type != NULL);
1481       }
1482    }
1483 
1484    static const glsl_type *
1485    update_interface_members_array(const glsl_type *type,
1486                                   const glsl_type *new_interface_type)
1487    {
1488       const glsl_type *element_type = type->fields.array;
1489       if (element_type->is_array()) {
1490          const glsl_type *new_array_type =
1491             update_interface_members_array(element_type, new_interface_type);
1492          return glsl_type::get_array_instance(new_array_type, type->length);
1493       } else {
1494          return glsl_type::get_array_instance(new_interface_type,
1495                                               type->length);
1496       }
1497    }
1498 
1499    /**
1500     * Determine whether the given interface type contains unsized arrays (if
1501     * it doesn't, array_sizing_visitor doesn't need to process it).
1502     */
1503    static bool interface_contains_unsized_arrays(const glsl_type *type)
1504    {
1505       for (unsigned i = 0; i < type->length; i++) {
1506          const glsl_type *elem_type = type->fields.structure[i].type;
1507          if (elem_type->is_unsized_array())
1508             return true;
1509       }
1510       return false;
1511    }
1512 
1513    /**
1514     * Create a new interface type based on the given type, with unsized arrays
1515     * replaced by sized arrays whose size is determined by
1516     * max_ifc_array_access.
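    *
    * For example (illustrative), a block member declared as "float data[]"
    * whose highest accessed index is 2 is resized to data[3]; the last
    * member of a shader storage block is instead left unsized.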
1517     */
1518    static const glsl_type *
1519    resize_interface_members(const glsl_type *type,
1520                             const int *max_ifc_array_access,
1521                             bool is_ssbo)
1522    {
1523       unsigned num_fields = type->length;
1524       glsl_struct_field *fields = new glsl_struct_field[num_fields];
1525       memcpy(fields, type->fields.structure,
1526              num_fields * sizeof(*fields));
1527       for (unsigned i = 0; i < num_fields; i++) {
1528          bool implicit_sized_array = fields[i].implicit_sized_array;
1529          /* If the last member of an SSBO is an unsized array, don't
1530           * replace it with a sized array.
1531           */
1532          if (is_ssbo && i == (num_fields - 1))
1533             fixup_type(&fields[i].type, max_ifc_array_access[i],
1534                        true, &implicit_sized_array);
1535          else
1536             fixup_type(&fields[i].type, max_ifc_array_access[i],
1537                        false, &implicit_sized_array);
1538          fields[i].implicit_sized_array = implicit_sized_array;
1539       }
1540       glsl_interface_packing packing =
1541          (glsl_interface_packing) type->interface_packing;
1542       bool row_major = (bool) type->interface_row_major;
1543       const glsl_type *new_ifc_type =
1544          glsl_type::get_interface_instance(fields, num_fields,
1545                                            packing, row_major, type->name);
1546       delete [] fields;
1547       return new_ifc_type;
1548    }
1549 
1550    static void fixup_unnamed_interface_type(const void *key, void *data,
1551                                             void *)
1552    {
1553       const glsl_type *ifc_type = (const glsl_type *) key;
1554       ir_variable **interface_vars = (ir_variable **) data;
1555       unsigned num_fields = ifc_type->length;
1556       glsl_struct_field *fields = new glsl_struct_field[num_fields];
1557       memcpy(fields, ifc_type->fields.structure,
1558              num_fields * sizeof(*fields));
1559       bool interface_type_changed = false;
1560       for (unsigned i = 0; i < num_fields; i++) {
1561          if (interface_vars[i] != NULL &&
1562              fields[i].type != interface_vars[i]->type) {
1563             fields[i].type = interface_vars[i]->type;
1564             interface_type_changed = true;
1565          }
1566       }
1567       if (!interface_type_changed) {
1568          delete [] fields;
1569          return;
1570       }
1571       glsl_interface_packing packing =
1572          (glsl_interface_packing) ifc_type->interface_packing;
1573       bool row_major = (bool) ifc_type->interface_row_major;
1574       const glsl_type *new_ifc_type =
1575          glsl_type::get_interface_instance(fields, num_fields, packing,
1576                                            row_major, ifc_type->name);
1577       delete [] fields;
1578       for (unsigned i = 0; i < num_fields; i++) {
1579          if (interface_vars[i] != NULL)
1580             interface_vars[i]->change_interface_type(new_ifc_type);
1581       }
1582    }
1583 
1584    /**
1585     * Memory context used to allocate the data in \c unnamed_interfaces.
1586     */
1587    void *mem_ctx;
1588 
1589    /**
1590     * Hash table from const glsl_type * to an array of ir_variable *'s
1591     * pointing to the ir_variables constituting each unnamed interface block.
1592     */
1593    hash_table *unnamed_interfaces;
1594 };
1595 
1596 /**
1597  * Check for conflicting xfb_stride default qualifiers and store buffer stride
1598  * for later use.
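 *
 * The qualifier in question is the default transform feedback stride for a
 * buffer, e.g. (illustrative GLSL):
 *
 *     layout(xfb_buffer = 1, xfb_stride = 32) out;
 *
 * All shaders in the stage that specify a stride for the same buffer must
 * agree on its value.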
1599  */
1600 static void
1601 link_xfb_stride_layout_qualifiers(struct gl_context *ctx,
1602                                   struct gl_shader_program *prog,
1603                                   struct gl_linked_shader *linked_shader,
1604                                   struct gl_shader **shader_list,
1605                                   unsigned num_shaders)
1606 {
1607    for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
1608       linked_shader->info.TransformFeedback.BufferStride[i] = 0;
1609    }
1610 
1611    for (unsigned i = 0; i < num_shaders; i++) {
1612       struct gl_shader *shader = shader_list[i];
1613 
1614       for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1615          if (shader->info.TransformFeedback.BufferStride[j]) {
1616             if (linked_shader->info.TransformFeedback.BufferStride[j] != 0 &&
1617                 shader->info.TransformFeedback.BufferStride[j] != 0 &&
1618                 linked_shader->info.TransformFeedback.BufferStride[j] !=
1619                    shader->info.TransformFeedback.BufferStride[j]) {
1620                linker_error(prog,
1621                             "intrastage shaders defined with conflicting "
1622                             "xfb_stride for buffer %d (%d and %d)\n", j,
1623                             linked_shader->
1624                                info.TransformFeedback.BufferStride[j],
1625                             shader->info.TransformFeedback.BufferStride[j]);
1626                return;
1627             }
1628 
1629             if (shader->info.TransformFeedback.BufferStride[j])
1630                linked_shader->info.TransformFeedback.BufferStride[j] =
1631                   shader->info.TransformFeedback.BufferStride[j];
1632          }
1633       }
1634    }
1635 
1636    for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1637       if (linked_shader->info.TransformFeedback.BufferStride[j]) {
1638          prog->TransformFeedback.BufferStride[j] =
1639             linked_shader->info.TransformFeedback.BufferStride[j];
1640 
1641          /* We will validate doubles at a later stage */
1642          if (prog->TransformFeedback.BufferStride[j] % 4) {
1643             linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
1644                          "multiple of 4 or, if it is applied to a type that "
1645                          "is or contains a double, a multiple of 8.",
1646                          prog->TransformFeedback.BufferStride[j]);
1647             return;
1648          }
1649 
1650          if (prog->TransformFeedback.BufferStride[j] / 4 >
1651              ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1652             linker_error(prog,
1653                          "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1654                          "limit has been exceeded.");
1655             return;
1656          }
1657       }
1658    }
1659 }
1660 
1661 /**
1662  * Performs the cross-validation of tessellation control shader vertices and
1663  * layout qualifiers for the attached tessellation control shaders,
1664  * and propagates them to the linked TCS and linked shader program.
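 *
 * The qualifier being validated is the output patch vertex count, declared
 * in GLSL as, e.g. (illustrative):
 *
 *     layout(vertices = 3) out;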
1665  */
1666 static void
1667 link_tcs_out_layout_qualifiers(struct gl_shader_program *prog,
1668                                struct gl_linked_shader *linked_shader,
1669                                struct gl_shader **shader_list,
1670                                unsigned num_shaders)
1671 {
1672    linked_shader->info.TessCtrl.VerticesOut = 0;
1673 
1674    if (linked_shader->Stage != MESA_SHADER_TESS_CTRL)
1675       return;
1676 
1677    /* From the GLSL 4.0 spec (chapter 4.3.8.2):
1678     *
1679     *     "All tessellation control shader layout declarations in a program
1680     *      must specify the same output patch vertex count.  There must be at
1681     *      least one layout qualifier specifying an output patch vertex count
1682     *      in any program containing tessellation control shaders; however,
1683     *      such a declaration is not required in all tessellation control
1684     *      shaders."
1685     */
1686 
1687    for (unsigned i = 0; i < num_shaders; i++) {
1688       struct gl_shader *shader = shader_list[i];
1689 
1690       if (shader->info.TessCtrl.VerticesOut != 0) {
1691          if (linked_shader->info.TessCtrl.VerticesOut != 0 &&
1692              linked_shader->info.TessCtrl.VerticesOut !=
1693              shader->info.TessCtrl.VerticesOut) {
1694             linker_error(prog, "tessellation control shader defined with "
1695                          "conflicting output vertex count (%d and %d)\n",
1696                          linked_shader->info.TessCtrl.VerticesOut,
1697                          shader->info.TessCtrl.VerticesOut);
1698             return;
1699          }
1700          linked_shader->info.TessCtrl.VerticesOut =
1701             shader->info.TessCtrl.VerticesOut;
1702       }
1703    }
1704 
1705    /* Just do the intrastage -> interstage propagation right now,
1706     * since we already know we're in the right type of shader program
1707     * for doing it.
1708     */
1709    if (linked_shader->info.TessCtrl.VerticesOut == 0) {
1710       linker_error(prog, "tessellation control shader didn't declare "
1711                    "vertices out layout qualifier\n");
1712       return;
1713    }
1714 }
1715 
1716 
1717 /**
1718  * Performs the cross-validation of tessellation evaluation shader
1719  * primitive type, vertex spacing, ordering and point_mode layout qualifiers
1720  * for the attached tessellation evaluation shaders, and propagates them
1721  * to the linked TES and linked shader program.
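 *
 * These qualifiers come from the TES input layout declaration, e.g.
 * (illustrative):
 *
 *     layout(triangles, equal_spacing, ccw) in;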
1722  */
1723 static void
1724 link_tes_in_layout_qualifiers(struct gl_shader_program *prog,
1725                               struct gl_linked_shader *linked_shader,
1726                               struct gl_shader **shader_list,
1727                               unsigned num_shaders)
1728 {
1729    linked_shader->info.TessEval.PrimitiveMode = PRIM_UNKNOWN;
1730    linked_shader->info.TessEval.Spacing = TESS_SPACING_UNSPECIFIED;
1731    linked_shader->info.TessEval.VertexOrder = 0;
1732    linked_shader->info.TessEval.PointMode = -1;
1733 
1734    if (linked_shader->Stage != MESA_SHADER_TESS_EVAL)
1735       return;
1736 
1737    /* From the GLSL 4.0 spec (chapter 4.3.8.1):
1738     *
1739     *     "At least one tessellation evaluation shader (compilation unit) in
1740     *      a program must declare a primitive mode in its input layout.
1741     *      Declaration vertex spacing, ordering, and point mode identifiers is
1742     *      optional.  It is not required that all tessellation evaluation
1743     *      shaders in a program declare a primitive mode.  If spacing or
1744     *      vertex ordering declarations are omitted, the tessellation
1745     *      primitive generator will use equal spacing or counter-clockwise
1746     *      vertex ordering, respectively.  If a point mode declaration is
1747     *      omitted, the tessellation primitive generator will produce lines or
1748     *      triangles according to the primitive mode."
1749     */
1750 
1751    for (unsigned i = 0; i < num_shaders; i++) {
1752       struct gl_shader *shader = shader_list[i];
1753 
1754       if (shader->info.TessEval.PrimitiveMode != PRIM_UNKNOWN) {
1755          if (linked_shader->info.TessEval.PrimitiveMode != PRIM_UNKNOWN &&
1756              linked_shader->info.TessEval.PrimitiveMode !=
1757              shader->info.TessEval.PrimitiveMode) {
1758             linker_error(prog, "tessellation evaluation shader defined with "
1759                          "conflicting input primitive modes.\n");
1760             return;
1761          }
1762          linked_shader->info.TessEval.PrimitiveMode = shader->info.TessEval.PrimitiveMode;
1763       }
1764 
1765       if (shader->info.TessEval.Spacing != 0) {
1766          if (linked_shader->info.TessEval.Spacing != 0 &&
1767              linked_shader->info.TessEval.Spacing !=
1768              shader->info.TessEval.Spacing) {
1769             linker_error(prog, "tessellation evaluation shader defined with "
1770                          "conflicting vertex spacing.\n");
1771             return;
1772          }
1773          linked_shader->info.TessEval.Spacing = shader->info.TessEval.Spacing;
1774       }
1775 
1776       if (shader->info.TessEval.VertexOrder != 0) {
1777          if (linked_shader->info.TessEval.VertexOrder != 0 &&
1778              linked_shader->info.TessEval.VertexOrder !=
1779              shader->info.TessEval.VertexOrder) {
1780             linker_error(prog, "tessellation evaluation shader defined with "
1781                          "conflicting ordering.\n");
1782             return;
1783          }
1784          linked_shader->info.TessEval.VertexOrder =
1785             shader->info.TessEval.VertexOrder;
1786       }
1787 
1788       if (shader->info.TessEval.PointMode != -1) {
1789          if (linked_shader->info.TessEval.PointMode != -1 &&
1790              linked_shader->info.TessEval.PointMode !=
1791              shader->info.TessEval.PointMode) {
1792             linker_error(prog, "tessellation evaluation shader defined with "
1793                          "conflicting point modes.\n");
1794             return;
1795          }
1796          linked_shader->info.TessEval.PointMode =
1797             shader->info.TessEval.PointMode;
1798       }
1799 
1800    }
1801 
1802    /* Just do the intrastage -> interstage propagation right now,
1803     * since we already know we're in the right type of shader program
1804     * for doing it.
1805     */
1806    if (linked_shader->info.TessEval.PrimitiveMode == PRIM_UNKNOWN) {
1807       linker_error(prog,
1808                    "tessellation evaluation shader didn't declare input "
1809                    "primitive modes.\n");
1810       return;
1811    }
1812 
1813    if (linked_shader->info.TessEval.Spacing == TESS_SPACING_UNSPECIFIED)
1814       linked_shader->info.TessEval.Spacing = TESS_SPACING_EQUAL;
1815 
1816    if (linked_shader->info.TessEval.VertexOrder == 0)
1817       linked_shader->info.TessEval.VertexOrder = GL_CCW;
1818 
1819    if (linked_shader->info.TessEval.PointMode == -1)
1820       linked_shader->info.TessEval.PointMode = GL_FALSE;
1821 }
1822 
1823 
1824 /**
1825  * Performs the cross-validation of layout qualifiers specified in
1826  * redeclaration of gl_FragCoord for the attached fragment shaders,
1827  * and propagates them to the linked FS and linked shader program.
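 *
 * The redeclaration being cross-validated has the form (illustrative):
 *
 *     layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;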
1828  */
1829 static void
1830 link_fs_inout_layout_qualifiers(struct gl_shader_program *prog,
1831                                 struct gl_linked_shader *linked_shader,
1832                                 struct gl_shader **shader_list,
1833                                 unsigned num_shaders)
1834 {
1835    linked_shader->info.redeclares_gl_fragcoord = false;
1836    linked_shader->info.uses_gl_fragcoord = false;
1837    linked_shader->info.origin_upper_left = false;
1838    linked_shader->info.pixel_center_integer = false;
1839 
1840    if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
1841        (prog->data->Version < 150 &&
1842         !prog->ARB_fragment_coord_conventions_enable))
1843       return;
1844 
1845    for (unsigned i = 0; i < num_shaders; i++) {
1846       struct gl_shader *shader = shader_list[i];
1847       /* From the GLSL 1.50 spec, page 39:
1848        *
1849        *   "If gl_FragCoord is redeclared in any fragment shader in a program,
1850        *    it must be redeclared in all the fragment shaders in that program
1851        *    that have a static use gl_FragCoord."
1852        */
1853       if ((linked_shader->info.redeclares_gl_fragcoord
1854            && !shader->info.redeclares_gl_fragcoord
1855            && shader->info.uses_gl_fragcoord)
1856           || (shader->info.redeclares_gl_fragcoord
1857               && !linked_shader->info.redeclares_gl_fragcoord
1858               && linked_shader->info.uses_gl_fragcoord)) {
1859          linker_error(prog, "fragment shader defined with conflicting "
1860                       "layout qualifiers for gl_FragCoord\n");
1861       }
1862 
1863       /* From the GLSL 1.50 spec, page 39:
1864        *
1865        *   "All redeclarations of gl_FragCoord in all fragment shaders in a
1866        *    single program must have the same set of qualifiers."
1867        */
1868       if (linked_shader->info.redeclares_gl_fragcoord &&
1869           shader->info.redeclares_gl_fragcoord &&
1870           (shader->info.origin_upper_left !=
1871            linked_shader->info.origin_upper_left ||
1872            shader->info.pixel_center_integer !=
1873            linked_shader->info.pixel_center_integer)) {
1874          linker_error(prog, "fragment shader defined with conflicting "
1875                       "layout qualifiers for gl_FragCoord\n");
1876       }
1877 
1878       /* Update the linked shader state.  Note that uses_gl_fragcoord should
1879        * accumulate the results.  The other values should replace.  If there
1880        * are multiple redeclarations, all the fields except uses_gl_fragcoord
1881        * are already known to be the same.
1882        */
1883       if (shader->info.redeclares_gl_fragcoord ||
1884           shader->info.uses_gl_fragcoord) {
1885          linked_shader->info.redeclares_gl_fragcoord =
1886             shader->info.redeclares_gl_fragcoord;
1887          linked_shader->info.uses_gl_fragcoord =
1888             linked_shader->info.uses_gl_fragcoord ||
1889             shader->info.uses_gl_fragcoord;
1890          linked_shader->info.origin_upper_left =
1891             shader->info.origin_upper_left;
1892          linked_shader->info.pixel_center_integer =
1893             shader->info.pixel_center_integer;
1894       }
1895 
1896       linked_shader->info.EarlyFragmentTests |=
1897          shader->info.EarlyFragmentTests;
1898       linked_shader->info.InnerCoverage |=
1899          shader->info.InnerCoverage;
1900       linked_shader->Program->info.fs.post_depth_coverage |=
1901          shader->info.PostDepthCoverage;
1902 
1903       linked_shader->Program->sh.fs.BlendSupport |= shader->BlendSupport;
1904    }
1905 }
1906 
1907 /**
1908  * Performs the cross-validation of geometry shader max_vertices and
1909  * primitive type layout qualifiers for the attached geometry shaders,
1910  * and propagates them to the linked GS and linked shader program.
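 *
 * The qualifiers being validated come from the GS layout declarations, e.g.
 * (illustrative):
 *
 *     layout(triangles, invocations = 2) in;
 *     layout(triangle_strip, max_vertices = 4) out;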
1911  */
1912 static void
1913 link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
1914                                 struct gl_linked_shader *linked_shader,
1915                                 struct gl_shader **shader_list,
1916                                 unsigned num_shaders)
1917 {
1918    linked_shader->info.Geom.VerticesOut = -1;
1919    linked_shader->info.Geom.Invocations = 0;
1920    linked_shader->info.Geom.InputType = PRIM_UNKNOWN;
1921    linked_shader->info.Geom.OutputType = PRIM_UNKNOWN;
1922 
1923    /* No in/out qualifiers defined for anything but GLSL 1.50+
1924     * geometry shaders so far.
1925     */
1926    if (linked_shader->Stage != MESA_SHADER_GEOMETRY ||
1927        prog->data->Version < 150)
1928       return;
1929 
1930    /* From the GLSL 1.50 spec, page 46:
1931     *
1932     *     "All geometry shader output layout declarations in a program
1933     *      must declare the same layout and same value for
1934     *      max_vertices. There must be at least one geometry output
1935     *      layout declaration somewhere in a program, but not all
1936     *      geometry shaders (compilation units) are required to
1937     *      declare it."
1938     */
1939 
1940    for (unsigned i = 0; i < num_shaders; i++) {
1941       struct gl_shader *shader = shader_list[i];
1942 
1943       if (shader->info.Geom.InputType != PRIM_UNKNOWN) {
1944          if (linked_shader->info.Geom.InputType != PRIM_UNKNOWN &&
1945              linked_shader->info.Geom.InputType !=
1946              shader->info.Geom.InputType) {
1947             linker_error(prog, "geometry shader defined with conflicting "
1948                          "input types\n");
1949             return;
1950          }
1951          linked_shader->info.Geom.InputType = shader->info.Geom.InputType;
1952       }
1953 
1954       if (shader->info.Geom.OutputType != PRIM_UNKNOWN) {
1955          if (linked_shader->info.Geom.OutputType != PRIM_UNKNOWN &&
1956              linked_shader->info.Geom.OutputType !=
1957              shader->info.Geom.OutputType) {
1958             linker_error(prog, "geometry shader defined with conflicting "
1959                          "output types\n");
1960             return;
1961          }
1962          linked_shader->info.Geom.OutputType = shader->info.Geom.OutputType;
1963       }
1964 
1965       if (shader->info.Geom.VerticesOut != -1) {
1966          if (linked_shader->info.Geom.VerticesOut != -1 &&
1967              linked_shader->info.Geom.VerticesOut !=
1968              shader->info.Geom.VerticesOut) {
1969             linker_error(prog, "geometry shader defined with conflicting "
1970                          "output vertex count (%d and %d)\n",
1971                          linked_shader->info.Geom.VerticesOut,
1972                          shader->info.Geom.VerticesOut);
1973             return;
1974          }
1975          linked_shader->info.Geom.VerticesOut = shader->info.Geom.VerticesOut;
1976       }
1977 
1978       if (shader->info.Geom.Invocations != 0) {
1979          if (linked_shader->info.Geom.Invocations != 0 &&
1980              linked_shader->info.Geom.Invocations !=
1981              shader->info.Geom.Invocations) {
1982             linker_error(prog, "geometry shader defined with conflicting "
1983                          "invocation count (%d and %d)\n",
1984                          linked_shader->info.Geom.Invocations,
1985                          shader->info.Geom.Invocations);
1986             return;
1987          }
1988          linked_shader->info.Geom.Invocations = shader->info.Geom.Invocations;
1989       }
1990    }
1991 
1992    /* Just do the intrastage -> interstage propagation right now,
1993     * since we already know we're in the right type of shader program
1994     * for doing it.
1995     */
1996    if (linked_shader->info.Geom.InputType == PRIM_UNKNOWN) {
1997       linker_error(prog,
1998                    "geometry shader didn't declare primitive input type\n");
1999       return;
2000    }
2001 
2002    if (linked_shader->info.Geom.OutputType == PRIM_UNKNOWN) {
2003       linker_error(prog,
2004                    "geometry shader didn't declare primitive output type\n");
2005       return;
2006    }
2007 
2008    if (linked_shader->info.Geom.VerticesOut == -1) {
2009       linker_error(prog,
2010                    "geometry shader didn't declare max_vertices\n");
2011       return;
2012    }
2013 
2014    if (linked_shader->info.Geom.Invocations == 0)
2015       linked_shader->info.Geom.Invocations = 1;
2016 }
2017 
2018 
2019 /**
2020  * Perform cross-validation of compute shader local_size_{x,y,z} layout
2021  * qualifiers for the attached compute shaders, and propagate them to the
2022  * linked CS and linked shader program.
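 *
 * A fixed local size is declared as, e.g. (illustrative):
 *
 *     layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
 *
 * while a shader using ARB_compute_variable_group_size declares
 * "layout(local_size_variable) in;" instead.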
2023  */
2024 static void
2025 link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
2026                                 struct gl_linked_shader *linked_shader,
2027                                 struct gl_shader **shader_list,
2028                                 unsigned num_shaders)
2029 {
2030    for (int i = 0; i < 3; i++)
2031       linked_shader->info.Comp.LocalSize[i] = 0;
2032 
2033    linked_shader->info.Comp.LocalSizeVariable = false;
2034 
2035    /* This function is called for all shader stages, but it only has an effect
2036     * for compute shaders.
2037     */
2038    if (linked_shader->Stage != MESA_SHADER_COMPUTE)
2039       return;
2040 
2041    /* From the ARB_compute_shader spec, in the section describing local size
2042     * declarations:
2043     *
2044     *     If multiple compute shaders attached to a single program object
2045     *     declare local work-group size, the declarations must be identical;
2046     *     otherwise a link-time error results. Furthermore, if a program
2047     *     object contains any compute shaders, at least one must contain an
2048     *     input layout qualifier specifying the local work sizes of the
2049     *     program, or a link-time error will occur.
2050     */
2051    for (unsigned sh = 0; sh < num_shaders; sh++) {
2052       struct gl_shader *shader = shader_list[sh];
2053 
2054       if (shader->info.Comp.LocalSize[0] != 0) {
2055          if (linked_shader->info.Comp.LocalSize[0] != 0) {
2056             for (int i = 0; i < 3; i++) {
2057                if (linked_shader->info.Comp.LocalSize[i] !=
2058                    shader->info.Comp.LocalSize[i]) {
2059                   linker_error(prog, "compute shader defined with conflicting "
2060                                "local sizes\n");
2061                   return;
2062                }
2063             }
2064          }
2065          for (int i = 0; i < 3; i++) {
2066             linked_shader->info.Comp.LocalSize[i] =
2067                shader->info.Comp.LocalSize[i];
2068          }
2069       } else if (shader->info.Comp.LocalSizeVariable) {
2070          if (linked_shader->info.Comp.LocalSize[0] != 0) {
2071             /* The ARB_compute_variable_group_size spec says:
2072              *
2073              *     If one compute shader attached to a program declares a
2074              *     variable local group size and a second compute shader
2075              *     attached to the same program declares a fixed local group
2076              *     size, a link-time error results.
2077              */
2078             linker_error(prog, "compute shader defined with both fixed and "
2079                          "variable local group size\n");
2080             return;
2081          }
2082          linked_shader->info.Comp.LocalSizeVariable = true;
2083       }
2084    }
2085 
2086    /* Just do the intrastage -> interstage propagation right now,
2087     * since we already know we're in the right type of shader program
2088     * for doing it.
2089     */
2090    if (linked_shader->info.Comp.LocalSize[0] == 0 &&
2091        !linked_shader->info.Comp.LocalSizeVariable) {
2092       linker_error(prog, "compute shader must contain a fixed or a variable "
2093                          "local group size\n");
2094       return;
2095    }
2096    for (int i = 0; i < 3; i++)
2097       prog->Comp.LocalSize[i] = linked_shader->info.Comp.LocalSize[i];
2098 
2099    prog->Comp.LocalSizeVariable =
2100       linked_shader->info.Comp.LocalSizeVariable;
2101 }
2102 
2103 
2104 /**
2105  * Combine a group of shaders for a single stage to generate a linked shader
2106  *
2107  * \note
2108  * If this function is supplied a single shader, it is cloned, and the new
2109  * shader is returned.
2110  */
2111 struct gl_linked_shader *
2112 link_intrastage_shaders(void *mem_ctx,
2113                         struct gl_context *ctx,
2114                         struct gl_shader_program *prog,
2115                         struct gl_shader **shader_list,
2116                         unsigned num_shaders,
2117                         bool allow_missing_main)
2118 {
2119    struct gl_uniform_block *ubo_blocks = NULL;
2120    struct gl_uniform_block *ssbo_blocks = NULL;
2121    unsigned num_ubo_blocks = 0;
2122    unsigned num_ssbo_blocks = 0;
2123 
2124    /* Check that global variables defined in multiple shaders are consistent.
2125     */
2126    glsl_symbol_table variables;
2127    for (unsigned i = 0; i < num_shaders; i++) {
2128       if (shader_list[i] == NULL)
2129          continue;
2130       cross_validate_globals(prog, shader_list[i]->ir, &variables, false);
2131    }
2132 
2133    if (!prog->data->LinkStatus)
2134       return NULL;
2135 
2136    /* Check that interface blocks defined in multiple shaders are consistent.
2137     */
2138    validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
2139                                         num_shaders);
2140    if (!prog->data->LinkStatus)
2141       return NULL;
2142 
2143    /* Check that there is only a single definition of each function signature
2144     * across all shaders.
2145     */
2146    for (unsigned i = 0; i < (num_shaders - 1); i++) {
2147       foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
2148          ir_function *const f = node->as_function();
2149 
2150          if (f == NULL)
2151             continue;
2152 
2153          for (unsigned j = i + 1; j < num_shaders; j++) {
2154             ir_function *const other =
2155                shader_list[j]->symbols->get_function(f->name);
2156 
2157             /* If the other shader has no function (and therefore no function
2158              * signatures) with the same name, skip to the next shader.
2159              */
2160             if (other == NULL)
2161                continue;
2162 
2163             foreach_in_list(ir_function_signature, sig, &f->signatures) {
2164                if (!sig->is_defined)
2165                   continue;
2166 
2167                ir_function_signature *other_sig =
2168                   other->exact_matching_signature(NULL, &sig->parameters);
2169 
2170                if (other_sig != NULL && other_sig->is_defined) {
2171                   linker_error(prog, "function `%s' is multiply defined\n",
2172                                f->name);
2173                   return NULL;
2174                }
2175             }
2176          }
2177       }
2178    }
2179 
2180    /* Find the shader that defines main, and make a clone of it.
2181     *
2182     * Starting with the clone, search for undefined references.  If one is
2183     * found, find the shader that defines it.  Clone the reference and add
2184     * it to the shader.  Repeat until there are no undefined references or
2185     * until a reference cannot be resolved.
2186     */
2187    gl_shader *main = NULL;
2188    for (unsigned i = 0; i < num_shaders; i++) {
2189       if (_mesa_get_main_function_signature(shader_list[i]->symbols)) {
2190          main = shader_list[i];
2191          break;
2192       }
2193    }
2194 
2195    if (main == NULL && allow_missing_main)
2196       main = shader_list[0];
2197 
2198    if (main == NULL) {
2199       linker_error(prog, "%s shader lacks `main'\n",
2200                    _mesa_shader_stage_to_string(shader_list[0]->Stage));
2201       return NULL;
2202    }
2203 
2204    gl_linked_shader *linked = rzalloc(NULL, struct gl_linked_shader);
2205    linked->Stage = shader_list[0]->Stage;
2206 
2207    /* Create program and attach it to the linked shader */
2208    struct gl_program *gl_prog =
2209       ctx->Driver.NewProgram(ctx,
2210                              _mesa_shader_stage_to_program(shader_list[0]->Stage),
2211                              prog->Name, false);
2212    if (!gl_prog) {
2213       prog->data->LinkStatus = false;
2214       _mesa_delete_linked_shader(ctx, linked);
2215       return NULL;
2216    }
2217 
2218    _mesa_reference_shader_program_data(ctx, &gl_prog->sh.data, prog->data);
2219 
2220    /* Don't use _mesa_reference_program(); just take ownership. */
2221    linked->Program = gl_prog;
2222 
2223    linked->ir = new(linked) exec_list;
2224    clone_ir_list(mem_ctx, linked->ir, main->ir);
2225 
2226    link_fs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
2227    link_tcs_out_layout_qualifiers(prog, linked, shader_list, num_shaders);
2228    link_tes_in_layout_qualifiers(prog, linked, shader_list, num_shaders);
2229    link_gs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
2230    link_cs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);
2231    link_xfb_stride_layout_qualifiers(ctx, prog, linked, shader_list,
2232                                      num_shaders);
2233 
2234    populate_symbol_table(linked);
2235 
2236    /* The pointer to the main function in the final linked shader (i.e., the
2237     * copy of the original shader that contained the main function).
2238     */
2239    ir_function_signature *const main_sig =
2240       _mesa_get_main_function_signature(linked->symbols);
2241 
2242    /* Move any instructions other than variable declarations or function
2243     * declarations into main.
2244     */
2245    if (main_sig != NULL) {
2246       exec_node *insertion_point =
2247          move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
2248                                linked);
2249 
2250       for (unsigned i = 0; i < num_shaders; i++) {
2251          if (shader_list[i] == main)
2252             continue;
2253 
2254          insertion_point = move_non_declarations(shader_list[i]->ir,
2255                                                  insertion_point, true, linked);
2256       }
2257    }
2258 
2259    if (!link_function_calls(prog, linked, shader_list, num_shaders)) {
2260       _mesa_delete_linked_shader(ctx, linked);
2261       return NULL;
2262    }
2263 
2264    /* Make a pass over all variable declarations to ensure that arrays with
2265     * unspecified sizes have a size specified.  The size is inferred from the
2266     * max_array_access field.
2267     */
2268    array_sizing_visitor v;
2269    v.run(linked->ir);
2270    v.fixup_unnamed_interface_types();
2271 
2272    /* Link up uniform blocks defined within this stage. */
2273    link_uniform_blocks(mem_ctx, ctx, prog, linked, &ubo_blocks,
2274                        &num_ubo_blocks, &ssbo_blocks, &num_ssbo_blocks);
2275 
2276    if (!prog->data->LinkStatus) {
2277       _mesa_delete_linked_shader(ctx, linked);
2278       return NULL;
2279    }
2280 
2281    /* Copy ubo blocks to linked shader list */
2282    linked->Program->sh.UniformBlocks =
2283       ralloc_array(linked, gl_uniform_block *, num_ubo_blocks);
2284    ralloc_steal(linked, ubo_blocks);
2285    for (unsigned i = 0; i < num_ubo_blocks; i++) {
2286       linked->Program->sh.UniformBlocks[i] = &ubo_blocks[i];
2287    }
2288    linked->Program->info.num_ubos = num_ubo_blocks;
2289 
2290    /* Copy ssbo blocks to linked shader list */
2291    linked->Program->sh.ShaderStorageBlocks =
2292       ralloc_array(linked, gl_uniform_block *, num_ssbo_blocks);
2293    ralloc_steal(linked, ssbo_blocks);
2294    for (unsigned i = 0; i < num_ssbo_blocks; i++) {
2295       linked->Program->sh.ShaderStorageBlocks[i] = &ssbo_blocks[i];
2296    }
2297    linked->Program->info.num_ssbos = num_ssbo_blocks;
2298 
2299    /* At this point linked should contain all of the linked IR, so
2300     * validate it to make sure nothing went wrong.
2301     */
2302    validate_ir_tree(linked->ir);
2303 
2304    /* Set the size of geometry shader input arrays */
2305    if (linked->Stage == MESA_SHADER_GEOMETRY) {
2306       unsigned num_vertices = vertices_per_prim(linked->info.Geom.InputType);
2307       array_resize_visitor input_resize_visitor(num_vertices, prog,
2308                                                 MESA_SHADER_GEOMETRY);
2309       foreach_in_list(ir_instruction, ir, linked->ir) {
2310          ir->accept(&input_resize_visitor);
2311       }
2312    }
2313 
2314    if (ctx->Const.VertexID_is_zero_based)
2315       lower_vertex_id(linked);
2316 
2317 #ifdef DEBUG
2318    /* Compute the source checksum. */
2319    linked->SourceChecksum = 0;
2320    for (unsigned i = 0; i < num_shaders; i++) {
2321       if (shader_list[i] == NULL)
2322          continue;
2323       linked->SourceChecksum ^= shader_list[i]->SourceChecksum;
2324    }
2325 #endif
2326 
2327    return linked;
2328 }
2329 
2330 /**
2331  * Update the sizes of linked shader uniform arrays to the maximum
2332  * array index used.
2333  *
2334  * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
2335  *
2336  *     If one or more elements of an array are active,
2337  *     GetActiveUniform will return the name of the array in name,
2338  *     subject to the restrictions listed above. The type of the array
2339  *     is returned in type. The size parameter contains the highest
2340  *     array element index used, plus one. The compiler or linker
2341  *     determines the highest index used.  There will be only one
2342  *     active uniform reported by the GL per uniform array.
2343  *
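 * For example, a uniform declared as "float coeffs[8]" (name purely
 * illustrative) whose highest statically accessed element in any linked
 * stage is index 2 is resized to coeffs[3].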
2344  */
2345 static void
2346 update_array_sizes(struct gl_shader_program *prog)
2347 {
2348    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
2349       if (prog->_LinkedShaders[i] == NULL)
2350          continue;
2351 
2352       bool types_were_updated = false;
2353 
2354       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
2355          ir_variable *const var = node->as_variable();
2356 
2357          if ((var == NULL) || (var->data.mode != ir_var_uniform) ||
2358              !var->type->is_array())
2359             continue;
2360 
2361          /* GL_ARB_uniform_buffer_object says that std140 uniforms
2362           * will not be eliminated.  Since we always do std140, just
2363           * don't resize arrays in UBOs.
2364           *
2365           * Atomic counters are supposed to get deterministic
2366           * locations assigned based on the declaration ordering and
2367           * sizes, array compaction would mess that up.
2368           *
2369           * Subroutine uniforms are not removed.
2370           */
2371          if (var->is_in_buffer_block() || var->type->contains_atomic() ||
2372              var->type->contains_subroutine() || var->constant_initializer)
2373             continue;
2374 
2375          int size = var->data.max_array_access;
2376          for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
2377             if (prog->_LinkedShaders[j] == NULL)
2378                continue;
2379 
2380             foreach_in_list(ir_instruction, node2, prog->_LinkedShaders[j]->ir) {
2381                ir_variable *other_var = node2->as_variable();
2382                if (!other_var)
2383                   continue;
2384 
2385                if (strcmp(var->name, other_var->name) == 0 &&
2386                    other_var->data.max_array_access > size) {
2387                   size = other_var->data.max_array_access;
2388                }
2389             }
2390          }
2391 
2392          if (size + 1 != (int)var->type->length) {
2393             /* If this is a built-in uniform (i.e., it's backed by some
2394              * fixed-function state), adjust the number of state slots to
2395              * match the new array size.  The number of slots per array entry
2396              * is not known.  It seems safe to assume that the total number of
2397              * slots is an integer multiple of the number of array elements.
2398              * Determine the number of slots per array element by dividing by
2399              * the old (total) size.
2400              */
2401             const unsigned num_slots = var->get_num_state_slots();
2402             if (num_slots > 0) {
2403                var->set_num_state_slots((size + 1)
2404                                         * (num_slots / var->type->length));
2405             }
2406 
2407             var->type = glsl_type::get_array_instance(var->type->fields.array,
2408                                                       size + 1);
2409             types_were_updated = true;
2410          }
2411       }
2412 
2413       /* Update the types of dereferences in case we changed any. */
2414       if (types_were_updated) {
2415          deref_type_updater v;
2416          v.run(prog->_LinkedShaders[i]->ir);
2417       }
2418    }
2419 }
2420 
2421 /**
2422  * Resize tessellation evaluation per-vertex inputs to the size of
2423  * tessellation control per-vertex outputs.
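 *
 * For example (illustrative), a TES per-vertex input declared as
 * "in vec4 tc_color[];" is sized to tc_color[3] when the TCS declares
 * "layout(vertices = 3) out;", or to the implementation's MaxPatchVertices
 * when no tessellation control shader is present.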
2424  */
2425 static void
2426 resize_tes_inputs(struct gl_context *ctx,
2427                   struct gl_shader_program *prog)
2428 {
2429    if (prog->_LinkedShaders[MESA_SHADER_TESS_EVAL] == NULL)
2430       return;
2431 
2432    gl_linked_shader *const tcs = prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
2433    gl_linked_shader *const tes = prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
2434 
2435    /* If no control shader is present, then the TES inputs are statically
2436     * sized to MaxPatchVertices; the actual size of the arrays won't be
2437     * known until draw time.
2438     */
2439    const int num_vertices = tcs
2440       ? tcs->info.TessCtrl.VerticesOut
2441       : ctx->Const.MaxPatchVertices;
2442 
2443    array_resize_visitor input_resize_visitor(num_vertices, prog,
2444                                              MESA_SHADER_TESS_EVAL);
2445    foreach_in_list(ir_instruction, ir, tes->ir) {
2446       ir->accept(&input_resize_visitor);
2447    }
2448 
2449    if (tcs || ctx->Const.LowerTESPatchVerticesIn) {
2450       /* Convert the gl_PatchVerticesIn system value into a constant, since
2451        * the value is known at this point.
2452        */
2453       foreach_in_list(ir_instruction, ir, tes->ir) {
2454          ir_variable *var = ir->as_variable();
2455          if (var && var->data.mode == ir_var_system_value &&
2456              var->data.location == SYSTEM_VALUE_VERTICES_IN) {
2457             void *mem_ctx = ralloc_parent(var);
2458             var->data.location = 0;
2459             var->data.explicit_location = false;
2460             if (tcs) {
2461                var->data.mode = ir_var_auto;
2462                var->constant_value = new(mem_ctx) ir_constant(num_vertices);
2463             } else {
2464                var->data.mode = ir_var_uniform;
2465                var->data.how_declared = ir_var_hidden;
2466                var->allocate_state_slots(1);
2467                ir_state_slot *slot0 = &var->get_state_slots()[0];
2468                slot0->swizzle = SWIZZLE_XXXX;
2469                slot0->tokens[0] = STATE_INTERNAL;
2470                slot0->tokens[1] = STATE_TES_PATCH_VERTICES_IN;
2471                for (int i = 2; i < STATE_LENGTH; i++)
2472                   slot0->tokens[i] = 0;
2473             }
2474          }
2475       }
2476    }
2477 }
2478 
2479 /**
2480  * Find a contiguous set of available bits in a bitmask.
2481  *
2482  * \param used_mask     Bits representing used (1) and unused (0) locations
2483  * \param needed_count  Number of contiguous bits needed.
2484  *
2485  * \return
2486  * Base location of the available bits on success or -1 on failure.
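 *
 * For example, with \c used_mask == 0xb (binary 1011, bits 0, 1 and 3 in
 * use) and \c needed_count == 2, the first two contiguous free bits begin
 * at bit 4, so 4 is returned.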
2487  */
2488 int
2489 find_available_slots(unsigned used_mask, unsigned needed_count)
2490 {
2491    unsigned needed_mask = (1 << needed_count) - 1;
2492    const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
2493 
2494    /* The comparison to 32 is redundant, but without it GCC emits "warning:
2495     * cannot optimize possibly infinite loops" for the loop below.
2496     */
2497    if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
2498       return -1;
2499 
2500    for (int i = 0; i <= max_bit_to_test; i++) {
2501       if ((needed_mask & ~used_mask) == needed_mask)
2502          return i;
2503 
2504       needed_mask <<= 1;
2505    }
2506 
2507    return -1;
2508 }
2509 
2510 
2511 /**
2512  * Assign locations for either VS inputs or FS outputs
2513  *
2514  * \param mem_ctx       Temporary ralloc context used for linking
2515  * \param prog          Shader program whose variables need locations assigned
2516  * \param constants     Driver specific constant values for the program.
2517  * \param target_index  Selector for the program target to receive location
2518  *                      assignments.  Must be either \c MESA_SHADER_VERTEX or
2519  *                      \c MESA_SHADER_FRAGMENT.
2520  *
2521  * \return
2522  * If locations are successfully assigned, true is returned.  Otherwise an
2523  * error is emitted to the shader link log and false is returned.
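 *
 * User-defined locations may come either from the API or from the shader
 * itself, e.g. (names illustrative):
 *
 *     glBindAttribLocation(program, 3, "position");
 *
 * or, equivalently in GLSL:
 *
 *     layout(location = 3) in vec4 position;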
2524  */
2525 bool
2526 assign_attribute_or_color_locations(void *mem_ctx,
2527                                     gl_shader_program *prog,
2528                                     struct gl_constants *constants,
2529                                     unsigned target_index)
2530 {
2531    /* Maximum number of generic locations.  This corresponds to either the
2532     * maximum number of draw buffers or the maximum number of generic
2533     * attributes.
2534     */
2535    unsigned max_index = (target_index == MESA_SHADER_VERTEX) ?
2536       constants->Program[target_index].MaxAttribs :
2537       MAX2(constants->MaxDrawBuffers, constants->MaxDualSourceDrawBuffers);
2538 
2539    /* Mark invalid locations as being used.
2540     */
2541    unsigned used_locations = (max_index >= 32)
2542       ? ~0 : ~((1 << max_index) - 1);
2543    unsigned double_storage_locations = 0;
2544 
2545    assert((target_index == MESA_SHADER_VERTEX)
2546           || (target_index == MESA_SHADER_FRAGMENT));
2547 
2548    gl_linked_shader *const sh = prog->_LinkedShaders[target_index];
2549    if (sh == NULL)
2550       return true;
2551 
2552    /* Operate in a total of four passes.
2553     *
2554     * 1. Invalidate the location assignments for all vertex shader inputs.
2555     *
2556     * 2. Assign locations for inputs that have user-defined (via
2557     *    glBindVertexAttribLocation) locations and outputs that have
2558     *    user-defined locations (via glBindFragDataLocation).
2559     *
2560     * 3. Sort the attributes without assigned locations by number of slots
2561     *    required in decreasing order.  Fragmentation caused by attribute
2562     *    locations assigned by the application may prevent large attributes
2563     *    from having enough contiguous space.
2564     *
2565     * 4. Assign locations to any inputs without assigned locations.
2566     */
2567 
2568    const int generic_base = (target_index == MESA_SHADER_VERTEX)
2569       ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;
2570 
2571    const enum ir_variable_mode direction =
2572       (target_index == MESA_SHADER_VERTEX)
2573       ? ir_var_shader_in : ir_var_shader_out;
2574 
2575 
2576    /* Temporary storage for the set of attributes that need locations assigned.
2577     */
2578    struct temp_attr {
2579       unsigned slots;
2580       ir_variable *var;
2581 
2582       /* Used below in the call to qsort. */
2583       static int compare(const void *a, const void *b)
2584       {
2585          const temp_attr *const l = (const temp_attr *) a;
2586          const temp_attr *const r = (const temp_attr *) b;
2587 
2588          /* Reversed because we want a descending order sort below. */
2589          return r->slots - l->slots;
2590       }
2591    } to_assign[32];
2592    assert(max_index <= 32);
2593 
2594    /* Temporary array for the set of attributes that have locations assigned.
2595     */
2596    ir_variable *assigned[16];
2597 
2598    unsigned num_attr = 0;
2599    unsigned assigned_attr = 0;
2600 
2601    foreach_in_list(ir_instruction, node, sh->ir) {
2602       ir_variable *const var = node->as_variable();
2603 
2604       if ((var == NULL) || (var->data.mode != (unsigned) direction))
2605          continue;
2606 
2607       if (var->data.explicit_location) {
2608          var->data.is_unmatched_generic_inout = 0;
2609          if ((var->data.location >= (int)(max_index + generic_base))
2610              || (var->data.location < 0)) {
2611             linker_error(prog,
2612                          "invalid explicit location %d specified for `%s'\n",
2613                          (var->data.location < 0)
2614                          ? var->data.location
2615                          : var->data.location - generic_base,
2616                          var->name);
2617             return false;
2618          }
2619       } else if (target_index == MESA_SHADER_VERTEX) {
2620          unsigned binding;
2621 
2622          if (prog->AttributeBindings->get(binding, var->name)) {
2623             assert(binding >= VERT_ATTRIB_GENERIC0);
2624             var->data.location = binding;
2625             var->data.is_unmatched_generic_inout = 0;
2626          }
2627       } else if (target_index == MESA_SHADER_FRAGMENT) {
2628          unsigned binding;
2629          unsigned index;
2630          const char *name = var->name;
2631          const glsl_type *type = var->type;
2632 
2633          while (type) {
2634             /* Check if there's a binding for the variable name */
2635             if (prog->FragDataBindings->get(binding, name)) {
2636                assert(binding >= FRAG_RESULT_DATA0);
2637                var->data.location = binding;
2638                var->data.is_unmatched_generic_inout = 0;
2639 
2640                if (prog->FragDataIndexBindings->get(index, name)) {
2641                   var->data.index = index;
2642                }
2643                break;
2644             }
2645 
2646             /* If not, but it's an array type, look for name[0] */
2647             if (type->is_array()) {
2648                name = ralloc_asprintf(mem_ctx, "%s[0]", name);
2649                type = type->fields.array;
2650                continue;
2651             }
2652 
2653             break;
2654          }
2655       }
2656 
2657       if (strcmp(var->name, "gl_LastFragData") == 0)
2658          continue;
2659 
2660       /* From GL4.5 core spec, section 15.2 (Shader Execution):
2661        *
2662        *     "Output binding assignments will cause LinkProgram to fail:
2663        *     ...
2664        *     If the program has an active output assigned to a location greater
2665        *     than or equal to the value of MAX_DUAL_SOURCE_DRAW_BUFFERS and has
2666        *     an active output assigned an index greater than or equal to one;"
2667        */
2668       if (target_index == MESA_SHADER_FRAGMENT && var->data.index >= 1 &&
2669           var->data.location - generic_base >=
2670           (int) constants->MaxDualSourceDrawBuffers) {
2671          linker_error(prog,
2672                       "output location %d >= GL_MAX_DUAL_SOURCE_DRAW_BUFFERS "
2673                       "with index %u for %s\n",
2674                       var->data.location - generic_base, var->data.index,
2675                       var->name);
2676          return false;
2677       }
2678 
2679       const unsigned slots = var->type->count_attribute_slots(target_index == MESA_SHADER_VERTEX);
2680 
2681       /* If the variable is not a built-in and has a location statically
2682        * assigned in the shader (presumably via a layout qualifier), make sure
2683        * that it doesn't collide with other assigned locations.  Otherwise,
2684        * add it to the list of variables that need linker-assigned locations.
2685        */
2686       if (var->data.location != -1) {
2687          if (var->data.location >= generic_base && var->data.index < 1) {
2688             /* From page 61 of the OpenGL 4.0 spec:
2689              *
2690              *     "LinkProgram will fail if the attribute bindings assigned
2691              *     by BindAttribLocation do not leave not enough space to
2692              *     assign a location for an active matrix attribute or an
2693              *     active attribute array, both of which require multiple
2694              *     contiguous generic attributes."
2695              *
2696              * I think the above text prohibits aliasing between explicit and
2697              * automatic assignments.  However, aliasing is allowed in manual
2698              * assignments of attribute locations.  See the comments below for
2699              * the details.
2700              *
2701              * From OpenGL 4.0 spec, page 61:
2702              *
2703              *     "It is possible for an application to bind more than one
2704              *     attribute name to the same location. This is referred to as
2705              *     aliasing. This will only work if only one of the aliased
2706              *     attributes is active in the executable program, or if no
2707              *     path through the shader consumes more than one attribute of
2708              *     a set of attributes aliased to the same location. A link
2709              *     error can occur if the linker determines that every path
2710              *     through the shader consumes multiple aliased attributes,
2711              *     but implementations are not required to generate an error
2712              *     in this case."
2713              *
2714              * From GLSL 4.30 spec, page 54:
2715              *
2716              *    "A program will fail to link if any two non-vertex shader
2717              *     input variables are assigned to the same location. For
2718              *     vertex shaders, multiple input variables may be assigned
2719              *     to the same location using either layout qualifiers or via
2720              *     the OpenGL API. However, such aliasing is intended only to
2721              *     support vertex shaders where each execution path accesses
2722              *     at most one input per each location. Implementations are
2723              *     permitted, but not required, to generate link-time errors
2724              *     if they detect that every path through the vertex shader
2725              *     executable accesses multiple inputs assigned to any single
2726              *     location. For all shader types, a program will fail to link
2727              *     if explicit location assignments leave the linker unable
2728              *     to find space for other variables without explicit
2729              *     assignments."
2730              *
2731              * From OpenGL ES 3.0 spec, page 56:
2732              *
2733              *    "Binding more than one attribute name to the same location
2734              *     is referred to as aliasing, and is not permitted in OpenGL
2735              *     ES Shading Language 3.00 vertex shaders. LinkProgram will
2736              *     fail when this condition exists. However, aliasing is
2737              *     possible in OpenGL ES Shading Language 1.00 vertex shaders.
2738              *     This will only work if only one of the aliased attributes
2739              *     is active in the executable program, or if no path through
2740              *     the shader consumes more than one attribute of a set of
2741              *     attributes aliased to the same location. A link error can
2742              *     occur if the linker determines that every path through the
2743              *     shader consumes multiple aliased attributes, but implemen-
2744              *     tations are not required to generate an error in this case."
2745              *
2746              * After looking at the above references from the OpenGL, OpenGL
2747              * ES and GLSL specifications, we allow aliasing of vertex input
2748              * variables in OpenGL 2.0 (and above) and OpenGL ES 2.0.
2749              *
2750              * NOTE: This is not required by the spec, but it's worth noting
2751              * here that we do nothing to ensure that no path through the
2752              * vertex shader executable accesses multiple inputs assigned to
2753              * any single location.
2754              */
2755 
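            /* Illustrative sketch of the aliasing tolerated here (hypothetical
             * desktop GL vertex shader using explicit attribute locations):
             *
             *    layout(location = 1) in vec4 skinned_pos;
             *    layout(location = 1) in vec4 static_pos;   // aliases slot 1
             *
             * The linker accepts this (with a warning about the overlap);
             * per the NOTE above, nothing verifies that no execution path
             * reads both.
             */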
2756             /* Mask representing the contiguous slots that will be used by
2757              * this attribute.
2758              */
2759             const unsigned attr = var->data.location - generic_base;
2760             const unsigned use_mask = (1 << slots) - 1;
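            /* For example, a mat4 input occupies 4 slots, so use_mask == 0xf
             * and (use_mask << attr) covers locations attr .. attr + 3.
             */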
2761             const char *const string = (target_index == MESA_SHADER_VERTEX)
2762                ? "vertex shader input" : "fragment shader output";
2763 
2764             /* Generate a link error if the requested locations for this
2765              * attribute exceed the maximum allowed attribute location.
2766              */
2767             if (attr + slots > max_index) {
2768                linker_error(prog,
2769                            "insufficient contiguous locations "
2770                            "available for %s `%s' %d %d %d\n", string,
2771                            var->name, used_locations, use_mask, attr);
2772                return false;
2773             }
2774 
2775             /* Generate a link error if the set of bits requested for this
2776              * attribute overlaps any previously allocated bits.
2777              */
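            /* Worked example: with used_locations == 0x6 (locations 1 and 2
             * taken), attr == 2 and use_mask == 0x3, the new bits 0xc overlap
             * bit 2, so clearing them changes used_locations and the test
             * below detects the collision.
             */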
2778             if ((~(use_mask << attr) & used_locations) != used_locations) {
2779                if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
2780                   /* From section 4.4.2 (Output Layout Qualifiers) of the GLSL
2781                    * 4.40 spec:
2782                    *
2783                    *    "Additionally, for fragment shader outputs, if two
2784                    *    variables are placed within the same location, they
2785                    *    must have the same underlying type (floating-point or
2786                    *    integer). No component aliasing of output variables or
2787                    *    members is allowed.
2788                    */
2789                   for (unsigned i = 0; i < assigned_attr; i++) {
2790                      unsigned assigned_slots =
2791                         assigned[i]->type->count_attribute_slots(false);
2792                      unsigned assig_attr =
2793                         assigned[i]->data.location - generic_base;
2794                      unsigned assigned_use_mask = (1 << assigned_slots) - 1;
2795 
2796                      if ((assigned_use_mask << assig_attr) &
2797                          (use_mask << attr)) {
2798 
2799                         const glsl_type *assigned_type =
2800                            assigned[i]->type->without_array();
2801                         const glsl_type *type = var->type->without_array();
2802                         if (assigned_type->base_type != type->base_type) {
2803                            linker_error(prog, "types do not match for aliased"
2804                                         " %ss %s and %s\n", string,
2805                                         assigned[i]->name, var->name);
2806                            return false;
2807                         }
2808 
2809                         unsigned assigned_component_mask =
2810                            ((1 << assigned_type->vector_elements) - 1) <<
2811                            assigned[i]->data.location_frac;
2812                         unsigned component_mask =
2813                            ((1 << type->vector_elements) - 1) <<
2814                            var->data.location_frac;
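                        /* For example, two vec2 outputs (same base type)
                         * sharing a location at components 0 and 2 yield
                         * masks 0x3 and 0xc; they do not overlap, so no
                         * error is raised.
                         */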
2815                         if (assigned_component_mask & component_mask) {
2816                            linker_error(prog, "overlapping component is "
2817                                         "assigned to %ss %s and %s "
2818                                         "(component=%d)\n",
2819                                         string, assigned[i]->name, var->name,
2820                                         var->data.location_frac);
2821                            return false;
2822                         }
2823                      }
2824                   }
2825                } else if (target_index == MESA_SHADER_FRAGMENT ||
2826                           (prog->IsES && prog->data->Version >= 300)) {
2827                   linker_error(prog, "overlapping location is assigned "
2828                                "to %s `%s' %d %d %d\n", string, var->name,
2829                                used_locations, use_mask, attr);
2830                   return false;
2831                } else {
2832                   linker_warning(prog, "overlapping location is assigned "
2833                                  "to %s `%s' %d %d %d\n", string, var->name,
2834                                  used_locations, use_mask, attr);
2835                }
2836             }
2837 
2838             used_locations |= (use_mask << attr);
2839 
2840             /* From the GL 4.5 core spec, section 11.1.1 (Vertex Attributes):
2841              *
2842              * "A program with more than the value of MAX_VERTEX_ATTRIBS
2843              *  active attribute variables may fail to link, unless
2844              *  device-dependent optimizations are able to make the program
2845              *  fit within available hardware resources. For the purposes
2846              *  of this test, attribute variables of the type dvec3, dvec4,
2847              *  dmat2x3, dmat2x4, dmat3, dmat3x4, dmat4x3, and dmat4 may
2848              *  count as consuming twice as many attributes as equivalent
2849              *  single-precision types. While these types use the same number
2850              *  of generic attributes as their single-precision equivalents,
2851              *  implementations are permitted to consume two single-precision
2852              *  vectors of internal storage for each three- or four-component
2853              *  double-precision vector."
2854              *
2855              * Mark this attribute slot as taking up twice as much space
2856              * so we can count it properly against limits.  According to
2857              * issue (3) of the GL_ARB_vertex_attrib_64bit behavior, this
2858              * is optional behavior, but it seems preferable.
2859              */
2860             if (var->type->without_array()->is_dual_slot())
2861                double_storage_locations |= (use_mask << attr);
2862          }
2863 
2864          assigned[assigned_attr] = var;
2865          assigned_attr++;
2866 
2867          continue;
2868       }
2869 
2870       if (num_attr >= max_index) {
2871          linker_error(prog, "too many %s (max %u)",
2872                       target_index == MESA_SHADER_VERTEX ?
2873                       "vertex shader inputs" : "fragment shader outputs",
2874                       max_index);
2875          return false;
2876       }
2877       to_assign[num_attr].slots = slots;
2878       to_assign[num_attr].var = var;
2879       num_attr++;
2880    }
2881 
2882    if (target_index == MESA_SHADER_VERTEX) {
2883       unsigned total_attribs_size =
2884          _mesa_bitcount(used_locations & ((1 << max_index) - 1)) +
2885          _mesa_bitcount(double_storage_locations);
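      /* For example, two dvec4 inputs at locations 0 and 1 set two bits in
       * used_locations and two in double_storage_locations, counting as four
       * slots against max_index.
       */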
2886       if (total_attribs_size > max_index) {
2887          linker_error(prog,
2888                       "attempt to use %d vertex attribute slots, only %d available",
2889                       total_attribs_size, max_index);
2890          return false;
2891       }
2892    }
2893 
2894    /* If all of the attributes were assigned locations by the application (or
2895     * are built-in attributes with fixed locations), return early.  This should
2896     * be the common case.
2897     */
2898    if (num_attr == 0)
2899       return true;
2900 
2901    qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
2902 
2903    if (target_index == MESA_SHADER_VERTEX) {
2904       /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS.  It can
2905        * only be explicitly assigned via glBindAttribLocation.  Mark it as
2906        * reserved to prevent it from being automatically allocated below.
2907        */
2908       find_deref_visitor find("gl_Vertex");
2909       find.run(sh->ir);
2910       if (find.variable_found())
2911          used_locations |= (1 << 0);
2912    }
2913 
2914    for (unsigned i = 0; i < num_attr; i++) {
2915       /* Mask representing the contiguous slots that will be used by this
2916        * attribute.
2917        */
2918       const unsigned use_mask = (1 << to_assign[i].slots) - 1;
2919 
2920       int location = find_available_slots(used_locations, to_assign[i].slots);
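      /* find_available_slots() is expected to return a generic slot index with
       * to_assign[i].slots consecutive free bits in used_locations, or a
       * negative value when no such run exists (handled below).
       */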
2921 
2922       if (location < 0) {
2923          const char *const string = (target_index == MESA_SHADER_VERTEX)
2924             ? "vertex shader input" : "fragment shader output";
2925 
2926          linker_error(prog,
2927                       "insufficient contiguous locations "
2928                       "available for %s `%s'\n",
2929                       string, to_assign[i].var->name);
2930          return false;
2931       }
2932 
2933       to_assign[i].var->data.location = generic_base + location;
2934       to_assign[i].var->data.is_unmatched_generic_inout = 0;
2935       used_locations |= (use_mask << location);
2936 
2937       if (to_assign[i].var->type->without_array()->is_dual_slot())
2938          double_storage_locations |= (use_mask << location);
2939    }
2940 
2941    /* Now that we have all the locations, from the GL 4.5 core spec, section
2942     * 11.1.1 (Vertex Attributes), dvec3, dvec4, dmat2x3, dmat2x4, dmat3,
2943     * dmat3x4, dmat4x3, and dmat4 count as consuming twice as many attributes
2944     * as equivalent single-precision types.
2945     */
2946    if (target_index == MESA_SHADER_VERTEX) {
2947       unsigned total_attribs_size =
2948          _mesa_bitcount(used_locations & ((1 << max_index) - 1)) +
2949          _mesa_bitcount(double_storage_locations);
2950       if (total_attribs_size > max_index) {
2951          linker_error(prog,
2952                       "attempt to use %d vertex attribute slots, only %d available",
2953                       total_attribs_size, max_index);
2954          return false;
2955       }
2956    }
2957 
2958    return true;
2959 }
2960 
2961 /**
2962  * Match explicit locations of outputs to inputs and clear the
2963  * unmatched flag when a match is found so we don't optimise them away.
2964  */
2965 static void
2966 match_explicit_outputs_to_inputs(gl_linked_shader *producer,
2967                                  gl_linked_shader *consumer)
2968 {
2969    glsl_symbol_table parameters;
2970    ir_variable *explicit_locations[MAX_VARYINGS_INCL_PATCH][4] =
2971       { {NULL, NULL} };
2972 
2973    /* Find all shader outputs in the "producer" stage.
2974     */
2975    foreach_in_list(ir_instruction, node, producer->ir) {
2976       ir_variable *const var = node->as_variable();
2977 
2978       if ((var == NULL) || (var->data.mode != ir_var_shader_out))
2979          continue;
2980 
2981       if (var->data.explicit_location &&
2982           var->data.location >= VARYING_SLOT_VAR0) {
2983          const unsigned idx = var->data.location - VARYING_SLOT_VAR0;
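         /* For example, a producer output declared (with ARB_enhanced_layouts)
          * as "layout(location = 2, component = 1) out float f;" is stored in
          * explicit_locations[2][1].
          */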
2984          if (explicit_locations[idx][var->data.location_frac] == NULL)
2985             explicit_locations[idx][var->data.location_frac] = var;
2986       }
2987    }
2988 
2989    /* Match inputs to outputs */
2990    foreach_in_list(ir_instruction, node, consumer->ir) {
2991       ir_variable *const input = node->as_variable();
2992 
2993       if ((input == NULL) || (input->data.mode != ir_var_shader_in))
2994          continue;
2995 
2996       ir_variable *output = NULL;
2997       if (input->data.explicit_location
2998           && input->data.location >= VARYING_SLOT_VAR0) {
2999          output = explicit_locations[input->data.location - VARYING_SLOT_VAR0]
3000             [input->data.location_frac];
3001 
3002          if (output != NULL){
3003             input->data.is_unmatched_generic_inout = 0;
3004             output->data.is_unmatched_generic_inout = 0;
3005          }
3006       }
3007    }
3008 }
3009 
3010 /**
3011  * Store the gl_FragDepth layout in the gl_shader_program struct.
3012  */
3013 static void
3014 store_fragdepth_layout(struct gl_shader_program *prog)
3015 {
3016    if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
3017       return;
3018    }
3019 
3020    struct exec_list *ir = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir;
3021 
3022    /* We don't look up the gl_FragDepth symbol directly because if
3023     * gl_FragDepth is not used in the shader, it's removed from the IR.
3024     * However, the symbol won't be removed from the symbol table.
3025     *
3026     * We're only interested in the cases where the variable is NOT removed
3027     * from the IR.
3028     */
3029    foreach_in_list(ir_instruction, node, ir) {
3030       ir_variable *const var = node->as_variable();
3031 
3032       if (var == NULL || var->data.mode != ir_var_shader_out) {
3033          continue;
3034       }
3035 
3036       if (strcmp(var->name, "gl_FragDepth") == 0) {
3037          switch (var->data.depth_layout) {
3038          case ir_depth_layout_none:
3039             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
3040             return;
3041          case ir_depth_layout_any:
3042             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
3043             return;
3044          case ir_depth_layout_greater:
3045             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
3046             return;
3047          case ir_depth_layout_less:
3048             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
3049             return;
3050          case ir_depth_layout_unchanged:
3051             prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
3052             return;
3053          default:
3054             assert(0);
3055             return;
3056          }
3057       }
3058    }
3059 }
3060 
3061 /**
3062  * Validate the resources used by a program versus the implementation limits
3063  */
3064 static void
3065 check_resources(struct gl_context *ctx, struct gl_shader_program *prog)
3066 {
3067    unsigned total_uniform_blocks = 0;
3068    unsigned total_shader_storage_blocks = 0;
3069 
3070    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3071       struct gl_linked_shader *sh = prog->_LinkedShaders[i];
3072 
3073       if (sh == NULL)
3074          continue;
3075 
3076       if (sh->Program->info.num_textures >
3077           ctx->Const.Program[i].MaxTextureImageUnits) {
3078          linker_error(prog, "Too many %s shader texture samplers\n",
3079                       _mesa_shader_stage_to_string(i));
3080       }
3081 
3082       if (sh->num_uniform_components >
3083           ctx->Const.Program[i].MaxUniformComponents) {
3084          if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
3085             linker_warning(prog, "Too many %s shader default uniform block "
3086                            "components, but the driver will try to optimize "
3087                            "them out; this is non-portable out-of-spec "
3088                            "behavior\n",
3089                            _mesa_shader_stage_to_string(i));
3090          } else {
3091             linker_error(prog, "Too many %s shader default uniform block "
3092                          "components\n",
3093                          _mesa_shader_stage_to_string(i));
3094          }
3095       }
3096 
3097       if (sh->num_combined_uniform_components >
3098           ctx->Const.Program[i].MaxCombinedUniformComponents) {
3099          if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
3100             linker_warning(prog, "Too many %s shader uniform components, "
3101                            "but the driver will try to optimize them out; "
3102                            "this is non-portable out-of-spec behavior\n",
3103                            _mesa_shader_stage_to_string(i));
3104          } else {
3105             linker_error(prog, "Too many %s shader uniform components\n",
3106                          _mesa_shader_stage_to_string(i));
3107          }
3108       }
3109 
3110       total_shader_storage_blocks += sh->Program->info.num_ssbos;
3111       total_uniform_blocks += sh->Program->info.num_ubos;
3112 
3113       const unsigned max_uniform_blocks =
3114          ctx->Const.Program[i].MaxUniformBlocks;
3115       if (max_uniform_blocks < sh->Program->info.num_ubos) {
3116          linker_error(prog, "Too many %s uniform blocks (%d/%d)\n",
3117                       _mesa_shader_stage_to_string(i),
3118                       sh->Program->info.num_ubos, max_uniform_blocks);
3119       }
3120 
3121       const unsigned max_shader_storage_blocks =
3122          ctx->Const.Program[i].MaxShaderStorageBlocks;
3123       if (max_shader_storage_blocks < sh->Program->info.num_ssbos) {
3124          linker_error(prog, "Too many %s shader storage blocks (%d/%d)\n",
3125                       _mesa_shader_stage_to_string(i),
3126                       sh->Program->info.num_ssbos, max_shader_storage_blocks);
3127       }
3128    }
3129 
3130    if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) {
3131       linker_error(prog, "Too many combined uniform blocks (%d/%d)\n",
3132                    total_uniform_blocks, ctx->Const.MaxCombinedUniformBlocks);
3133    }
3134 
3135    if (total_shader_storage_blocks > ctx->Const.MaxCombinedShaderStorageBlocks) {
3136       linker_error(prog, "Too many combined shader storage blocks (%d/%d)\n",
3137                    total_shader_storage_blocks,
3138                    ctx->Const.MaxCombinedShaderStorageBlocks);
3139    }
3140 
3141    for (unsigned i = 0; i < prog->data->NumUniformBlocks; i++) {
3142       if (prog->data->UniformBlocks[i].UniformBufferSize >
3143           ctx->Const.MaxUniformBlockSize) {
3144          linker_error(prog, "Uniform block %s too big (%d/%d)\n",
3145                       prog->data->UniformBlocks[i].Name,
3146                       prog->data->UniformBlocks[i].UniformBufferSize,
3147                       ctx->Const.MaxUniformBlockSize);
3148       }
3149    }
3150 
3151    for (unsigned i = 0; i < prog->data->NumShaderStorageBlocks; i++) {
3152       if (prog->data->ShaderStorageBlocks[i].UniformBufferSize >
3153           ctx->Const.MaxShaderStorageBlockSize) {
3154          linker_error(prog, "Shader storage block %s too big (%d/%d)\n",
3155                       prog->data->ShaderStorageBlocks[i].Name,
3156                       prog->data->ShaderStorageBlocks[i].UniformBufferSize,
3157                       ctx->Const.MaxShaderStorageBlockSize);
3158       }
3159    }
3160 }
3161 
3162 static void
3163 link_calculate_subroutine_compat(struct gl_shader_program *prog)
3164 {
3165    unsigned mask = prog->data->linked_stages;
3166    while (mask) {
3167       const int i = u_bit_scan(&mask);
3168       struct gl_program *p = prog->_LinkedShaders[i]->Program;
3169 
3170       for (unsigned j = 0; j < p->sh.NumSubroutineUniformRemapTable; j++) {
3171          if (p->sh.SubroutineUniformRemapTable[j] == INACTIVE_UNIFORM_EXPLICIT_LOCATION)
3172             continue;
3173 
3174          struct gl_uniform_storage *uni = p->sh.SubroutineUniformRemapTable[j];
3175 
3176          if (!uni)
3177             continue;
3178 
3179          int count = 0;
3180          if (p->sh.NumSubroutineFunctions == 0) {
3181             linker_error(prog, "subroutine uniform %s defined but no valid functions found\n", uni->type->name);
3182             continue;
3183          }
3184          for (unsigned f = 0; f < p->sh.NumSubroutineFunctions; f++) {
3185             struct gl_subroutine_function *fn = &p->sh.SubroutineFunctions[f];
3186             for (int k = 0; k < fn->num_compat_types; k++) {
3187                if (fn->types[k] == uni->type) {
3188                   count++;
3189                   break;
3190                }
3191             }
3192          }
3193          uni->num_compatible_subroutines = count;
3194       }
3195    }
3196 }
3197 
3198 static void
3199 check_subroutine_resources(struct gl_shader_program *prog)
3200 {
3201    unsigned mask = prog->data->linked_stages;
3202    while (mask) {
3203       const int i = u_bit_scan(&mask);
3204       struct gl_program *p = prog->_LinkedShaders[i]->Program;
3205 
3206       if (p->sh.NumSubroutineUniformRemapTable > MAX_SUBROUTINE_UNIFORM_LOCATIONS) {
3207          linker_error(prog, "Too many %s shader subroutine uniforms\n",
3208                       _mesa_shader_stage_to_string(i));
3209       }
3210    }
3211 }
3212 /**
3213  * Validate shader image resources.
3214  */
3215 static void
3216 check_image_resources(struct gl_context *ctx, struct gl_shader_program *prog)
3217 {
3218    unsigned total_image_units = 0;
3219    unsigned fragment_outputs = 0;
3220    unsigned total_shader_storage_blocks = 0;
3221 
3222    if (!ctx->Extensions.ARB_shader_image_load_store)
3223       return;
3224 
3225    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3226       struct gl_linked_shader *sh = prog->_LinkedShaders[i];
3227 
3228       if (sh) {
3229          if (sh->Program->info.num_images > ctx->Const.Program[i].MaxImageUniforms)
3230             linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
3231                          _mesa_shader_stage_to_string(i),
3232                          sh->Program->info.num_images,
3233                          ctx->Const.Program[i].MaxImageUniforms);
3234 
3235          total_image_units += sh->Program->info.num_images;
3236          total_shader_storage_blocks += sh->Program->info.num_ssbos;
3237 
3238          if (i == MESA_SHADER_FRAGMENT) {
3239             foreach_in_list(ir_instruction, node, sh->ir) {
3240                ir_variable *var = node->as_variable();
3241                if (var && var->data.mode == ir_var_shader_out)
3242                   /* since there are no double fs outputs - pass false */
3243                   fragment_outputs += var->type->count_attribute_slots(false);
3244             }
3245          }
3246       }
3247    }
3248 
3249    if (total_image_units > ctx->Const.MaxCombinedImageUniforms)
3250       linker_error(prog, "Too many combined image uniforms\n");
3251 
3252    if (total_image_units + fragment_outputs + total_shader_storage_blocks >
3253        ctx->Const.MaxCombinedShaderOutputResources)
3254       linker_error(prog, "Too many combined image uniforms, shader storage "
3255                          "buffers and fragment outputs\n");
3256 }
3257 
3258 
3259 /**
3260  * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION
3261  * for a variable, checks for overlaps between other uniforms using explicit
3262  * locations.
3263  */
3264 static int
3265 reserve_explicit_locations(struct gl_shader_program *prog,
3266                            string_to_uint_map *map, ir_variable *var)
3267 {
3268    unsigned slots = var->type->uniform_locations();
3269    unsigned max_loc = var->data.location + slots - 1;
3270    unsigned return_value = slots;
3271 
3272    /* Resize remap table if locations do not fit in the current one. */
3273    if (max_loc + 1 > prog->NumUniformRemapTable) {
3274       prog->UniformRemapTable =
3275          reralloc(prog, prog->UniformRemapTable,
3276                   gl_uniform_storage *,
3277                   max_loc + 1);
3278 
3279       if (!prog->UniformRemapTable) {
3280          linker_error(prog, "Out of memory during linking.\n");
3281          return -1;
3282       }
3283 
3284       /* Initialize allocated space. */
3285       for (unsigned i = prog->NumUniformRemapTable; i < max_loc + 1; i++)
3286          prog->UniformRemapTable[i] = NULL;
3287 
3288       prog->NumUniformRemapTable = max_loc + 1;
3289    }
3290 
3291    for (unsigned i = 0; i < slots; i++) {
3292       unsigned loc = var->data.location + i;
3293 
3294       /* Check if location is already used. */
3295       if (prog->UniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
3296 
3297          /* Possibly same uniform from a different stage, this is ok. */
3298          unsigned hash_loc;
3299          if (map->get(hash_loc, var->name) && hash_loc == loc - i) {
3300             return_value = 0;
3301             continue;
3302          }
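         /* For example, "layout(location = 3) uniform vec4 u;" declared in
          * both the vertex and fragment shaders: the second stage finds the
          * slot already reserved, but the map shows the same name at the same
          * base location, so it is accepted.
          */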
3303 
3304          /* ARB_explicit_uniform_location specification states:
3305           *
3306           *     "No two default-block uniform variables in the program can have
3307           *     the same location, even if they are unused, otherwise a compiler
3308           *     or linker error will be generated."
3309           */
3310          linker_error(prog,
3311                       "location qualifier for uniform %s overlaps "
3312                       "previously used location\n",
3313                       var->name);
3314          return -1;
3315       }
3316 
3317       /* Initialize location as inactive before optimization
3318        * rounds and location assignment.
3319        */
3320       prog->UniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
3321    }
3322 
3323    /* Note, base location used for arrays. */
3324    map->put(var->data.location, var->name);
3325 
3326    return return_value;
3327 }
3328 
3329 static bool
3330 reserve_subroutine_explicit_locations(struct gl_shader_program *prog,
3331                                       struct gl_program *p,
3332                                       ir_variable *var)
3333 {
3334    unsigned slots = var->type->uniform_locations();
3335    unsigned max_loc = var->data.location + slots - 1;
3336 
3337    /* Resize remap table if locations do not fit in the current one. */
3338    if (max_loc + 1 > p->sh.NumSubroutineUniformRemapTable) {
3339       p->sh.SubroutineUniformRemapTable =
3340          reralloc(p, p->sh.SubroutineUniformRemapTable,
3341                   gl_uniform_storage *,
3342                   max_loc + 1);
3343 
3344       if (!p->sh.SubroutineUniformRemapTable) {
3345          linker_error(prog, "Out of memory during linking.\n");
3346          return false;
3347       }
3348 
3349       /* Initialize allocated space. */
3350       for (unsigned i = p->sh.NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
3351          p->sh.SubroutineUniformRemapTable[i] = NULL;
3352 
3353       p->sh.NumSubroutineUniformRemapTable = max_loc + 1;
3354    }
3355 
3356    for (unsigned i = 0; i < slots; i++) {
3357       unsigned loc = var->data.location + i;
3358 
3359       /* Check if location is already used. */
3360       if (p->sh.SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
3361 
3362          /* ARB_explicit_uniform_location specification states:
3363           *     "No two subroutine uniform variables can have the same location
3364           *     in the same shader stage, otherwise a compiler or linker error
3365           *     will be generated."
3366           */
3367          linker_error(prog,
3368                       "location qualifier for uniform %s overlaps "
3369                       "previously used location\n",
3370                       var->name);
3371          return false;
3372       }
3373 
3374       /* Initialize location as inactive before optimization
3375        * rounds and location assignment.
3376        */
3377       p->sh.SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
3378    }
3379 
3380    return true;
3381 }
3382 /**
3383  * Check and reserve all explicit uniform locations, called before
3384  * any optimizations happen to handle also inactive uniforms and
3385  * inactive array elements that may get trimmed away.
3386  */
3387 static unsigned
3388 check_explicit_uniform_locations(struct gl_context *ctx,
3389                                  struct gl_shader_program *prog)
3390 {
3391    if (!ctx->Extensions.ARB_explicit_uniform_location)
3392       return 0;
3393 
3394    /* This map is used to detect if overlapping explicit locations
3395     * occur with the same uniform (from different stage) or a different one.
3396     */
3397    string_to_uint_map *uniform_map = new string_to_uint_map;
3398 
3399    if (!uniform_map) {
3400       linker_error(prog, "Out of memory during linking.\n");
3401       return 0;
3402    }
3403 
3404    unsigned entries_total = 0;
3405    unsigned mask = prog->data->linked_stages;
3406    while (mask) {
3407       const int i = u_bit_scan(&mask);
3408       struct gl_program *p = prog->_LinkedShaders[i]->Program;
3409 
3410       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
3411          ir_variable *var = node->as_variable();
3412          if (!var || var->data.mode != ir_var_uniform)
3413             continue;
3414 
3415          if (var->data.explicit_location) {
3416             bool ret = false;
3417             if (var->type->without_array()->is_subroutine())
3418                ret = reserve_subroutine_explicit_locations(prog, p, var);
3419             else {
3420                int slots = reserve_explicit_locations(prog, uniform_map,
3421                                                       var);
3422                if (slots != -1) {
3423                   ret = true;
3424                   entries_total += slots;
3425                }
3426             }
3427             if (!ret) {
3428                delete uniform_map;
3429                return 0;
3430             }
3431          }
3432       }
3433    }
3434 
3435    struct empty_uniform_block *current_block = NULL;
3436 
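   /* For example, a remap table of [u0, NULL, NULL, u1, NULL] produces two
    * empty blocks below: {start = 1, slots = 2} and {start = 4, slots = 1}.
    */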
3437    for (unsigned i = 0; i < prog->NumUniformRemapTable; i++) {
3438       /* We found empty space in UniformRemapTable. */
3439       if (prog->UniformRemapTable[i] == NULL) {
3440          /* We've found the beginning of a new continuous block of empty slots */
3441          if (!current_block || current_block->start + current_block->slots != i) {
3442             current_block = rzalloc(prog, struct empty_uniform_block);
3443             current_block->start = i;
3444             exec_list_push_tail(&prog->EmptyUniformLocations,
3445                                 &current_block->link);
3446          }
3447 
3448          /* The current block continues, so we simply increment its slots */
3449          current_block->slots++;
3450       }
3451    }
3452 
3453    delete uniform_map;
3454    return entries_total;
3455 }
3456 
3457 static bool
3458 should_add_buffer_variable(struct gl_shader_program *shProg,
3459                            GLenum type, const char *name)
3460 {
3461    bool found_interface = false;
3462    unsigned block_name_len = 0;
3463    const char *block_name_dot = strchr(name, '.');
3464 
3465    /* These rules only apply to buffer variables. So we return
3466     * true for the rest of types.
3467     */
3468    if (type != GL_BUFFER_VARIABLE)
3469       return true;
3470 
3471    for (unsigned i = 0; i < shProg->data->NumShaderStorageBlocks; i++) {
3472       const char *block_name = shProg->data->ShaderStorageBlocks[i].Name;
3473       block_name_len = strlen(block_name);
3474 
3475       const char *block_square_bracket = strchr(block_name, '[');
3476       if (block_square_bracket) {
3477          /* The block is part of an array of named interfaces,
3478           * for the name comparison we ignore the "[x]" part.
3479           */
3480          block_name_len -= strlen(block_square_bracket);
3481       }
3482 
3483       if (block_name_dot) {
3484          /* Check if the variable name starts with the interface
3485           * name.  The interface name (if present) should have the same
3486           * length as the interface block name we are comparing to.
3487           */
3488          unsigned len = strlen(name) - strlen(block_name_dot);
3489          if (len != block_name_len)
3490             continue;
3491       }
3492 
3493       if (strncmp(block_name, name, block_name_len) == 0) {
3494          found_interface = true;
3495          break;
3496       }
3497    }
3498 
3499    /* We remove the interface name from the buffer variable name,
3500     * including the dot that follows it.
3501     */
3502    if (found_interface)
3503       name = name + block_name_len + 1;
3504 
3505    /* The ARB_program_interface_query spec says:
3506     *
3507     *     "For an active shader storage block member declared as an array, an
3508     *     entry will be generated only for the first array element, regardless
3509     *     of its type.  For arrays of aggregate types, the enumeration rules
3510     *     are applied recursively for the single enumerated array element."
3511     */
3512    const char *struct_first_dot = strchr(name, '.');
3513    const char *first_square_bracket = strchr(name, '[');
3514 
3515    /* The buffer variable is on top level and it is not an array */
3516    if (!first_square_bracket) {
3517       return true;
3518    /* The shader storage block member is a struct, so generate the entry */
3519    } else if (struct_first_dot && struct_first_dot < first_square_bracket) {
3520       return true;
3521    } else {
3522       /* Shader storage block member is an array, only generate an entry for the
3523        * first array element.
3524        */
3525       if (strncmp(first_square_bracket, "[0]", 3) == 0)
3526          return true;
3527    }
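   /* For example (hypothetical names), after stripping the interface name,
    * "members[0].offset" yields an entry while "members[1].offset" is
    * filtered out here.
    */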
3528 
3529    return false;
3530 }
3531 
3532 static bool
3533 add_program_resource(struct gl_shader_program *prog,
3534                      struct set *resource_set,
3535                      GLenum type, const void *data, uint8_t stages)
3536 {
3537    assert(data);
3538 
3539    /* If resource already exists, do not add it again. */
3540    if (_mesa_set_search(resource_set, data))
3541       return true;
3542 
3543    prog->ProgramResourceList =
3544       reralloc(prog,
3545                prog->ProgramResourceList,
3546                gl_program_resource,
3547                prog->NumProgramResourceList + 1);
3548 
3549    if (!prog->ProgramResourceList) {
3550       linker_error(prog, "Out of memory during linking.\n");
3551       return false;
3552    }
3553 
3554    struct gl_program_resource *res =
3555       &prog->ProgramResourceList[prog->NumProgramResourceList];
3556 
3557    res->Type = type;
3558    res->Data = data;
3559    res->StageReferences = stages;
3560 
3561    prog->NumProgramResourceList++;
3562 
3563    _mesa_set_add(resource_set, data);
3564 
3565    return true;
3566 }
3567 
3568 /* Check whether the variable \c var is a packed varying and whether the
3569  * given name is part of the packed varying's list.
3570  *
3571  * If a variable is a packed varying, it has a name like
3572  * 'packed:a,b,c' where a, b and c are separate variables.
3573  */
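/* For example, included_in_packed_varying(var, "b") returns true when
 * var->name is "packed:a,b,c", and false for a name such as "d".
 */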
3574 static bool
3575 included_in_packed_varying(ir_variable *var, const char *name)
3576 {
3577    if (strncmp(var->name, "packed:", 7) != 0)
3578       return false;
3579 
3580    char *list = strdup(var->name + 7);
3581    assert(list);
3582 
3583    bool found = false;
3584    char *saveptr;
3585    char *token = strtok_r(list, ",", &saveptr);
3586    while (token) {
3587       if (strcmp(token, name) == 0) {
3588          found = true;
3589          break;
3590       }
3591       token = strtok_r(NULL, ",", &saveptr);
3592    }
3593    free(list);
3594    return found;
3595 }
3596 
3597 /**
3598  * Build a stage reference bitmask from a variable name.
3599  */
3600 static uint8_t
3601 build_stageref(struct gl_shader_program *shProg, const char *name,
3602                unsigned mode)
3603 {
3604    uint8_t stages = 0;
3605 
3606    /* Note that we assume a maximum of 8 stages; if more stages are added, the
3607     * type used for the reference mask in gl_program_resource must be changed.
3608     */
3609    assert(MESA_SHADER_STAGES < 8);
3610 
3611    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3612       struct gl_linked_shader *sh = shProg->_LinkedShaders[i];
3613       if (!sh)
3614          continue;
3615 
3616       /* Shader symbol table may contain variables that have
3617        * been optimized away. Search IR for the variable instead.
3618        */
3619       foreach_in_list(ir_instruction, node, sh->ir) {
3620          ir_variable *var = node->as_variable();
3621          if (var) {
3622             unsigned baselen = strlen(var->name);
3623 
3624             if (included_in_packed_varying(var, name)) {
3625                   stages |= (1 << i);
3626                   break;
3627             }
3628 
3629             /* The mode needs to match; otherwise we might pick a
3630              * variable with the same name but a different interface.
3631              */
3632             if (var->data.mode != mode)
3633                continue;
3634 
3635             if (strncmp(var->name, name, baselen) == 0) {
3636                /* Check for exact name matches but also check for arrays and
3637                 * structs.
3638                 */
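               /* For example, a variable named "color" matches the names
                * "color", "color[2]" and "color.first", but not "colormap":
                * the character after the common prefix must be '\0', '[' or
                * '.'.
                */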
3639                if (name[baselen] == '\0' ||
3640                    name[baselen] == '[' ||
3641                    name[baselen] == '.') {
3642                   stages |= (1 << i);
3643                   break;
3644                }
3645             }
3646          }
3647       }
3648    }
3649    return stages;
3650 }
3651 
3652 /**
3653  * Create gl_shader_variable from ir_variable class.
3654  */
3655 static gl_shader_variable *
3656 create_shader_variable(struct gl_shader_program *shProg,
3657                        const ir_variable *in,
3658                        const char *name, const glsl_type *type,
3659                        const glsl_type *interface_type,
3660                        bool use_implicit_location, int location,
3661                        const glsl_type *outermost_struct_type)
3662 {
3663    gl_shader_variable *out = ralloc(shProg, struct gl_shader_variable);
3664    if (!out)
3665       return NULL;
3666 
3667    /* gl_VertexID may be lowered to gl_VertexIDMESA, but applications expect
3668     * to see gl_VertexID in the program resource list, so pretend it is there.
3669     */
3670    if (in->data.mode == ir_var_system_value &&
3671        in->data.location == SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) {
3672       out->name = ralloc_strdup(shProg, "gl_VertexID");
3673    } else if ((in->data.mode == ir_var_shader_out &&
3674                in->data.location == VARYING_SLOT_TESS_LEVEL_OUTER) ||
3675               (in->data.mode == ir_var_system_value &&
3676                in->data.location == SYSTEM_VALUE_TESS_LEVEL_OUTER)) {
3677       out->name = ralloc_strdup(shProg, "gl_TessLevelOuter");
3678       type = glsl_type::get_array_instance(glsl_type::float_type, 4);
3679    } else if ((in->data.mode == ir_var_shader_out &&
3680                in->data.location == VARYING_SLOT_TESS_LEVEL_INNER) ||
3681               (in->data.mode == ir_var_system_value &&
3682                in->data.location == SYSTEM_VALUE_TESS_LEVEL_INNER)) {
3683       out->name = ralloc_strdup(shProg, "gl_TessLevelInner");
3684       type = glsl_type::get_array_instance(glsl_type::float_type, 2);
3685    } else {
3686       out->name = ralloc_strdup(shProg, name);
3687    }
3688 
3689    if (!out->name)
3690       return NULL;
3691 
3692    /* The ARB_program_interface_query spec says:
3693     *
3694     *     "Not all active variables are assigned valid locations; the
3695     *     following variables will have an effective location of -1:
3696     *
3697     *      * uniforms declared as atomic counters;
3698     *
3699     *      * members of a uniform block;
3700     *
3701     *      * built-in inputs, outputs, and uniforms (starting with "gl_"); and
3702     *
3703     *      * inputs or outputs not declared with a "location" layout
3704     *        qualifier, except for vertex shader inputs and fragment shader
3705     *        outputs."
3706     */
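   /* For example, the built-in gl_FragCoord is reported with location -1,
    * while a user-declared vertex shader input keeps its location even
    * without an explicit location qualifier (use_implicit_location is true
    * for vertex shader inputs and fragment shader outputs).
    */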
3707    if (in->type->base_type == GLSL_TYPE_ATOMIC_UINT ||
3708        is_gl_identifier(in->name) ||
3709        !(in->data.explicit_location || use_implicit_location)) {
3710       out->location = -1;
3711    } else {
3712       out->location = location;
3713    }
3714 
3715    out->type = type;
3716    out->outermost_struct_type = outermost_struct_type;
3717    out->interface_type = interface_type;
3718    out->component = in->data.location_frac;
3719    out->index = in->data.index;
3720    out->patch = in->data.patch;
3721    out->mode = in->data.mode;
3722    out->interpolation = in->data.interpolation;
3723    out->explicit_location = in->data.explicit_location;
3724    out->precision = in->data.precision;
3725 
3726    return out;
3727 }
3728 
3729 static const glsl_type *
3730 resize_to_max_patch_vertices(const struct gl_context *ctx,
3731                              const glsl_type *type)
3732 {
3733    if (!type)
3734       return NULL;
3735 
3736    return glsl_type::get_array_instance(type->fields.array,
3737                                         ctx->Const.MaxPatchVertices);
3738 }
3739 
3740 static bool
3741 add_shader_variable(const struct gl_context *ctx,
3742                     struct gl_shader_program *shProg,
3743                     struct set *resource_set,
3744                     unsigned stage_mask,
3745                     GLenum programInterface, ir_variable *var,
3746                     const char *name, const glsl_type *type,
3747                     bool use_implicit_location, int location,
3748                     const glsl_type *outermost_struct_type = NULL)
3749 {
3750    const glsl_type *interface_type = var->get_interface_type();
3751 
3752    if (outermost_struct_type == NULL) {
3753       /* Unsized (non-patch) TCS output/TES input arrays are implicitly
3754        * sized to gl_MaxPatchVertices.  Internally, we shrink them to a
3755        * smaller size.
3756        *
3757        * This can cause trouble with SSO programs.  Since the TCS declares
3758        * the number of output vertices, we can always shrink TCS output
3759        * arrays.  However, the TES might not be linked with a TCS, in
3760        * which case it won't know the size of the patch.  In other words,
3761        * the TCS and TES may disagree on the (smaller) array sizes.  This
3762        * can result in the resource names differing across stages, causing
3763        * SSO validation failures and other cascading issues.
3764        *
3765        * Expanding the array size to the full gl_MaxPatchVertices fixes
3766        * these issues.  It's also what program interface queries expect,
3767        * as that is the official size of the array.
3768        */
3769       if (var->data.tess_varying_implicit_sized_array) {
3770          type = resize_to_max_patch_vertices(ctx, type);
3771          interface_type = resize_to_max_patch_vertices(ctx, interface_type);
3772       }
3773 
3774       if (var->data.from_named_ifc_block) {
3775          const char *interface_name = interface_type->name;
3776 
3777          if (interface_type->is_array()) {
3778             /* Issue #16 of the ARB_program_interface_query spec says:
3779              *
3780              * "* If a variable is a member of an interface block without an
3781              *    instance name, it is enumerated using just the variable name.
3782              *
3783              *  * If a variable is a member of an interface block with an
3784              *    instance name, it is enumerated as "BlockName.Member", where
3785              *    "BlockName" is the name of the interface block (not the
3786              *    instance name) and "Member" is the name of the variable."
3787              *
3788              * In particular, it indicates that it should be "BlockName",
3789              * not "BlockName[array length]".  The conformance suite and
3790              * dEQP both require this behavior.
3791              *
3792              * Here, we unwrap the extra array level added by named interface
3793              * block array lowering so we have the correct variable type.  We
3794              * also unwrap the interface type when constructing the name.
3795              *
3796              * We leave interface_type the same so that ES 3.x SSO pipeline
3797              * validation can enforce the rules requiring array length to
3798              * match on interface blocks.
3799              */
3800             type = type->fields.array;
3801 
3802             interface_name = interface_type->fields.array->name;
3803          }
3804 
3805          name = ralloc_asprintf(shProg, "%s.%s", interface_name, name);
3806       }
3807    }
3808 
3809    switch (type->base_type) {
3810    case GLSL_TYPE_STRUCT: {
3811       /* The ARB_program_interface_query spec says:
3812        *
3813        *     "For an active variable declared as a structure, a separate entry
3814        *     will be generated for each active structure member.  The name of
3815        *     each entry is formed by concatenating the name of the structure,
3816        *     the "."  character, and the name of the structure member.  If a
3817        *     structure member to enumerate is itself a structure or array,
3818        *     these enumeration rules are applied recursively."
3819        */
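      /* For example, "out struct { vec3 a; vec4 b; } v;" produces the entries
       * "v.a" and "v.b", with field_location advancing by one slot for each
       * member.
       */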
3820       if (outermost_struct_type == NULL)
3821          outermost_struct_type = type;
3822 
3823       unsigned field_location = location;
3824       for (unsigned i = 0; i < type->length; i++) {
3825          const struct glsl_struct_field *field = &type->fields.structure[i];
3826          char *field_name = ralloc_asprintf(shProg, "%s.%s", name, field->name);
3827          if (!add_shader_variable(ctx, shProg, resource_set,
3828                                   stage_mask, programInterface,
3829                                   var, field_name, field->type,
3830                                   use_implicit_location, field_location,
3831                                   outermost_struct_type))
3832             return false;
3833 
3834          field_location += field->type->count_attribute_slots(false);
3835       }
3836       return true;
3837    }
3838 
3839    default: {
3840       /* The ARB_program_interface_query spec says:
3841        *
3842        *     "For an active variable declared as a single instance of a basic
3843        *     type, a single entry will be generated, using the variable name
3844        *     from the shader source."
3845        */
3846       gl_shader_variable *sha_v =
3847          create_shader_variable(shProg, var, name, type, interface_type,
3848                                 use_implicit_location, location,
3849                                 outermost_struct_type);
3850       if (!sha_v)
3851          return false;
3852 
3853       return add_program_resource(shProg, resource_set,
3854                                   programInterface, sha_v, stage_mask);
3855    }
3856    }
3857 }
3858 
3859 static bool
3860 add_interface_variables(const struct gl_context *ctx,
3861                         struct gl_shader_program *shProg,
3862                         struct set *resource_set,
3863                         unsigned stage, GLenum programInterface)
3864 {
3865    exec_list *ir = shProg->_LinkedShaders[stage]->ir;
3866 
3867    foreach_in_list(ir_instruction, node, ir) {
3868       ir_variable *var = node->as_variable();
3869 
3870       if (!var || var->data.how_declared == ir_var_hidden)
3871          continue;
3872 
3873       int loc_bias;
3874 
3875       switch (var->data.mode) {
3876       case ir_var_system_value:
3877       case ir_var_shader_in:
3878          if (programInterface != GL_PROGRAM_INPUT)
3879             continue;
3880          loc_bias = (stage == MESA_SHADER_VERTEX) ? int(VERT_ATTRIB_GENERIC0)
3881                                                   : int(VARYING_SLOT_VAR0);
3882          break;
3883       case ir_var_shader_out:
3884          if (programInterface != GL_PROGRAM_OUTPUT)
3885             continue;
3886          loc_bias = (stage == MESA_SHADER_FRAGMENT) ? int(FRAG_RESULT_DATA0)
3887                                                     : int(VARYING_SLOT_VAR0);
3888          break;
3889       default:
3890          continue;
3891       };
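      /* For example, a vertex shader input stored at VERT_ATTRIB_GENERIC0 + 3
       * is reported to the application as location 3 once loc_bias is
       * subtracted below.
       */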
3892 
3893       if (var->data.patch)
3894          loc_bias = int(VARYING_SLOT_PATCH0);
3895 
3896       /* Skip packed varyings, packed varyings are handled separately
3897        * by add_packed_varyings.
3898        */
3899       if (strncmp(var->name, "packed:", 7) == 0)
3900          continue;
3901 
3902       /* Skip fragdata arrays, these are handled separately
3903        * by add_fragdata_arrays.
3904        */
3905       if (strncmp(var->name, "gl_out_FragData", 15) == 0)
3906          continue;
3907 
3908       const bool vs_input_or_fs_output =
3909          (stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
3910          (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out);
3911 
3912       if (!add_shader_variable(ctx, shProg, resource_set,
3913                                1 << stage, programInterface,
3914                                var, var->name, var->type, vs_input_or_fs_output,
3915                                var->data.location - loc_bias))
3916          return false;
3917    }
3918    return true;
3919 }
3920 
3921 static bool
3922 add_packed_varyings(const struct gl_context *ctx,
3923                     struct gl_shader_program *shProg,
3924                     struct set *resource_set,
3925                     int stage, GLenum type)
3926 {
3927    struct gl_linked_shader *sh = shProg->_LinkedShaders[stage];
3928    GLenum iface;
3929 
3930    if (!sh || !sh->packed_varyings)
3931       return true;
3932 
3933    foreach_in_list(ir_instruction, node, sh->packed_varyings) {
3934       ir_variable *var = node->as_variable();
3935       if (var) {
3936          switch (var->data.mode) {
3937          case ir_var_shader_in:
3938             iface = GL_PROGRAM_INPUT;
3939             break;
3940          case ir_var_shader_out:
3941             iface = GL_PROGRAM_OUTPUT;
3942             break;
3943          default:
3944             unreachable("unexpected type");
3945          }
3946 
3947          if (type == iface) {
3948             const int stage_mask =
3949                build_stageref(shProg, var->name, var->data.mode);
3950             if (!add_shader_variable(ctx, shProg, resource_set,
3951                                      stage_mask,
3952                                      iface, var, var->name, var->type, false,
3953                                      var->data.location - VARYING_SLOT_VAR0))
3954                return false;
3955          }
3956       }
3957    }
3958    return true;
3959 }
3960 
3961 static bool
3962 add_fragdata_arrays(const struct gl_context *ctx,
3963                     struct gl_shader_program *shProg,
3964                     struct set *resource_set)
3965 {
3966    struct gl_linked_shader *sh = shProg->_LinkedShaders[MESA_SHADER_FRAGMENT];
3967 
3968    if (!sh || !sh->fragdata_arrays)
3969       return true;
3970 
3971    foreach_in_list(ir_instruction, node, sh->fragdata_arrays) {
3972       ir_variable *var = node->as_variable();
3973       if (var) {
3974          assert(var->data.mode == ir_var_shader_out);
3975 
3976          if (!add_shader_variable(ctx, shProg, resource_set,
3977                                   1 << MESA_SHADER_FRAGMENT,
3978                                   GL_PROGRAM_OUTPUT, var, var->name, var->type,
3979                                   true, var->data.location - FRAG_RESULT_DATA0))
3980             return false;
3981       }
3982    }
3983    return true;
3984 }
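
/* Illustrative note (hypothetical shader code, not part of the original
 * source): fragment shader writes such as
 *
 *    gl_FragData[0] = color0;
 *    gl_FragData[1] = color1;
 *
 * are the kind of outputs exposed by add_fragdata_arrays above as
 * GL_PROGRAM_OUTPUT resources, with locations reported relative to
 * FRAG_RESULT_DATA0.
 */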
3985 
3986 static char*
3987 get_top_level_name(const char *name)
3988 {
3989    const char *first_dot = strchr(name, '.');
3990    const char *first_square_bracket = strchr(name, '[');
3991    int name_size = 0;
3992 
3993    /* The ARB_program_interface_query spec says:
3994     *
3995     *     "For the property TOP_LEVEL_ARRAY_SIZE, a single integer identifying
3996     *     the number of active array elements of the top-level shader storage
3997     *     block member containing to the active variable is written to
3998     *     <params>.  If the top-level block member is not declared as an
3999     *     array, the value one is written to <params>.  If the top-level block
4000     *     member is an array with no declared size, the value zero is written
4001     *     to <params>."
4002     */
4003 
4004    /* The buffer variable is at top level. */
4005    if (!first_square_bracket && !first_dot)
4006       name_size = strlen(name);
4007    else if ((!first_square_bracket ||
4008             (first_dot && first_dot < first_square_bracket)))
4009       name_size = first_dot - name;
4010    else
4011       name_size = first_square_bracket - name;
4012 
4013    return strndup(name, name_size);
4014 }
4015 
4016 static char*
4017 get_var_name(const char *name)
4018 {
4019    const char *first_dot = strchr(name, '.');
4020 
4021    if (!first_dot)
4022       return strdup(name);
4023 
4024    return strndup(first_dot+1, strlen(first_dot) - 1);
4025 }
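
/* Illustrative examples (hypothetical buffer variable names) of how the two
 * helpers above split a name:
 *
 *    get_top_level_name("Block.member[2].x")  returns "Block"
 *    get_top_level_name("arr[3].y")           returns "arr"
 *    get_top_level_name("plain")              returns "plain"
 *
 *    get_var_name("Block.member[2].x")        returns "member[2].x"
 *    get_var_name("plain")                    returns "plain"
 */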
4026 
4027 static bool
4028 is_top_level_shader_storage_block_member(const char* name,
4029                                          const char* interface_name,
4030                                          const char* field_name)
4031 {
4032    bool result = false;
4033 
4034    /* If the given variable is already a top-level shader storage
4035     * block member, then return array_size = 1.
4036     * There are two possibilities: the shader storage block is either
4037     * instanced or not instanced.
4038     *
4039     * For the instanced case, we construct the name the variable would
4040     * have at top level and compare it with the real name.  If they are
4041     * the same, the variable is already at top level.
4042     *
4043     * Full instanced name is: interface name + '.' + var name +
4044     *    NULL character
4045     */
4046    int name_length = strlen(interface_name) + 1 + strlen(field_name) + 1;
4047    char *full_instanced_name = (char *) calloc(name_length, sizeof(char));
4048    if (!full_instanced_name) {
4049       fprintf(stderr, "%s: Cannot allocate space for name\n", __func__);
4050       return false;
4051    }
4052 
4053    snprintf(full_instanced_name, name_length, "%s.%s",
4054             interface_name, field_name);
4055 
4056    /* Check if it is a top-level shader storage block member of an
4057     * instanced interface block, or of an unnamed interface block.
4058     */
4059    if (strcmp(name, full_instanced_name) == 0 ||
4060        strcmp(name, field_name) == 0)
4061       result = true;
4062 
4063    free(full_instanced_name);
4064    return result;
4065 }
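
/* A hedged example (hypothetical declarations): given
 *
 *    buffer BlockA { float a; } inst;    // instanced
 *    buffer BlockB { float b; };         // not instanced
 *
 * the active variable names would typically be reported as "BlockA.a" and
 * "b".  Both are top-level members, so the helper above matches the first
 * against interface_name + "." + field_name and the second against the bare
 * field name.
 */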
4066 
4067 static int
4068 get_array_size(struct gl_uniform_storage *uni, const glsl_struct_field *field,
4069                char *interface_name, char *var_name)
4070 {
4071    /* The ARB_program_interface_query spec says:
4072     *
4073     *     "For the property TOP_LEVEL_ARRAY_SIZE, a single integer identifying
4074     *     the number of active array elements of the top-level shader storage
4075     *     block member containing to the active variable is written to
4076     *     <params>.  If the top-level block member is not declared as an
4077     *     array, the value one is written to <params>.  If the top-level block
4078     *     member is an array with no declared size, the value zero is written
4079     *     to <params>."
4080     */
4081    if (is_top_level_shader_storage_block_member(uni->name,
4082                                                 interface_name,
4083                                                 var_name))
4084       return 1;
4085    else if (field->type->is_unsized_array())
4086       return 0;
4087    else if (field->type->is_array())
4088       return field->type->length;
4089 
4090    return 1;
4091 }
4092 
4093 static int
4094 get_array_stride(struct gl_uniform_storage *uni, const glsl_type *interface,
4095                  const glsl_struct_field *field, char *interface_name,
4096                  char *var_name)
4097 {
4098    /* The ARB_program_interface_query spec says:
4099     *
4100     *     "For the property TOP_LEVEL_ARRAY_STRIDE, a single integer
4101     *     identifying the stride between array elements of the top-level
4102     *     shader storage block member containing the active variable is
4103     *     written to <params>.  For top-level block members declared as
4104     *     arrays, the value written is the difference, in basic machine units,
4105     *     between the offsets of the active variable for consecutive elements
4106     *     in the top-level array.  For top-level block members not declared as
4107     *     an array, zero is written to <params>."
4108     */
4109    if (field->type->is_array()) {
4110       const enum glsl_matrix_layout matrix_layout =
4111          glsl_matrix_layout(field->matrix_layout);
4112       bool row_major = matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
4113       const glsl_type *array_type = field->type->fields.array;
4114 
4115       if (is_top_level_shader_storage_block_member(uni->name,
4116                                                    interface_name,
4117                                                    var_name))
4118          return 0;
4119 
4120       if (interface->interface_packing != GLSL_INTERFACE_PACKING_STD430) {
4121          if (array_type->is_record() || array_type->is_array())
4122             return glsl_align(array_type->std140_size(row_major), 16);
4123          else
4124             return MAX2(array_type->std140_base_alignment(row_major), 16);
4125       } else {
4126          return array_type->std430_array_stride(row_major);
4127       }
4128    }
4129    return 0;
4130 }
4131 
4132 static void
4133 calculate_array_size_and_stride(struct gl_shader_program *shProg,
4134                                 struct gl_uniform_storage *uni)
4135 {
4136    int block_index = uni->block_index;
4137    int array_size = -1;
4138    int array_stride = -1;
4139    char *var_name = get_top_level_name(uni->name);
4140    char *interface_name =
4141       get_top_level_name(uni->is_shader_storage ?
4142                          shProg->data->ShaderStorageBlocks[block_index].Name :
4143                          shProg->data->UniformBlocks[block_index].Name);
4144 
4145    if (strcmp(var_name, interface_name) == 0) {
4146       /* Deal with instanced array of SSBOs */
4147       char *temp_name = get_var_name(uni->name);
4148       if (!temp_name) {
4149          linker_error(shProg, "Out of memory during linking.\n");
4150          goto write_top_level_array_size_and_stride;
4151       }
4152       free(var_name);
4153       var_name = get_top_level_name(temp_name);
4154       free(temp_name);
4155       if (!var_name) {
4156          linker_error(shProg, "Out of memory during linking.\n");
4157          goto write_top_level_array_size_and_stride;
4158       }
4159    }
4160 
4161    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4162       const gl_linked_shader *sh = shProg->_LinkedShaders[i];
4163       if (sh == NULL)
4164          continue;
4165 
4166       foreach_in_list(ir_instruction, node, sh->ir) {
4167          ir_variable *var = node->as_variable();
4168          if (!var || !var->get_interface_type() ||
4169              var->data.mode != ir_var_shader_storage)
4170             continue;
4171 
4172          const glsl_type *interface = var->get_interface_type();
4173 
4174          if (strcmp(interface_name, interface->name) != 0)
4175             continue;
4176 
4177          for (unsigned i = 0; i < interface->length; i++) {
4178             const glsl_struct_field *field = &interface->fields.structure[i];
4179             if (strcmp(field->name, var_name) != 0)
4180                continue;
4181 
4182             array_stride = get_array_stride(uni, interface, field,
4183                                             interface_name, var_name);
4184             array_size = get_array_size(uni, field, interface_name, var_name);
4185             goto write_top_level_array_size_and_stride;
4186          }
4187       }
4188    }
4189 write_top_level_array_size_and_stride:
4190    free(interface_name);
4191    free(var_name);
4192    uni->top_level_array_stride = array_stride;
4193    uni->top_level_array_size = array_size;
4194 }
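
/* A minimal sketch (hypothetical std430 SSBO) of the values computed above:
 *
 *    layout(std430) buffer Particles {
 *       vec4  position[16];   // sized top-level array member
 *       float weights[];      // unsized top-level array member (last)
 *    };
 *
 * For the buffer variable "position[0]" one would expect
 * TOP_LEVEL_ARRAY_SIZE = 16 and TOP_LEVEL_ARRAY_STRIDE = 16 (the std430
 * array stride of a vec4).  For "weights[0]", TOP_LEVEL_ARRAY_SIZE = 0
 * because the top-level array has no declared size.
 */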
4195 
4196 /**
4197  * Builds up a list of program resources that point to existing
4198  * resource data.
4199  */
4200 void
4201 build_program_resource_list(struct gl_context *ctx,
4202                             struct gl_shader_program *shProg)
4203 {
4204    /* Rebuild resource list. */
4205    if (shProg->ProgramResourceList) {
4206       ralloc_free(shProg->ProgramResourceList);
4207       shProg->ProgramResourceList = NULL;
4208       shProg->NumProgramResourceList = 0;
4209    }
4210 
4211    int input_stage = MESA_SHADER_STAGES, output_stage = 0;
4212 
4213    /* Determine first input and final output stage. These are used to
4214     * detect which variables should be enumerated in the resource list
4215     * for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
4216     */
4217    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4218       if (!shProg->_LinkedShaders[i])
4219          continue;
4220       if (input_stage == MESA_SHADER_STAGES)
4221          input_stage = i;
4222       output_stage = i;
4223    }
4224 
4225    /* Empty shader, no resources. */
4226    if (input_stage == MESA_SHADER_STAGES && output_stage == 0)
4227       return;
4228 
4229    struct set *resource_set = _mesa_set_create(NULL,
4230                                                _mesa_hash_pointer,
4231                                                _mesa_key_pointer_equal);
4232 
4233    /* Program interface needs to expose varyings in case of SSO. */
4234    if (shProg->SeparateShader) {
4235       if (!add_packed_varyings(ctx, shProg, resource_set,
4236                                input_stage, GL_PROGRAM_INPUT))
4237          return;
4238 
4239       if (!add_packed_varyings(ctx, shProg, resource_set,
4240                                output_stage, GL_PROGRAM_OUTPUT))
4241          return;
4242    }
4243 
4244    if (!add_fragdata_arrays(ctx, shProg, resource_set))
4245       return;
4246 
4247    /* Add inputs and outputs to the resource list. */
4248    if (!add_interface_variables(ctx, shProg, resource_set,
4249                                 input_stage, GL_PROGRAM_INPUT))
4250       return;
4251 
4252    if (!add_interface_variables(ctx, shProg, resource_set,
4253                                 output_stage, GL_PROGRAM_OUTPUT))
4254       return;
4255 
4256    struct gl_transform_feedback_info *linked_xfb =
4257       shProg->xfb_program->sh.LinkedTransformFeedback;
4258 
4259    /* Add transform feedback varyings. */
4260    if (linked_xfb->NumVarying > 0) {
4261       for (int i = 0; i < linked_xfb->NumVarying; i++) {
4262          if (!add_program_resource(shProg, resource_set,
4263                                    GL_TRANSFORM_FEEDBACK_VARYING,
4264                                    &linked_xfb->Varyings[i], 0))
4265             return;
4266       }
4267    }
4268 
4269    /* Add transform feedback buffers. */
4270    for (unsigned i = 0; i < ctx->Const.MaxTransformFeedbackBuffers; i++) {
4271       if ((linked_xfb->ActiveBuffers >> i) & 1) {
4272          linked_xfb->Buffers[i].Binding = i;
4273          if (!add_program_resource(shProg, resource_set,
4274                                    GL_TRANSFORM_FEEDBACK_BUFFER,
4275                                    &linked_xfb->Buffers[i], 0))
4276             return;
4277       }
4278    }
4279 
4280    /* Add uniforms from uniform storage. */
4281    for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
4282       /* Do not add uniforms internally used by Mesa. */
4283       if (shProg->data->UniformStorage[i].hidden)
4284          continue;
4285 
4286       uint8_t stageref =
4287          build_stageref(shProg, shProg->data->UniformStorage[i].name,
4288                         ir_var_uniform);
4289 
4290       /* Add stage references for uniforms in a uniform block. */
4291       bool is_shader_storage =
4292         shProg->data->UniformStorage[i].is_shader_storage;
4293       int block_index = shProg->data->UniformStorage[i].block_index;
4294       if (block_index != -1) {
4295          stageref |= is_shader_storage ?
4296             shProg->data->ShaderStorageBlocks[block_index].stageref :
4297             shProg->data->UniformBlocks[block_index].stageref;
4298       }
4299 
4300       GLenum type = is_shader_storage ? GL_BUFFER_VARIABLE : GL_UNIFORM;
4301       if (!should_add_buffer_variable(shProg, type,
4302                                       shProg->data->UniformStorage[i].name))
4303          continue;
4304 
4305       if (is_shader_storage) {
4306          calculate_array_size_and_stride(shProg,
4307                                          &shProg->data->UniformStorage[i]);
4308       }
4309 
4310       if (!add_program_resource(shProg, resource_set, type,
4311                                 &shProg->data->UniformStorage[i], stageref))
4312          return;
4313    }
4314 
4315    /* Add program uniform blocks. */
4316    for (unsigned i = 0; i < shProg->data->NumUniformBlocks; i++) {
4317       if (!add_program_resource(shProg, resource_set, GL_UNIFORM_BLOCK,
4318           &shProg->data->UniformBlocks[i], 0))
4319          return;
4320    }
4321 
4322    /* Add program shader storage blocks. */
4323    for (unsigned i = 0; i < shProg->data->NumShaderStorageBlocks; i++) {
4324       if (!add_program_resource(shProg, resource_set, GL_SHADER_STORAGE_BLOCK,
4325           &shProg->data->ShaderStorageBlocks[i], 0))
4326          return;
4327    }
4328 
4329    /* Add atomic counter buffers. */
4330    for (unsigned i = 0; i < shProg->data->NumAtomicBuffers; i++) {
4331       if (!add_program_resource(shProg, resource_set, GL_ATOMIC_COUNTER_BUFFER,
4332                                 &shProg->data->AtomicBuffers[i], 0))
4333          return;
4334    }
4335 
4336    for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
4337       GLenum type;
4338       if (!shProg->data->UniformStorage[i].hidden)
4339          continue;
4340 
4341       for (int j = MESA_SHADER_VERTEX; j < MESA_SHADER_STAGES; j++) {
4342          if (!shProg->data->UniformStorage[i].opaque[j].active ||
4343              !shProg->data->UniformStorage[i].type->is_subroutine())
4344             continue;
4345 
4346          type = _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage)j);
4347          /* add shader subroutines */
4348          if (!add_program_resource(shProg, resource_set,
4349                                    type, &shProg->data->UniformStorage[i], 0))
4350             return;
4351       }
4352    }
4353 
4354    unsigned mask = shProg->data->linked_stages;
4355    while (mask) {
4356       const int i = u_bit_scan(&mask);
4357       struct gl_program *p = shProg->_LinkedShaders[i]->Program;
4358 
4359       GLuint type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
4360       for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
4361          if (!add_program_resource(shProg, resource_set,
4362                                    type, &p->sh.SubroutineFunctions[j], 0))
4363             return;
4364       }
4365    }
4366 
4367    _mesa_set_destroy(resource_set, NULL);
4368 }
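
/* The list built above backs the GL_ARB_program_interface_query entry
 * points.  A minimal, hypothetical application-side sketch (not part of the
 * linker) of how these resources are later enumerated:
 *
 *    GLint num_inputs = 0;
 *    glGetProgramInterfaceiv(program, GL_PROGRAM_INPUT,
 *                            GL_ACTIVE_RESOURCES, &num_inputs);
 *    for (GLint i = 0; i < num_inputs; i++) {
 *       char name[256];
 *       GLenum prop = GL_LOCATION;
 *       GLint location = -1;
 *       glGetProgramResourceName(program, GL_PROGRAM_INPUT, (GLuint) i,
 *                                sizeof(name), NULL, name);
 *       glGetProgramResourceiv(program, GL_PROGRAM_INPUT, (GLuint) i,
 *                              1, &prop, 1, NULL, &location);
 *    }
 */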
4369 
4370 /**
4371  * This check is done to make sure we allow only constant expression
4372  * indexing and "constant-index-expression" (indexing with an expression
4373  * that includes loop induction variable).
4374  */
4375 static bool
4376 validate_sampler_array_indexing(struct gl_context *ctx,
4377                                 struct gl_shader_program *prog)
4378 {
4379    dynamic_sampler_array_indexing_visitor v;
4380    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4381       if (prog->_LinkedShaders[i] == NULL)
4382          continue;
4383 
4384       bool no_dynamic_indexing =
4385          ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectSampler;
4386 
4387       /* Search for array derefs in shader. */
4388       v.run(prog->_LinkedShaders[i]->ir);
4389       if (v.uses_dynamic_sampler_array_indexing()) {
4390          const char *msg = "sampler arrays indexed with non-constant "
4391                            "expressions are forbidden in GLSL %s %u";
4392          /* Backend has indicated that it has no dynamic indexing support. */
4393          if (no_dynamic_indexing) {
4394             linker_error(prog, msg, prog->IsES ? "ES" : "",
4395                          prog->data->Version);
4396             return false;
4397          } else {
4398             linker_warning(prog, msg, prog->IsES ? "ES" : "",
4399                            prog->data->Version);
4400          }
4401       }
4402    }
4403    return true;
4404 }
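
/* An illustrative (hypothetical) GLSL ES 1.00 fragment shader showing the
 * distinction the check above is concerned with:
 *
 *    uniform sampler2D samplers[4];
 *    uniform int which;
 *    varying vec2 uv;
 *
 *    void main() {
 *       vec4 color = vec4(0.0);
 *       // constant-index-expression: index is a loop induction variable
 *       for (int i = 0; i < 4; i++)
 *          color += texture2D(samplers[i], uv);
 *       // truly dynamic index: a link error when the backend sets
 *       // EmitNoIndirectSampler, otherwise only a link warning
 *       color += texture2D(samplers[which], uv);
 *       gl_FragColor = color;
 *    }
 */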
4405 
4406 static void
4407 link_assign_subroutine_types(struct gl_shader_program *prog)
4408 {
4409    unsigned mask = prog->data->linked_stages;
4410    while (mask) {
4411       const int i = u_bit_scan(&mask);
4412       gl_program *p = prog->_LinkedShaders[i]->Program;
4413 
4414       p->sh.MaxSubroutineFunctionIndex = 0;
4415       foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
4416          ir_function *fn = node->as_function();
4417          if (!fn)
4418             continue;
4419 
4420          if (fn->is_subroutine)
4421             p->sh.NumSubroutineUniformTypes++;
4422 
4423          if (!fn->num_subroutine_types)
4424             continue;
4425 
4426          /* these should have been calculated earlier. */
4427          assert(fn->subroutine_index != -1);
4428          if (p->sh.NumSubroutineFunctions + 1 > MAX_SUBROUTINES) {
4429             linker_error(prog, "Too many subroutine functions declared.\n");
4430             return;
4431          }
4432          p->sh.SubroutineFunctions = reralloc(p, p->sh.SubroutineFunctions,
4433                                             struct gl_subroutine_function,
4434                                             p->sh.NumSubroutineFunctions + 1);
4435          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].name = ralloc_strdup(p, fn->name);
4436          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
4437          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types =
4438             ralloc_array(p, const struct glsl_type *,
4439                          fn->num_subroutine_types);
4440 
4441          /* From Section 4.4.4(Subroutine Function Layout Qualifiers) of the
4442           * GLSL 4.5 spec:
4443           *
4444           *    "Each subroutine with an index qualifier in the shader must be
4445           *    given a unique index, otherwise a compile or link error will be
4446           *    generated."
4447           */
4448          for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
4449             if (p->sh.SubroutineFunctions[j].index != -1 &&
4450                 p->sh.SubroutineFunctions[j].index == fn->subroutine_index) {
4451                linker_error(prog, "each subroutine index qualifier in the "
4452                             "shader must be unique\n");
4453                return;
4454             }
4455          }
4456          p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].index =
4457             fn->subroutine_index;
4458 
4459          if (fn->subroutine_index > (int)p->sh.MaxSubroutineFunctionIndex)
4460             p->sh.MaxSubroutineFunctionIndex = fn->subroutine_index;
4461 
4462          for (int j = 0; j < fn->num_subroutine_types; j++)
4463             p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
4464          p->sh.NumSubroutineFunctions++;
4465       }
4466    }
4467 }
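
/* An illustrative (hypothetical) GLSL 4.5 snippet of the constructs handled
 * above.  Subroutine functions with explicit index qualifiers must each use
 * a unique index; otherwise the loop above reports a link error:
 *
 *    subroutine vec4 colorize(vec3 p);
 *
 *    layout(index = 0) subroutine(colorize)
 *    vec4 red(vec3 p)  { return vec4(1.0, 0.0, 0.0, 1.0); }
 *
 *    layout(index = 1) subroutine(colorize)
 *    vec4 gray(vec3 p) { return vec4(vec3(0.5), 1.0); }
 *
 *    subroutine uniform colorize pick;
 */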
4468 
4469 static void
4470 set_always_active_io(exec_list *ir, ir_variable_mode io_mode)
4471 {
4472    assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
4473 
4474    foreach_in_list(ir_instruction, node, ir) {
4475       ir_variable *const var = node->as_variable();
4476 
4477       if (var == NULL || var->data.mode != io_mode)
4478          continue;
4479 
4480       /* Don't set always active on builtins that haven't been redeclared */
4481       if (var->data.how_declared == ir_var_declared_implicitly)
4482          continue;
4483 
4484       var->data.always_active_io = true;
4485    }
4486 }
4487 
4488 /**
4489  * When separate shader programs are enabled, only input/outputs between
4490  * the stages of a multi-stage separate program can be safely removed
4491  * from the shader interface. Other inputs/outputs must remain active.
4492  */
4493 static void
4494 disable_varying_optimizations_for_sso(struct gl_shader_program *prog)
4495 {
4496    unsigned first, last;
4497    assert(prog->SeparateShader);
4498 
4499    first = MESA_SHADER_STAGES;
4500    last = 0;
4501 
4502    /* Determine first and last stage, excluding the compute stage. */
4503    for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
4504       if (!prog->_LinkedShaders[i])
4505          continue;
4506       if (first == MESA_SHADER_STAGES)
4507          first = i;
4508       last = i;
4509    }
4510 
4511    if (first == MESA_SHADER_STAGES)
4512       return;
4513 
4514    for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
4515       gl_linked_shader *sh = prog->_LinkedShaders[stage];
4516       if (!sh)
4517          continue;
4518 
4519       if (first == last) {
4520          /* For a single shader program only allow inputs to the vertex shader
4521           * and outputs from the fragment shader to be removed.
4522           */
4523          if (stage != MESA_SHADER_VERTEX)
4524             set_always_active_io(sh->ir, ir_var_shader_in);
4525          if (stage != MESA_SHADER_FRAGMENT)
4526             set_always_active_io(sh->ir, ir_var_shader_out);
4527       } else {
4528          /* For multi-stage separate shader programs only allow inputs and
4529           * outputs between the shader stages to be removed as well as inputs
4530           * to the vertex shader and outputs from the fragment shader.
4531           */
4532          if (stage == first && stage != MESA_SHADER_VERTEX)
4533             set_always_active_io(sh->ir, ir_var_shader_in);
4534          else if (stage == last && stage != MESA_SHADER_FRAGMENT)
4535             set_always_active_io(sh->ir, ir_var_shader_out);
4536       }
4537    }
4538 }
4539 
4540 static bool
4541 link_varyings_and_uniforms(unsigned first, unsigned last,
4542                            unsigned num_explicit_uniform_locs,
4543                            struct gl_context *ctx,
4544                            struct gl_shader_program *prog, void *mem_ctx)
4545 {
4546    bool has_xfb_qualifiers = false;
4547    unsigned num_tfeedback_decls = 0;
4548    char **varying_names = NULL;
4549    tfeedback_decl *tfeedback_decls = NULL;
4550 
4551    /* Mark all generic shader inputs and outputs as unpaired. */
4552    for (unsigned i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
4553       if (prog->_LinkedShaders[i] != NULL) {
4554          link_invalidate_variable_locations(prog->_LinkedShaders[i]->ir);
4555       }
4556    }
4557 
4558    unsigned prev = first;
4559    for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
4560       if (prog->_LinkedShaders[i] == NULL)
4561          continue;
4562 
4563       match_explicit_outputs_to_inputs(prog->_LinkedShaders[prev],
4564                                        prog->_LinkedShaders[i]);
4565       prev = i;
4566    }
4567 
4568    if (!assign_attribute_or_color_locations(mem_ctx, prog, &ctx->Const,
4569                                             MESA_SHADER_VERTEX)) {
4570       return false;
4571    }
4572 
4573    if (!assign_attribute_or_color_locations(mem_ctx, prog, &ctx->Const,
4574                                             MESA_SHADER_FRAGMENT)) {
4575       return false;
4576    }
4577 
4578    /* From the ARB_enhanced_layouts spec:
4579     *
4580     *    "If the shader used to record output variables for transform feedback
4581     *    varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
4582     *    qualifiers, the values specified by TransformFeedbackVaryings are
4583     *    ignored, and the set of variables captured for transform feedback is
4584     *    instead derived from the specified layout qualifiers."
4585     */
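   /* For example (hypothetical output declaration), a shader containing
    *
    *    layout(xfb_buffer = 0, xfb_offset = 0) out vec4 world_pos;
    *
    * provides the captured set itself, and any names passed through
    * TransformFeedbackVaryings are ignored.
    */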
4586    for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
4587       /* Find last stage before fragment shader */
4588       if (prog->_LinkedShaders[i]) {
4589          has_xfb_qualifiers =
4590             process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
4591                                           &num_tfeedback_decls,
4592                                           &varying_names);
4593          break;
4594       }
4595    }
4596 
4597    if (!has_xfb_qualifiers) {
4598       num_tfeedback_decls = prog->TransformFeedback.NumVarying;
4599       varying_names = prog->TransformFeedback.VaryingNames;
4600    }
4601 
4602    /* Find the program used for xfb. Even if we don't use xfb we still want to
4603     * set this so we can fill the default values for program interface query.
4604     */
4605    prog->xfb_program = prog->_LinkedShaders[last]->Program;
4606    for (int i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
4607       if (prog->_LinkedShaders[i] == NULL)
4608          continue;
4609 
4610       prog->xfb_program = prog->_LinkedShaders[i]->Program;
4611       break;
4612    }
4613 
4614    if (num_tfeedback_decls != 0) {
4615       /* From GL_EXT_transform_feedback:
4616        *   A program will fail to link if:
4617        *
4618        *   * the <count> specified by TransformFeedbackVaryingsEXT is
4619        *     non-zero, but the program object has no vertex or geometry
4620        *     shader;
4621        */
4622       if (first >= MESA_SHADER_FRAGMENT) {
4623          linker_error(prog, "Transform feedback varyings specified, but "
4624                       "no vertex, tessellation, or geometry shader is "
4625                       "present.\n");
4626          return false;
4627       }
4628 
4629       tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
4630                                       num_tfeedback_decls);
4631       if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
4632                                  varying_names, tfeedback_decls))
4633          return false;
4634    }
4635 
4636    /* If there is no fragment shader we need to set transform feedback.
4637     *
4638     * For SSO we also need to assign output locations.  We assign them here
4639     * because we need to do it for both single stage programs and multi stage
4640     * programs.
4641     */
4642    if (last < MESA_SHADER_FRAGMENT &&
4643        (num_tfeedback_decls != 0 || prog->SeparateShader)) {
4644       const uint64_t reserved_out_slots =
4645          reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
4646       if (!assign_varying_locations(ctx, mem_ctx, prog,
4647                                     prog->_LinkedShaders[last], NULL,
4648                                     num_tfeedback_decls, tfeedback_decls,
4649                                     reserved_out_slots))
4650          return false;
4651    }
4652 
4653    if (last <= MESA_SHADER_FRAGMENT) {
4654       /* Remove unused varyings from the first/last stage unless SSO */
4655       remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
4656                                               prog->_LinkedShaders[first],
4657                                               ir_var_shader_in);
4658       remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
4659                                               prog->_LinkedShaders[last],
4660                                               ir_var_shader_out);
4661 
4662       /* If the program is made up of only a single stage */
4663       if (first == last) {
4664          gl_linked_shader *const sh = prog->_LinkedShaders[last];
4665 
4666          do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
4667          do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
4668                                   tfeedback_decls);
4669 
4670          if (prog->SeparateShader) {
4671             const uint64_t reserved_slots =
4672                reserved_varying_slot(sh, ir_var_shader_in);
4673 
4674             /* Assign input locations for SSO, output locations are already
4675              * assigned.
4676              */
4677             if (!assign_varying_locations(ctx, mem_ctx, prog,
4678                                           NULL /* producer */,
4679                                           sh /* consumer */,
4680                                           0 /* num_tfeedback_decls */,
4681                                           NULL /* tfeedback_decls */,
4682                                           reserved_slots))
4683                return false;
4684          }
4685       } else {
4686          /* Linking the stages in the opposite order (from fragment to vertex)
4687           * ensures that inter-shader outputs written to in an earlier stage
4688           * are eliminated if they are (transitively) not used in a later
4689           * stage.
4690           */
4691          int next = last;
4692          for (int i = next - 1; i >= 0; i--) {
4693             if (prog->_LinkedShaders[i] == NULL && i != 0)
4694                continue;
4695 
4696             gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
4697             gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
4698 
4699             const uint64_t reserved_out_slots =
4700                reserved_varying_slot(sh_i, ir_var_shader_out);
4701             const uint64_t reserved_in_slots =
4702                reserved_varying_slot(sh_next, ir_var_shader_in);
4703 
4704             do_dead_builtin_varyings(ctx, sh_i, sh_next,
4705                       next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
4706                       tfeedback_decls);
4707 
4708             if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
4709                       next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
4710                       tfeedback_decls,
4711                       reserved_out_slots | reserved_in_slots))
4712                return false;
4713 
4714             /* This must be done after all dead varyings are eliminated. */
4715             if (sh_i != NULL) {
4716                unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
4717                if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
4718                   return false;
4719                }
4720             }
4721 
4722             unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
4723             if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
4724                return false;
4725 
4726             next = i;
4727          }
4728       }
4729    }
4730 
4731    if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
4732                              has_xfb_qualifiers))
4733       return false;
4734 
4735    update_array_sizes(prog);
4736    link_assign_uniform_locations(prog, ctx, num_explicit_uniform_locs);
4737    link_assign_atomic_counter_resources(ctx, prog);
4738 
4739    link_calculate_subroutine_compat(prog);
4740    check_resources(ctx, prog);
4741    check_subroutine_resources(prog);
4742    check_image_resources(ctx, prog);
4743    link_check_atomic_counter_resources(ctx, prog);
4744 
4745    if (!prog->data->LinkStatus)
4746       return false;
4747 
4748    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4749       if (prog->_LinkedShaders[i] == NULL)
4750          continue;
4751 
4752       const struct gl_shader_compiler_options *options =
4753          &ctx->Const.ShaderCompilerOptions[i];
4754 
4755       if (options->LowerBufferInterfaceBlocks)
4756          lower_ubo_reference(prog->_LinkedShaders[i],
4757                              options->ClampBlockIndicesToArrayBounds);
4758 
4759       if (i == MESA_SHADER_COMPUTE)
4760          lower_shared_reference(prog->_LinkedShaders[i],
4761                                 &prog->Comp.SharedSize);
4762 
4763       lower_vector_derefs(prog->_LinkedShaders[i]);
4764       do_vec_index_to_swizzle(prog->_LinkedShaders[i]->ir);
4765    }
4766 
4767    return true;
4768 }
4769 
4770 void
4771 link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
4772 {
4773    prog->data->LinkStatus = true; /* All error paths will set this to false */
4774    prog->data->Validated = false;
4775 
4776    /* Section 7.3 (Program Objects) of the OpenGL 4.5 Core Profile spec says:
4777     *
4778     *     "Linking can fail for a variety of reasons as specified in the
4779     *     OpenGL Shading Language Specification, as well as any of the
4780     *     following reasons:
4781     *
4782     *     - No shader objects are attached to program."
4783     *
4784     * The Compatibility Profile specification does not list the error.  In
4785     * Compatibility Profile missing shader stages are replaced by
4786     * fixed-function.  This applies to the case where all stages are
4787     * missing.
4788     */
4789    if (prog->NumShaders == 0) {
4790       if (ctx->API != API_OPENGL_COMPAT)
4791          linker_error(prog, "no shaders attached to the program\n");
4792       return;
4793    }
4794 
4795    unsigned int num_explicit_uniform_locs = 0;
4796 
4797    void *mem_ctx = ralloc_context(NULL); // temporary linker context
4798 
4799    prog->ARB_fragment_coord_conventions_enable = false;
4800 
4801    /* Separate the shaders into groups based on their type.
4802     */
4803    struct gl_shader **shader_list[MESA_SHADER_STAGES];
4804    unsigned num_shaders[MESA_SHADER_STAGES];
4805 
4806    for (int i = 0; i < MESA_SHADER_STAGES; i++) {
4807       shader_list[i] = (struct gl_shader **)
4808          calloc(prog->NumShaders, sizeof(struct gl_shader *));
4809       num_shaders[i] = 0;
4810    }
4811 
4812    unsigned min_version = UINT_MAX;
4813    unsigned max_version = 0;
4814    for (unsigned i = 0; i < prog->NumShaders; i++) {
4815       min_version = MIN2(min_version, prog->Shaders[i]->Version);
4816       max_version = MAX2(max_version, prog->Shaders[i]->Version);
4817 
4818       if (prog->Shaders[i]->IsES != prog->Shaders[0]->IsES) {
4819          linker_error(prog, "all shaders must use same shading "
4820                       "language version\n");
4821          goto done;
4822       }
4823 
4824       if (prog->Shaders[i]->info.ARB_fragment_coord_conventions_enable) {
4825          prog->ARB_fragment_coord_conventions_enable = true;
4826       }
4827 
4828       gl_shader_stage shader_type = prog->Shaders[i]->Stage;
4829       shader_list[shader_type][num_shaders[shader_type]] = prog->Shaders[i];
4830       num_shaders[shader_type]++;
4831    }
4832 
4833    /* In desktop GLSL, different shader versions may be linked together.  In
4834     * GLSL ES, all shader versions must be the same.
4835     */
4836    if (prog->Shaders[0]->IsES && min_version != max_version) {
4837       linker_error(prog, "all shaders must use same shading "
4838                    "language version\n");
4839       goto done;
4840    }
4841 
4842    prog->data->Version = max_version;
4843    prog->IsES = prog->Shaders[0]->IsES;
4844 
4845    /* Some shaders have to be linked with some other shaders present.
4846     */
4847    if (!prog->SeparateShader) {
4848       if (num_shaders[MESA_SHADER_GEOMETRY] > 0 &&
4849           num_shaders[MESA_SHADER_VERTEX] == 0) {
4850          linker_error(prog, "Geometry shader must be linked with "
4851                       "vertex shader\n");
4852          goto done;
4853       }
4854       if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
4855           num_shaders[MESA_SHADER_VERTEX] == 0) {
4856          linker_error(prog, "Tessellation evaluation shader must be linked "
4857                       "with vertex shader\n");
4858          goto done;
4859       }
4860       if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
4861           num_shaders[MESA_SHADER_VERTEX] == 0) {
4862          linker_error(prog, "Tessellation control shader must be linked with "
4863                       "vertex shader\n");
4864          goto done;
4865       }
4866 
4867       /* The spec is self-contradictory here. It allows linking without a tess
4868        * eval shader, but that can only be used with transform feedback and
4869        * rasterization disabled. However, transform feedback isn't allowed
4870        * with GL_PATCHES, so it can't be used.
4871        *
4872        * More investigation showed that the idea of transform feedback after
4873        * a tess control shader was dropped, because some hw vendors couldn't
4874        * support tessellation without a tess eval shader, but the linker
4875        * section wasn't updated to reflect that.
4876        *
4877        * All specifications (ARB_tessellation_shader, GL 4.0-4.5) have this
4878        * spec bug.
4879        *
4880        * Do what's reasonable and always require a tess eval shader if a tess
4881        * control shader is present.
4882        */
4883       if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
4884           num_shaders[MESA_SHADER_TESS_EVAL] == 0) {
4885          linker_error(prog, "Tessellation control shader must be linked with "
4886                       "tessellation evaluation shader\n");
4887          goto done;
4888       }
4889    }
4890 
4891    /* Compute shaders have additional restrictions. */
4892    if (num_shaders[MESA_SHADER_COMPUTE] > 0 &&
4893        num_shaders[MESA_SHADER_COMPUTE] != prog->NumShaders) {
4894       linker_error(prog, "Compute shaders may not be linked with any other "
4895                    "type of shader\n");
4896    }
4897 
4898    /* Link all shaders for a particular stage and validate the result.
4899     */
4900    for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
4901       if (num_shaders[stage] > 0) {
4902          gl_linked_shader *const sh =
4903             link_intrastage_shaders(mem_ctx, ctx, prog, shader_list[stage],
4904                                     num_shaders[stage], false);
4905 
4906          if (!prog->data->LinkStatus) {
4907             if (sh)
4908                _mesa_delete_linked_shader(ctx, sh);
4909             goto done;
4910          }
4911 
4912          switch (stage) {
4913          case MESA_SHADER_VERTEX:
4914             validate_vertex_shader_executable(prog, sh, ctx);
4915             break;
4916          case MESA_SHADER_TESS_CTRL:
4917             /* nothing to be done */
4918             break;
4919          case MESA_SHADER_TESS_EVAL:
4920             validate_tess_eval_shader_executable(prog, sh, ctx);
4921             break;
4922          case MESA_SHADER_GEOMETRY:
4923             validate_geometry_shader_executable(prog, sh, ctx);
4924             break;
4925          case MESA_SHADER_FRAGMENT:
4926             validate_fragment_shader_executable(prog, sh);
4927             break;
4928          }
4929          if (!prog->data->LinkStatus) {
4930             if (sh)
4931                _mesa_delete_linked_shader(ctx, sh);
4932             goto done;
4933          }
4934 
4935          prog->_LinkedShaders[stage] = sh;
4936          prog->data->linked_stages |= 1 << stage;
4937       }
4938    }
4939 
4940    if (num_shaders[MESA_SHADER_GEOMETRY] > 0) {
4941       prog->LastClipDistanceArraySize = prog->Geom.ClipDistanceArraySize;
4942       prog->LastCullDistanceArraySize = prog->Geom.CullDistanceArraySize;
4943    } else if (num_shaders[MESA_SHADER_TESS_EVAL] > 0) {
4944       prog->LastClipDistanceArraySize = prog->TessEval.ClipDistanceArraySize;
4945       prog->LastCullDistanceArraySize = prog->TessEval.CullDistanceArraySize;
4946    } else if (num_shaders[MESA_SHADER_VERTEX] > 0) {
4947       prog->LastClipDistanceArraySize = prog->Vert.ClipDistanceArraySize;
4948       prog->LastCullDistanceArraySize = prog->Vert.CullDistanceArraySize;
4949    } else {
4950       prog->LastClipDistanceArraySize = 0; /* Not used */
4951       prog->LastCullDistanceArraySize = 0; /* Not used */
4952    }
4953 
4954    /* Here begins the inter-stage linking phase.  Some initial validation is
4955     * performed, then locations are assigned for uniforms, attributes, and
4956     * varyings.
4957     */
4958    cross_validate_uniforms(prog);
4959    if (!prog->data->LinkStatus)
4960       goto done;
4961 
4962    unsigned first, last, prev;
4963 
4964    first = MESA_SHADER_STAGES;
4965    last = 0;
4966 
4967    /* Determine first and last stage. */
4968    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
4969       if (!prog->_LinkedShaders[i])
4970          continue;
4971       if (first == MESA_SHADER_STAGES)
4972          first = i;
4973       last = i;
4974    }
4975 
4976    num_explicit_uniform_locs = check_explicit_uniform_locations(ctx, prog);
4977    link_assign_subroutine_types(prog);
4978 
4979    if (!prog->data->LinkStatus)
4980       goto done;
4981 
4982    resize_tes_inputs(ctx, prog);
4983 
4984    /* Validate the inputs of each stage with the output of the preceding
4985     * stage.
4986     */
4987    prev = first;
4988    for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
4989       if (prog->_LinkedShaders[i] == NULL)
4990          continue;
4991 
4992       validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
4993                                        prog->_LinkedShaders[i]);
4994       if (!prog->data->LinkStatus)
4995          goto done;
4996 
4997       cross_validate_outputs_to_inputs(prog,
4998                                        prog->_LinkedShaders[prev],
4999                                        prog->_LinkedShaders[i]);
5000       if (!prog->data->LinkStatus)
5001          goto done;
5002 
5003       prev = i;
5004    }
5005 
5006    /* Cross-validate uniform blocks between shader stages */
5007    validate_interstage_uniform_blocks(prog, prog->_LinkedShaders);
5008    if (!prog->data->LinkStatus)
5009       goto done;
5010 
5011    for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
5012       if (prog->_LinkedShaders[i] != NULL)
5013          lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
5014    }
5015 
5016    /* Implement the GLSL 1.30+ rule for discard vs infinite loops.  Do
5017     * it before optimization because we want most of the checks to get
5018     * dropped thanks to constant propagation.
5019     *
5020     * This rule also applies to GLSL ES 3.00.
5021     */
5022    if (max_version >= (prog->IsES ? 300 : 130)) {
5023       struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
5024       if (sh) {
5025          lower_discard_flow(sh->ir);
5026       }
5027    }
5028 
5029    if (prog->SeparateShader)
5030       disable_varying_optimizations_for_sso(prog);
5031 
5032    /* Process UBOs */
5033    if (!interstage_cross_validate_uniform_blocks(prog, false))
5034       goto done;
5035 
5036    /* Process SSBOs */
5037    if (!interstage_cross_validate_uniform_blocks(prog, true))
5038       goto done;
5039 
5040    /* Do common optimization before assigning storage for attributes,
5041     * uniforms, and varyings.  Later optimization could possibly make
5042     * some of that unused.
5043     */
5044    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
5045       if (prog->_LinkedShaders[i] == NULL)
5046          continue;
5047 
5048       detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
5049       if (!prog->data->LinkStatus)
5050          goto done;
5051 
5052       if (ctx->Const.ShaderCompilerOptions[i].LowerCombinedClipCullDistance) {
5053          lower_clip_cull_distance(prog, prog->_LinkedShaders[i]);
5054       }
5055 
5056       if (ctx->Const.LowerTessLevel) {
5057          lower_tess_level(prog->_LinkedShaders[i]);
5058       }
5059 
5060       if (ctx->Const.GLSLOptimizeConservatively) {
5061          /* Run it just once. */
5062          do_common_optimization(prog->_LinkedShaders[i]->ir, true, false,
5063                                 &ctx->Const.ShaderCompilerOptions[i],
5064                                 ctx->Const.NativeIntegers);
5065       } else {
5066          /* Repeat it until it stops making changes. */
5067          while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, false,
5068                                        &ctx->Const.ShaderCompilerOptions[i],
5069                                        ctx->Const.NativeIntegers))
5070             ;
5071       }
5072 
5073       lower_const_arrays_to_uniforms(prog->_LinkedShaders[i]->ir, i);
5074       propagate_invariance(prog->_LinkedShaders[i]->ir);
5075    }
5076 
5077    /* Validation for special cases where we allow sampler array indexing
5078     * with a loop induction variable. This check emits a warning or error
5079     * depending on whether the backend can handle dynamic indexing.
5080     */
5081    if ((!prog->IsES && prog->data->Version < 130) ||
5082        (prog->IsES && prog->data->Version < 300)) {
5083       if (!validate_sampler_array_indexing(ctx, prog))
5084          goto done;
5085    }
5086 
5087    /* Check and validate stream emissions in geometry shaders */
5088    validate_geometry_shader_emissions(ctx, prog);
5089 
5090    store_fragdepth_layout(prog);
5091 
5092    if (!link_varyings_and_uniforms(first, last, num_explicit_uniform_locs, ctx,
5093                                   prog, mem_ctx))
5094       goto done;
5095 
5096    /* OpenGL ES < 3.1 requires that a vertex shader and a fragment shader both
5097     * be present in a linked program. GL_ARB_ES2_compatibility doesn't say
5098     * anything about shader linking when one of the shaders (vertex or
5099     * fragment shader) is absent. So, the extension shouldn't change the
5100     * behavior specified in GLSL specification.
5101     *
5102     * From OpenGL ES 3.1 specification (7.3 Program Objects):
5103     *     "Linking can fail for a variety of reasons as specified in the
5104     *     OpenGL ES Shading Language Specification, as well as any of the
5105     *     following reasons:
5106     *
5107     *     ...
5108     *
5109     *     * program contains objects to form either a vertex shader or
5110     *       fragment shader, and program is not separable, and does not
5111     *       contain objects to form both a vertex shader and fragment
5112     *       shader."
5113     *
5114     * However, the only scenario in 3.1+ where we don't require them both is
5115     * when we have a compute shader. For example:
5116     *
5117     * - No shaders is a link error.
5118     * - Geom or Tess without a Vertex shader is a link error which means we
5119     *   always require a Vertex shader and hence a Fragment shader.
5120     * - Finally a Compute shader linked with any other stage is a link error.
5121     */
5122    if (!prog->SeparateShader && ctx->API == API_OPENGLES2 &&
5123        num_shaders[MESA_SHADER_COMPUTE] == 0) {
5124       if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
5125          linker_error(prog, "program lacks a vertex shader\n");
5126       } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
5127          linker_error(prog, "program lacks a fragment shader\n");
5128       }
5129    }
5130 
5131 done:
5132    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
5133       free(shader_list[i]);
5134       if (prog->_LinkedShaders[i] == NULL)
5135          continue;
5136 
5137       /* Do a final validation step to make sure that the IR wasn't
5138        * invalidated by any modifications performed after intrastage linking.
5139        */
5140       validate_ir_tree(prog->_LinkedShaders[i]->ir);
5141 
5142       /* Retain any live IR, but trash the rest. */
5143       reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);
5144 
5145       /* The symbol table in the linked shaders may contain references to
5146        * variables that were removed (e.g., unused uniforms).  Since it may
5147        * contain junk, there is no possible valid use.  Delete it and set the
5148        * pointer to NULL.
5149        */
5150       delete prog->_LinkedShaders[i]->symbols;
5151       prog->_LinkedShaders[i]->symbols = NULL;
5152    }
5153 
5154    ralloc_free(mem_ctx);
5155 }
5156