1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
35 *  - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
54 * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66
67 #include <ctype.h>
68 #include "util/strndup.h"
69 #include "glsl_symbol_table.h"
70 #include "glsl_parser_extras.h"
71 #include "ir.h"
72 #include "nir.h"
73 #include "program.h"
74 #include "program/prog_instruction.h"
75 #include "program/program.h"
76 #include "util/mesa-sha1.h"
77 #include "util/set.h"
78 #include "string_to_uint_map.h"
79 #include "linker.h"
80 #include "linker_util.h"
81 #include "link_varyings.h"
82 #include "ir_optimization.h"
83 #include "ir_rvalue_visitor.h"
84 #include "ir_uniform.h"
85 #include "builtin_functions.h"
86 #include "shader_cache.h"
87 #include "util/u_string.h"
88 #include "util/u_math.h"
89
90
91 #include "main/shaderobj.h"
92 #include "main/enums.h"
93 #include "main/mtypes.h"
94
95
96 namespace {
97
98 struct find_variable {
99 const char *name;
100 bool found;
101
102 find_variable(const char *name) : name(name), found(false) {}
103 };
104
105 /**
106 * Visitor that determines whether or not a variable is ever written.
107 * Note: this only considers whether the variable is statically written
108 * (i.e., regardless of the runtime flow of control).
109 *
110 * Use \ref find_assignments for convenience.
111 */
112 class find_assignment_visitor : public ir_hierarchical_visitor {
113 public:
114 find_assignment_visitor(unsigned num_vars,
115 find_variable * const *vars)
116 : num_variables(num_vars), num_found(0), variables(vars)
117 {
118 }
119
120 virtual ir_visitor_status visit_enter(ir_assignment *ir)
121 {
122 ir_variable *const var = ir->lhs->variable_referenced();
123
124 return check_variable_name(var->name);
125 }
126
127 virtual ir_visitor_status visit_enter(ir_call *ir)
128 {
129 foreach_two_lists(formal_node, &ir->callee->parameters,
130 actual_node, &ir->actual_parameters) {
131 ir_rvalue *param_rval = (ir_rvalue *) actual_node;
132 ir_variable *sig_param = (ir_variable *) formal_node;
133
134 if (sig_param->data.mode == ir_var_function_out ||
135 sig_param->data.mode == ir_var_function_inout) {
136 ir_variable *var = param_rval->variable_referenced();
137 if (var && check_variable_name(var->name) == visit_stop)
138 return visit_stop;
139 }
140 }
141
142 if (ir->return_deref != NULL) {
143 ir_variable *const var = ir->return_deref->variable_referenced();
144
145 if (check_variable_name(var->name) == visit_stop)
146 return visit_stop;
147 }
148
149 return visit_continue_with_parent;
150 }
151
152 private:
153 ir_visitor_status check_variable_name(const char *name)
154 {
155 for (unsigned i = 0; i < num_variables; ++i) {
156 if (strcmp(variables[i]->name, name) == 0) {
157 if (!variables[i]->found) {
158 variables[i]->found = true;
159
160 assert(num_found < num_variables);
161 if (++num_found == num_variables)
162 return visit_stop;
163 }
164 break;
165 }
166 }
167
168 return visit_continue_with_parent;
169 }
170
171 private:
172 unsigned num_variables; /**< Number of variables to find */
173 unsigned num_found; /**< Number of variables already found */
174 find_variable * const *variables; /**< Variables to find */
175 };
176
177 /**
178 * Determine whether or not any variable in the NULL-terminated list is ever
179 * written to.
180 */
181 static void
182 find_assignments(exec_list *ir, find_variable * const *vars)
183 {
184 unsigned num_variables = 0;
185
186 for (find_variable * const *v = vars; *v; ++v)
187 num_variables++;
188
189 find_assignment_visitor visitor(num_variables, vars);
190 visitor.run(ir);
191 }
192
193 /**
194 * Determine whether or not the given variable is ever written to.
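 *
 * Typical usage (sketch only; mirrors how later code in this file uses these
 * helpers):
 *
 *    find_variable gl_Position("gl_Position");
 *    find_assignments(shader->ir, &gl_Position);
 *    if (!gl_Position.found)
 *       linker_error(prog, "vertex shader does not write to `gl_Position'\n");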
195 */
196 static void
197 find_assignments(exec_list *ir, find_variable *var)
198 {
199 find_assignment_visitor visitor(1, &var);
200 visitor.run(ir);
201 }
202
203 /**
204 * Visitor that determines whether or not a variable is ever read.
205 */
206 class find_deref_visitor : public ir_hierarchical_visitor {
207 public:
208 find_deref_visitor(const char *name)
209 : name(name), found(false)
210 {
211 /* empty */
212 }
213
214 virtual ir_visitor_status visit(ir_dereference_variable *ir)
215 {
216 if (strcmp(this->name, ir->var->name) == 0) {
217 this->found = true;
218 return visit_stop;
219 }
220
221 return visit_continue;
222 }
223
224 bool variable_found() const
225 {
226 return this->found;
227 }
228
229 private:
230 const char *name; /**< Find dereferences of a variable with this name. */
231 bool found; /**< Was a dereference of the variable found? */
232 };
233
234
235 /**
236 * A visitor helper that provides methods for updating the types of
237 * ir_dereferences. Classes that update variable types (say, updating
238 * array sizes) will want to use this so that dereference types stay in sync.
239 */
240 class deref_type_updater : public ir_hierarchical_visitor {
241 public:
242 virtual ir_visitor_status visit(ir_dereference_variable *ir)
243 {
244 ir->type = ir->var->type;
245 return visit_continue;
246 }
247
248 virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
249 {
250 const glsl_type *const vt = ir->array->type;
251 if (vt->is_array())
252 ir->type = vt->fields.array;
253 return visit_continue;
254 }
255
256 virtual ir_visitor_status visit_leave(ir_dereference_record *ir)
257 {
258 ir->type = ir->record->type->fields.structure[ir->field_idx].type;
259 return visit_continue;
260 }
261 };
262
263
264 class array_resize_visitor : public deref_type_updater {
265 public:
266 using deref_type_updater::visit;
267
268 unsigned num_vertices;
269 gl_shader_program *prog;
270 gl_shader_stage stage;
271
272 array_resize_visitor(unsigned num_vertices,
273 gl_shader_program *prog,
274 gl_shader_stage stage)
275 {
276 this->num_vertices = num_vertices;
277 this->prog = prog;
278 this->stage = stage;
279 }
280
281 virtual ~array_resize_visitor()
282 {
283 /* empty */
284 }
285
286 virtual ir_visitor_status visit(ir_variable *var)
287 {
288 if (!var->type->is_array() || var->data.mode != ir_var_shader_in ||
289 var->data.patch)
290 return visit_continue;
291
292 unsigned size = var->type->length;
293
294 if (stage == MESA_SHADER_GEOMETRY) {
295 /* Generate a link error if the shader has declared this array with
296 * an incorrect size.
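 *
 * For example (illustrative GLSL): with `layout(triangles) in;' the number
 * of input vertices is 3, so an explicit declaration such as
 * `in vec4 color[4];' hits this error, while `in vec4 color[3];' or the
 * unsized `in vec4 color[];' does not.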
297 */
298 if (!var->data.implicit_sized_array &&
299 size && size != this->num_vertices) {
300 linker_error(this->prog, "size of array %s declared as %u, "
301 "but number of input vertices is %u\n",
302 var->name, size, this->num_vertices);
303 return visit_continue;
304 }
305
306 /* Generate a link error if the shader attempts to access an input
307 * array using an index too large for its actual size assigned at
308 * link time.
309 */
310 if (var->data.max_array_access >= (int)this->num_vertices) {
311 linker_error(this->prog, "%s shader accesses element %i of "
312 "%s, but only %i input vertices\n",
313 _mesa_shader_stage_to_string(this->stage),
314 var->data.max_array_access, var->name, this->num_vertices);
315 return visit_continue;
316 }
317 }
318
319 var->type = glsl_type::get_array_instance(var->type->fields.array,
320 this->num_vertices);
321 var->data.max_array_access = this->num_vertices - 1;
322
323 return visit_continue;
324 }
325 };
326
327 class array_length_to_const_visitor : public ir_rvalue_visitor {
328 public:
329 array_length_to_const_visitor()
330 {
331 this->progress = false;
332 }
333
334 virtual ~array_length_to_const_visitor()
335 {
336 /* empty */
337 }
338
339 bool progress;
340
341 virtual void handle_rvalue(ir_rvalue **rvalue)
342 {
343 if (*rvalue == NULL || (*rvalue)->ir_type != ir_type_expression)
344 return;
345
346 ir_expression *expr = (*rvalue)->as_expression();
347 if (expr) {
348 if (expr->operation == ir_unop_implicitly_sized_array_length) {
349 assert(!expr->operands[0]->type->is_unsized_array());
350 ir_constant *constant = new(expr)
351 ir_constant(expr->operands[0]->type->array_size());
352 if (constant) {
353 *rvalue = constant;
354 }
355 }
356 }
357 }
358 };
359
360 /**
361 * Visitor that determines the highest stream id to which a (geometry) shader
362 * emits vertices. It also checks whether End{Stream}Primitive is ever called.
363 */
364 class find_emit_vertex_visitor : public ir_hierarchical_visitor {
365 public:
366 find_emit_vertex_visitor(int max_allowed)
367 : max_stream_allowed(max_allowed),
368 invalid_stream_id(0),
369 invalid_stream_id_from_emit_vertex(false),
370 end_primitive_found(false),
371 used_streams(0)
372 {
373 /* empty */
374 }
375
376 virtual ir_visitor_status visit_leave(ir_emit_vertex *ir)
377 {
378 int stream_id = ir->stream_id();
379
380 if (stream_id < 0) {
381 invalid_stream_id = stream_id;
382 invalid_stream_id_from_emit_vertex = true;
383 return visit_stop;
384 }
385
386 if (stream_id > max_stream_allowed) {
387 invalid_stream_id = stream_id;
388 invalid_stream_id_from_emit_vertex = true;
389 return visit_stop;
390 }
391
392 used_streams |= 1 << stream_id;
393
394 return visit_continue;
395 }
396
397 virtual ir_visitor_status visit_leave(ir_end_primitive *ir)
398 {
399 end_primitive_found = true;
400
401 int stream_id = ir->stream_id();
402
403 if (stream_id < 0) {
404 invalid_stream_id = stream_id;
405 invalid_stream_id_from_emit_vertex = false;
406 return visit_stop;
407 }
408
409 if (stream_id > max_stream_allowed) {
410 invalid_stream_id = stream_id;
411 invalid_stream_id_from_emit_vertex = false;
412 return visit_stop;
413 }
414
415 used_streams |= 1 << stream_id;
416
417 return visit_continue;
418 }
419
420 bool error()
421 {
422 return invalid_stream_id != 0;
423 }
424
425 const char *error_func()
426 {
427 return invalid_stream_id_from_emit_vertex ?
428 "EmitStreamVertex" : "EndStreamPrimitive";
429 }
430
431 int error_stream()
432 {
433 return invalid_stream_id;
434 }
435
436 unsigned active_stream_mask()
437 {
438 return used_streams;
439 }
440
441 bool uses_end_primitive()
442 {
443 return end_primitive_found;
444 }
445
446 private:
447 int max_stream_allowed;
448 int invalid_stream_id;
449 bool invalid_stream_id_from_emit_vertex;
450 bool end_primitive_found;
451 unsigned used_streams;
452 };
453
454 } /* anonymous namespace */
455
456 void
457 linker_error(gl_shader_program *prog, const char *fmt, ...)
458 {
459 va_list ap;
460
461 ralloc_strcat(&prog->data->InfoLog, "error: ");
462 va_start(ap, fmt);
463 ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
464 va_end(ap);
465
466 prog->data->LinkStatus = LINKING_FAILURE;
467 }
468
469
470 void
471 linker_warning(gl_shader_program *prog, const char *fmt, ...)
472 {
473 va_list ap;
474
475 ralloc_strcat(&prog->data->InfoLog, "warning: ");
476 va_start(ap, fmt);
477 ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
478 va_end(ap);
479
480 }
481
482
483 void
484 link_invalidate_variable_locations(exec_list *ir)
485 {
486 foreach_in_list(ir_instruction, node, ir) {
487 ir_variable *const var = node->as_variable();
488
489 if (var == NULL)
490 continue;
491
492 /* Only assign locations for variables that lack an explicit location.
493 * Explicit locations are set for all built-in variables, generic vertex
494 * shader inputs (via layout(location=...)), and generic fragment shader
495 * outputs (also via layout(location=...)).
496 */
497 if (!var->data.explicit_location) {
498 var->data.location = -1;
499 var->data.location_frac = 0;
500 }
501 }
502 }
503
504
505 /**
506 * Set clip_distance_array_size and cull_distance_array_size based on the
507 * given shader.
508 *
509 * Also check for errors based on incorrect usage of gl_ClipVertex,
510 * gl_ClipDistance, and gl_CullDistance.
511 * Additionally test whether the combined size of the gl_ClipDistance and
512 * gl_CullDistance arrays exceeds gl_MaxCombinedClipAndCullDistances.
513 *
514 * Reports a linker error if incorrect usage is found.
515 */
516 static void
517 analyze_clip_cull_usage(struct gl_shader_program *prog,
518 struct gl_linked_shader *shader,
519 const struct gl_constants *consts,
520 struct shader_info *info)
521 {
522 if (consts->DoDCEBeforeClipCullAnalysis) {
523 /* Remove dead functions to avoid raising an error (eg: dead function
524 * writes to gl_ClipVertex, and main() writes to gl_ClipDistance).
525 */
526 do_dead_functions(shader->ir);
527 }
528
529 info->clip_distance_array_size = 0;
530 info->cull_distance_array_size = 0;
531
532 if (prog->data->Version >= (prog->IsES ? 300 : 130)) {
533 /* From section 7.1 (Vertex Shader Special Variables) of the
534 * GLSL 1.30 spec:
535 *
536 * "It is an error for a shader to statically write both
537 * gl_ClipVertex and gl_ClipDistance."
538 *
539 * This does not apply to GLSL ES shaders, since GLSL ES defines neither
540 * gl_ClipVertex nor gl_ClipDistance. However with
541 * GL_EXT_clip_cull_distance, this functionality is exposed in ES 3.0.
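 *
 * Illustrative example (hypothetical shader code): a vertex shader that
 * statically writes both built-ins, e.g.
 *
 *    gl_ClipVertex      = gl_ModelViewMatrix * position;
 *    gl_ClipDistance[0] = dot(plane, position);
 *
 * is rejected by the checks below, even if only one of the writes could
 * ever execute at run time.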
542 */
543 find_variable gl_ClipDistance("gl_ClipDistance");
544 find_variable gl_CullDistance("gl_CullDistance");
545 find_variable gl_ClipVertex("gl_ClipVertex");
546 find_variable * const variables[] = {
547 &gl_ClipDistance,
548 &gl_CullDistance,
549 !prog->IsES ? &gl_ClipVertex : NULL,
550 NULL
551 };
552 find_assignments(shader->ir, variables);
553
554 /* From the ARB_cull_distance spec:
555 *
556 * It is a compile-time or link-time error for the set of shaders forming
557 * a program to statically read or write both gl_ClipVertex and either
558 * gl_ClipDistance or gl_CullDistance.
559 *
560 * This does not apply to GLSL ES shaders, since GLSL ES doesn't define
561 * gl_ClipVertex.
562 */
563 if (!prog->IsES) {
564 if (gl_ClipVertex.found && gl_ClipDistance.found) {
565 linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
566 "and `gl_ClipDistance'\n",
567 _mesa_shader_stage_to_string(shader->Stage));
568 return;
569 }
570 if (gl_ClipVertex.found && gl_CullDistance.found) {
571 linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
572 "and `gl_CullDistance'\n",
573 _mesa_shader_stage_to_string(shader->Stage));
574 return;
575 }
576 }
577
578 if (gl_ClipDistance.found) {
579 ir_variable *clip_distance_var =
580 shader->symbols->get_variable("gl_ClipDistance");
581 assert(clip_distance_var);
582 info->clip_distance_array_size = clip_distance_var->type->length;
583 }
584 if (gl_CullDistance.found) {
585 ir_variable *cull_distance_var =
586 shader->symbols->get_variable("gl_CullDistance");
587 assert(cull_distance_var);
588 info->cull_distance_array_size = cull_distance_var->type->length;
589 }
590 /* From the ARB_cull_distance spec:
591 *
592 * It is a compile-time or link-time error for the set of shaders forming
593 * a program to have the sum of the sizes of the gl_ClipDistance and
594 * gl_CullDistance arrays to be larger than
595 * gl_MaxCombinedClipAndCullDistances.
596 */
597 if ((uint32_t)(info->clip_distance_array_size + info->cull_distance_array_size) >
598 consts->MaxClipPlanes) {
599 linker_error(prog, "%s shader: the combined size of "
600 "'gl_ClipDistance' and 'gl_CullDistance' size cannot "
601 "be larger than "
602 "gl_MaxCombinedClipAndCullDistances (%u)",
603 _mesa_shader_stage_to_string(shader->Stage),
604 consts->MaxClipPlanes);
605 }
606 }
607 }
608
609
610 /**
611 * Verify that a vertex shader executable meets all semantic requirements.
612 *
613 * Also sets info.clip_distance_array_size and
614 * info.cull_distance_array_size as a side effect.
615 *
616 * \param shader Vertex shader executable to be verified
617 */
618 static void
619 validate_vertex_shader_executable(struct gl_shader_program *prog,
620 struct gl_linked_shader *shader,
621 const struct gl_constants *consts)
622 {
623 if (shader == NULL)
624 return;
625
626 /* From the GLSL 1.10 spec, page 48:
627 *
628 * "The variable gl_Position is available only in the vertex
629 * language and is intended for writing the homogeneous vertex
630 * position. All executions of a well-formed vertex shader
631 * executable must write a value into this variable. [...] The
632 * variable gl_Position is available only in the vertex
633 * language and is intended for writing the homogeneous vertex
634 * position. All executions of a well-formed vertex shader
635 * executable must write a value into this variable."
636 *
637 * while in GLSL 1.40 this text is changed to:
638 *
639 * "The variable gl_Position is available only in the vertex
640 * language and is intended for writing the homogeneous vertex
641 * position. It can be written at any time during shader
642 * execution. It may also be read back by a vertex shader
643 * after being written. This value will be used by primitive
644 * assembly, clipping, culling, and other fixed functionality
645 * operations, if present, that operate on primitives after
646 * vertex processing has occurred. Its value is undefined if
647 * the vertex shader executable does not write gl_Position."
648 *
649 * All GLSL ES Versions are similar to GLSL 1.40--failing to write to
650 * gl_Position is not an error.
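 *
 * For example (illustrative): a desktop GLSL 1.20 vertex shader whose
 * main() never assigns gl_Position fails to link here, while the same
 * shader written for GLSL ES 1.00 only receives the warning below.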
651 */
652 if (prog->data->Version < (prog->IsES ? 300 : 140)) {
653 find_variable gl_Position("gl_Position");
654 find_assignments(shader->ir, &gl_Position);
655 if (!gl_Position.found) {
656 if (prog->IsES) {
657 linker_warning(prog,
658 "vertex shader does not write to `gl_Position'. "
659 "Its value is undefined. \n");
660 } else {
661 linker_error(prog,
662 "vertex shader does not write to `gl_Position'. \n");
663 }
664 return;
665 }
666 }
667
668 analyze_clip_cull_usage(prog, shader, consts, &shader->Program->info);
669 }
670
671 static void
672 validate_tess_eval_shader_executable(struct gl_shader_program *prog,
673 struct gl_linked_shader *shader,
674 const struct gl_constants *consts)
675 {
676 if (shader == NULL)
677 return;
678
679 analyze_clip_cull_usage(prog, shader, consts, &shader->Program->info);
680 }
681
682
683 /**
684 * Verify that a fragment shader executable meets all semantic requirements
685 *
686 * \param shader Fragment shader executable to be verified
687 */
688 static void
689 validate_fragment_shader_executable(struct gl_shader_program *prog,
690 struct gl_linked_shader *shader)
691 {
692 if (shader == NULL)
693 return;
694
695 find_variable gl_FragColor("gl_FragColor");
696 find_variable gl_FragData("gl_FragData");
697 find_variable * const variables[] = { &gl_FragColor, &gl_FragData, NULL };
698 find_assignments(shader->ir, variables);
699
700 if (gl_FragColor.found && gl_FragData.found) {
701 linker_error(prog, "fragment shader writes to both "
702 "`gl_FragColor' and `gl_FragData'\n");
703 }
704 }
705
706 /**
707 * Verify that a geometry shader executable meets all semantic requirements
708 *
709 * Also sets prog->Geom.VerticesIn, info.clip_distance_array_size, and
710 * info.cull_distance_array_size as a side effect.
711 *
712 * \param shader Geometry shader executable to be verified
713 */
714 static void
715 validate_geometry_shader_executable(struct gl_shader_program *prog,
716 struct gl_linked_shader *shader,
717 const struct gl_constants *consts)
718 {
719 if (shader == NULL)
720 return;
721
722 unsigned num_vertices =
723 vertices_per_prim(shader->Program->info.gs.input_primitive);
724 prog->Geom.VerticesIn = num_vertices;
725
726 analyze_clip_cull_usage(prog, shader, consts, &shader->Program->info);
727 }
728
729 /**
730 * Check if geometry shaders emit to non-zero streams and do corresponding
731 * validations.
732 */
733 static void
734 validate_geometry_shader_emissions(const struct gl_constants *consts,
735 struct gl_shader_program *prog)
736 {
737 struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
738
739 if (sh != NULL) {
740 find_emit_vertex_visitor emit_vertex(consts->MaxVertexStreams - 1);
741 emit_vertex.run(sh->ir);
742 if (emit_vertex.error()) {
743 linker_error(prog, "Invalid call %s(%d). Accepted values for the "
744 "stream parameter are in the range [0, %d].\n",
745 emit_vertex.error_func(),
746 emit_vertex.error_stream(),
747 consts->MaxVertexStreams - 1);
748 }
749 prog->Geom.ActiveStreamMask = emit_vertex.active_stream_mask();
750 prog->Geom.UsesEndPrimitive = emit_vertex.uses_end_primitive();
751
752 /* From the ARB_gpu_shader5 spec:
753 *
754 * "Multiple vertex streams are supported only if the output primitive
755 * type is declared to be "points". A program will fail to link if it
756 * contains a geometry shader calling EmitStreamVertex() or
757 * EndStreamPrimitive() if its output primitive type is not "points".
758 *
759 * However, in the same spec:
760 *
761 * "The function EmitVertex() is equivalent to calling EmitStreamVertex()
762 * with <stream> set to zero."
763 *
764 * And:
765 *
766 * "The function EndPrimitive() is equivalent to calling
767 * EndStreamPrimitive() with <stream> set to zero."
768 *
769 * Since we can call EmitVertex() and EndPrimitive() when we output
770 * primitives other than points, calling EmitStreamVertex(0) or
771 * EndStreamPrimitive(0) should not produce errors. This is also what
772 * Nvidia does. We can use prog->Geom.ActiveStreamMask to check whether
773 * only the first (zero) stream is active.
775 */
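/* For example (illustrative): a geometry shader declaring
 * `layout(triangle_strip, max_vertices = 3) out;' may freely use
 * EmitVertex() and EndPrimitive(), but a single EmitStreamVertex(1) call
 * makes stream 1 active and triggers the link error below.
 */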
776 if (prog->Geom.ActiveStreamMask & ~(1 << 0) &&
777 sh->Program->info.gs.output_primitive != GL_POINTS) {
778 linker_error(prog, "EmitStreamVertex(n) and EndStreamPrimitive(n) "
779 "with n>0 requires point output\n");
780 }
781 }
782 }
783
784 bool
785 validate_intrastage_arrays(struct gl_shader_program *prog,
786 ir_variable *const var,
787 ir_variable *const existing,
788 bool match_precision)
789 {
790 /* Consider the types to be "the same" if both types are arrays
791 * of the same type and one of the arrays is implicitly sized.
792 * In addition, set the type of the linked variable to the
793 * explicitly sized array.
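 *
 * Illustrative example (assumed GLSL): one compilation unit declares
 *
 *    uniform vec4 colors[];     // implicitly sized, indexed up to colors[3]
 *
 * while another declares
 *
 *    uniform vec4 colors[8];    // explicitly sized
 *
 * These are treated as matching, and the linked variable takes the
 * explicitly sized type, provided every implicit access fits within it.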
794 */
795 if (var->type->is_array() && existing->type->is_array()) {
796 const glsl_type *no_array_var = var->type->fields.array;
797 const glsl_type *no_array_existing = existing->type->fields.array;
798 bool type_matches;
799
800 type_matches = (match_precision ?
801 no_array_var == no_array_existing :
802 no_array_var->compare_no_precision(no_array_existing));
803
804 if (type_matches &&
805 ((var->type->length == 0)|| (existing->type->length == 0))) {
806 if (var->type->length != 0) {
807 if ((int)var->type->length <= existing->data.max_array_access) {
808 linker_error(prog, "%s `%s' declared as type "
809 "`%s' but outermost dimension has an index"
810 " of `%i'\n",
811 mode_string(var),
812 var->name, var->type->name,
813 existing->data.max_array_access);
814 }
815 existing->type = var->type;
816 return true;
817 } else if (existing->type->length != 0) {
818 if((int)existing->type->length <= var->data.max_array_access &&
819 !existing->data.from_ssbo_unsized_array) {
820 linker_error(prog, "%s `%s' declared as type "
821 "`%s' but outermost dimension has an index"
822 " of `%i'\n",
823 mode_string(var),
824 var->name, existing->type->name,
825 var->data.max_array_access);
826 }
827 return true;
828 }
829 }
830 }
831 return false;
832 }
833
834
835 /**
836 * Perform validation of global variables used across multiple shaders
837 */
838 static void
839 cross_validate_globals(const struct gl_constants *consts,
840 struct gl_shader_program *prog,
841 struct exec_list *ir, glsl_symbol_table *variables,
842 bool uniforms_only)
843 {
844 foreach_in_list(ir_instruction, node, ir) {
845 ir_variable *const var = node->as_variable();
846
847 if (var == NULL)
848 continue;
849
850 if (uniforms_only && (var->data.mode != ir_var_uniform && var->data.mode != ir_var_shader_storage))
851 continue;
852
853 /* don't cross validate subroutine uniforms */
854 if (var->type->contains_subroutine())
855 continue;
856
857 /* Don't cross validate interface instances. These are only relevant
858 * inside a shader. The cross validation is done at the Interface Block
859 * name level.
860 */
861 if (var->is_interface_instance())
862 continue;
863
864 /* Don't cross validate temporaries that are at global scope. These
865 * will eventually get pulled into the shader's 'main'.
866 */
867 if (var->data.mode == ir_var_temporary)
868 continue;
869
870 /* If a global with this name has already been seen, verify that the
871 * new instance has the same type. In addition, if the globals have
872 * initializers, the values of the initializers must be the same.
873 */
874 ir_variable *const existing = variables->get_variable(var->name);
875 if (existing != NULL) {
876 /* Check if types match. */
877 if (var->type != existing->type) {
878 if (!validate_intrastage_arrays(prog, var, existing)) {
879 /* If it is an unsized array in a Shader Storage Block,
880 * two different shaders can access different elements.
881 * Because of that, they might be converted to arrays of
882 * different sizes, so check that they are compatible but
883 * ignore the array size.
884 */
885 if (!(var->data.mode == ir_var_shader_storage &&
886 var->data.from_ssbo_unsized_array &&
887 existing->data.mode == ir_var_shader_storage &&
888 existing->data.from_ssbo_unsized_array &&
889 var->type->gl_type == existing->type->gl_type)) {
890 linker_error(prog, "%s `%s' declared as type "
891 "`%s' and type `%s'\n",
892 mode_string(var),
893 var->name, var->type->name,
894 existing->type->name);
895 return;
896 }
897 }
898 }
899
900 if (var->data.explicit_location) {
901 if (existing->data.explicit_location
902 && (var->data.location != existing->data.location)) {
903 linker_error(prog, "explicit locations for %s "
904 "`%s' have differing values\n",
905 mode_string(var), var->name);
906 return;
907 }
908
909 if (var->data.location_frac != existing->data.location_frac) {
910 linker_error(prog, "explicit components for %s `%s' have "
911 "differing values\n", mode_string(var), var->name);
912 return;
913 }
914
915 existing->data.location = var->data.location;
916 existing->data.explicit_location = true;
917 } else {
918 /* Check if a uniform with an implicit location was marked explicit
919 * by an earlier shader stage. If so, mark it explicit in this stage
920 * too to make sure later processing does not treat it as an
921 * implicit one.
922 */
923 if (existing->data.explicit_location) {
924 var->data.location = existing->data.location;
925 var->data.explicit_location = true;
926 }
927 }
928
929 /* From the GLSL 4.20 specification:
930 * "A link error will result if two compilation units in a program
931 * specify different integer-constant bindings for the same
932 * opaque-uniform name. However, it is not an error to specify a
933 * binding on some but not all declarations for the same name"
934 */
935 if (var->data.explicit_binding) {
936 if (existing->data.explicit_binding &&
937 var->data.binding != existing->data.binding) {
938 linker_error(prog, "explicit bindings for %s "
939 "`%s' have differing values\n",
940 mode_string(var), var->name);
941 return;
942 }
943
944 existing->data.binding = var->data.binding;
945 existing->data.explicit_binding = true;
946 }
947
948 if (var->type->contains_atomic() &&
949 var->data.offset != existing->data.offset) {
950 linker_error(prog, "offset specifications for %s "
951 "`%s' have differing values\n",
952 mode_string(var), var->name);
953 return;
954 }
955
956 /* Validate layout qualifiers for gl_FragDepth.
957 *
958 * From the AMD/ARB_conservative_depth specs:
959 *
960 * "If gl_FragDepth is redeclared in any fragment shader in a
961 * program, it must be redeclared in all fragment shaders in
962 * that program that have static assignments to
963 * gl_FragDepth. All redeclarations of gl_FragDepth in all
964 * fragment shaders in a single program must have the same set
965 * of qualifiers."
966 */
967 if (strcmp(var->name, "gl_FragDepth") == 0) {
968 bool layout_declared = var->data.depth_layout != ir_depth_layout_none;
969 bool layout_differs =
970 var->data.depth_layout != existing->data.depth_layout;
971
972 if (layout_declared && layout_differs) {
973 linker_error(prog,
974 "All redeclarations of gl_FragDepth in all "
975 "fragment shaders in a single program must have "
976 "the same set of qualifiers.\n");
977 }
978
979 if (var->data.used && layout_differs) {
980 linker_error(prog,
981 "If gl_FragDepth is redeclared with a layout "
982 "qualifier in any fragment shader, it must be "
983 "redeclared with the same layout qualifier in "
984 "all fragment shaders that have assignments to "
985 "gl_FragDepth\n");
986 }
987 }
988
989 /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
990 *
991 * "If a shared global has multiple initializers, the
992 * initializers must all be constant expressions, and they
993 * must all have the same value. Otherwise, a link error will
994 * result. (A shared global having only one initializer does
995 * not require that initializer to be a constant expression.)"
996 *
997 * Previous to 4.20 the GLSL spec simply said that initializers
998 * must have the same value. In this case of non-constant
999 * initializers, this was impossible to determine. As a result,
1000 * no vendor actually implemented that behavior. The 4.20
1001 * behavior matches the implemented behavior of at least one other
1002 * vendor, so we'll implement that for all GLSL versions.
1003 * If (at least) one of these constant expressions is implicit,
1004 * because it was added by glsl_zero_init, we skip the verification.
1005 */
1006 if (var->constant_initializer != NULL) {
1007 if (existing->constant_initializer != NULL &&
1008 !existing->data.is_implicit_initializer &&
1009 !var->data.is_implicit_initializer) {
1010 if (!var->constant_initializer->has_value(existing->constant_initializer)) {
1011 linker_error(prog, "initializers for %s "
1012 "`%s' have differing values\n",
1013 mode_string(var), var->name);
1014 return;
1015 }
1016 } else {
1017 /* If the first-seen instance of a particular uniform did
1018 * not have an initializer but a later instance does,
1019 * replace the former with the latter.
1020 */
1021 if (!var->data.is_implicit_initializer)
1022 variables->replace_variable(existing->name, var);
1023 }
1024 }
1025
1026 if (var->data.has_initializer) {
1027 if (existing->data.has_initializer
1028 && (var->constant_initializer == NULL
1029 || existing->constant_initializer == NULL)) {
1030 linker_error(prog,
1031 "shared global variable `%s' has multiple "
1032 "non-constant initializers.\n",
1033 var->name);
1034 return;
1035 }
1036 }
1037
1038 if (existing->data.explicit_invariant != var->data.explicit_invariant) {
1039 linker_error(prog, "declarations for %s `%s' have "
1040 "mismatching invariant qualifiers\n",
1041 mode_string(var), var->name);
1042 return;
1043 }
1044 if (existing->data.centroid != var->data.centroid) {
1045 linker_error(prog, "declarations for %s `%s' have "
1046 "mismatching centroid qualifiers\n",
1047 mode_string(var), var->name);
1048 return;
1049 }
1050 if (existing->data.sample != var->data.sample) {
1051 linker_error(prog, "declarations for %s `%s` have "
1052 "mismatching sample qualifiers\n",
1053 mode_string(var), var->name);
1054 return;
1055 }
1056 if (existing->data.image_format != var->data.image_format) {
1057 linker_error(prog, "declarations for %s `%s` have "
1058 "mismatching image format qualifiers\n",
1059 mode_string(var), var->name);
1060 return;
1061 }
1062
1063 /* Check the precision qualifier matches for uniform variables on
1064 * GLSL ES.
1065 */
1066 if (!consts->AllowGLSLRelaxedES &&
1067 prog->IsES && !var->get_interface_type() &&
1068 existing->data.precision != var->data.precision) {
1069 if ((existing->data.used && var->data.used) || prog->data->Version >= 300) {
1070 linker_error(prog, "declarations for %s `%s` have "
1071 "mismatching precision qualifiers\n",
1072 mode_string(var), var->name);
1073 return;
1074 } else {
1075 linker_warning(prog, "declarations for %s `%s` have "
1076 "mismatching precision qualifiers\n",
1077 mode_string(var), var->name);
1078 }
1079 }
1080
1081 /* In OpenGL GLSL 3.20 spec, section 4.3.9:
1082 *
1083 * "It is a link-time error if any particular shader interface
1084 * contains:
1085 *
1086 * - two different blocks, each having no instance name, and each
1087 * having a member of the same name, or
1088 *
1089 * - a variable outside a block, and a block with no instance name,
1090 * where the variable has the same name as a member in the block."
1091 */
1092 const glsl_type *var_itype = var->get_interface_type();
1093 const glsl_type *existing_itype = existing->get_interface_type();
1094 if (var_itype != existing_itype) {
1095 if (!var_itype || !existing_itype) {
1096 linker_error(prog, "declarations for %s `%s` are inside block "
1097 "`%s` and outside a block",
1098 mode_string(var), var->name,
1099 var_itype ? var_itype->name : existing_itype->name);
1100 return;
1101 } else if (strcmp(var_itype->name, existing_itype->name) != 0) {
1102 linker_error(prog, "declarations for %s `%s` are inside blocks "
1103 "`%s` and `%s`",
1104 mode_string(var), var->name,
1105 existing_itype->name,
1106 var_itype->name);
1107 return;
1108 }
1109 }
1110 } else
1111 variables->add_variable(var);
1112 }
1113 }
1114
1115
1116 /**
1117 * Perform validation of uniforms used across multiple shader stages
1118 */
1119 static void
1120 cross_validate_uniforms(const struct gl_constants *consts,
1121 struct gl_shader_program *prog)
1122 {
1123 glsl_symbol_table variables;
1124 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1125 if (prog->_LinkedShaders[i] == NULL)
1126 continue;
1127
1128 cross_validate_globals(consts, prog, prog->_LinkedShaders[i]->ir,
1129 &variables, true);
1130 }
1131 }
1132
1133 /**
1134 * Accumulates the array of buffer blocks and checks that all definitions of
1135 * blocks agree on their contents.
1136 */
1137 static bool
1138 interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog,
1139 bool validate_ssbo)
1140 {
1141 int *ifc_blk_stage_idx[MESA_SHADER_STAGES];
1142 struct gl_uniform_block *blks = NULL;
1143 unsigned *num_blks = validate_ssbo ? &prog->data->NumShaderStorageBlocks :
1144 &prog->data->NumUniformBlocks;
1145
1146 unsigned max_num_buffer_blocks = 0;
1147 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1148 if (prog->_LinkedShaders[i]) {
1149 if (validate_ssbo) {
1150 max_num_buffer_blocks +=
1151 prog->_LinkedShaders[i]->Program->info.num_ssbos;
1152 } else {
1153 max_num_buffer_blocks +=
1154 prog->_LinkedShaders[i]->Program->info.num_ubos;
1155 }
1156 }
1157 }
1158
1159 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1160 struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1161
1162 ifc_blk_stage_idx[i] =
1163 (int *) malloc(sizeof(int) * max_num_buffer_blocks);
1164 for (unsigned int j = 0; j < max_num_buffer_blocks; j++)
1165 ifc_blk_stage_idx[i][j] = -1;
1166
1167 if (sh == NULL)
1168 continue;
1169
1170 unsigned sh_num_blocks;
1171 struct gl_uniform_block **sh_blks;
1172 if (validate_ssbo) {
1173 sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ssbos;
1174 sh_blks = sh->Program->sh.ShaderStorageBlocks;
1175 } else {
1176 sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ubos;
1177 sh_blks = sh->Program->sh.UniformBlocks;
1178 }
1179
1180 for (unsigned int j = 0; j < sh_num_blocks; j++) {
1181 int index = link_cross_validate_uniform_block(prog->data, &blks,
1182 num_blks, sh_blks[j]);
1183
1184 if (index == -1) {
1185 linker_error(prog, "buffer block `%s' has mismatching "
1186 "definitions\n", sh_blks[j]->name.string);
1187
1188 for (unsigned k = 0; k <= i; k++) {
1189 free(ifc_blk_stage_idx[k]);
1190 }
1191
1192 /* Reset the block count. This will help avoid various segfaults
1193 * from api calls that assume the array exists due to the count
1194 * being non-zero.
1195 */
1196 *num_blks = 0;
1197 return false;
1198 }
1199
1200 ifc_blk_stage_idx[i][index] = j;
1201 }
1202 }
1203
1204 /* Update per stage block pointers to point to the program list.
1205 * FIXME: We should be able to free the per stage blocks here.
1206 */
1207 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1208 for (unsigned j = 0; j < *num_blks; j++) {
1209 int stage_index = ifc_blk_stage_idx[i][j];
1210
1211 if (stage_index != -1) {
1212 struct gl_linked_shader *sh = prog->_LinkedShaders[i];
1213
1214 struct gl_uniform_block **sh_blks = validate_ssbo ?
1215 sh->Program->sh.ShaderStorageBlocks :
1216 sh->Program->sh.UniformBlocks;
1217
1218 blks[j].stageref |= sh_blks[stage_index]->stageref;
1219 sh_blks[stage_index] = &blks[j];
1220 }
1221 }
1222 }
1223
1224 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
1225 free(ifc_blk_stage_idx[i]);
1226 }
1227
1228 if (validate_ssbo)
1229 prog->data->ShaderStorageBlocks = blks;
1230 else
1231 prog->data->UniformBlocks = blks;
1232
1233 return true;
1234 }
1235
1236 /**
1237 * Verifies the invariance of built-in special variables.
1238 */
1239 static bool
1240 validate_invariant_builtins(struct gl_shader_program *prog,
1241 const gl_linked_shader *vert,
1242 const gl_linked_shader *frag)
1243 {
1244 const ir_variable *var_vert;
1245 const ir_variable *var_frag;
1246
1247 if (!vert || !frag)
1248 return true;
1249
1250 /*
1251 * From OpenGL ES Shading Language 1.0 specification
1252 * (4.6.4 Invariance and Linkage):
1253 * "The invariance of varyings that are declared in both the vertex and
1254 * fragment shaders must match. For the built-in special variables,
1255 * gl_FragCoord can only be declared invariant if and only if
1256 * gl_Position is declared invariant. Similarly gl_PointCoord can only
1257 * be declared invariant if and only if gl_PointSize is declared
1258 * invariant. It is an error to declare gl_FrontFacing as invariant.
1259 * The invariance of gl_FrontFacing is the same as the invariance of
1260 * gl_Position."
1261 */
1262 var_frag = frag->symbols->get_variable("gl_FragCoord");
1263 if (var_frag && var_frag->data.invariant) {
1264 var_vert = vert->symbols->get_variable("gl_Position");
1265 if (var_vert && !var_vert->data.invariant) {
1266 linker_error(prog,
1267 "fragment shader built-in `%s' has invariant qualifier, "
1268 "but vertex shader built-in `%s' lacks invariant qualifier\n",
1269 var_frag->name, var_vert->name);
1270 return false;
1271 }
1272 }
1273
1274 var_frag = frag->symbols->get_variable("gl_PointCoord");
1275 if (var_frag && var_frag->data.invariant) {
1276 var_vert = vert->symbols->get_variable("gl_PointSize");
1277 if (var_vert && !var_vert->data.invariant) {
1278 linker_error(prog,
1279 "fragment shader built-in `%s' has invariant qualifier, "
1280 "but vertex shader built-in `%s' lacks invariant qualifier\n",
1281 var_frag->name, var_vert->name);
1282 return false;
1283 }
1284 }
1285
1286 var_frag = frag->symbols->get_variable("gl_FrontFacing");
1287 if (var_frag && var_frag->data.invariant) {
1288 linker_error(prog,
1289 "fragment shader built-in `%s' can not be declared as invariant\n",
1290 var_frag->name);
1291 return false;
1292 }
1293
1294 return true;
1295 }
1296
1297 /**
1298 * Populates a shader's symbol table with all global declarations
1299 */
1300 static void
1301 populate_symbol_table(gl_linked_shader *sh, glsl_symbol_table *symbols)
1302 {
1303 sh->symbols = new(sh) glsl_symbol_table;
1304
1305 _mesa_glsl_copy_symbols_from_table(sh->ir, symbols, sh->symbols);
1306 }
1307
1308
1309 /**
1310 * Remap variables referenced in an instruction tree
1311 *
1312 * This is used when instruction trees are cloned from one shader and placed in
1313 * another. These trees will contain references to \c ir_variable nodes that
1314 * do not exist in the target shader. This function finds these \c ir_variable
1315 * references and replaces the references with matching variables in the target
1316 * shader.
1317 *
1318 * If there is no matching variable in the target shader, a clone of the
1319 * \c ir_variable is made and added to the target shader. The new variable is
1320 * added to \b both the instruction stream and the symbol table.
1321 *
1322 * \param inst   IR tree that is to be processed.
1323 * \param target Linked shader whose symbol table and instruction stream
1324 *               receive any newly created variables.
1325 * \param temps  Hash table mapping temporary variables in the source
1326 *               shader to their clones in the target shader.
1327 */
1328 static void
1329 remap_variables(ir_instruction *inst, struct gl_linked_shader *target,
1330 hash_table *temps)
1331 {
1332 class remap_visitor : public ir_hierarchical_visitor {
1333 public:
1334 remap_visitor(struct gl_linked_shader *target, hash_table *temps)
1335 {
1336 this->target = target;
1337 this->symbols = target->symbols;
1338 this->instructions = target->ir;
1339 this->temps = temps;
1340 }
1341
1342 virtual ir_visitor_status visit(ir_dereference_variable *ir)
1343 {
1344 if (ir->var->data.mode == ir_var_temporary) {
1345 hash_entry *entry = _mesa_hash_table_search(temps, ir->var);
1346 ir_variable *var = entry ? (ir_variable *) entry->data : NULL;
1347
1348 assert(var != NULL);
1349 ir->var = var;
1350 return visit_continue;
1351 }
1352
1353 ir_variable *const existing =
1354 this->symbols->get_variable(ir->var->name);
1355 if (existing != NULL)
1356 ir->var = existing;
1357 else {
1358 ir_variable *copy = ir->var->clone(this->target, NULL);
1359
1360 this->symbols->add_variable(copy);
1361 this->instructions->push_head(copy);
1362 ir->var = copy;
1363 }
1364
1365 return visit_continue;
1366 }
1367
1368 private:
1369 struct gl_linked_shader *target;
1370 glsl_symbol_table *symbols;
1371 exec_list *instructions;
1372 hash_table *temps;
1373 };
1374
1375 remap_visitor v(target, temps);
1376
1377 inst->accept(&v);
1378 }
1379
1380
1381 /**
1382 * Move non-declarations from one instruction stream to another
1383 *
1384 * The intended usage pattern of this function is to pass the pointer to the
1385 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
1386 * pointer) for \c last and \c false for \c make_copies on the first
1387 * call. Successive calls pass the return value of the previous call for
1388 * \c last and \c true for \c make_copies.
1389 *
1390 * \param instructions Source instruction stream
1391 * \param last Instruction after which new instructions should be
1392 * inserted in the target instruction stream
1393 * \param make_copies Flag selecting whether instructions in \c instructions
1394 * should be copied (via \c ir_instruction::clone) into the
1395 * target list or moved.
1396 *
1397 * \return
1398 * The new "last" instruction in the target instruction stream. This pointer
1399 * is suitable for use as the \c last parameter of a later call to this
1400 * function.
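 *
 * Usage sketch following this pattern (names such as `linked' and
 * `shader_list' are illustrative, not taken from this file):
 *
 *    exec_node *insertion_point = (exec_node *) linked->ir;
 *    insertion_point = move_non_declarations(shader_list[0]->ir,
 *                                            insertion_point, false, linked);
 *    insertion_point = move_non_declarations(shader_list[1]->ir,
 *                                            insertion_point, true, linked);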
1401 */
1402 static exec_node *
1403 move_non_declarations(exec_list *instructions, exec_node *last,
1404 bool make_copies, gl_linked_shader *target)
1405 {
1406 hash_table *temps = NULL;
1407
1408 if (make_copies)
1409 temps = _mesa_pointer_hash_table_create(NULL);
1410
1411 foreach_in_list_safe(ir_instruction, inst, instructions) {
1412 if (inst->as_function())
1413 continue;
1414
1415 ir_variable *var = inst->as_variable();
1416 if ((var != NULL) && (var->data.mode != ir_var_temporary))
1417 continue;
1418
1419 assert(inst->as_assignment()
1420 || inst->as_call()
1421 || inst->as_if() /* for initializers with the ?: operator */
1422 || ((var != NULL) && (var->data.mode == ir_var_temporary)));
1423
1424 if (make_copies) {
1425 inst = inst->clone(target, NULL);
1426
1427 if (var != NULL)
1428 _mesa_hash_table_insert(temps, var, inst);
1429 else
1430 remap_variables(inst, target, temps);
1431 } else {
1432 inst->remove();
1433 }
1434
1435 last->insert_after(inst);
1436 last = inst;
1437 }
1438
1439 if (make_copies)
1440 _mesa_hash_table_destroy(temps, NULL);
1441
1442 return last;
1443 }
1444
1445
1446 /**
1447 * This class is only used in link_intrastage_shaders() below but declaring
1448 * it inside that function leads to compiler warnings with some versions of
1449 * gcc.
1450 */
1451 class array_sizing_visitor : public deref_type_updater {
1452 public:
1453 using deref_type_updater::visit;
1454
1455 array_sizing_visitor()
1456 : mem_ctx(ralloc_context(NULL)),
1457 unnamed_interfaces(_mesa_pointer_hash_table_create(NULL))
1458 {
1459 }
1460
1461 ~array_sizing_visitor()
1462 {
1463 _mesa_hash_table_destroy(this->unnamed_interfaces, NULL);
1464 ralloc_free(this->mem_ctx);
1465 }
1466
1467 virtual ir_visitor_status visit(ir_variable *var)
1468 {
1469 const glsl_type *type_without_array;
1470 bool implicit_sized_array = var->data.implicit_sized_array;
1471 fixup_type(&var->type, var->data.max_array_access,
1472 var->data.from_ssbo_unsized_array,
1473 &implicit_sized_array);
1474 var->data.implicit_sized_array = implicit_sized_array;
1475 type_without_array = var->type->without_array();
1476 if (var->type->is_interface()) {
1477 if (interface_contains_unsized_arrays(var->type)) {
1478 const glsl_type *new_type =
1479 resize_interface_members(var->type,
1480 var->get_max_ifc_array_access(),
1481 var->is_in_shader_storage_block());
1482 var->type = new_type;
1483 var->change_interface_type(new_type);
1484 }
1485 } else if (type_without_array->is_interface()) {
1486 if (interface_contains_unsized_arrays(type_without_array)) {
1487 const glsl_type *new_type =
1488 resize_interface_members(type_without_array,
1489 var->get_max_ifc_array_access(),
1490 var->is_in_shader_storage_block());
1491 var->change_interface_type(new_type);
1492 var->type = update_interface_members_array(var->type, new_type);
1493 }
1494 } else if (const glsl_type *ifc_type = var->get_interface_type()) {
1495 /* Store a pointer to the variable in the unnamed_interfaces
1496 * hashtable.
1497 */
1498 hash_entry *entry =
1499 _mesa_hash_table_search(this->unnamed_interfaces,
1500 ifc_type);
1501
1502 ir_variable **interface_vars = entry ? (ir_variable **) entry->data : NULL;
1503
1504 if (interface_vars == NULL) {
1505 interface_vars = rzalloc_array(mem_ctx, ir_variable *,
1506 ifc_type->length);
1507 _mesa_hash_table_insert(this->unnamed_interfaces, ifc_type,
1508 interface_vars);
1509 }
1510 unsigned index = ifc_type->field_index(var->name);
1511 assert(index < ifc_type->length);
1512 assert(interface_vars[index] == NULL);
1513 interface_vars[index] = var;
1514 }
1515 return visit_continue;
1516 }
1517
1518 /**
1519 * For each unnamed interface block that was discovered while running the
1520 * visitor, adjust the interface type to reflect the newly assigned array
1521 * sizes, and fix up the ir_variable nodes to point to the new interface
1522 * type.
1523 */
1524 void fixup_unnamed_interface_types()
1525 {
1526 hash_table_call_foreach(this->unnamed_interfaces,
1527 fixup_unnamed_interface_type, NULL);
1528 }
1529
1530 private:
1531 /**
1532 * If the type pointed to by \c type represents an unsized array, replace
1533 * it with a sized array whose size is determined by max_array_access.
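 *
 * For example (illustrative), an unsized declaration `float a[];' whose
 * highest recorded access is a[3] becomes `float a[4]' and is flagged as
 * an implicitly sized array.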
1534 */
1535 static void fixup_type(const glsl_type **type, unsigned max_array_access,
1536 bool from_ssbo_unsized_array, bool *implicit_sized)
1537 {
1538 if (!from_ssbo_unsized_array && (*type)->is_unsized_array()) {
1539 *type = glsl_type::get_array_instance((*type)->fields.array,
1540 max_array_access + 1);
1541 *implicit_sized = true;
1542 assert(*type != NULL);
1543 }
1544 }
1545
1546 static const glsl_type *
1547 update_interface_members_array(const glsl_type *type,
1548 const glsl_type *new_interface_type)
1549 {
1550 const glsl_type *element_type = type->fields.array;
1551 if (element_type->is_array()) {
1552 const glsl_type *new_array_type =
1553 update_interface_members_array(element_type, new_interface_type);
1554 return glsl_type::get_array_instance(new_array_type, type->length);
1555 } else {
1556 return glsl_type::get_array_instance(new_interface_type,
1557 type->length);
1558 }
1559 }
1560
1561 /**
1562 * Determine whether the given interface type contains unsized arrays (if
1563 * it doesn't, array_sizing_visitor doesn't need to process it).
1564 */
1565 static bool interface_contains_unsized_arrays(const glsl_type *type)
1566 {
1567 for (unsigned i = 0; i < type->length; i++) {
1568 const glsl_type *elem_type = type->fields.structure[i].type;
1569 if (elem_type->is_unsized_array())
1570 return true;
1571 }
1572 return false;
1573 }
1574
1575 /**
1576 * Create a new interface type based on the given type, with unsized arrays
1577 * replaced by sized arrays whose size is determined by
1578 * max_ifc_array_access.
1579 */
1580 static const glsl_type *
1581 resize_interface_members(const glsl_type *type,
1582 const int *max_ifc_array_access,
1583 bool is_ssbo)
1584 {
1585 unsigned num_fields = type->length;
1586 glsl_struct_field *fields = new glsl_struct_field[num_fields];
1587 memcpy(fields, type->fields.structure,
1588 num_fields * sizeof(*fields));
1589 for (unsigned i = 0; i < num_fields; i++) {
1590 bool implicit_sized_array = fields[i].implicit_sized_array;
1591 /* If SSBO last member is unsized array, we don't replace it by a sized
1592 * array.
1593 */
1594 if (is_ssbo && i == (num_fields - 1))
1595 fixup_type(&fields[i].type, max_ifc_array_access[i],
1596 true, &implicit_sized_array);
1597 else
1598 fixup_type(&fields[i].type, max_ifc_array_access[i],
1599 false, &implicit_sized_array);
1600 fields[i].implicit_sized_array = implicit_sized_array;
1601 }
1602 glsl_interface_packing packing =
1603 (glsl_interface_packing) type->interface_packing;
1604 bool row_major = (bool) type->interface_row_major;
1605 const glsl_type *new_ifc_type =
1606 glsl_type::get_interface_instance(fields, num_fields,
1607 packing, row_major, type->name);
1608 delete [] fields;
1609 return new_ifc_type;
1610 }
1611
1612 static void fixup_unnamed_interface_type(const void *key, void *data,
1613 void *)
1614 {
1615 const glsl_type *ifc_type = (const glsl_type *) key;
1616 ir_variable **interface_vars = (ir_variable **) data;
1617 unsigned num_fields = ifc_type->length;
1618 glsl_struct_field *fields = new glsl_struct_field[num_fields];
1619 memcpy(fields, ifc_type->fields.structure,
1620 num_fields * sizeof(*fields));
1621 bool interface_type_changed = false;
1622 for (unsigned i = 0; i < num_fields; i++) {
1623 if (interface_vars[i] != NULL &&
1624 fields[i].type != interface_vars[i]->type) {
1625 fields[i].type = interface_vars[i]->type;
1626 interface_type_changed = true;
1627 }
1628 }
1629 if (!interface_type_changed) {
1630 delete [] fields;
1631 return;
1632 }
1633 glsl_interface_packing packing =
1634 (glsl_interface_packing) ifc_type->interface_packing;
1635 bool row_major = (bool) ifc_type->interface_row_major;
1636 const glsl_type *new_ifc_type =
1637 glsl_type::get_interface_instance(fields, num_fields, packing,
1638 row_major, ifc_type->name);
1639 delete [] fields;
1640 for (unsigned i = 0; i < num_fields; i++) {
1641 if (interface_vars[i] != NULL)
1642 interface_vars[i]->change_interface_type(new_ifc_type);
1643 }
1644 }
1645
1646 /**
1647 * Memory context used to allocate the data in \c unnamed_interfaces.
1648 */
1649 void *mem_ctx;
1650
1651 /**
1652 * Hash table from const glsl_type * to an array of ir_variable *'s
1653 * pointing to the ir_variables constituting each unnamed interface block.
1654 */
1655 hash_table *unnamed_interfaces;
1656 };
1657
1658 static bool
validate_xfb_buffer_stride(const struct gl_constants * consts,unsigned idx,struct gl_shader_program * prog)1659 validate_xfb_buffer_stride(const struct gl_constants *consts, unsigned idx,
1660 struct gl_shader_program *prog)
1661 {
1662 /* We will validate doubles at a later stage */
1663 if (prog->TransformFeedback.BufferStride[idx] % 4) {
1664 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
1665 "multiple of 4 or if its applied to a type that is "
1666 "or contains a double a multiple of 8.",
1667 prog->TransformFeedback.BufferStride[idx]);
1668 return false;
1669 }
1670
1671 if (prog->TransformFeedback.BufferStride[idx] / 4 >
1672 consts->MaxTransformFeedbackInterleavedComponents) {
1673 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1674 "limit has been exceeded.");
1675 return false;
1676 }
1677
1678 return true;
1679 }
1680
1681 /**
1682 * Check for conflicting xfb_stride default qualifiers and store buffer stride
1683 * for later use.
1684 */
1685 static void
link_xfb_stride_layout_qualifiers(const struct gl_constants * consts,struct gl_shader_program * prog,struct gl_shader ** shader_list,unsigned num_shaders)1686 link_xfb_stride_layout_qualifiers(const struct gl_constants *consts,
1687 struct gl_shader_program *prog,
1688 struct gl_shader **shader_list,
1689 unsigned num_shaders)
1690 {
1691 for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
1692 prog->TransformFeedback.BufferStride[i] = 0;
1693 }
1694
1695 for (unsigned i = 0; i < num_shaders; i++) {
1696 struct gl_shader *shader = shader_list[i];
1697
1698 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1699 if (shader->TransformFeedbackBufferStride[j]) {
1700 if (prog->TransformFeedback.BufferStride[j] == 0) {
1701 prog->TransformFeedback.BufferStride[j] =
1702 shader->TransformFeedbackBufferStride[j];
1703 if (!validate_xfb_buffer_stride(consts, j, prog))
1704 return;
1705 } else if (prog->TransformFeedback.BufferStride[j] !=
1706 shader->TransformFeedbackBufferStride[j]){
1707 linker_error(prog,
1708 "intrastage shaders defined with conflicting "
1709 "xfb_stride for buffer %d (%d and %d)\n", j,
1710 prog->TransformFeedback.BufferStride[j],
1711 shader->TransformFeedbackBufferStride[j]);
1712 return;
1713 }
1714 }
1715 }
1716 }
1717 }
1718
1719 /**
1720 * Check for conflicting bindless/bound sampler/image layout qualifiers at
1721 * global scope.
1722 */
1723 static void
link_bindless_layout_qualifiers(struct gl_shader_program * prog,struct gl_shader ** shader_list,unsigned num_shaders)1724 link_bindless_layout_qualifiers(struct gl_shader_program *prog,
1725 struct gl_shader **shader_list,
1726 unsigned num_shaders)
1727 {
1728 bool bindless_sampler, bindless_image;
1729 bool bound_sampler, bound_image;
1730
1731 bindless_sampler = bindless_image = false;
1732 bound_sampler = bound_image = false;
1733
1734 for (unsigned i = 0; i < num_shaders; i++) {
1735 struct gl_shader *shader = shader_list[i];
1736
1737 if (shader->bindless_sampler)
1738 bindless_sampler = true;
1739 if (shader->bindless_image)
1740 bindless_image = true;
1741 if (shader->bound_sampler)
1742 bound_sampler = true;
1743 if (shader->bound_image)
1744 bound_image = true;
1745
1746 if ((bindless_sampler && bound_sampler) ||
1747 (bindless_image && bound_image)) {
1748 /* From section 4.4.6 of the ARB_bindless_texture spec:
1749 *
1750 * "If both bindless_sampler and bound_sampler, or bindless_image
1751 * and bound_image, are declared at global scope in any
1752 * compilation unit, a link- time error will be generated."
1753 */
1754 linker_error(prog, "both bindless_sampler and bound_sampler, or "
1755 "bindless_image and bound_image, can't be declared at "
1756 "global scope");
1757 }
1758 }
1759 }
1760
1761 /**
1762 * Check for conflicting viewport_relative settings across shaders, and sets
1763 * the value for the linked shader.
1764 */
1765 static void
link_layer_viewport_relative_qualifier(struct gl_shader_program * prog,struct gl_program * gl_prog,struct gl_shader ** shader_list,unsigned num_shaders)1766 link_layer_viewport_relative_qualifier(struct gl_shader_program *prog,
1767 struct gl_program *gl_prog,
1768 struct gl_shader **shader_list,
1769 unsigned num_shaders)
1770 {
1771 unsigned i;
1772
1773 /* Find first shader with explicit layer declaration */
1774 for (i = 0; i < num_shaders; i++) {
1775 if (shader_list[i]->redeclares_gl_layer) {
1776 gl_prog->info.layer_viewport_relative =
1777 shader_list[i]->layer_viewport_relative;
1778 break;
1779 }
1780 }
1781
1782 /* Now make sure that each subsequent shader's explicit layer declaration
1783 * matches the first one's.
1784 */
1785 for (; i < num_shaders; i++) {
1786 if (shader_list[i]->redeclares_gl_layer &&
1787 shader_list[i]->layer_viewport_relative !=
1788 gl_prog->info.layer_viewport_relative) {
1789 linker_error(prog, "all gl_Layer redeclarations must have identical "
1790 "viewport_relative settings");
1791 }
1792 }
1793 }
1794
1795 /**
1796 * Performs the cross-validation of tessellation control shader vertices and
1797 * layout qualifiers for the attached tessellation control shaders,
1798 * and propagates them to the linked TCS and linked shader program.
1799 */
1800 static void
link_tcs_out_layout_qualifiers(struct gl_shader_program * prog,struct gl_program * gl_prog,struct gl_shader ** shader_list,unsigned num_shaders)1801 link_tcs_out_layout_qualifiers(struct gl_shader_program *prog,
1802 struct gl_program *gl_prog,
1803 struct gl_shader **shader_list,
1804 unsigned num_shaders)
1805 {
1806 if (gl_prog->info.stage != MESA_SHADER_TESS_CTRL)
1807 return;
1808
1809 gl_prog->info.tess.tcs_vertices_out = 0;
1810
1811 /* From the GLSL 4.0 spec (chapter 4.3.8.2):
1812 *
1813 * "All tessellation control shader layout declarations in a program
1814 * must specify the same output patch vertex count. There must be at
1815 * least one layout qualifier specifying an output patch vertex count
1816 * in any program containing tessellation control shaders; however,
1817 * such a declaration is not required in all tessellation control
1818 * shaders."
1819 */
1820
1821 for (unsigned i = 0; i < num_shaders; i++) {
1822 struct gl_shader *shader = shader_list[i];
1823
1824 if (shader->info.TessCtrl.VerticesOut != 0) {
1825 if (gl_prog->info.tess.tcs_vertices_out != 0 &&
1826 gl_prog->info.tess.tcs_vertices_out !=
1827 (unsigned) shader->info.TessCtrl.VerticesOut) {
1828 linker_error(prog, "tessellation control shader defined with "
1829 "conflicting output vertex count (%d and %d)\n",
1830 gl_prog->info.tess.tcs_vertices_out,
1831 shader->info.TessCtrl.VerticesOut);
1832 return;
1833 }
1834 gl_prog->info.tess.tcs_vertices_out =
1835 shader->info.TessCtrl.VerticesOut;
1836 }
1837 }
1838
1839 /* Just do the intrastage -> interstage propagation right now,
1840 * since we already know we're in the right type of shader program
1841 * for doing it.
1842 */
1843 if (gl_prog->info.tess.tcs_vertices_out == 0) {
1844 linker_error(prog, "tessellation control shader didn't declare "
1845 "vertices out layout qualifier\n");
1846 return;
1847 }
1848 }
1849
1850
1851 /**
1852 * Performs the cross-validation of tessellation evaluation shader
1853 * primitive type, vertex spacing, ordering and point_mode layout qualifiers
1854 * for the attached tessellation evaluation shaders, and propagates them
1855 * to the linked TES and linked shader program.
1856 */
1857 static void
link_tes_in_layout_qualifiers(struct gl_shader_program * prog,struct gl_program * gl_prog,struct gl_shader ** shader_list,unsigned num_shaders)1858 link_tes_in_layout_qualifiers(struct gl_shader_program *prog,
1859 struct gl_program *gl_prog,
1860 struct gl_shader **shader_list,
1861 unsigned num_shaders)
1862 {
1863 if (gl_prog->info.stage != MESA_SHADER_TESS_EVAL)
1864 return;
1865
1866 int point_mode = -1;
1867 unsigned vertex_order = 0;
1868
1869 gl_prog->info.tess._primitive_mode = TESS_PRIMITIVE_UNSPECIFIED;
1870 gl_prog->info.tess.spacing = TESS_SPACING_UNSPECIFIED;
1871
1872 /* From the GLSL 4.0 spec (chapter 4.3.8.1):
1873 *
1874 * "At least one tessellation evaluation shader (compilation unit) in
1875 * a program must declare a primitive mode in its input layout.
1876 * Declaration vertex spacing, ordering, and point mode identifiers is
1877 * optional. It is not required that all tessellation evaluation
1878 * shaders in a program declare a primitive mode. If spacing or
1879 * vertex ordering declarations are omitted, the tessellation
1880 * primitive generator will use equal spacing or counter-clockwise
1881 * vertex ordering, respectively. If a point mode declaration is
1882 * omitted, the tessellation primitive generator will produce lines or
1883 * triangles according to the primitive mode."
1884 */
1885
1886 for (unsigned i = 0; i < num_shaders; i++) {
1887 struct gl_shader *shader = shader_list[i];
1888
1889 if (shader->info.TessEval._PrimitiveMode != TESS_PRIMITIVE_UNSPECIFIED) {
1890 if (gl_prog->info.tess._primitive_mode != TESS_PRIMITIVE_UNSPECIFIED &&
1891 gl_prog->info.tess._primitive_mode !=
1892 shader->info.TessEval._PrimitiveMode) {
1893 linker_error(prog, "tessellation evaluation shader defined with "
1894 "conflicting input primitive modes.\n");
1895 return;
1896 }
1897 gl_prog->info.tess._primitive_mode =
1898 shader->info.TessEval._PrimitiveMode;
1899 }
1900
1901 if (shader->info.TessEval.Spacing != 0) {
1902 if (gl_prog->info.tess.spacing != 0 && gl_prog->info.tess.spacing !=
1903 shader->info.TessEval.Spacing) {
1904 linker_error(prog, "tessellation evaluation shader defined with "
1905 "conflicting vertex spacing.\n");
1906 return;
1907 }
1908 gl_prog->info.tess.spacing = shader->info.TessEval.Spacing;
1909 }
1910
1911 if (shader->info.TessEval.VertexOrder != 0) {
1912 if (vertex_order != 0 &&
1913 vertex_order != shader->info.TessEval.VertexOrder) {
1914 linker_error(prog, "tessellation evaluation shader defined with "
1915 "conflicting ordering.\n");
1916 return;
1917 }
1918 vertex_order = shader->info.TessEval.VertexOrder;
1919 }
1920
1921 if (shader->info.TessEval.PointMode != -1) {
1922 if (point_mode != -1 &&
1923 point_mode != shader->info.TessEval.PointMode) {
1924 linker_error(prog, "tessellation evaluation shader defined with "
1925 "conflicting point modes.\n");
1926 return;
1927 }
1928 point_mode = shader->info.TessEval.PointMode;
1929 }
1930
1931 }
1932
1933 /* Just do the intrastage -> interstage propagation right now,
1934 * since we already know we're in the right type of shader program
1935 * for doing it.
1936 */
1937 if (gl_prog->info.tess._primitive_mode == TESS_PRIMITIVE_UNSPECIFIED) {
1938 linker_error(prog,
1939 "tessellation evaluation shader didn't declare input "
1940 "primitive modes.\n");
1941 return;
1942 }
1943
1944 if (gl_prog->info.tess.spacing == TESS_SPACING_UNSPECIFIED)
1945 gl_prog->info.tess.spacing = TESS_SPACING_EQUAL;
1946
1947 if (vertex_order == 0 || vertex_order == GL_CCW)
1948 gl_prog->info.tess.ccw = true;
1949 else
1950 gl_prog->info.tess.ccw = false;
1951
1952
1953 if (point_mode == -1 || point_mode == GL_FALSE)
1954 gl_prog->info.tess.point_mode = false;
1955 else
1956 gl_prog->info.tess.point_mode = true;
1957 }
1958
1959
1960 /**
1961 * Performs the cross-validation of layout qualifiers specified in
1962 * redeclaration of gl_FragCoord for the attached fragment shaders,
1963 * and propagates them to the linked FS and linked shader program.
1964 */
1965 static void
link_fs_inout_layout_qualifiers(struct gl_shader_program * prog,struct gl_linked_shader * linked_shader,struct gl_shader ** shader_list,unsigned num_shaders)1966 link_fs_inout_layout_qualifiers(struct gl_shader_program *prog,
1967 struct gl_linked_shader *linked_shader,
1968 struct gl_shader **shader_list,
1969 unsigned num_shaders)
1970 {
1971 bool redeclares_gl_fragcoord = false;
1972 bool uses_gl_fragcoord = false;
1973 bool origin_upper_left = false;
1974 bool pixel_center_integer = false;
1975
1976 if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
1977 (prog->data->Version < 150 &&
1978 !prog->ARB_fragment_coord_conventions_enable))
1979 return;
1980
1981 for (unsigned i = 0; i < num_shaders; i++) {
1982 struct gl_shader *shader = shader_list[i];
1983 /* From the GLSL 1.50 spec, page 39:
1984 *
1985 * "If gl_FragCoord is redeclared in any fragment shader in a program,
1986 * it must be redeclared in all the fragment shaders in that program
1987 * that have a static use gl_FragCoord."
1988 */
1989 if ((redeclares_gl_fragcoord && !shader->redeclares_gl_fragcoord &&
1990 shader->uses_gl_fragcoord)
1991 || (shader->redeclares_gl_fragcoord && !redeclares_gl_fragcoord &&
1992 uses_gl_fragcoord)) {
1993 linker_error(prog, "fragment shader defined with conflicting "
1994 "layout qualifiers for gl_FragCoord\n");
1995 }
1996
1997 /* From the GLSL 1.50 spec, page 39:
1998 *
1999 * "All redeclarations of gl_FragCoord in all fragment shaders in a
2000 * single program must have the same set of qualifiers."
2001 */
2002 if (redeclares_gl_fragcoord && shader->redeclares_gl_fragcoord &&
2003 (shader->origin_upper_left != origin_upper_left ||
2004 shader->pixel_center_integer != pixel_center_integer)) {
2005 linker_error(prog, "fragment shader defined with conflicting "
2006 "layout qualifiers for gl_FragCoord\n");
2007 }
2008
2009 /* Update the linked shader state. Note that uses_gl_fragcoord should
2010 * accumulate the results. The other values should replace. If there
2011 * are multiple redeclarations, all the fields except uses_gl_fragcoord
2012 * are already known to be the same.
2013 */
2014 if (shader->redeclares_gl_fragcoord || shader->uses_gl_fragcoord) {
2015 redeclares_gl_fragcoord = shader->redeclares_gl_fragcoord;
2016 uses_gl_fragcoord |= shader->uses_gl_fragcoord;
2017 origin_upper_left = shader->origin_upper_left;
2018 pixel_center_integer = shader->pixel_center_integer;
2019 }
2020
2021 linked_shader->Program->info.fs.early_fragment_tests |=
2022 shader->EarlyFragmentTests || shader->PostDepthCoverage;
2023 linked_shader->Program->info.fs.inner_coverage |= shader->InnerCoverage;
2024 linked_shader->Program->info.fs.post_depth_coverage |=
2025 shader->PostDepthCoverage;
2026 linked_shader->Program->info.fs.pixel_interlock_ordered |=
2027 shader->PixelInterlockOrdered;
2028 linked_shader->Program->info.fs.pixel_interlock_unordered |=
2029 shader->PixelInterlockUnordered;
2030 linked_shader->Program->info.fs.sample_interlock_ordered |=
2031 shader->SampleInterlockOrdered;
2032 linked_shader->Program->info.fs.sample_interlock_unordered |=
2033 shader->SampleInterlockUnordered;
2034 linked_shader->Program->info.fs.advanced_blend_modes |= shader->BlendSupport;
2035 }
2036
2037 linked_shader->Program->info.fs.pixel_center_integer = pixel_center_integer;
2038 linked_shader->Program->info.fs.origin_upper_left = origin_upper_left;
2039 }
2040
2041 /**
2042 * Performs the cross-validation of geometry shader max_vertices and
2043 * primitive type layout qualifiers for the attached geometry shaders,
2044 * and propagates them to the linked GS and linked shader program.
2045 */
2046 static void
link_gs_inout_layout_qualifiers(struct gl_shader_program * prog,struct gl_program * gl_prog,struct gl_shader ** shader_list,unsigned num_shaders)2047 link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
2048 struct gl_program *gl_prog,
2049 struct gl_shader **shader_list,
2050 unsigned num_shaders)
2051 {
2052 /* No in/out qualifiers defined for anything but GLSL 1.50+
2053 * geometry shaders so far.
2054 */
2055 if (gl_prog->info.stage != MESA_SHADER_GEOMETRY ||
2056 prog->data->Version < 150)
2057 return;
2058
2059 int vertices_out = -1;
2060
2061 gl_prog->info.gs.invocations = 0;
2062 gl_prog->info.gs.input_primitive = SHADER_PRIM_UNKNOWN;
2063 gl_prog->info.gs.output_primitive = SHADER_PRIM_UNKNOWN;
2064
2065 /* From the GLSL 1.50 spec, page 46:
2066 *
2067 * "All geometry shader output layout declarations in a program
2068 * must declare the same layout and same value for
2069 * max_vertices. There must be at least one geometry output
2070 * layout declaration somewhere in a program, but not all
2071 * geometry shaders (compilation units) are required to
2072 * declare it."
2073 */
2074
2075 for (unsigned i = 0; i < num_shaders; i++) {
2076 struct gl_shader *shader = shader_list[i];
2077
2078 if (shader->info.Geom.InputType != SHADER_PRIM_UNKNOWN) {
2079 if (gl_prog->info.gs.input_primitive != SHADER_PRIM_UNKNOWN &&
2080 gl_prog->info.gs.input_primitive !=
2081 shader->info.Geom.InputType) {
2082 linker_error(prog, "geometry shader defined with conflicting "
2083 "input types\n");
2084 return;
2085 }
2086 gl_prog->info.gs.input_primitive = (enum shader_prim)shader->info.Geom.InputType;
2087 }
2088
2089 if (shader->info.Geom.OutputType != SHADER_PRIM_UNKNOWN) {
2090 if (gl_prog->info.gs.output_primitive != SHADER_PRIM_UNKNOWN &&
2091 gl_prog->info.gs.output_primitive !=
2092 shader->info.Geom.OutputType) {
2093 linker_error(prog, "geometry shader defined with conflicting "
2094 "output types\n");
2095 return;
2096 }
2097 gl_prog->info.gs.output_primitive = (enum shader_prim)shader->info.Geom.OutputType;
2098 }
2099
2100 if (shader->info.Geom.VerticesOut != -1) {
2101 if (vertices_out != -1 &&
2102 vertices_out != shader->info.Geom.VerticesOut) {
2103 linker_error(prog, "geometry shader defined with conflicting "
2104 "output vertex count (%d and %d)\n",
2105 vertices_out, shader->info.Geom.VerticesOut);
2106 return;
2107 }
2108 vertices_out = shader->info.Geom.VerticesOut;
2109 }
2110
2111 if (shader->info.Geom.Invocations != 0) {
2112 if (gl_prog->info.gs.invocations != 0 &&
2113 gl_prog->info.gs.invocations !=
2114 (unsigned) shader->info.Geom.Invocations) {
2115 linker_error(prog, "geometry shader defined with conflicting "
2116 "invocation count (%d and %d)\n",
2117 gl_prog->info.gs.invocations,
2118 shader->info.Geom.Invocations);
2119 return;
2120 }
2121 gl_prog->info.gs.invocations = shader->info.Geom.Invocations;
2122 }
2123 }
2124
2125 /* Just do the intrastage -> interstage propagation right now,
2126 * since we already know we're in the right type of shader program
2127 * for doing it.
2128 */
2129 if (gl_prog->info.gs.input_primitive == SHADER_PRIM_UNKNOWN) {
2130 linker_error(prog,
2131 "geometry shader didn't declare primitive input type\n");
2132 return;
2133 }
2134
2135 if (gl_prog->info.gs.output_primitive == SHADER_PRIM_UNKNOWN) {
2136 linker_error(prog,
2137 "geometry shader didn't declare primitive output type\n");
2138 return;
2139 }
2140
2141 if (vertices_out == -1) {
2142 linker_error(prog,
2143 "geometry shader didn't declare max_vertices\n");
2144 return;
2145 } else {
2146 gl_prog->info.gs.vertices_out = vertices_out;
2147 }
2148
2149 if (gl_prog->info.gs.invocations == 0)
2150 gl_prog->info.gs.invocations = 1;
2151 }
2152
2153
2154 /**
2155 * Perform cross-validation of compute shader local_size_{x,y,z} layout and
2156 * derivative arrangement qualifiers for the attached compute shaders, and
2157 * propagate them to the linked CS and linked shader program.
2158 */
2159 static void
link_cs_input_layout_qualifiers(struct gl_shader_program * prog,struct gl_program * gl_prog,struct gl_shader ** shader_list,unsigned num_shaders)2160 link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
2161 struct gl_program *gl_prog,
2162 struct gl_shader **shader_list,
2163 unsigned num_shaders)
2164 {
2165 /* This function is called for all shader stages, but it only has an effect
2166 * for compute shaders.
2167 */
2168 if (gl_prog->info.stage != MESA_SHADER_COMPUTE)
2169 return;
2170
2171 for (int i = 0; i < 3; i++)
2172 gl_prog->info.workgroup_size[i] = 0;
2173
2174 gl_prog->info.workgroup_size_variable = false;
2175
2176 gl_prog->info.cs.derivative_group = DERIVATIVE_GROUP_NONE;
2177
2178 /* From the ARB_compute_shader spec, in the section describing local size
2179 * declarations:
2180 *
2181 * If multiple compute shaders attached to a single program object
2182 * declare local work-group size, the declarations must be identical;
2183 * otherwise a link-time error results. Furthermore, if a program
2184 * object contains any compute shaders, at least one must contain an
2185 * input layout qualifier specifying the local work sizes of the
2186 * program, or a link-time error will occur.
2187 */
2188 for (unsigned sh = 0; sh < num_shaders; sh++) {
2189 struct gl_shader *shader = shader_list[sh];
2190
2191 if (shader->info.Comp.LocalSize[0] != 0) {
2192 if (gl_prog->info.workgroup_size[0] != 0) {
2193 for (int i = 0; i < 3; i++) {
2194 if (gl_prog->info.workgroup_size[i] !=
2195 shader->info.Comp.LocalSize[i]) {
2196 linker_error(prog, "compute shader defined with conflicting "
2197 "local sizes\n");
2198 return;
2199 }
2200 }
2201 }
2202 for (int i = 0; i < 3; i++) {
2203 gl_prog->info.workgroup_size[i] =
2204 shader->info.Comp.LocalSize[i];
2205 }
2206 } else if (shader->info.Comp.LocalSizeVariable) {
2207 if (gl_prog->info.workgroup_size[0] != 0) {
2208 /* The ARB_compute_variable_group_size spec says:
2209 *
2210 * If one compute shader attached to a program declares a
2211 * variable local group size and a second compute shader
2212 * attached to the same program declares a fixed local group
2213 * size, a link-time error results.
2214 */
2215 linker_error(prog, "compute shader defined with both fixed and "
2216 "variable local group size\n");
2217 return;
2218 }
2219 gl_prog->info.workgroup_size_variable = true;
2220 }
2221
2222 enum gl_derivative_group group = shader->info.Comp.DerivativeGroup;
2223 if (group != DERIVATIVE_GROUP_NONE) {
2224 if (gl_prog->info.cs.derivative_group != DERIVATIVE_GROUP_NONE &&
2225 gl_prog->info.cs.derivative_group != group) {
2226 linker_error(prog, "compute shader defined with conflicting "
2227 "derivative groups\n");
2228 return;
2229 }
2230 gl_prog->info.cs.derivative_group = group;
2231 }
2232 }
2233
2234 /* Just do the intrastage -> interstage propagation right now,
2235 * since we already know we're in the right type of shader program
2236 * for doing it.
2237 */
2238 if (gl_prog->info.workgroup_size[0] == 0 &&
2239 !gl_prog->info.workgroup_size_variable) {
2240 linker_error(prog, "compute shader must contain a fixed or a variable "
2241 "local group size\n");
2242 return;
2243 }
2244
2245 if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
2246 if (gl_prog->info.workgroup_size[0] % 2 != 0) {
2247 linker_error(prog, "derivative_group_quadsNV must be used with a "
2248 "local group size whose first dimension "
2249 "is a multiple of 2\n");
2250 return;
2251 }
2252 if (gl_prog->info.workgroup_size[1] % 2 != 0) {
2253 linker_error(prog, "derivative_group_quadsNV must be used with a local"
2254 "group size whose second dimension "
2255 "is a multiple of 2\n");
2256 return;
2257 }
2258 } else if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
2259 if ((gl_prog->info.workgroup_size[0] *
2260 gl_prog->info.workgroup_size[1] *
2261 gl_prog->info.workgroup_size[2]) % 4 != 0) {
2262 linker_error(prog, "derivative_group_linearNV must be used with a "
2263 "local group size whose total number of invocations "
2264 "is a multiple of 4\n");
2265 return;
2266 }
2267 }
2268 }
2269
2270 /**
2271 * Link all out variables on a single stage which are not
2272 * directly used in a shader with the main function.
2273 */
2274 static void
link_output_variables(struct gl_linked_shader * linked_shader,struct gl_shader ** shader_list,unsigned num_shaders)2275 link_output_variables(struct gl_linked_shader *linked_shader,
2276 struct gl_shader **shader_list,
2277 unsigned num_shaders)
2278 {
2279 struct glsl_symbol_table *symbols = linked_shader->symbols;
2280
2281 for (unsigned i = 0; i < num_shaders; i++) {
2282
2283 /* Skip shader object with main function */
2284 if (shader_list[i]->symbols->get_function("main"))
2285 continue;
2286
2287 foreach_in_list(ir_instruction, ir, shader_list[i]->ir) {
2288 if (ir->ir_type != ir_type_variable)
2289 continue;
2290
2291 ir_variable *var = (ir_variable *) ir;
2292
2293 if (var->data.mode == ir_var_shader_out &&
2294 !symbols->get_variable(var->name)) {
2295 var = var->clone(linked_shader, NULL);
2296 symbols->add_variable(var);
2297 linked_shader->ir->push_head(var);
2298 }
2299 }
2300 }
2301
2302 return;
2303 }
2304
2305
2306 /**
2307 * Combine a group of shaders for a single stage to generate a linked shader
2308 *
2309 * \note
2310 * If this function is supplied a single shader, it is cloned, and the new
2311 * shader is returned.
2312 */
2313 struct gl_linked_shader *
link_intrastage_shaders(void * mem_ctx,struct gl_context * ctx,struct gl_shader_program * prog,struct gl_shader ** shader_list,unsigned num_shaders,bool allow_missing_main)2314 link_intrastage_shaders(void *mem_ctx,
2315 struct gl_context *ctx,
2316 struct gl_shader_program *prog,
2317 struct gl_shader **shader_list,
2318 unsigned num_shaders,
2319 bool allow_missing_main)
2320 {
2321 struct gl_uniform_block *ubo_blocks = NULL;
2322 struct gl_uniform_block *ssbo_blocks = NULL;
2323 unsigned num_ubo_blocks = 0;
2324 unsigned num_ssbo_blocks = 0;
2325
2326 /* Check that global variables defined in multiple shaders are consistent.
2327 */
2328 glsl_symbol_table variables;
2329 for (unsigned i = 0; i < num_shaders; i++) {
2330 if (shader_list[i] == NULL)
2331 continue;
2332 cross_validate_globals(&ctx->Const, prog, shader_list[i]->ir, &variables,
2333 false);
2334 }
2335
2336 if (!prog->data->LinkStatus)
2337 return NULL;
2338
2339 /* Check that interface blocks defined in multiple shaders are consistent.
2340 */
2341 validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
2342 num_shaders);
2343 if (!prog->data->LinkStatus)
2344 return NULL;
2345
2346 /* Check that there is only a single definition of each function signature
2347 * across all shaders.
2348 */
2349 for (unsigned i = 0; i < (num_shaders - 1); i++) {
2350 foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
2351 ir_function *const f = node->as_function();
2352
2353 if (f == NULL)
2354 continue;
2355
2356 for (unsigned j = i + 1; j < num_shaders; j++) {
2357 ir_function *const other =
2358 shader_list[j]->symbols->get_function(f->name);
2359
2360 /* If the other shader has no function (and therefore no function
2361 * signatures) with the same name, skip to the next shader.
2362 */
2363 if (other == NULL)
2364 continue;
2365
2366 foreach_in_list(ir_function_signature, sig, &f->signatures) {
2367 if (!sig->is_defined)
2368 continue;
2369
2370 ir_function_signature *other_sig =
2371 other->exact_matching_signature(NULL, &sig->parameters);
2372
2373 if (other_sig != NULL && other_sig->is_defined) {
2374 linker_error(prog, "function `%s' is multiply defined\n",
2375 f->name);
2376 return NULL;
2377 }
2378 }
2379 }
2380 }
2381 }
2382
2383 /* Find the shader that defines main, and make a clone of it.
2384 *
2385 * Starting with the clone, search for undefined references. If one is
2386 * found, find the shader that defines it. Clone the reference and add
2387 * it to the shader. Repeat until there are no undefined references or
2388 * until a reference cannot be resolved.
2389 */
2390 gl_shader *main = NULL;
2391 for (unsigned i = 0; i < num_shaders; i++) {
2392 if (_mesa_get_main_function_signature(shader_list[i]->symbols)) {
2393 main = shader_list[i];
2394 break;
2395 }
2396 }
2397
2398 if (main == NULL && allow_missing_main)
2399 main = shader_list[0];
2400
2401 if (main == NULL) {
2402 linker_error(prog, "%s shader lacks `main'\n",
2403 _mesa_shader_stage_to_string(shader_list[0]->Stage));
2404 return NULL;
2405 }
2406
2407 gl_linked_shader *linked = rzalloc(NULL, struct gl_linked_shader);
2408 linked->Stage = shader_list[0]->Stage;
2409
2410 /* Create program and attach it to the linked shader */
2411 struct gl_program *gl_prog =
2412 ctx->Driver.NewProgram(ctx, shader_list[0]->Stage, prog->Name, false);
2413 if (!gl_prog) {
2414 prog->data->LinkStatus = LINKING_FAILURE;
2415 _mesa_delete_linked_shader(ctx, linked);
2416 return NULL;
2417 }
2418
2419 _mesa_reference_shader_program_data(&gl_prog->sh.data, prog->data);
2420
2421 /* Don't use _mesa_reference_program() just take ownership */
2422 linked->Program = gl_prog;
2423
2424 linked->ir = new(linked) exec_list;
2425 clone_ir_list(mem_ctx, linked->ir, main->ir);
2426
2427 link_fs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
2428 link_tcs_out_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2429 link_tes_in_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2430 link_gs_inout_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2431 link_cs_input_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
2432
2433 if (linked->Stage != MESA_SHADER_FRAGMENT)
2434 link_xfb_stride_layout_qualifiers(&ctx->Const, prog, shader_list, num_shaders);
2435
2436 link_bindless_layout_qualifiers(prog, shader_list, num_shaders);
2437
2438 link_layer_viewport_relative_qualifier(prog, gl_prog, shader_list, num_shaders);
2439
2440 populate_symbol_table(linked, shader_list[0]->symbols);
2441
2442 /* The pointer to the main function in the final linked shader (i.e., the
2443 * copy of the original shader that contained the main function).
2444 */
2445 ir_function_signature *const main_sig =
2446 _mesa_get_main_function_signature(linked->symbols);
2447
2448 /* Move any instructions other than variable declarations or function
2449 * declarations into main.
2450 */
2451 if (main_sig != NULL) {
2452 exec_node *insertion_point =
2453 move_non_declarations(linked->ir, &main_sig->body.head_sentinel, false,
2454 linked);
2455
2456 for (unsigned i = 0; i < num_shaders; i++) {
2457 if (shader_list[i] == main)
2458 continue;
2459
2460 insertion_point = move_non_declarations(shader_list[i]->ir,
2461 insertion_point, true, linked);
2462 }
2463 }
2464
2465 if (!link_function_calls(prog, linked, shader_list, num_shaders)) {
2466 _mesa_delete_linked_shader(ctx, linked);
2467 return NULL;
2468 }
2469
2470 if (linked->Stage != MESA_SHADER_FRAGMENT)
2471 link_output_variables(linked, shader_list, num_shaders);
2472
2473 /* Make a pass over all variable declarations to ensure that arrays with
2474 * unspecified sizes have a size specified. The size is inferred from the
2475 * max_array_access field.
2476 */
2477 array_sizing_visitor v;
2478 v.run(linked->ir);
2479 v.fixup_unnamed_interface_types();
2480
2481 /* Now that we know the sizes of all the arrays, we can replace .length()
2482 * calls with a constant expression.
2483 */
2484 array_length_to_const_visitor len_v;
2485 len_v.run(linked->ir);
2486
2487 /* Link up uniform blocks defined within this stage. */
2488 link_uniform_blocks(mem_ctx, &ctx->Const, prog, linked, &ubo_blocks,
2489 &num_ubo_blocks, &ssbo_blocks, &num_ssbo_blocks);
2490
2491 const unsigned max_uniform_blocks =
2492 ctx->Const.Program[linked->Stage].MaxUniformBlocks;
2493 if (num_ubo_blocks > max_uniform_blocks) {
2494 linker_error(prog, "Too many %s uniform blocks (%d/%d)\n",
2495 _mesa_shader_stage_to_string(linked->Stage),
2496 num_ubo_blocks, max_uniform_blocks);
2497 }
2498
2499 const unsigned max_shader_storage_blocks =
2500 ctx->Const.Program[linked->Stage].MaxShaderStorageBlocks;
2501 if (num_ssbo_blocks > max_shader_storage_blocks) {
2502 linker_error(prog, "Too many %s shader storage blocks (%d/%d)\n",
2503 _mesa_shader_stage_to_string(linked->Stage),
2504 num_ssbo_blocks, max_shader_storage_blocks);
2505 }
2506
2507 if (!prog->data->LinkStatus) {
2508 _mesa_delete_linked_shader(ctx, linked);
2509 return NULL;
2510 }
2511
2512 /* Copy ubo blocks to linked shader list */
2513 linked->Program->sh.UniformBlocks =
2514 ralloc_array(linked, gl_uniform_block *, num_ubo_blocks);
2515 ralloc_steal(linked, ubo_blocks);
2516 for (unsigned i = 0; i < num_ubo_blocks; i++) {
2517 linked->Program->sh.UniformBlocks[i] = &ubo_blocks[i];
2518 }
2519 linked->Program->sh.NumUniformBlocks = num_ubo_blocks;
2520 linked->Program->info.num_ubos = num_ubo_blocks;
2521
2522 /* Copy ssbo blocks to linked shader list */
2523 linked->Program->sh.ShaderStorageBlocks =
2524 ralloc_array(linked, gl_uniform_block *, num_ssbo_blocks);
2525 ralloc_steal(linked, ssbo_blocks);
2526 for (unsigned i = 0; i < num_ssbo_blocks; i++) {
2527 linked->Program->sh.ShaderStorageBlocks[i] = &ssbo_blocks[i];
2528 }
2529 linked->Program->info.num_ssbos = num_ssbo_blocks;
2530
2531 /* At this point linked should contain all of the linked IR, so
2532 * validate it to make sure nothing went wrong.
2533 */
2534 validate_ir_tree(linked->ir);
2535
2536 /* Set the size of geometry shader input arrays */
2537 if (linked->Stage == MESA_SHADER_GEOMETRY) {
2538 unsigned num_vertices =
2539 vertices_per_prim(gl_prog->info.gs.input_primitive);
2540 array_resize_visitor input_resize_visitor(num_vertices, prog,
2541 MESA_SHADER_GEOMETRY);
2542 foreach_in_list(ir_instruction, ir, linked->ir) {
2543 ir->accept(&input_resize_visitor);
2544 }
2545 }
2546
2547 if (ctx->Const.VertexID_is_zero_based)
2548 lower_vertex_id(linked);
2549
2550 if (ctx->Const.LowerCsDerivedVariables)
2551 lower_cs_derived(linked);
2552
2553 /* Set the linked source SHA1. */
2554 if (num_shaders == 1) {
2555 memcpy(linked->linked_source_sha1, shader_list[0]->compiled_source_sha1,
2556 SHA1_DIGEST_LENGTH);
2557 } else {
2558 struct mesa_sha1 sha1_ctx;
2559 _mesa_sha1_init(&sha1_ctx);
2560
2561 for (unsigned i = 0; i < num_shaders; i++) {
2562 if (shader_list[i] == NULL)
2563 continue;
2564
2565 _mesa_sha1_update(&sha1_ctx, shader_list[i]->compiled_source_sha1,
2566 SHA1_DIGEST_LENGTH);
2567 }
2568 _mesa_sha1_final(&sha1_ctx, linked->linked_source_sha1);
2569 }
2570
2571 return linked;
2572 }
2573
2574 /**
2575 * Resize tessellation evaluation per-vertex inputs to the size of
2576 * tessellation control per-vertex outputs.
2577 */
2578 static void
resize_tes_inputs(const struct gl_constants * consts,struct gl_shader_program * prog)2579 resize_tes_inputs(const struct gl_constants *consts,
2580 struct gl_shader_program *prog)
2581 {
2582 if (prog->_LinkedShaders[MESA_SHADER_TESS_EVAL] == NULL)
2583 return;
2584
2585 gl_linked_shader *const tcs = prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
2586 gl_linked_shader *const tes = prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
2587
2588 /* If no control shader is present, then the TES inputs are statically
2589 * sized to MaxPatchVertices; the actual size of the arrays won't be
2590 * known until draw time.
2591 */
2592 const int num_vertices = tcs
2593 ? tcs->Program->info.tess.tcs_vertices_out
2594 : consts->MaxPatchVertices;
2595
2596 array_resize_visitor input_resize_visitor(num_vertices, prog,
2597 MESA_SHADER_TESS_EVAL);
2598 foreach_in_list(ir_instruction, ir, tes->ir) {
2599 ir->accept(&input_resize_visitor);
2600 }
2601
2602 if (tcs) {
2603 /* Convert the gl_PatchVerticesIn system value into a constant, since
2604 * the value is known at this point.
2605 */
2606 foreach_in_list(ir_instruction, ir, tes->ir) {
2607 ir_variable *var = ir->as_variable();
2608 if (var && var->data.mode == ir_var_system_value &&
2609 var->data.location == SYSTEM_VALUE_VERTICES_IN) {
2610 void *mem_ctx = ralloc_parent(var);
2611 var->data.location = 0;
2612 var->data.explicit_location = false;
2613 var->data.mode = ir_var_auto;
2614 var->constant_value = new(mem_ctx) ir_constant(num_vertices);
2615 }
2616 }
2617 }
2618 }
2619
2620 /**
2621 * Find a contiguous set of available bits in a bitmask.
2622 *
2623 * \param used_mask Bits representing used (1) and unused (0) locations
2624 * \param needed_count Number of contiguous bits needed.
2625 *
2626 * \return
2627 * Base location of the available bits on success or -1 on failure.
2628 */
2629 static int
find_available_slots(unsigned used_mask,unsigned needed_count)2630 find_available_slots(unsigned used_mask, unsigned needed_count)
2631 {
2632 unsigned needed_mask = (1 << needed_count) - 1;
2633 const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
2634
2635 /* The comparison to 32 is redundant, but without it GCC emits "warning:
2636 * cannot optimize possibly infinite loops" for the loop below.
2637 */
2638 if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
2639 return -1;
2640
2641 for (int i = 0; i <= max_bit_to_test; i++) {
2642 if ((needed_mask & ~used_mask) == needed_mask)
2643 return i;
2644
2645 needed_mask <<= 1;
2646 }
2647
2648 return -1;
2649 }
2650
2651
2652 #define SAFE_MASK_FROM_INDEX(i) (((i) >= 32) ? ~0 : ((1 << (i)) - 1))
2653
2654 /**
2655 * Assign locations for either VS inputs or FS outputs.
2656 *
2657 * \param mem_ctx Temporary ralloc context used for linking.
2658 * \param prog Shader program whose variables need locations
2659 * assigned.
2660 * \param constants Driver specific constant values for the program.
2661 * \param target_index Selector for the program target to receive location
2662 * assignmnets. Must be either \c MESA_SHADER_VERTEX or
2663 * \c MESA_SHADER_FRAGMENT.
2664 * \param do_assignment Whether we are actually marking the assignment or we
2665 * are just doing a dry-run checking.
2666 *
2667 * \return
2668 * If locations are (or can be, in case of dry-running) successfully assigned,
2669 * true is returned. Otherwise an error is emitted to the shader link log and
2670 * false is returned.
2671 */
2672 static bool
assign_attribute_or_color_locations(void * mem_ctx,gl_shader_program * prog,const struct gl_constants * constants,unsigned target_index,bool do_assignment)2673 assign_attribute_or_color_locations(void *mem_ctx,
2674 gl_shader_program *prog,
2675 const struct gl_constants *constants,
2676 unsigned target_index,
2677 bool do_assignment)
2678 {
2679 /* Maximum number of generic locations. This corresponds to either the
2680 * maximum number of draw buffers or the maximum number of generic
2681 * attributes.
2682 */
2683 unsigned max_index = (target_index == MESA_SHADER_VERTEX) ?
2684 constants->Program[target_index].MaxAttribs :
2685 MAX2(constants->MaxDrawBuffers, constants->MaxDualSourceDrawBuffers);
2686
2687 /* Mark invalid locations as being used.
2688 */
2689 unsigned used_locations = ~SAFE_MASK_FROM_INDEX(max_index);
2690 unsigned double_storage_locations = 0;
2691
2692 assert((target_index == MESA_SHADER_VERTEX)
2693 || (target_index == MESA_SHADER_FRAGMENT));
2694
2695 gl_linked_shader *const sh = prog->_LinkedShaders[target_index];
2696 if (sh == NULL)
2697 return true;
2698
2699 /* Operate in a total of four passes.
2700 *
2701 * 1. Invalidate the location assignments for all vertex shader inputs.
2702 *
2703 * 2. Assign locations for inputs that have user-defined (via
2704 * glBindVertexAttribLocation) locations and outputs that have
2705 * user-defined locations (via glBindFragDataLocation).
2706 *
2707 * 3. Sort the attributes without assigned locations by number of slots
2708 * required in decreasing order. Fragmentation caused by attribute
2709 * locations assigned by the application may prevent large attributes
2710 * from having enough contiguous space.
2711 *
2712 * 4. Assign locations to any inputs without assigned locations.
2713 */
2714
2715 const int generic_base = (target_index == MESA_SHADER_VERTEX)
2716 ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;
2717
2718 const enum ir_variable_mode direction =
2719 (target_index == MESA_SHADER_VERTEX)
2720 ? ir_var_shader_in : ir_var_shader_out;
2721
2722
2723 /* Temporary storage for the set of attributes that need locations assigned.
2724 */
2725 struct temp_attr {
2726 unsigned slots;
2727 ir_variable *var;
2728
2729 /* Used below in the call to qsort. */
2730 static int compare(const void *a, const void *b)
2731 {
2732 const temp_attr *const l = (const temp_attr *) a;
2733 const temp_attr *const r = (const temp_attr *) b;
2734
2735 /* Reversed because we want a descending order sort below. */
2736 return r->slots - l->slots;
2737 }
2738 } to_assign[32];
2739 assert(max_index <= 32);
2740
2741 /* Temporary array for the set of attributes that have locations assigned,
2742 * for the purpose of checking overlapping slots/components of (non-ES)
2743 * fragment shader outputs.
2744 */
2745 ir_variable *assigned[12 * 4]; /* (max # of FS outputs) * # components */
2746 unsigned assigned_attr = 0;
2747
2748 unsigned num_attr = 0;
2749
2750 foreach_in_list(ir_instruction, node, sh->ir) {
2751 ir_variable *const var = node->as_variable();
2752
2753 if ((var == NULL) || (var->data.mode != (unsigned) direction))
2754 continue;
2755
2756 if (var->data.explicit_location) {
2757 if ((var->data.location >= (int)(max_index + generic_base))
2758 || (var->data.location < 0)) {
2759 linker_error(prog,
2760 "invalid explicit location %d specified for `%s'\n",
2761 (var->data.location < 0)
2762 ? var->data.location
2763 : var->data.location - generic_base,
2764 var->name);
2765 return false;
2766 }
2767 } else if (target_index == MESA_SHADER_VERTEX) {
2768 unsigned binding;
2769
2770 if (prog->AttributeBindings->get(binding, var->name)) {
2771 assert(binding >= VERT_ATTRIB_GENERIC0);
2772 var->data.location = binding;
2773 }
2774 } else if (target_index == MESA_SHADER_FRAGMENT) {
2775 unsigned binding;
2776 unsigned index;
2777 const char *name = var->name;
2778 const glsl_type *type = var->type;
2779
2780 while (type) {
2781 /* Check if there's a binding for the variable name */
2782 if (prog->FragDataBindings->get(binding, name)) {
2783 assert(binding >= FRAG_RESULT_DATA0);
2784 var->data.location = binding;
2785
2786 if (prog->FragDataIndexBindings->get(index, name)) {
2787 var->data.index = index;
2788 }
2789 break;
2790 }
2791
2792 /* If not, but it's an array type, look for name[0] */
2793 if (type->is_array()) {
2794 name = ralloc_asprintf(mem_ctx, "%s[0]", name);
2795 type = type->fields.array;
2796 continue;
2797 }
2798
2799 break;
2800 }
2801 }
2802
2803 if (strcmp(var->name, "gl_LastFragData") == 0)
2804 continue;
2805
2806 /* From GL4.5 core spec, section 15.2 (Shader Execution):
2807 *
2808 * "Output binding assignments will cause LinkProgram to fail:
2809 * ...
2810 * If the program has an active output assigned to a location greater
2811 * than or equal to the value of MAX_DUAL_SOURCE_DRAW_BUFFERS and has
2812 * an active output assigned an index greater than or equal to one;"
2813 */
2814 if (target_index == MESA_SHADER_FRAGMENT && var->data.index >= 1 &&
2815 var->data.location - generic_base >=
2816 (int) constants->MaxDualSourceDrawBuffers) {
2817 linker_error(prog,
2818 "output location %d >= GL_MAX_DUAL_SOURCE_DRAW_BUFFERS "
2819 "with index %u for %s\n",
2820 var->data.location - generic_base, var->data.index,
2821 var->name);
2822 return false;
2823 }
2824
2825 const unsigned slots = var->type->count_attribute_slots(target_index == MESA_SHADER_VERTEX);
2826
2827 /* If the variable is not a built-in and has a location statically
2828 * assigned in the shader (presumably via a layout qualifier), make sure
2829 * that it doesn't collide with other assigned locations. Otherwise,
2830 * add it to the list of variables that need linker-assigned locations.
2831 */
2832 if (var->data.location != -1) {
2833 if (var->data.location >= generic_base && var->data.index < 1) {
2834 /* From page 61 of the OpenGL 4.0 spec:
2835 *
2836 * "LinkProgram will fail if the attribute bindings assigned
2837 * by BindAttribLocation do not leave not enough space to
2838 * assign a location for an active matrix attribute or an
2839 * active attribute array, both of which require multiple
2840 * contiguous generic attributes."
2841 *
2842 * I think above text prohibits the aliasing of explicit and
2843 * automatic assignments. But, aliasing is allowed in manual
2844 * assignments of attribute locations. See below comments for
2845 * the details.
2846 *
2847 * From OpenGL 4.0 spec, page 61:
2848 *
2849 * "It is possible for an application to bind more than one
2850 * attribute name to the same location. This is referred to as
2851 * aliasing. This will only work if only one of the aliased
2852 * attributes is active in the executable program, or if no
2853 * path through the shader consumes more than one attribute of
2854 * a set of attributes aliased to the same location. A link
2855 * error can occur if the linker determines that every path
2856 * through the shader consumes multiple aliased attributes,
2857 * but implementations are not required to generate an error
2858 * in this case."
2859 *
2860 * From GLSL 4.30 spec, page 54:
2861 *
2862 * "A program will fail to link if any two non-vertex shader
2863 * input variables are assigned to the same location. For
2864 * vertex shaders, multiple input variables may be assigned
2865 * to the same location using either layout qualifiers or via
2866 * the OpenGL API. However, such aliasing is intended only to
2867 * support vertex shaders where each execution path accesses
2868 * at most one input per each location. Implementations are
2869 * permitted, but not required, to generate link-time errors
2870 * if they detect that every path through the vertex shader
2871 * executable accesses multiple inputs assigned to any single
2872 * location. For all shader types, a program will fail to link
2873 * if explicit location assignments leave the linker unable
2874 * to find space for other variables without explicit
2875 * assignments."
2876 *
2877 * From OpenGL ES 3.0 spec, page 56:
2878 *
2879 * "Binding more than one attribute name to the same location
2880 * is referred to as aliasing, and is not permitted in OpenGL
2881 * ES Shading Language 3.00 vertex shaders. LinkProgram will
2882 * fail when this condition exists. However, aliasing is
2883 * possible in OpenGL ES Shading Language 1.00 vertex shaders.
2884 * This will only work if only one of the aliased attributes
2885 * is active in the executable program, or if no path through
2886 * the shader consumes more than one attribute of a set of
2887 * attributes aliased to the same location. A link error can
2888 * occur if the linker determines that every path through the
2889 * shader consumes multiple aliased attributes, but implemen-
2890 * tations are not required to generate an error in this case."
2891 *
2892 * After looking at above references from OpenGL, OpenGL ES and
2893 * GLSL specifications, we allow aliasing of vertex input variables
2894 * in: OpenGL 2.0 (and above) and OpenGL ES 2.0.
2895 *
2896 * NOTE: This is not required by the spec but its worth mentioning
2897 * here that we're not doing anything to make sure that no path
2898 * through the vertex shader executable accesses multiple inputs
2899 * assigned to any single location.
2900 */
2901
2902 /* Mask representing the contiguous slots that will be used by
2903 * this attribute.
2904 */
2905 const unsigned attr = var->data.location - generic_base;
2906 const unsigned use_mask = (1 << slots) - 1;
2907 const char *const string = (target_index == MESA_SHADER_VERTEX)
2908 ? "vertex shader input" : "fragment shader output";
2909
2910 /* Generate a link error if the requested locations for this
2911 * attribute exceed the maximum allowed attribute location.
2912 */
2913 if (attr + slots > max_index) {
2914 linker_error(prog,
2915 "insufficient contiguous locations "
2916 "available for %s `%s' %d %d %d\n", string,
2917 var->name, used_locations, use_mask, attr);
2918 return false;
2919 }
2920
2921 /* Generate a link error if the set of bits requested for this
2922 * attribute overlaps any previously allocated bits.
2923 */
2924 if ((~(use_mask << attr) & used_locations) != used_locations) {
2925 if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
2926 /* From section 4.4.2 (Output Layout Qualifiers) of the GLSL
2927 * 4.40 spec:
2928 *
2929 * "Additionally, for fragment shader outputs, if two
2930 * variables are placed within the same location, they
2931 * must have the same underlying type (floating-point or
2932 * integer). No component aliasing of output variables or
2933 * members is allowed.
2934 */
2935 for (unsigned i = 0; i < assigned_attr; i++) {
2936 unsigned assigned_slots =
2937 assigned[i]->type->count_attribute_slots(false);
2938 unsigned assig_attr =
2939 assigned[i]->data.location - generic_base;
2940 unsigned assigned_use_mask = (1 << assigned_slots) - 1;
2941
2942 if ((assigned_use_mask << assig_attr) &
2943 (use_mask << attr)) {
2944
2945 const glsl_type *assigned_type =
2946 assigned[i]->type->without_array();
2947 const glsl_type *type = var->type->without_array();
2948 if (assigned_type->base_type != type->base_type) {
2949 linker_error(prog, "types do not match for aliased"
2950 " %ss %s and %s\n", string,
2951 assigned[i]->name, var->name);
2952 return false;
2953 }
2954
2955 unsigned assigned_component_mask =
2956 ((1 << assigned_type->vector_elements) - 1) <<
2957 assigned[i]->data.location_frac;
2958 unsigned component_mask =
2959 ((1 << type->vector_elements) - 1) <<
2960 var->data.location_frac;
2961 if (assigned_component_mask & component_mask) {
2962 linker_error(prog, "overlapping component is "
2963 "assigned to %ss %s and %s "
2964 "(component=%d)\n",
2965 string, assigned[i]->name, var->name,
2966 var->data.location_frac);
2967 return false;
2968 }
2969 }
2970 }
2971 } else if (target_index == MESA_SHADER_FRAGMENT ||
2972 (prog->IsES && prog->data->Version >= 300)) {
2973 linker_error(prog, "overlapping location is assigned "
2974 "to %s `%s' %d %d %d\n", string, var->name,
2975 used_locations, use_mask, attr);
2976 return false;
2977 } else {
2978 linker_warning(prog, "overlapping location is assigned "
2979 "to %s `%s' %d %d %d\n", string, var->name,
2980 used_locations, use_mask, attr);
2981 }
2982 }
2983
2984 if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
2985 /* Only track assigned variables for non-ES fragment shaders
2986 * to avoid overflowing the array.
2987 *
2988 * At most one variable per fragment output component should
2989 * reach this.
2990 */
2991 assert(assigned_attr < ARRAY_SIZE(assigned));
2992 assigned[assigned_attr] = var;
2993 assigned_attr++;
2994 }
2995
2996 used_locations |= (use_mask << attr);
2997
2998 /* From the GL 4.5 core spec, section 11.1.1 (Vertex Attributes):
2999 *
3000 * "A program with more than the value of MAX_VERTEX_ATTRIBS
3001 * active attribute variables may fail to link, unless
3002 * device-dependent optimizations are able to make the program
3003 * fit within available hardware resources. For the purposes
3004 * of this test, attribute variables of the type dvec3, dvec4,
3005 * dmat2x3, dmat2x4, dmat3, dmat3x4, dmat4x3, and dmat4 may
3006 * count as consuming twice as many attributes as equivalent
3007 * single-precision types. While these types use the same number
3008 * of generic attributes as their single-precision equivalents,
3009 * implementations are permitted to consume two single-precision
3010 * vectors of internal storage for each three- or four-component
3011 * double-precision vector."
3012 *
3013 * Mark this attribute slot as taking up twice as much space
3014 * so we can count it properly against limits. According to
3015 * issue (3) of the GL_ARB_vertex_attrib_64bit behavior, this
3016 * is optional behavior, but it seems preferable.
3017 */
3018 if (var->type->without_array()->is_dual_slot())
3019 double_storage_locations |= (use_mask << attr);
3020 }
3021
3022 continue;
3023 }
3024
3025 if (num_attr >= max_index) {
3026 linker_error(prog, "too many %s (max %u)",
3027 target_index == MESA_SHADER_VERTEX ?
3028 "vertex shader inputs" : "fragment shader outputs",
3029 max_index);
3030 return false;
3031 }
3032 to_assign[num_attr].slots = slots;
3033 to_assign[num_attr].var = var;
3034 num_attr++;
3035 }
3036
3037 if (!do_assignment)
3038 return true;
3039
3040 if (target_index == MESA_SHADER_VERTEX) {
3041 unsigned total_attribs_size =
3042 util_bitcount(used_locations & SAFE_MASK_FROM_INDEX(max_index)) +
3043 util_bitcount(double_storage_locations);
3044 if (total_attribs_size > max_index) {
3045 linker_error(prog,
3046 "attempt to use %d vertex attribute slots only %d available ",
3047 total_attribs_size, max_index);
3048 return false;
3049 }
3050 }
3051
3052 /* If all of the attributes were assigned locations by the application (or
3053 * are built-in attributes with fixed locations), return early. This should
3054 * be the common case.
3055 */
3056 if (num_attr == 0)
3057 return true;
3058
3059 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
3060
3061 if (target_index == MESA_SHADER_VERTEX) {
3062 /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can
3063 * only be explicitly assigned by via glBindAttribLocation. Mark it as
3064 * reserved to prevent it from being automatically allocated below.
3065 */
3066 find_deref_visitor find("gl_Vertex");
3067 find.run(sh->ir);
3068 if (find.variable_found())
3069 used_locations |= (1 << 0);
3070 }
3071
3072 for (unsigned i = 0; i < num_attr; i++) {
3073 /* Mask representing the contiguous slots that will be used by this
3074 * attribute.
3075 */
3076 const unsigned use_mask = (1 << to_assign[i].slots) - 1;
3077
3078 int location = find_available_slots(used_locations, to_assign[i].slots);
3079
3080 if (location < 0) {
3081 const char *const string = (target_index == MESA_SHADER_VERTEX)
3082 ? "vertex shader input" : "fragment shader output";
3083
3084 linker_error(prog,
3085 "insufficient contiguous locations "
3086 "available for %s `%s'\n",
3087 string, to_assign[i].var->name);
3088 return false;
3089 }
3090
3091 to_assign[i].var->data.location = generic_base + location;
3092 used_locations |= (use_mask << location);
3093
3094 if (to_assign[i].var->type->without_array()->is_dual_slot())
3095 double_storage_locations |= (use_mask << location);
3096 }
3097
3098 /* Now that we have all the locations, from the GL 4.5 core spec, section
3099 * 11.1.1 (Vertex Attributes), dvec3, dvec4, dmat2x3, dmat2x4, dmat3,
3100 * dmat3x4, dmat4x3, and dmat4 count as consuming twice as many attributes
3101 * as equivalent single-precision types.
3102 */
3103 if (target_index == MESA_SHADER_VERTEX) {
3104 unsigned total_attribs_size =
3105 util_bitcount(used_locations & SAFE_MASK_FROM_INDEX(max_index)) +
3106 util_bitcount(double_storage_locations);
3107 if (total_attribs_size > max_index) {
3108 linker_error(prog,
3109 "attempt to use %d vertex attribute slots only %d available ",
3110 total_attribs_size, max_index);
3111 return false;
3112 }
3113 }
3114
3115 return true;
3116 }
3117
3118 /**
3119 * Store the gl_FragDepth layout in the gl_shader_program struct.
3120 */
3121 static void
store_fragdepth_layout(struct gl_shader_program * prog)3122 store_fragdepth_layout(struct gl_shader_program *prog)
3123 {
3124 if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
3125 return;
3126 }
3127
3128 struct exec_list *ir = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir;
3129
3130 /* We don't look up the gl_FragDepth symbol directly because if
3131 * gl_FragDepth is not used in the shader, it's removed from the IR.
3132 * However, the symbol won't be removed from the symbol table.
3133 *
3134 * We're only interested in the cases where the variable is NOT removed
3135 * from the IR.
3136 */
3137 foreach_in_list(ir_instruction, node, ir) {
3138 ir_variable *const var = node->as_variable();
3139
3140 if (var == NULL || var->data.mode != ir_var_shader_out) {
3141 continue;
3142 }
3143
3144 if (strcmp(var->name, "gl_FragDepth") == 0) {
3145 switch (var->data.depth_layout) {
3146 case ir_depth_layout_none:
3147 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
3148 return;
3149 case ir_depth_layout_any:
3150 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
3151 return;
3152 case ir_depth_layout_greater:
3153 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
3154 return;
3155 case ir_depth_layout_less:
3156 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
3157 return;
3158 case ir_depth_layout_unchanged:
3159 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
3160 return;
3161 default:
3162 assert(0);
3163 return;
3164 }
3165 }
3166 }
3167 }
3168
3169
3170 /**
3171 * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION
3172 * for a variable, checks for overlaps between other uniforms using explicit
3173 * locations.
3174 */
3175 static int
reserve_explicit_locations(struct gl_shader_program * prog,string_to_uint_map * map,ir_variable * var)3176 reserve_explicit_locations(struct gl_shader_program *prog,
3177 string_to_uint_map *map, ir_variable *var)
3178 {
3179 unsigned slots = var->type->uniform_locations();
3180 unsigned max_loc = var->data.location + slots - 1;
3181 unsigned return_value = slots;
3182
3183 /* Resize remap table if locations do not fit in the current one. */
3184 if (max_loc + 1 > prog->NumUniformRemapTable) {
3185 prog->UniformRemapTable =
3186 reralloc(prog, prog->UniformRemapTable,
3187 gl_uniform_storage *,
3188 max_loc + 1);
3189
3190 if (!prog->UniformRemapTable) {
3191 linker_error(prog, "Out of memory during linking.\n");
3192 return -1;
3193 }
3194
3195 /* Initialize allocated space. */
3196 for (unsigned i = prog->NumUniformRemapTable; i < max_loc + 1; i++)
3197 prog->UniformRemapTable[i] = NULL;
3198
3199 prog->NumUniformRemapTable = max_loc + 1;
3200 }
3201
3202 for (unsigned i = 0; i < slots; i++) {
3203 unsigned loc = var->data.location + i;
3204
3205 /* Check if location is already used. */
3206 if (prog->UniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
3207
3208 /* Possibly same uniform from a different stage, this is ok. */
3209 unsigned hash_loc;
3210 if (map->get(hash_loc, var->name) && hash_loc == loc - i) {
3211 return_value = 0;
3212 continue;
3213 }
3214
3215 /* ARB_explicit_uniform_location specification states:
3216 *
3217 * "No two default-block uniform variables in the program can have
3218 * the same location, even if they are unused, otherwise a compiler
3219 * or linker error will be generated."
3220 */
3221 linker_error(prog,
3222 "location qualifier for uniform %s overlaps "
3223 "previously used location\n",
3224 var->name);
3225 return -1;
3226 }
3227
3228 /* Initialize location as inactive before optimization
3229 * rounds and location assignment.
3230 */
3231 prog->UniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
3232 }
3233
3234    /* Note: for arrays, only the base location is stored in the map. */
3235 map->put(var->data.location, var->name);
3236
3237 return return_value;
3238 }
3239
3240 static bool
3241 reserve_subroutine_explicit_locations(struct gl_shader_program *prog,
3242 struct gl_program *p,
3243 ir_variable *var)
3244 {
3245 unsigned slots = var->type->uniform_locations();
3246 unsigned max_loc = var->data.location + slots - 1;
3247
3248 /* Resize remap table if locations do not fit in the current one. */
3249 if (max_loc + 1 > p->sh.NumSubroutineUniformRemapTable) {
3250 p->sh.SubroutineUniformRemapTable =
3251 reralloc(p, p->sh.SubroutineUniformRemapTable,
3252 gl_uniform_storage *,
3253 max_loc + 1);
3254
3255 if (!p->sh.SubroutineUniformRemapTable) {
3256 linker_error(prog, "Out of memory during linking.\n");
3257 return false;
3258 }
3259
3260 /* Initialize allocated space. */
3261 for (unsigned i = p->sh.NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
3262 p->sh.SubroutineUniformRemapTable[i] = NULL;
3263
3264 p->sh.NumSubroutineUniformRemapTable = max_loc + 1;
3265 }
3266
3267 for (unsigned i = 0; i < slots; i++) {
3268 unsigned loc = var->data.location + i;
3269
3270 /* Check if location is already used. */
3271 if (p->sh.SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
3272
3273 /* ARB_explicit_uniform_location specification states:
3274 * "No two subroutine uniform variables can have the same location
3275 * in the same shader stage, otherwise a compiler or linker error
3276 * will be generated."
3277 */
3278 linker_error(prog,
3279 "location qualifier for uniform %s overlaps "
3280 "previously used location\n",
3281 var->name);
3282 return false;
3283 }
3284
3285 /* Initialize location as inactive before optimization
3286 * rounds and location assignment.
3287 */
3288 p->sh.SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
3289 }
3290
3291 return true;
3292 }
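
/* For example (hypothetical GLSL), within a single stage:
 *
 *    layout(location = 0) subroutine uniform shade_func shade_a;
 *    layout(location = 0) subroutine uniform shade_func shade_b;  // link error
 */
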
3293 /**
3294  * Check and reserve all explicit uniform locations.  This is called before
3295  * any optimizations happen so that inactive uniforms and inactive array
3296  * elements that may later get trimmed away are handled as well.
3297 */
3298 static void
3299 check_explicit_uniform_locations(const struct gl_extensions *exts,
3300 struct gl_shader_program *prog)
3301 {
3302 prog->NumExplicitUniformLocations = 0;
3303
3304 if (!exts->ARB_explicit_uniform_location)
3305 return;
3306
3307    /* This map is used to detect whether overlapping explicit locations come
3308     * from the same uniform (used in a different stage) or from a different one.
3309 */
3310 string_to_uint_map *uniform_map = new string_to_uint_map;
3311
3312 if (!uniform_map) {
3313 linker_error(prog, "Out of memory during linking.\n");
3314 return;
3315 }
3316
3317 unsigned entries_total = 0;
3318 unsigned mask = prog->data->linked_stages;
3319 while (mask) {
3320 const int i = u_bit_scan(&mask);
3321 struct gl_program *p = prog->_LinkedShaders[i]->Program;
3322
3323 foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
3324 ir_variable *var = node->as_variable();
3325 if (!var || var->data.mode != ir_var_uniform)
3326 continue;
3327
3328 if (var->data.explicit_location) {
3329 bool ret = false;
3330 if (var->type->without_array()->is_subroutine())
3331 ret = reserve_subroutine_explicit_locations(prog, p, var);
3332 else {
3333 int slots = reserve_explicit_locations(prog, uniform_map,
3334 var);
3335 if (slots != -1) {
3336 ret = true;
3337 entries_total += slots;
3338 }
3339 }
3340 if (!ret) {
3341 delete uniform_map;
3342 return;
3343 }
3344 }
3345 }
3346 }
3347
3348 link_util_update_empty_uniform_locations(prog);
3349
3350 delete uniform_map;
3351 prog->NumExplicitUniformLocations = entries_total;
3352 }
3353
3354 static void
3355 link_assign_subroutine_types(struct gl_shader_program *prog)
3356 {
3357 unsigned mask = prog->data->linked_stages;
3358 while (mask) {
3359 const int i = u_bit_scan(&mask);
3360 gl_program *p = prog->_LinkedShaders[i]->Program;
3361
3362 p->sh.MaxSubroutineFunctionIndex = 0;
3363 foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
3364 ir_function *fn = node->as_function();
3365 if (!fn)
3366 continue;
3367
3368 if (fn->is_subroutine)
3369 p->sh.NumSubroutineUniformTypes++;
3370
3371 if (!fn->num_subroutine_types)
3372 continue;
3373
3374 /* these should have been calculated earlier. */
3375 assert(fn->subroutine_index != -1);
3376 if (p->sh.NumSubroutineFunctions + 1 > MAX_SUBROUTINES) {
3377 linker_error(prog, "Too many subroutine functions declared.\n");
3378 return;
3379 }
3380 p->sh.SubroutineFunctions = reralloc(p, p->sh.SubroutineFunctions,
3381 struct gl_subroutine_function,
3382 p->sh.NumSubroutineFunctions + 1);
3383 p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].name.string = ralloc_strdup(p, fn->name);
3384 resource_name_updated(&p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].name);
3385 p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
3386 p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types =
3387 ralloc_array(p, const struct glsl_type *,
3388 fn->num_subroutine_types);
3389
3390          /* From Section 4.4.4 (Subroutine Function Layout Qualifiers) of the
3391 * GLSL 4.5 spec:
3392 *
3393 * "Each subroutine with an index qualifier in the shader must be
3394 * given a unique index, otherwise a compile or link error will be
3395 * generated."
3396 */
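         /* e.g. (hypothetical GLSL) these two definitions may not share an
          * index:
          *
          *    layout(index = 2) subroutine(shade_func) vec4 shade_a() { ... }
          *    layout(index = 2) subroutine(shade_func) vec4 shade_b() { ... }
          */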
3397 for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
3398 if (p->sh.SubroutineFunctions[j].index != -1 &&
3399 p->sh.SubroutineFunctions[j].index == fn->subroutine_index) {
3400 linker_error(prog, "each subroutine index qualifier in the "
3401 "shader must be unique\n");
3402 return;
3403 }
3404 }
3405 p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].index =
3406 fn->subroutine_index;
3407
3408 if (fn->subroutine_index > (int)p->sh.MaxSubroutineFunctionIndex)
3409 p->sh.MaxSubroutineFunctionIndex = fn->subroutine_index;
3410
3411 for (int j = 0; j < fn->num_subroutine_types; j++)
3412 p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
3413 p->sh.NumSubroutineFunctions++;
3414 }
3415 }
3416 }
3417
3418 static void
3419 verify_subroutine_associated_funcs(struct gl_shader_program *prog)
3420 {
3421 unsigned mask = prog->data->linked_stages;
3422 while (mask) {
3423 const int i = u_bit_scan(&mask);
3424 gl_program *p = prog->_LinkedShaders[i]->Program;
3425 glsl_symbol_table *symbols = prog->_LinkedShaders[i]->symbols;
3426
3427 /* Section 6.1.2 (Subroutines) of the GLSL 4.00 spec says:
3428 *
3429 * "A program will fail to compile or link if any shader
3430 * or stage contains two or more functions with the same
3431 * name if the name is associated with a subroutine type."
3432 */
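      /* e.g. (hypothetical GLSL) a name associated with a subroutine type may
       * not be defined more than once in a stage, even as an overload:
       *
       *    subroutine(shade_func) vec4 shade() { ... }
       *    vec4 shade(float bias) { ... }   // second definition of `shade'
       */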
3433 for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
3434 unsigned definitions = 0;
3435 char *name = p->sh.SubroutineFunctions[j].name.string;
3436 ir_function *fn = symbols->get_function(name);
3437
3438 /* Calculate number of function definitions with the same name */
3439 foreach_in_list(ir_function_signature, sig, &fn->signatures) {
3440 if (sig->is_defined) {
3441 if (++definitions > 1) {
3442 linker_error(prog, "%s shader contains two or more function "
3443 "definitions with name `%s', which is "
3444 "associated with a subroutine type.\n",
3445 _mesa_shader_stage_to_string(i),
3446 fn->name);
3447 return;
3448 }
3449 }
3450 }
3451 }
3452 }
3453 }
3454
3455
3456 static void
3457 set_always_active_io(exec_list *ir, ir_variable_mode io_mode)
3458 {
3459 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
3460
3461 foreach_in_list(ir_instruction, node, ir) {
3462 ir_variable *const var = node->as_variable();
3463
3464 if (var == NULL || var->data.mode != io_mode)
3465 continue;
3466
3467 /* Don't set always active on builtins that haven't been redeclared */
3468 if (var->data.how_declared == ir_var_declared_implicitly)
3469 continue;
3470
3471 var->data.always_active_io = true;
3472 }
3473 }
3474
3475 /**
3476 * When separate shader programs are enabled, only input/outputs between
3477 * the stages of a multi-stage separate program can be safely removed
3478 * from the shader interface. Other inputs/outputs must remain active.
3479 */
3480 static void
3481 disable_varying_optimizations_for_sso(struct gl_shader_program *prog)
3482 {
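   /* For example, in a separable program that contains only a tessellation
    * control and a tessellation evaluation shader, the TCS inputs and the TES
    * outputs are marked always-active below, while the varyings between the
    * two stages can still be optimized away.
    */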
3483 unsigned first, last;
3484 assert(prog->SeparateShader);
3485
3486 first = MESA_SHADER_STAGES;
3487 last = 0;
3488
3489    /* Determine the first and last stage, excluding the compute stage. */
3490 for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
3491 if (!prog->_LinkedShaders[i])
3492 continue;
3493 if (first == MESA_SHADER_STAGES)
3494 first = i;
3495 last = i;
3496 }
3497
3498 if (first == MESA_SHADER_STAGES)
3499 return;
3500
3501 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
3502 gl_linked_shader *sh = prog->_LinkedShaders[stage];
3503 if (!sh)
3504 continue;
3505
3506 /* Prevent the removal of inputs to the first and outputs from the last
3507 * stage, unless they are the initial pipeline inputs or final pipeline
3508 * outputs, respectively.
3509 *
3510 * The removal of IO between shaders in the same program is always
3511 * allowed.
3512 */
3513 if (stage == first && stage != MESA_SHADER_VERTEX)
3514 set_always_active_io(sh->ir, ir_var_shader_in);
3515 if (stage == last && stage != MESA_SHADER_FRAGMENT)
3516 set_always_active_io(sh->ir, ir_var_shader_out);
3517 }
3518 }
3519
3520 static bool
3521 link_varyings(const struct gl_constants *consts, struct gl_shader_program *prog,
3522 void *mem_ctx)
3523 {
3524 /* Mark all generic shader inputs and outputs as unpaired. */
3525 for (unsigned i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
3526 if (prog->_LinkedShaders[i] != NULL) {
3527 link_invalidate_variable_locations(prog->_LinkedShaders[i]->ir);
3528 }
3529 }
3530
3531 if (!assign_attribute_or_color_locations(mem_ctx, prog, consts,
3532 MESA_SHADER_VERTEX, true)) {
3533 return false;
3534 }
3535
3536 if (!assign_attribute_or_color_locations(mem_ctx, prog, consts,
3537 MESA_SHADER_FRAGMENT, true)) {
3538 return false;
3539 }
3540
3541 prog->last_vert_prog = NULL;
3542 for (int i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
3543 if (prog->_LinkedShaders[i] == NULL)
3544 continue;
3545
3546 prog->last_vert_prog = prog->_LinkedShaders[i]->Program;
3547 break;
3548 }
3549
3550 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3551 if (prog->_LinkedShaders[i] == NULL)
3552 continue;
3553
3554 lower_vector_derefs(prog->_LinkedShaders[i]);
3555 do_vec_index_to_swizzle(prog->_LinkedShaders[i]->ir);
3556 }
3557
3558 return true;
3559 }
3560
3561 void
3562 link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
3563 {
3564 const struct gl_constants *consts = &ctx->Const;
3565 prog->data->LinkStatus = LINKING_SUCCESS; /* All error paths will set this to false */
3566 prog->data->Validated = false;
3567
3568 /* Section 7.3 (Program Objects) of the OpenGL 4.5 Core Profile spec says:
3569 *
3570 * "Linking can fail for a variety of reasons as specified in the
3571 * OpenGL Shading Language Specification, as well as any of the
3572 * following reasons:
3573 *
3574 * - No shader objects are attached to program."
3575 *
3576    * The Compatibility Profile specification does not list this error.  In
3577    * the Compatibility Profile, missing shader stages are replaced by
3578    * fixed-function processing, and that also covers the case where all
3579    * stages are missing.
3580 */
3581 if (prog->NumShaders == 0) {
3582 if (ctx->API != API_OPENGL_COMPAT)
3583 linker_error(prog, "no shaders attached to the program\n");
3584 return;
3585 }
3586
3587 #ifdef ENABLE_SHADER_CACHE
3588 if (shader_cache_read_program_metadata(ctx, prog))
3589 return;
3590 #endif
3591
3592 void *mem_ctx = ralloc_context(NULL); // temporary linker context
3593
3594 prog->ARB_fragment_coord_conventions_enable = false;
3595
3596 /* Separate the shaders into groups based on their type.
3597 */
3598 struct gl_shader **shader_list[MESA_SHADER_STAGES];
3599 unsigned num_shaders[MESA_SHADER_STAGES];
3600
3601 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
3602 shader_list[i] = (struct gl_shader **)
3603 calloc(prog->NumShaders, sizeof(struct gl_shader *));
3604 num_shaders[i] = 0;
3605 }
3606
3607 unsigned min_version = UINT_MAX;
3608 unsigned max_version = 0;
3609 for (unsigned i = 0; i < prog->NumShaders; i++) {
3610 min_version = MIN2(min_version, prog->Shaders[i]->Version);
3611 max_version = MAX2(max_version, prog->Shaders[i]->Version);
3612
3613 if (!consts->AllowGLSLRelaxedES &&
3614 prog->Shaders[i]->IsES != prog->Shaders[0]->IsES) {
3615 linker_error(prog, "all shaders must use same shading "
3616 "language version\n");
3617 goto done;
3618 }
3619
3620 if (prog->Shaders[i]->ARB_fragment_coord_conventions_enable) {
3621 prog->ARB_fragment_coord_conventions_enable = true;
3622 }
3623
3624 gl_shader_stage shader_type = prog->Shaders[i]->Stage;
3625 shader_list[shader_type][num_shaders[shader_type]] = prog->Shaders[i];
3626 num_shaders[shader_type]++;
3627 }
3628
3629 /* In desktop GLSL, different shader versions may be linked together. In
3630 * GLSL ES, all shader versions must be the same.
3631 */
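   /* For example, a #version 330 vertex shader may be linked with a
    * #version 150 fragment shader on desktop GL, whereas linking a
    * #version 300 es shader with a #version 310 es shader must fail
    * (unless AllowGLSLRelaxedES is set).
    */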
3632 if (!consts->AllowGLSLRelaxedES && prog->Shaders[0]->IsES &&
3633 min_version != max_version) {
3634 linker_error(prog, "all shaders must use same shading "
3635 "language version\n");
3636 goto done;
3637 }
3638
3639 prog->data->Version = max_version;
3640 prog->IsES = prog->Shaders[0]->IsES;
3641
3642 /* Some shaders have to be linked with some other shaders present.
3643 */
3644 if (!prog->SeparateShader) {
3645 if (num_shaders[MESA_SHADER_GEOMETRY] > 0 &&
3646 num_shaders[MESA_SHADER_VERTEX] == 0) {
3647 linker_error(prog, "Geometry shader must be linked with "
3648 "vertex shader\n");
3649 goto done;
3650 }
3651 if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
3652 num_shaders[MESA_SHADER_VERTEX] == 0) {
3653 linker_error(prog, "Tessellation evaluation shader must be linked "
3654 "with vertex shader\n");
3655 goto done;
3656 }
3657 if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
3658 num_shaders[MESA_SHADER_VERTEX] == 0) {
3659 linker_error(prog, "Tessellation control shader must be linked with "
3660 "vertex shader\n");
3661 goto done;
3662 }
3663
3664 /* Section 7.3 of the OpenGL ES 3.2 specification says:
3665 *
3666 * "Linking can fail for [...] any of the following reasons:
3667 *
3668 * * program contains an object to form a tessellation control
3669 * shader [...] and [...] the program is not separable and
3670 * contains no object to form a tessellation evaluation shader"
3671 *
3672 * The OpenGL spec is contradictory. It allows linking without a tess
3673 * eval shader, but that can only be used with transform feedback and
3674 * rasterization disabled. However, transform feedback isn't allowed
3675 * with GL_PATCHES, so it can't be used.
3676 *
3677 * More investigation showed that the idea of transform feedback after
3678 * a tess control shader was dropped, because some hw vendors couldn't
3679 * support tessellation without a tess eval shader, but the linker
3680 * section wasn't updated to reflect that.
3681 *
3682 * All specifications (ARB_tessellation_shader, GL 4.0-4.5) have this
3683 * spec bug.
3684 *
3685 * Do what's reasonable and always require a tess eval shader if a tess
3686 * control shader is present.
3687 */
3688 if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
3689 num_shaders[MESA_SHADER_TESS_EVAL] == 0) {
3690 linker_error(prog, "Tessellation control shader must be linked with "
3691 "tessellation evaluation shader\n");
3692 goto done;
3693 }
3694
3695 if (prog->IsES) {
3696 if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
3697 num_shaders[MESA_SHADER_TESS_CTRL] == 0) {
3698 linker_error(prog, "GLSL ES requires non-separable programs "
3699 "containing a tessellation evaluation shader to also "
3700 "be linked with a tessellation control shader\n");
3701 goto done;
3702 }
3703 }
3704 }
3705
3706 /* Compute shaders have additional restrictions. */
3707 if (num_shaders[MESA_SHADER_COMPUTE] > 0 &&
3708 num_shaders[MESA_SHADER_COMPUTE] != prog->NumShaders) {
3709 linker_error(prog, "Compute shaders may not be linked with any other "
3710 "type of shader\n");
3711 }
3712
3713 /* Link all shaders for a particular stage and validate the result.
3714 */
3715 for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
3716 if (num_shaders[stage] > 0) {
3717 gl_linked_shader *const sh =
3718 link_intrastage_shaders(mem_ctx, ctx, prog, shader_list[stage],
3719 num_shaders[stage], false);
3720
3721 if (!prog->data->LinkStatus) {
3722 if (sh)
3723 _mesa_delete_linked_shader(ctx, sh);
3724 goto done;
3725 }
3726
3727 switch (stage) {
3728 case MESA_SHADER_VERTEX:
3729 validate_vertex_shader_executable(prog, sh, consts);
3730 break;
3731 case MESA_SHADER_TESS_CTRL:
3732 /* nothing to be done */
3733 break;
3734 case MESA_SHADER_TESS_EVAL:
3735 validate_tess_eval_shader_executable(prog, sh, consts);
3736 break;
3737 case MESA_SHADER_GEOMETRY:
3738 validate_geometry_shader_executable(prog, sh, consts);
3739 break;
3740 case MESA_SHADER_FRAGMENT:
3741 validate_fragment_shader_executable(prog, sh);
3742 break;
3743 }
3744 if (!prog->data->LinkStatus) {
3745 if (sh)
3746 _mesa_delete_linked_shader(ctx, sh);
3747 goto done;
3748 }
3749
3750 prog->_LinkedShaders[stage] = sh;
3751 prog->data->linked_stages |= 1 << stage;
3752 }
3753 }
3754
3755 /* Here begins the inter-stage linking phase. Some initial validation is
3756 * performed, then locations are assigned for uniforms, attributes, and
3757 * varyings.
3758 */
3759 cross_validate_uniforms(consts, prog);
3760 if (!prog->data->LinkStatus)
3761 goto done;
3762
3763 unsigned first, last, prev;
3764
3765 first = MESA_SHADER_STAGES;
3766 last = 0;
3767
3768 /* Determine first and last stage. */
3769 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3770 if (!prog->_LinkedShaders[i])
3771 continue;
3772 if (first == MESA_SHADER_STAGES)
3773 first = i;
3774 last = i;
3775 }
3776
3777 check_explicit_uniform_locations(&ctx->Extensions, prog);
3778 link_assign_subroutine_types(prog);
3779 verify_subroutine_associated_funcs(prog);
3780
3781 if (!prog->data->LinkStatus)
3782 goto done;
3783
3784 resize_tes_inputs(consts, prog);
3785
3786 /* Validate the inputs of each stage with the output of the preceding
3787 * stage.
3788 */
3789 prev = first;
3790 for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
3791 if (prog->_LinkedShaders[i] == NULL)
3792 continue;
3793
3794 validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
3795 prog->_LinkedShaders[i]);
3796 if (!prog->data->LinkStatus)
3797 goto done;
3798
3799 cross_validate_outputs_to_inputs(consts, prog,
3800 prog->_LinkedShaders[prev],
3801 prog->_LinkedShaders[i]);
3802 if (!prog->data->LinkStatus)
3803 goto done;
3804
3805 prev = i;
3806 }
3807
3808 /* The cross validation of outputs/inputs above validates interstage
3809 * explicit locations. We need to do this also for the inputs in the first
3810 * stage and outputs of the last stage included in the program, since there
3811 * is no cross validation for these.
3812 */
3813 validate_first_and_last_interface_explicit_locations(consts, prog,
3814 (gl_shader_stage) first,
3815 (gl_shader_stage) last);
3816
3817 /* Cross-validate uniform blocks between shader stages */
3818 validate_interstage_uniform_blocks(prog, prog->_LinkedShaders);
3819 if (!prog->data->LinkStatus)
3820 goto done;
3821
3822 for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
3823 if (prog->_LinkedShaders[i] != NULL)
3824 lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
3825 }
3826
3827 if (prog->IsES && prog->data->Version == 100)
3828 if (!validate_invariant_builtins(prog,
3829 prog->_LinkedShaders[MESA_SHADER_VERTEX],
3830 prog->_LinkedShaders[MESA_SHADER_FRAGMENT]))
3831 goto done;
3832
3833    /* Implement the GLSL 1.30+ rule for discard vs infinite loops.  Do
3834     * this before optimization because we want most of the checks to get
3835     * dropped thanks to constant propagation.
3836 *
3837 * This rule also applies to GLSL ES 3.00.
3838 */
3839 if (max_version >= (prog->IsES ? 300 : 130)) {
3840 struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
3841 if (sh) {
3842 lower_discard_flow(sh->ir);
3843 }
3844 }
3845
3846 if (prog->SeparateShader)
3847 disable_varying_optimizations_for_sso(prog);
3848
3849 /* Process UBOs */
3850 if (!interstage_cross_validate_uniform_blocks(prog, false))
3851 goto done;
3852
3853 /* Process SSBOs */
3854 if (!interstage_cross_validate_uniform_blocks(prog, true))
3855 goto done;
3856
3857 /* Do common optimization before assigning storage for attributes,
3858 * uniforms, and varyings. Later optimization could possibly make
3859 * some of that unused.
3860 */
3861 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3862 if (prog->_LinkedShaders[i] == NULL)
3863 continue;
3864
3865 detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
3866 if (!prog->data->LinkStatus)
3867 goto done;
3868
3869 if (consts->ShaderCompilerOptions[i].LowerCombinedClipCullDistance) {
3870 lower_clip_cull_distance(prog, prog->_LinkedShaders[i]);
3871 }
3872
3873 if (consts->LowerTessLevel) {
3874 lower_tess_level(prog->_LinkedShaders[i]);
3875 }
3876
3877 /* Section 13.46 (Vertex Attribute Aliasing) of the OpenGL ES 3.2
3878 * specification says:
3879 *
3880 * "In general, the behavior of GLSL ES should not depend on compiler
3881 * optimizations which might be implementation-dependent. Name matching
3882 * rules in most languages, including C++ from which GLSL ES is derived,
3883 * are based on declarations rather than use.
3884 *
3885 * RESOLUTION: The existence of aliasing is determined by declarations
3886 * present after preprocessing."
3887 *
3888 * Because of this rule, we do a 'dry-run' of attribute assignment for
3889 * vertex shader inputs here.
3890 */
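      /* In other words, for GLSL ES the dry run below must see every declared
       * vertex shader input, so it runs before do_common_optimization() can
       * eliminate unused declarations.
       */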
3891 if (prog->IsES && i == MESA_SHADER_VERTEX) {
3892 if (!assign_attribute_or_color_locations(mem_ctx, prog, consts,
3893 MESA_SHADER_VERTEX, false)) {
3894 goto done;
3895 }
3896 }
3897
3898       /* Run it just once, since NIR will do the real optimization. */
3899 do_common_optimization(prog->_LinkedShaders[i]->ir, true,
3900 &consts->ShaderCompilerOptions[i],
3901 consts->NativeIntegers);
3902 }
3903
3904 /* Check and validate stream emissions in geometry shaders */
3905 validate_geometry_shader_emissions(consts, prog);
3906
3907 store_fragdepth_layout(prog);
3908
3909    if (!link_varyings(consts, prog, mem_ctx))
3910 goto done;
3911
3912 /* OpenGL ES < 3.1 requires that a vertex shader and a fragment shader both
3913 * be present in a linked program. GL_ARB_ES2_compatibility doesn't say
3914 * anything about shader linking when one of the shaders (vertex or
3915 * fragment shader) is absent. So, the extension shouldn't change the
3916 * behavior specified in GLSL specification.
3917 *
3918 * From OpenGL ES 3.1 specification (7.3 Program Objects):
3919 * "Linking can fail for a variety of reasons as specified in the
3920 * OpenGL ES Shading Language Specification, as well as any of the
3921 * following reasons:
3922 *
3923 * ...
3924 *
3925 * * program contains objects to form either a vertex shader or
3926 * fragment shader, and program is not separable, and does not
3927 * contain objects to form both a vertex shader and fragment
3928 * shader."
3929 *
3930 * However, the only scenario in 3.1+ where we don't require them both is
3931 * when we have a compute shader. For example:
3932 *
3933 * - No shaders is a link error.
3934    * - Geom or Tess without a Vertex shader is a link error, which means we
3935    *   always require a Vertex shader and hence a Fragment shader.
3936 * - Finally a Compute shader linked with any other stage is a link error.
3937 */
3938 if (!prog->SeparateShader && ctx->API == API_OPENGLES2 &&
3939 num_shaders[MESA_SHADER_COMPUTE] == 0) {
3940 if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
3941 linker_error(prog, "program lacks a vertex shader\n");
3942 } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
3943 linker_error(prog, "program lacks a fragment shader\n");
3944 }
3945 }
3946
3947 done:
3948 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
3949 free(shader_list[i]);
3950 if (prog->_LinkedShaders[i] == NULL)
3951 continue;
3952
3953 /* Do a final validation step to make sure that the IR wasn't
3954 * invalidated by any modifications performed after intrastage linking.
3955 */
3956 validate_ir_tree(prog->_LinkedShaders[i]->ir);
3957
3958 /* Retain any live IR, but trash the rest. */
3959 reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);
3960
3961 /* The symbol table in the linked shaders may contain references to
3962 * variables that were removed (e.g., unused uniforms). Since it may
3963 * contain junk, there is no possible valid use. Delete it and set the
3964 * pointer to NULL.
3965 */
3966 delete prog->_LinkedShaders[i]->symbols;
3967 prog->_LinkedShaders[i]->symbols = NULL;
3968 }
3969
3970 ralloc_free(mem_ctx);
3971 }
3972
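/**
 * Refresh the cached name metadata after a resource name changes.
 *
 * For example, for the name "lights[0]" this records length = 9,
 * last_square_bracket = 6, and suffix_is_zero_square_bracketed = true.
 */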
3973 void
3974 resource_name_updated(struct gl_resource_name *name)
3975 {
3976 if (name->string) {
3977 name->length = strlen(name->string);
3978
3979 const char *last_square_bracket = strrchr(name->string, '[');
3980 if (last_square_bracket) {
3981 name->last_square_bracket = last_square_bracket - name->string;
3982 name->suffix_is_zero_square_bracketed =
3983 strcmp(last_square_bracket, "[0]") == 0;
3984 } else {
3985 name->last_square_bracket = -1;
3986 name->suffix_is_zero_square_bracketed = false;
3987 }
3988 } else {
3989 name->length = 0;
3990 name->last_square_bracket = -1;
3991 name->suffix_is_zero_square_bracketed = false;
3992 }
3993 }
3994