1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/errors.h"
33 #include "main/mtypes.h"
34 #include "glsl_symbol_table.h"
35 #include "glsl_parser_extras.h"
36 #include "ir_optimization.h"
37 #include "linker.h"
38 #include "link_varyings.h"
39 #include "main/macros.h"
40 #include "util/hash_table.h"
41 #include "util/u_math.h"
42 #include "program.h"
43
44
45 /**
46 * Get the varying type stripped of the outermost array if we're processing
47 * a stage whose varyings are arrays indexed by a vertex number (such as
48 * geometry shader inputs).
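 *
 * For example (illustrative), a geometry shader input declared in GLSL as
 * "in vec4 color[];" is reported here as vec4, since the outer per-vertex
 * array dimension belongs to the stage interface rather than to the
 * declaration being matched.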
49 */
50 static const glsl_type *
51 get_varying_type(const ir_variable *var, gl_shader_stage stage)
52 {
53 const glsl_type *type = var->type;
54
55 if (!var->data.patch &&
56 ((var->data.mode == ir_var_shader_out &&
57 stage == MESA_SHADER_TESS_CTRL) ||
58 (var->data.mode == ir_var_shader_in &&
59 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
60 stage == MESA_SHADER_GEOMETRY)))) {
61 assert(type->is_array());
62 type = type->fields.array;
63 }
64
65 return type;
66 }
67
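/* Recursively build the flattened list of names used to look up transform
 * feedback candidates for a single output.  For example (illustrative), an
 * output declared as
 *
 *    struct S { vec2 pos; float w; };
 *    layout(xfb_offset = 0) out S s[2];
 *
 * expands to the names "s[0].pos", "s[0].w", "s[1].pos" and "s[1].w".  For
 * named interface blocks the block name plus the member name passed in by
 * the caller form the prefix instead.
 */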
68 static void
69 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
70 size_t name_length, unsigned *count,
71 const char *ifc_member_name,
72 const glsl_type *ifc_member_t, char ***varying_names)
73 {
74 if (t->is_interface()) {
75 size_t new_length = name_length;
76
77 assert(ifc_member_name && ifc_member_t);
78 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
79
80 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
81 NULL, NULL, varying_names);
82 } else if (t->is_struct()) {
83 for (unsigned i = 0; i < t->length; i++) {
84 const char *field = t->fields.structure[i].name;
85 size_t new_length = name_length;
86
87 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
88
89 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
90 new_length, count, NULL, NULL,
91 varying_names);
92 }
93 } else if (t->without_array()->is_struct() ||
94 t->without_array()->is_interface() ||
95 (t->is_array() && t->fields.array->is_array())) {
96 for (unsigned i = 0; i < t->length; i++) {
97 size_t new_length = name_length;
98
99 /* Append the subscript to the current variable name */
100 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
101
102 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
103 count, ifc_member_name, ifc_member_t,
104 varying_names);
105 }
106 } else {
107 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
108 }
109 }
110
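/* Scan the stage's outputs for ARB_enhanced_layouts xfb_* qualifiers.
 * Returns true if this stage is responsible for describing the transform
 * feedback setup (a global xfb_stride, or an output with an explicit
 * xfb_buffer, xfb_stride or xfb_offset).  Outputs captured via xfb_offset
 * are counted and their flattened names written into *varying_names.
 */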
111 static bool
112 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
113 struct gl_shader_program *prog,
114 unsigned *num_tfeedback_decls,
115 char ***varying_names)
116 {
117 bool has_xfb_qualifiers = false;
118
119 /* We still need to enable transform feedback mode even if xfb_stride is
120 * only applied to a global out. Also, we don't bother to propagate
121 * xfb_stride to interface block members, so this catches that case too.
122 */
123 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
124 if (prog->TransformFeedback.BufferStride[j]) {
125 has_xfb_qualifiers = true;
126 break;
127 }
128 }
129
130 foreach_in_list(ir_instruction, node, sh->ir) {
131 ir_variable *var = node->as_variable();
132 if (!var || var->data.mode != ir_var_shader_out)
133 continue;
134
135 /* From the ARB_enhanced_layouts spec:
136 *
137 * "Any shader making any static use (after preprocessing) of any of
138 * these *xfb_* qualifiers will cause the shader to be in a
139 * transform feedback capturing mode and hence responsible for
140 * describing the transform feedback setup. This mode will capture
141 * any output selected by *xfb_offset*, directly or indirectly, to
142 * a transform feedback buffer."
143 */
144 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
145 has_xfb_qualifiers = true;
146 }
147
148 if (var->data.explicit_xfb_offset) {
149 *num_tfeedback_decls += var->type->varying_count();
150 has_xfb_qualifiers = true;
151 }
152 }
153
154 if (*num_tfeedback_decls == 0)
155 return has_xfb_qualifiers;
156
157 unsigned i = 0;
158 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
159 foreach_in_list(ir_instruction, node, sh->ir) {
160 ir_variable *var = node->as_variable();
161 if (!var || var->data.mode != ir_var_shader_out)
162 continue;
163
164 if (var->data.explicit_xfb_offset) {
165 char *name;
166 const glsl_type *type, *member_type;
167
168 if (var->data.from_named_ifc_block) {
169 type = var->get_interface_type();
170
171 /* Find the member type before it was altered by lowering */
172 const glsl_type *type_wa = type->without_array();
173 member_type =
174 type_wa->fields.structure[type_wa->field_index(var->name)].type;
175 name = ralloc_strdup(NULL, type_wa->name);
176 } else {
177 type = var->type;
178 member_type = NULL;
179 name = ralloc_strdup(NULL, var->name);
180 }
181 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
182 var->name, member_type, varying_names);
183 ralloc_free(name);
184 }
185 }
186
187 assert(i == *num_tfeedback_decls);
188 return has_xfb_qualifiers;
189 }
190
191 /**
192 * Validate the types and qualifiers of an output from one stage against the
193 * matching input to another stage.
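 *
 * For example (illustrative), a vertex shader output "out vec4 color;" is
 * compared against a geometry shader input "in vec4 color[];" by first
 * stripping the consumer's per-vertex array dimension, so it is the element
 * types that must match.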
194 */
195 static void
196 cross_validate_types_and_qualifiers(struct gl_context *ctx,
197 struct gl_shader_program *prog,
198 const ir_variable *input,
199 const ir_variable *output,
200 gl_shader_stage consumer_stage,
201 gl_shader_stage producer_stage)
202 {
203 /* Check that the types match between stages.
204 */
205 const glsl_type *type_to_match = input->type;
206
207 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
208 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
209 consumer_stage != MESA_SHADER_FRAGMENT) ||
210 consumer_stage == MESA_SHADER_GEOMETRY;
211 if (extra_array_level) {
212 assert(type_to_match->is_array());
213 type_to_match = type_to_match->fields.array;
214 }
215
216 if (type_to_match != output->type) {
217 if (output->type->is_struct()) {
218 /* Structures across shader stages can have different names
219 * and are considered to match in type if and only if their
220 * members match in name, type, qualification, and declaration
221 * order. The precision doesn't need to match.
222 */
223 if (!output->type->record_compare(type_to_match,
224 false, /* match_name */
225 true, /* match_locations */
226 false /* match_precision */)) {
227 linker_error(prog,
228 "%s shader output `%s' declared as struct `%s', "
229 "doesn't match in type with %s shader input "
230 "declared as struct `%s'\n",
231 _mesa_shader_stage_to_string(producer_stage),
232 output->name,
233 output->type->name,
234 _mesa_shader_stage_to_string(consumer_stage),
235 input->type->name);
236 }
237 } else if (!output->type->is_array() || !is_gl_identifier(output->name)) {
238 /* There is a bit of a special case for gl_TexCoord. This
239 * built-in is unsized by default. Applications that variably
240 * access it must redeclare it with a size. There is some
241 * language in the GLSL spec that implies the fragment shader
242 * and vertex shader do not have to agree on this size. Other
243 * drivers behave this way, and one or two applications seem to
244 * rely on it.
245 *
246 * Neither declaration needs to be modified here because the array
247 * sizes are fixed later when update_array_sizes is called.
248 *
249 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
250 *
251 * "Unlike user-defined varying variables, the built-in
252 * varying variables don't have a strict one-to-one
253 * correspondence between the vertex language and the
254 * fragment language."
255 */
256 linker_error(prog,
257 "%s shader output `%s' declared as type `%s', "
258 "but %s shader input declared as type `%s'\n",
259 _mesa_shader_stage_to_string(producer_stage),
260 output->name,
261 output->type->name,
262 _mesa_shader_stage_to_string(consumer_stage),
263 input->type->name);
264 return;
265 }
266 }
267
268 /* Check that all of the qualifiers match between stages.
269 */
270
271 /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
272 * should match until OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
273 * conformance test suite does not verify that the qualifiers must match.
274 * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
275 * OpenGLES 3.0 drivers, so we relax the checking in all cases.
276 */
277 if (false /* always skip the centroid check */ &&
278 prog->data->Version < (prog->IsES ? 310 : 430) &&
279 input->data.centroid != output->data.centroid) {
280 linker_error(prog,
281 "%s shader output `%s' %s centroid qualifier, "
282 "but %s shader input %s centroid qualifier\n",
283 _mesa_shader_stage_to_string(producer_stage),
284 output->name,
285 (output->data.centroid) ? "has" : "lacks",
286 _mesa_shader_stage_to_string(consumer_stage),
287 (input->data.centroid) ? "has" : "lacks");
288 return;
289 }
290
291 if (input->data.sample != output->data.sample) {
292 linker_error(prog,
293 "%s shader output `%s' %s sample qualifier, "
294 "but %s shader input %s sample qualifier\n",
295 _mesa_shader_stage_to_string(producer_stage),
296 output->name,
297 (output->data.sample) ? "has" : "lacks",
298 _mesa_shader_stage_to_string(consumer_stage),
299 (input->data.sample) ? "has" : "lacks");
300 return;
301 }
302
303 if (input->data.patch != output->data.patch) {
304 linker_error(prog,
305 "%s shader output `%s' %s patch qualifier, "
306 "but %s shader input %s patch qualifier\n",
307 _mesa_shader_stage_to_string(producer_stage),
308 output->name,
309 (output->data.patch) ? "has" : "lacks",
310 _mesa_shader_stage_to_string(consumer_stage),
311 (input->data.patch) ? "has" : "lacks");
312 return;
313 }
314
315 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
316 *
317 * "As only outputs need be declared with invariant, an output from
318 * one shader stage will still match an input of a subsequent stage
319 * without the input being declared as invariant."
320 *
321 * while GLSL 4.20 says:
322 *
323 * "For variables leaving one shader and coming into another shader,
324 * the invariant keyword has to be used in both shaders, or a link
325 * error will result."
326 *
327 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
328 *
329 * "The invariance of varyings that are declared in both the vertex
330 * and fragment shaders must match."
331 */
332 if (input->data.explicit_invariant != output->data.explicit_invariant &&
333 prog->data->Version < (prog->IsES ? 300 : 430)) {
334 linker_error(prog,
335 "%s shader output `%s' %s invariant qualifier, "
336 "but %s shader input %s invariant qualifier\n",
337 _mesa_shader_stage_to_string(producer_stage),
338 output->name,
339 (output->data.explicit_invariant) ? "has" : "lacks",
340 _mesa_shader_stage_to_string(consumer_stage),
341 (input->data.explicit_invariant) ? "has" : "lacks");
342 return;
343 }
344
345 /* GLSL >= 4.40 removes the text requiring interpolation qualifiers
346 * to match across stages; they must only match within the same stage.
347 *
348 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
349 *
350 * "It is a link-time error if, within the same stage, the interpolation
351 * qualifiers of variables of the same name do not match."
352 *
353 * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
354 *
355 * "When no interpolation qualifier is present, smooth interpolation
356 * is used."
357 *
358 * So we match variables where one is smooth and the other has no explicit
359 * qualifier.
360 */
361 unsigned input_interpolation = input->data.interpolation;
362 unsigned output_interpolation = output->data.interpolation;
363 if (prog->IsES) {
364 if (input_interpolation == INTERP_MODE_NONE)
365 input_interpolation = INTERP_MODE_SMOOTH;
366 if (output_interpolation == INTERP_MODE_NONE)
367 output_interpolation = INTERP_MODE_SMOOTH;
368 }
369 if (input_interpolation != output_interpolation &&
370 prog->data->Version < 440) {
371 if (!ctx->Const.AllowGLSLCrossStageInterpolationMismatch) {
372 linker_error(prog,
373 "%s shader output `%s' specifies %s "
374 "interpolation qualifier, "
375 "but %s shader input specifies %s "
376 "interpolation qualifier\n",
377 _mesa_shader_stage_to_string(producer_stage),
378 output->name,
379 interpolation_string(output->data.interpolation),
380 _mesa_shader_stage_to_string(consumer_stage),
381 interpolation_string(input->data.interpolation));
382 return;
383 } else {
384 linker_warning(prog,
385 "%s shader output `%s' specifies %s "
386 "interpolation qualifier, "
387 "but %s shader input specifies %s "
388 "interpolation qualifier\n",
389 _mesa_shader_stage_to_string(producer_stage),
390 output->name,
391 interpolation_string(output->data.interpolation),
392 _mesa_shader_stage_to_string(consumer_stage),
393 interpolation_string(input->data.interpolation));
394 }
395 }
396 }
397
398 /**
399 * Validate front and back color outputs against single color input
400 */
401 static void
402 cross_validate_front_and_back_color(struct gl_context *ctx,
403 struct gl_shader_program *prog,
404 const ir_variable *input,
405 const ir_variable *front_color,
406 const ir_variable *back_color,
407 gl_shader_stage consumer_stage,
408 gl_shader_stage producer_stage)
409 {
410 if (front_color != NULL && front_color->data.assigned)
411 cross_validate_types_and_qualifiers(ctx, prog, input, front_color,
412 consumer_stage, producer_stage);
413
414 if (back_color != NULL && back_color->data.assigned)
415 cross_validate_types_and_qualifiers(ctx, prog, input, back_color,
416 consumer_stage, producer_stage);
417 }
418
419 static unsigned
420 compute_variable_location_slot(ir_variable *var, gl_shader_stage stage)
421 {
422 unsigned location_start = VARYING_SLOT_VAR0;
423
424 switch (stage) {
425 case MESA_SHADER_VERTEX:
426 if (var->data.mode == ir_var_shader_in)
427 location_start = VERT_ATTRIB_GENERIC0;
428 break;
429 case MESA_SHADER_TESS_CTRL:
430 case MESA_SHADER_TESS_EVAL:
431 if (var->data.patch)
432 location_start = VARYING_SLOT_PATCH0;
433 break;
434 case MESA_SHADER_FRAGMENT:
435 if (var->data.mode == ir_var_shader_out)
436 location_start = FRAG_RESULT_DATA0;
437 break;
438 default:
439 break;
440 }
441
442 return var->data.location - location_start;
443 }
444
445 struct explicit_location_info {
446 ir_variable *var;
447 bool base_type_is_integer;
448 unsigned base_type_bit_size;
449 unsigned interpolation;
450 bool centroid;
451 bool sample;
452 bool patch;
453 };
454
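/* Verify that a variable with an explicit location does not illegally alias
 * another variable already recorded in explicit_locations.  As an
 * illustrative example, these two outputs may share location 0 because they
 * occupy disjoint components and have the same underlying type:
 *
 *    layout(location = 0, component = 0) out vec2 a;
 *    layout(location = 0, component = 2) out vec2 b;
 *
 * whereas structs, overlapping components, or mismatched bit sizes,
 * interpolation or auxiliary qualifiers at the same location are rejected
 * below.
 */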
455 static bool
456 check_location_aliasing(struct explicit_location_info explicit_locations[][4],
457 ir_variable *var,
458 unsigned location,
459 unsigned component,
460 unsigned location_limit,
461 const glsl_type *type,
462 unsigned interpolation,
463 bool centroid,
464 bool sample,
465 bool patch,
466 gl_shader_program *prog,
467 gl_shader_stage stage)
468 {
469 unsigned last_comp;
470 unsigned base_type_bit_size;
471 const glsl_type *type_without_array = type->without_array();
472 const bool base_type_is_integer =
473 glsl_base_type_is_integer(type_without_array->base_type);
474 const bool is_struct = type_without_array->is_struct();
475 if (is_struct) {
476 /* structs don't have a defined underlying base type so just treat all
477 * component slots as used and set the bit size to 0. If there is
478 * location aliasing, we'll fail anyway later.
479 */
480 last_comp = 4;
481 base_type_bit_size = 0;
482 } else {
483 unsigned dmul = type_without_array->is_64bit() ? 2 : 1;
484 last_comp = component + type_without_array->vector_elements * dmul;
485 base_type_bit_size =
486 glsl_base_type_get_bit_size(type_without_array->base_type);
487 }
488
489 while (location < location_limit) {
490 unsigned comp = 0;
491 while (comp < 4) {
492 struct explicit_location_info *info =
493 &explicit_locations[location][comp];
494
495 if (info->var) {
496 if (info->var->type->without_array()->is_struct() || is_struct) {
497 /* Structs cannot share location since they are incompatible
498 * with any other underlying numerical type.
499 */
500 linker_error(prog,
501 "%s shader has multiple %sputs sharing the "
502 "same location that don't have the same "
503 "underlying numerical type. Struct variable '%s', "
504 "location %u\n",
505 _mesa_shader_stage_to_string(stage),
506 var->data.mode == ir_var_shader_in ? "in" : "out",
507 is_struct ? var->name : info->var->name,
508 location);
509 return false;
510 } else if (comp >= component && comp < last_comp) {
511 /* Component aliasing is not allowed */
512 linker_error(prog,
513 "%s shader has multiple %sputs explicitly "
514 "assigned to location %d and component %d\n",
515 _mesa_shader_stage_to_string(stage),
516 var->data.mode == ir_var_shader_in ? "in" : "out",
517 location, comp);
518 return false;
519 } else {
520 /* From the OpenGL 4.60.5 spec, section 4.4.1 Input Layout
521 * Qualifiers, Page 67, (Location aliasing):
522 *
523 * " Further, when location aliasing, the aliases sharing the
524 * location must have the same underlying numerical type
525 * and bit width (floating-point or integer, 32-bit versus
526 * 64-bit, etc.) and the same auxiliary storage and
527 * interpolation qualification."
528 */
529
530 /* If the underlying numerical type isn't integer, implicitly
531 * it will be float or else we would have failed by now.
532 */
533 if (info->base_type_is_integer != base_type_is_integer) {
534 linker_error(prog,
535 "%s shader has multiple %sputs sharing the "
536 "same location that don't have the same "
537 "underlying numerical type. Location %u "
538 "component %u.\n",
539 _mesa_shader_stage_to_string(stage),
540 var->data.mode == ir_var_shader_in ?
541 "in" : "out", location, comp);
542 return false;
543 }
544
545 if (info->base_type_bit_size != base_type_bit_size) {
546 linker_error(prog,
547 "%s shader has multiple %sputs sharing the "
548 "same location that don't have the same "
549 "underlying numerical bit size. Location %u "
550 "component %u.\n",
551 _mesa_shader_stage_to_string(stage),
552 var->data.mode == ir_var_shader_in ?
553 "in" : "out", location, comp);
554 return false;
555 }
556
557 if (info->interpolation != interpolation) {
558 linker_error(prog,
559 "%s shader has multiple %sputs sharing the "
560 "same location that don't have the same "
561 "interpolation qualification. Location %u "
562 "component %u.\n",
563 _mesa_shader_stage_to_string(stage),
564 var->data.mode == ir_var_shader_in ?
565 "in" : "out", location, comp);
566 return false;
567 }
568
569 if (info->centroid != centroid ||
570 info->sample != sample ||
571 info->patch != patch) {
572 linker_error(prog,
573 "%s shader has multiple %sputs sharing the "
574 "same location that don't have the same "
575 "auxiliary storage qualification. Location %u "
576 "component %u.\n",
577 _mesa_shader_stage_to_string(stage),
578 var->data.mode == ir_var_shader_in ?
579 "in" : "out", location, comp);
580 return false;
581 }
582 }
583 } else if (comp >= component && comp < last_comp) {
584 info->var = var;
585 info->base_type_is_integer = base_type_is_integer;
586 info->base_type_bit_size = base_type_bit_size;
587 info->interpolation = interpolation;
588 info->centroid = centroid;
589 info->sample = sample;
590 info->patch = patch;
591 }
592
593 comp++;
594
595 /* We need to do some special handling for doubles as dvec3 and
596 * dvec4 consume two consecutive locations. We don't need to
597 * worry about components beginning at anything other than 0 as
598 * the spec does not allow this for dvec3 and dvec4.
599 */
600 if (comp == 4 && last_comp > 4) {
601 last_comp = last_comp - 4;
602 /* Bump location index and reset the component index */
603 location++;
604 comp = 0;
605 component = 0;
606 }
607 }
608
609 location++;
610 }
611
612 return true;
613 }
614
615 static bool
616 validate_explicit_variable_location(struct gl_context *ctx,
617 struct explicit_location_info explicit_locations[][4],
618 ir_variable *var,
619 gl_shader_program *prog,
620 gl_linked_shader *sh)
621 {
622 const glsl_type *type = get_varying_type(var, sh->Stage);
623 unsigned num_elements = type->count_attribute_slots(false);
624 unsigned idx = compute_variable_location_slot(var, sh->Stage);
625 unsigned slot_limit = idx + num_elements;
626
627 /* Vertex shader inputs and fragment shader outputs are validated in
628 * assign_attribute_or_color_locations() so we should not attempt to
629 * validate them again here.
630 */
631 unsigned slot_max;
632 if (var->data.mode == ir_var_shader_out) {
633 assert(sh->Stage != MESA_SHADER_FRAGMENT);
634 slot_max =
635 ctx->Const.Program[sh->Stage].MaxOutputComponents / 4;
636 } else {
637 assert(var->data.mode == ir_var_shader_in);
638 assert(sh->Stage != MESA_SHADER_VERTEX);
639 slot_max =
640 ctx->Const.Program[sh->Stage].MaxInputComponents / 4;
641 }
642
643 if (slot_limit > slot_max) {
644 linker_error(prog,
645 "Invalid location %u in %s shader\n",
646 idx, _mesa_shader_stage_to_string(sh->Stage));
647 return false;
648 }
649
650 const glsl_type *type_without_array = type->without_array();
651 if (type_without_array->is_interface()) {
652 for (unsigned i = 0; i < type_without_array->length; i++) {
653 glsl_struct_field *field = &type_without_array->fields.structure[i];
654 unsigned field_location = field->location -
655 (field->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0);
656 if (!check_location_aliasing(explicit_locations, var,
657 field_location,
658 0, field_location + 1,
659 field->type,
660 field->interpolation,
661 field->centroid,
662 field->sample,
663 field->patch,
664 prog, sh->Stage)) {
665 return false;
666 }
667 }
668 } else if (!check_location_aliasing(explicit_locations, var,
669 idx, var->data.location_frac,
670 slot_limit, type,
671 var->data.interpolation,
672 var->data.centroid,
673 var->data.sample,
674 var->data.patch,
675 prog, sh->Stage)) {
676 return false;
677 }
678
679 return true;
680 }
681
682 /**
683 * Validate explicit locations for the inputs to the first stage and the
684 * outputs of the last stage in a program, if those are not the VS and FS
685 * shaders.
686 */
687 void
688 validate_first_and_last_interface_explicit_locations(struct gl_context *ctx,
689 struct gl_shader_program *prog,
690 gl_shader_stage first_stage,
691 gl_shader_stage last_stage)
692 {
693 /* VS inputs and FS outputs are validated in
694 * assign_attribute_or_color_locations()
695 */
696 bool validate_first_stage = first_stage != MESA_SHADER_VERTEX;
697 bool validate_last_stage = last_stage != MESA_SHADER_FRAGMENT;
698 if (!validate_first_stage && !validate_last_stage)
699 return;
700
701 struct explicit_location_info explicit_locations[MAX_VARYING][4];
702
703 gl_shader_stage stages[2] = { first_stage, last_stage };
704 bool validate_stage[2] = { validate_first_stage, validate_last_stage };
705 ir_variable_mode var_direction[2] = { ir_var_shader_in, ir_var_shader_out };
706
707 for (unsigned i = 0; i < 2; i++) {
708 if (!validate_stage[i])
709 continue;
710
711 gl_shader_stage stage = stages[i];
712
713 gl_linked_shader *sh = prog->_LinkedShaders[stage];
714 assert(sh);
715
716 memset(explicit_locations, 0, sizeof(explicit_locations));
717
718 foreach_in_list(ir_instruction, node, sh->ir) {
719 ir_variable *const var = node->as_variable();
720
721 if (var == NULL ||
722 !var->data.explicit_location ||
723 var->data.location < VARYING_SLOT_VAR0 ||
724 var->data.mode != var_direction[i])
725 continue;
726
727 if (!validate_explicit_variable_location(
728 ctx, explicit_locations, var, prog, sh)) {
729 return;
730 }
731 }
732 }
733 }
734
735 /**
736 * Validate that outputs from one stage match inputs of another
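 *
 * Varyings without an explicit location are matched by name against a symbol
 * table of producer outputs; varyings with explicit locations at or above
 * VARYING_SLOT_VAR0 are matched slot-by-slot instead, so their names do not
 * need to agree across stages.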
737 */
738 void
739 cross_validate_outputs_to_inputs(struct gl_context *ctx,
740 struct gl_shader_program *prog,
741 gl_linked_shader *producer,
742 gl_linked_shader *consumer)
743 {
744 glsl_symbol_table parameters;
745 struct explicit_location_info output_explicit_locations[MAX_VARYING][4] = {};
746 struct explicit_location_info input_explicit_locations[MAX_VARYING][4] = {};
747
748 /* Find all shader outputs in the "producer" stage.
749 */
750 foreach_in_list(ir_instruction, node, producer->ir) {
751 ir_variable *const var = node->as_variable();
752
753 if (var == NULL || var->data.mode != ir_var_shader_out)
754 continue;
755
756 if (!var->data.explicit_location
757 || var->data.location < VARYING_SLOT_VAR0)
758 parameters.add_variable(var);
759 else {
760 /* User-defined varyings with explicit locations are handled
761 * differently because they do not need to have matching names.
762 */
763 if (!validate_explicit_variable_location(ctx,
764 output_explicit_locations,
765 var, prog, producer)) {
766 return;
767 }
768 }
769 }
770
771
772 /* Find all shader inputs in the "consumer" stage. Any variables that have
773 * matching outputs already in the symbol table must have the same type and
774 * qualifiers.
775 *
776 * Exception: if the consumer is the geometry shader, then the inputs
777 * should be arrays and the type of the array element should match the type
778 * of the corresponding producer output.
779 */
780 foreach_in_list(ir_instruction, node, consumer->ir) {
781 ir_variable *const input = node->as_variable();
782
783 if (input == NULL || input->data.mode != ir_var_shader_in)
784 continue;
785
786 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
787 const ir_variable *const front_color =
788 parameters.get_variable("gl_FrontColor");
789
790 const ir_variable *const back_color =
791 parameters.get_variable("gl_BackColor");
792
793 cross_validate_front_and_back_color(ctx, prog, input,
794 front_color, back_color,
795 consumer->Stage, producer->Stage);
796 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
797 const ir_variable *const front_color =
798 parameters.get_variable("gl_FrontSecondaryColor");
799
800 const ir_variable *const back_color =
801 parameters.get_variable("gl_BackSecondaryColor");
802
803 cross_validate_front_and_back_color(ctx, prog, input,
804 front_color, back_color,
805 consumer->Stage, producer->Stage);
806 } else {
807 /* The rules for connecting inputs and outputs change in the presence
808 * of explicit locations. In this case, we no longer care about the
809 * names of the variables. Instead, we care only about the
810 * explicitly assigned location.
811 */
812 ir_variable *output = NULL;
813 if (input->data.explicit_location
814 && input->data.location >= VARYING_SLOT_VAR0) {
815
816 const glsl_type *type = get_varying_type(input, consumer->Stage);
817 unsigned num_elements = type->count_attribute_slots(false);
818 unsigned idx =
819 compute_variable_location_slot(input, consumer->Stage);
820 unsigned slot_limit = idx + num_elements;
821
822 if (!validate_explicit_variable_location(ctx,
823 input_explicit_locations,
824 input, prog, consumer)) {
825 return;
826 }
827
828 while (idx < slot_limit) {
829 if (idx >= MAX_VARYING) {
830 linker_error(prog,
831 "Invalid location %u in %s shader\n", idx,
832 _mesa_shader_stage_to_string(consumer->Stage));
833 return;
834 }
835
836 output = output_explicit_locations[idx][input->data.location_frac].var;
837
838 if (output == NULL) {
839 /* A linker failure should only happen when there is no
840 * output declaration and there is Static Use of the
841 * declared input.
842 */
843 if (input->data.used) {
844 linker_error(prog,
845 "%s shader input `%s' with explicit location "
846 "has no matching output\n",
847 _mesa_shader_stage_to_string(consumer->Stage),
848 input->name);
849 break;
850 }
851 } else if (input->data.location != output->data.location) {
852 linker_error(prog,
853 "%s shader input `%s' with explicit location "
854 "has no matching output\n",
855 _mesa_shader_stage_to_string(consumer->Stage),
856 input->name);
857 break;
858 }
859 idx++;
860 }
861 } else {
862 output = parameters.get_variable(input->name);
863 }
864
865 if (output != NULL) {
866 /* Interface blocks have their own validation elsewhere so don't
867 * try validating them here.
868 */
869 if (!(input->get_interface_type() &&
870 output->get_interface_type()))
871 cross_validate_types_and_qualifiers(ctx, prog, input, output,
872 consumer->Stage,
873 producer->Stage);
874 } else {
875 /* Check for input vars with unmatched output vars in prev stage
876 * taking into account that interface blocks could have a matching
877 * output but with a different name, so we ignore them.
878 *
879 * Section 4.3.4 (Inputs) of the GLSL 4.10 specification says:
880 *
881 * "Only the input variables that are actually read need to be
882 * written by the previous stage; it is allowed to have
883 * superfluous declarations of input variables."
884 *
885 * However, it's not defined anywhere how we should handle
886 * inputs that are not written in the previous stage, and it's not
887 * clear what "actually read" means.
888 *
889 * The GLSL 4.20 spec however is much clearer:
890 *
891 * "Only the input variables that are statically read need to
892 * be written by the previous stage; it is allowed to have
893 * superfluous declarations of input variables."
894 *
895 * It also has a table that states it is an error to statically
896 * read an input that is not defined in the previous stage. While
897 * it is not an error to not statically write to the output (it
898 * just needs to be defined to not be an error).
899 *
900 * The text in the GLSL 4.20 spec was an attempt to clarify the
901 * previous spec iterations. However, given the difference in spec
902 * and that some applications seem to depend on not erroring when
903 * the input is not actually read in control flow, we only apply
904 * this rule to GLSL 4.00 and higher. GLSL 4.00 was chosen because
905 * a 3.30 shader is the highest version of GLSL we have seen in
906 * the wild dependent on the less strict interpretation.
907 */
908 assert(!input->data.assigned);
909 if (input->data.used && !input->get_interface_type() &&
910 !input->data.explicit_location &&
911 (prog->data->Version >= (prog->IsES ? 0 : 400)))
912 linker_error(prog,
913 "%s shader input `%s' "
914 "has no matching output in the previous stage\n",
915 _mesa_shader_stage_to_string(consumer->Stage),
916 input->name);
917 }
918 }
919 }
920 }
921
922 /**
923 * Demote shader inputs and outputs that are not used in other stages, and
924 * remove them via dead code elimination.
925 */
926 static void
927 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
928 gl_linked_shader *sh,
929 enum ir_variable_mode mode)
930 {
931 if (is_separate_shader_object)
932 return;
933
934 foreach_in_list(ir_instruction, node, sh->ir) {
935 ir_variable *const var = node->as_variable();
936
937 if (var == NULL || var->data.mode != int(mode))
938 continue;
939
940 /* A shader 'in' or 'out' variable is only really an input or output if
941 * its value is used by other shader stages. This will cause the
942 * variable to have a location assigned.
943 */
944 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
945 assert(var->data.mode != ir_var_temporary);
946
947 /* Assign zeros to demoted inputs to allow more optimizations. */
948 if (var->data.mode == ir_var_shader_in && !var->constant_value)
949 var->constant_value = ir_constant::zero(var, var->type);
950
951 var->data.mode = ir_var_auto;
952 }
953 }
954
955 /* Eliminate code that is now dead due to unused inputs/outputs being
956 * demoted.
957 */
958 while (do_dead_code(sh->ir, false))
959 ;
960
961 }
962
963 /**
964 * Initialize this object based on a string that was passed to
965 * glTransformFeedbackVaryings.
966 *
967 * If the input is malformed, this call still succeeds, but it sets
968 * this->var_name to a malformed input, so tfeedback_decl::find_output_var()
969 * will fail to find any matching variable.
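 *
 * For example (illustrative), "foo" records just the variable name, while
 * "foo[2]" also records an array subscript of 2.  With
 * ARB_transform_feedback3 the special names "gl_NextBuffer" and
 * "gl_SkipComponents1" .. "gl_SkipComponents4" are recognized before any
 * parsing takes place.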
970 */
971 void
972 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
973 const char *input)
974 {
975 /* We don't have to be pedantic about what is a valid GLSL variable name,
976 * because any variable with an invalid name can't exist in the IR anyway.
977 */
978
979 this->location = -1;
980 this->orig_name = input;
981 this->lowered_builtin_array_variable = none;
982 this->skip_components = 0;
983 this->next_buffer_separator = false;
984 this->matched_candidate = NULL;
985 this->stream_id = 0;
986 this->buffer = 0;
987 this->offset = 0;
988
989 if (ctx->Extensions.ARB_transform_feedback3) {
990 /* Parse gl_NextBuffer. */
991 if (strcmp(input, "gl_NextBuffer") == 0) {
992 this->next_buffer_separator = true;
993 return;
994 }
995
996 /* Parse gl_SkipComponents. */
997 if (strcmp(input, "gl_SkipComponents1") == 0)
998 this->skip_components = 1;
999 else if (strcmp(input, "gl_SkipComponents2") == 0)
1000 this->skip_components = 2;
1001 else if (strcmp(input, "gl_SkipComponents3") == 0)
1002 this->skip_components = 3;
1003 else if (strcmp(input, "gl_SkipComponents4") == 0)
1004 this->skip_components = 4;
1005
1006 if (this->skip_components)
1007 return;
1008 }
1009
1010 /* Parse a declaration. */
1011 const char *base_name_end;
1012 long subscript = parse_program_resource_name(input, &base_name_end);
1013 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
1014 if (this->var_name == NULL) {
1015 _mesa_error_no_memory(__func__);
1016 return;
1017 }
1018
1019 if (subscript >= 0) {
1020 this->array_subscript = subscript;
1021 this->is_subscripted = true;
1022 } else {
1023 this->is_subscripted = false;
1024 }
1025
1026 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
1027 * class must behave specially to account for the fact that gl_ClipDistance
1028 * is converted from a float[8] to a vec4[2].
1029 */
1030 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
1031 strcmp(this->var_name, "gl_ClipDistance") == 0) {
1032 this->lowered_builtin_array_variable = clip_distance;
1033 }
1034 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
1035 strcmp(this->var_name, "gl_CullDistance") == 0) {
1036 this->lowered_builtin_array_variable = cull_distance;
1037 }
1038
1039 if (ctx->Const.LowerTessLevel &&
1040 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
1041 this->lowered_builtin_array_variable = tess_level_outer;
1042 if (ctx->Const.LowerTessLevel &&
1043 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
1044 this->lowered_builtin_array_variable = tess_level_inner;
1045 }
1046
1047
1048 /**
1049 * Determine whether two tfeedback_decl objects refer to the same variable and
1050 * array index (if applicable).
1051 */
1052 bool
1053 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
1054 {
1055 assert(x.is_varying() && y.is_varying());
1056
1057 if (strcmp(x.var_name, y.var_name) != 0)
1058 return false;
1059 if (x.is_subscripted != y.is_subscripted)
1060 return false;
1061 if (x.is_subscripted && x.array_subscript != y.array_subscript)
1062 return false;
1063 return true;
1064 }
1065
1066
1067 /**
1068 * Assign a location and stream ID for this tfeedback_decl object based on the
1069 * transform feedback candidate found by find_candidate.
1070 *
1071 * If an error occurs, the error is reported through linker_error() and false
1072 * is returned.
1073 */
1074 bool
1075 tfeedback_decl::assign_location(struct gl_context *ctx,
1076 struct gl_shader_program *prog)
1077 {
1078 assert(this->is_varying());
1079
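   /* Work in units of components: a varying slot holds 4 components, so a
    * "fine" location of (slot * 4 + component + candidate offset) lets the
    * array and struct offsets below be added directly before the result is
    * split back into this->location and this->location_frac.
    */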
1080 unsigned fine_location
1081 = this->matched_candidate->toplevel_var->data.location * 4
1082 + this->matched_candidate->toplevel_var->data.location_frac
1083 + this->matched_candidate->offset;
1084 const unsigned dmul =
1085 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
1086
1087 if (this->matched_candidate->type->is_array()) {
1088 /* Array variable */
1089 const unsigned matrix_cols =
1090 this->matched_candidate->type->fields.array->matrix_columns;
1091 const unsigned vector_elements =
1092 this->matched_candidate->type->fields.array->vector_elements;
1093 unsigned actual_array_size;
1094 switch (this->lowered_builtin_array_variable) {
1095 case clip_distance:
1096 actual_array_size = prog->last_vert_prog ?
1097 prog->last_vert_prog->info.clip_distance_array_size : 0;
1098 break;
1099 case cull_distance:
1100 actual_array_size = prog->last_vert_prog ?
1101 prog->last_vert_prog->info.cull_distance_array_size : 0;
1102 break;
1103 case tess_level_outer:
1104 actual_array_size = 4;
1105 break;
1106 case tess_level_inner:
1107 actual_array_size = 2;
1108 break;
1109 case none:
1110 default:
1111 actual_array_size = this->matched_candidate->type->array_size();
1112 break;
1113 }
1114
1115 if (this->is_subscripted) {
1116 /* Check array bounds. */
1117 if (this->array_subscript >= actual_array_size) {
1118 linker_error(prog, "Transform feedback varying %s has index "
1119 "%i, but the array size is %u.",
1120 this->orig_name, this->array_subscript,
1121 actual_array_size);
1122 return false;
1123 }
1124 unsigned array_elem_size = this->lowered_builtin_array_variable ?
1125 1 : vector_elements * matrix_cols * dmul;
1126 fine_location += array_elem_size * this->array_subscript;
1127 this->size = 1;
1128 } else {
1129 this->size = actual_array_size;
1130 }
1131 this->vector_elements = vector_elements;
1132 this->matrix_columns = matrix_cols;
1133 if (this->lowered_builtin_array_variable)
1134 this->type = GL_FLOAT;
1135 else
1136 this->type = this->matched_candidate->type->fields.array->gl_type;
1137 } else {
1138 /* Regular variable (scalar, vector, or matrix) */
1139 if (this->is_subscripted) {
1140 linker_error(prog, "Transform feedback varying %s requested, "
1141 "but %s is not an array.",
1142 this->orig_name, this->var_name);
1143 return false;
1144 }
1145 this->size = 1;
1146 this->vector_elements = this->matched_candidate->type->vector_elements;
1147 this->matrix_columns = this->matched_candidate->type->matrix_columns;
1148 this->type = this->matched_candidate->type->gl_type;
1149 }
1150 this->location = fine_location / 4;
1151 this->location_frac = fine_location % 4;
1152
1153 /* From GL_EXT_transform_feedback:
1154 * A program will fail to link if:
1155 *
1156 * * the total number of components to capture in any varying
1157 * variable in <varyings> is greater than the constant
1158 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
1159 * buffer mode is SEPARATE_ATTRIBS_EXT;
1160 */
1161 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
1162 this->num_components() >
1163 ctx->Const.MaxTransformFeedbackSeparateComponents) {
1164 linker_error(prog, "Transform feedback varying %s exceeds "
1165 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
1166 this->orig_name);
1167 return false;
1168 }
1169
1170 /* Only transform feedback varyings can be assigned to non-zero streams,
1171 * so assign the stream id here.
1172 */
1173 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
1174
1175 unsigned array_offset = this->array_subscript * 4 * dmul;
1176 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
1177 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
1178 this->offset = this->matched_candidate->toplevel_var->data.offset +
1179 array_offset + struct_offset;
1180
1181 return true;
1182 }
1183
1184
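/* Number of 4-component output slots this declaration covers: its component
 * count rounded up to whole slots, starting from location_frac.
 */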
1185 unsigned
1186 tfeedback_decl::get_num_outputs() const
1187 {
1188 if (!this->is_varying()) {
1189 return 0;
1190 }
1191 return (this->num_components() + this->location_frac + 3)/4;
1192 }
1193
1194
1195 /**
1196 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
1197 *
1198 * If an error occurs, the error is reported through linker_error() and false
1199 * is returned.
1200 */
1201 bool
1202 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
1203 struct gl_transform_feedback_info *info,
1204 unsigned buffer, unsigned buffer_index,
1205 const unsigned max_outputs,
1206 BITSET_WORD *used_components[MAX_FEEDBACK_BUFFERS],
1207 bool *explicit_stride, bool has_xfb_qualifiers,
1208 const void* mem_ctx) const
1209 {
1210 unsigned xfb_offset = 0;
1211 unsigned size = this->size;
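   /* Note that this->offset and Varyings[].Offset are expressed in bytes,
    * while xfb_offset and Buffers[].Stride are tracked in units of float
    * components, hence the conversions by a factor of 4 below.
    */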
1212 /* Handle gl_SkipComponents. */
1213 if (this->skip_components) {
1214 info->Buffers[buffer].Stride += this->skip_components;
1215 size = this->skip_components;
1216 goto store_varying;
1217 }
1218
1219 if (this->next_buffer_separator) {
1220 size = 0;
1221 goto store_varying;
1222 }
1223
1224 if (has_xfb_qualifiers) {
1225 xfb_offset = this->offset / 4;
1226 } else {
1227 xfb_offset = info->Buffers[buffer].Stride;
1228 }
1229 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
1230
1231 {
1232 unsigned location = this->location;
1233 unsigned location_frac = this->location_frac;
1234 unsigned num_components = this->num_components();
1235
1236 /* From GL_EXT_transform_feedback:
1237 *
1238 * " A program will fail to link if:
1239 *
1240 * * the total number of components to capture is greater than the
1241 * constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
1242 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT."
1243 *
1244 * From GL_ARB_enhanced_layouts:
1245 *
1246 * " The resulting stride (implicit or explicit) must be less than or
1247 * equal to the implementation-dependent constant
1248 * gl_MaxTransformFeedbackInterleavedComponents."
1249 */
1250 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
1251 has_xfb_qualifiers) &&
1252 xfb_offset + num_components >
1253 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1254 linker_error(prog,
1255 "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1256 "limit has been exceeded.");
1257 return false;
1258 }
1259
1260 /* From the OpenGL 4.60.5 spec, section 4.4.2. Output Layout Qualifiers,
1261 * Page 76, (Transform Feedback Layout Qualifiers):
1262 *
1263 * " No aliasing in output buffers is allowed: It is a compile-time or
1264 * link-time error to specify variables with overlapping transform
1265 * feedback offsets."
1266 */
1267 const unsigned max_components =
1268 ctx->Const.MaxTransformFeedbackInterleavedComponents;
1269 const unsigned first_component = xfb_offset;
1270 const unsigned last_component = xfb_offset + num_components - 1;
1271 const unsigned start_word = BITSET_BITWORD(first_component);
1272 const unsigned end_word = BITSET_BITWORD(last_component);
1273 BITSET_WORD *used;
1274 assert(last_component < max_components);
1275
1276 if (!used_components[buffer]) {
1277 used_components[buffer] =
1278 rzalloc_array(mem_ctx, BITSET_WORD, BITSET_WORDS(max_components));
1279 }
1280 used = used_components[buffer];
1281
1282 for (unsigned word = start_word; word <= end_word; word++) {
1283 unsigned start_range = 0;
1284 unsigned end_range = BITSET_WORDBITS - 1;
1285
1286 if (word == start_word)
1287 start_range = first_component % BITSET_WORDBITS;
1288
1289 if (word == end_word)
1290 end_range = last_component % BITSET_WORDBITS;
1291
1292 if (used[word] & BITSET_RANGE(start_range, end_range)) {
1293 linker_error(prog,
1294 "variable '%s', xfb_offset (%d) is causing aliasing.",
1295 this->orig_name, xfb_offset * 4);
1296 return false;
1297 }
1298 used[word] |= BITSET_RANGE(start_range, end_range);
1299 }
1300
1301 while (num_components > 0) {
1302 unsigned output_size = MIN2(num_components, 4 - location_frac);
1303 assert((info->NumOutputs == 0 && max_outputs == 0) ||
1304 info->NumOutputs < max_outputs);
1305
1306 /* From the ARB_enhanced_layouts spec:
1307 *
1308 * "If such a block member or variable is not written during a shader
1309 * invocation, the buffer contents at the assigned offset will be
1310 * undefined. Even if there are no static writes to a variable or
1311 * member that is assigned a transform feedback offset, the space is
1312 * still allocated in the buffer and still affects the stride."
1313 */
1314 if (this->is_varying_written()) {
1315 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
1316 info->Outputs[info->NumOutputs].OutputRegister = location;
1317 info->Outputs[info->NumOutputs].NumComponents = output_size;
1318 info->Outputs[info->NumOutputs].StreamId = stream_id;
1319 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
1320 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
1321 ++info->NumOutputs;
1322 }
1323 info->Buffers[buffer].Stream = this->stream_id;
1324 xfb_offset += output_size;
1325
1326 num_components -= output_size;
1327 location++;
1328 location_frac = 0;
1329 }
1330 }
1331
1332 if (explicit_stride && explicit_stride[buffer]) {
1333 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
1334 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
1335 "multiple of 8 as its applied to a type that is or "
1336 "contains a double.",
1337 info->Buffers[buffer].Stride * 4);
1338 return false;
1339 }
1340
1341 if (xfb_offset > info->Buffers[buffer].Stride) {
1342 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
1343 "buffer (%d)", xfb_offset * 4,
1344 info->Buffers[buffer].Stride * 4, buffer);
1345 return false;
1346 }
1347 } else {
1348 info->Buffers[buffer].Stride = xfb_offset;
1349 }
1350
1351 store_varying:
1352 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
1353 this->orig_name);
1354 info->Varyings[info->NumVarying].Type = this->type;
1355 info->Varyings[info->NumVarying].Size = size;
1356 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
1357 info->NumVarying++;
1358 info->Buffers[buffer].NumVaryings++;
1359
1360 return true;
1361 }
1362
1363
1364 const tfeedback_candidate *
1365 tfeedback_decl::find_candidate(gl_shader_program *prog,
1366 hash_table *tfeedback_candidates)
1367 {
1368 const char *name = this->var_name;
1369 switch (this->lowered_builtin_array_variable) {
1370 case none:
1371 name = this->var_name;
1372 break;
1373 case clip_distance:
1374 name = "gl_ClipDistanceMESA";
1375 break;
1376 case cull_distance:
1377 name = "gl_CullDistanceMESA";
1378 break;
1379 case tess_level_outer:
1380 name = "gl_TessLevelOuterMESA";
1381 break;
1382 case tess_level_inner:
1383 name = "gl_TessLevelInnerMESA";
1384 break;
1385 }
1386 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
1387
1388 this->matched_candidate = entry ?
1389 (const tfeedback_candidate *) entry->data : NULL;
1390
1391 if (!this->matched_candidate) {
1392 /* From GL_EXT_transform_feedback:
1393 * A program will fail to link if:
1394 *
1395 * * any variable name specified in the <varyings> array is not
1396 * declared as an output in the geometry shader (if present) or
1397 * the vertex shader (if no geometry shader is present);
1398 */
1399 linker_error(prog, "Transform feedback varying %s undeclared.",
1400 this->orig_name);
1401 }
1402
1403 return this->matched_candidate;
1404 }
1405
1406 /**
1407 * Force a candidate over the previously matched one. This happens when a new
1408 * varying needs to be created to match the xfb declaration, for example,
1409 * to fulfill an alignment criterion.
1410 */
1411 void
1412 tfeedback_decl::set_lowered_candidate(const tfeedback_candidate *candidate)
1413 {
1414 this->matched_candidate = candidate;
1415
1416 /* The subscript part is no longer relevant */
1417 this->is_subscripted = false;
1418 this->array_subscript = 0;
1419 }
1420
1421
1422 /**
1423 * Parse all the transform feedback declarations that were passed to
1424 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
1425 *
1426 * If an error occurs, the error is reported through linker_error() and false
1427 * is returned.
1428 */
1429 static bool
1430 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1431 const void *mem_ctx, unsigned num_names,
1432 char **varying_names, tfeedback_decl *decls)
1433 {
1434 for (unsigned i = 0; i < num_names; ++i) {
1435 decls[i].init(ctx, mem_ctx, varying_names[i]);
1436
1437 if (!decls[i].is_varying())
1438 continue;
1439
1440 /* From GL_EXT_transform_feedback:
1441 * A program will fail to link if:
1442 *
1443 * * any two entries in the <varyings> array specify the same varying
1444 * variable;
1445 *
1446 * We interpret this to mean "any two entries in the <varyings> array
1447 * specify the same varying variable and array index", since transform
1448 * feedback of arrays would be useless otherwise.
1449 */
1450 for (unsigned j = 0; j < i; ++j) {
1451 if (decls[j].is_varying()) {
1452 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1453 linker_error(prog, "Transform feedback varying %s specified "
1454 "more than once.", varying_names[i]);
1455 return false;
1456 }
1457 }
1458 }
1459 }
1460 return true;
1461 }
1462
1463
1464 static int
1465 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1466 {
1467 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1468 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1469
1470 if (x->get_buffer() != y->get_buffer())
1471 return x->get_buffer() - y->get_buffer();
1472 return x->get_offset() - y->get_offset();
1473 }
1474
1475 /**
1476 * Store transform feedback location assignments into
1477 * prog->sh.LinkedTransformFeedback based on the data stored in
1478 * tfeedback_decls.
1479 *
1480 * If an error occurs, the error is reported through linker_error() and false
1481 * is returned.
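 *
 * In GL_SEPARATE_ATTRIBS mode each varying is assigned its own buffer, while
 * in interleaved mode (or when xfb_* qualifiers are present) varyings share
 * buffers and gl_NextBuffer separators or explicit xfb_buffer values advance
 * the buffer index.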
1482 */
1483 static bool
1484 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1485 unsigned num_tfeedback_decls,
1486 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers,
1487 const void *mem_ctx)
1488 {
1489 if (!prog->last_vert_prog)
1490 return true;
1491
1492 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1493 * tracking the number of buffers doesn't overflow.
1494 */
1495 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1496
1497 bool separate_attribs_mode =
1498 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1499
1500 struct gl_program *xfb_prog = prog->last_vert_prog;
1501 xfb_prog->sh.LinkedTransformFeedback =
1502 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1503
1504 /* The xfb_offset qualifier does not have to be used in increasing order;
1505 * however, some drivers expect to receive the list of transform feedback
1506 * declarations in order, so sort it now for convenience.
1507 */
1508 if (has_xfb_qualifiers) {
1509 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1510 cmp_xfb_offset);
1511 }
1512
1513 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1514 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1515 num_tfeedback_decls);
1516
1517 unsigned num_outputs = 0;
1518 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1519 if (tfeedback_decls[i].is_varying_written())
1520 num_outputs += tfeedback_decls[i].get_num_outputs();
1521 }
1522
1523 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1524 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1525 num_outputs);
1526
1527 unsigned num_buffers = 0;
1528 unsigned buffers = 0;
1529 BITSET_WORD *used_components[MAX_FEEDBACK_BUFFERS] = {};
1530
1531 if (!has_xfb_qualifiers && separate_attribs_mode) {
1532 /* GL_SEPARATE_ATTRIBS */
1533 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1534 if (!tfeedback_decls[i].store(ctx, prog,
1535 xfb_prog->sh.LinkedTransformFeedback,
1536 num_buffers, num_buffers, num_outputs,
1537 used_components, NULL,
1538 has_xfb_qualifiers, mem_ctx))
1539 return false;
1540
1541 buffers |= 1 << num_buffers;
1542 num_buffers++;
1543 }
1544 }
1545 else {
1546 /* GL_INTERLEAVED_ATTRIBS */
1547 int buffer_stream_id = -1;
1548 unsigned buffer =
1549 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1550 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1551
1552 /* Apply any xfb_stride global qualifiers */
1553 if (has_xfb_qualifiers) {
1554 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1555 if (prog->TransformFeedback.BufferStride[j]) {
1556 explicit_stride[j] = true;
1557 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1558 prog->TransformFeedback.BufferStride[j] / 4;
1559 }
1560 }
1561 }
1562
1563 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1564 if (has_xfb_qualifiers &&
1565 buffer != tfeedback_decls[i].get_buffer()) {
1566 /* we have moved to the next buffer so reset stream id */
1567 buffer_stream_id = -1;
1568 num_buffers++;
1569 }
1570
1571 if (tfeedback_decls[i].is_next_buffer_separator()) {
1572 if (!tfeedback_decls[i].store(ctx, prog,
1573 xfb_prog->sh.LinkedTransformFeedback,
1574 buffer, num_buffers, num_outputs,
1575 used_components, explicit_stride,
1576 has_xfb_qualifiers, mem_ctx))
1577 return false;
1578 num_buffers++;
1579 buffer_stream_id = -1;
1580 continue;
1581 }
1582
1583 if (has_xfb_qualifiers) {
1584 buffer = tfeedback_decls[i].get_buffer();
1585 } else {
1586 buffer = num_buffers;
1587 }
1588
1589 if (tfeedback_decls[i].is_varying()) {
1590 if (buffer_stream_id == -1) {
1591 /* First varying writing to this buffer: remember its stream */
1592 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1593
1594 /* Only mark a buffer as active when there is a varying
1595 * attached to it. This behaviour is based on a revised version
1596 * of section 13.2.2 of the GL 4.6 spec.
1597 */
1598 buffers |= 1 << buffer;
1599 } else if (buffer_stream_id !=
1600 (int) tfeedback_decls[i].get_stream_id()) {
1601 /* Varying writes to the same buffer from a different stream */
1602 linker_error(prog,
1603 "Transform feedback can't capture varyings belonging "
1604 "to different vertex streams in a single buffer. "
1605 "Varying %s writes to buffer from stream %u, other "
1606 "varyings in the same buffer write from stream %u.",
1607 tfeedback_decls[i].name(),
1608 tfeedback_decls[i].get_stream_id(),
1609 buffer_stream_id);
1610 return false;
1611 }
1612 }
1613
1614 if (!tfeedback_decls[i].store(ctx, prog,
1615 xfb_prog->sh.LinkedTransformFeedback,
1616 buffer, num_buffers, num_outputs,
1617 used_components, explicit_stride,
1618 has_xfb_qualifiers, mem_ctx))
1619 return false;
1620 }
1621 }
1622
1623 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1624
1625 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1626 return true;
1627 }
1628
1629 namespace {
1630
1631 /**
1632 * Data structure recording the relationship between outputs of one shader
1633 * stage (the "producer") and inputs of another (the "consumer").
1634 */
1635 class varying_matches
1636 {
1637 public:
1638 varying_matches(bool disable_varying_packing,
1639 bool disable_xfb_packing,
1640 bool xfb_enabled,
1641 bool enhanced_layouts_enabled,
1642 gl_shader_stage producer_stage,
1643 gl_shader_stage consumer_stage);
1644 ~varying_matches();
1645 void record(ir_variable *producer_var, ir_variable *consumer_var);
1646 unsigned assign_locations(struct gl_shader_program *prog,
1647 uint8_t components[],
1648 uint64_t reserved_slots);
1649 void store_locations() const;
1650
1651 private:
1652 bool is_varying_packing_safe(const glsl_type *type,
1653 const ir_variable *var) const;
1654
1655 /**
1656 * If true, this driver disables varying packing, so all varyings need to
1657 * be aligned on slot boundaries, and take up a number of slots equal to
1658 * their number of matrix columns times their array size.
1659 *
1660 * Packing may also be disabled because our current packing method is not
1661 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1662 * guaranteed to match across stages.
1663 */
1664 const bool disable_varying_packing;
1665
1666 /**
1667 * If true, this driver disables packing for varyings used by transform
1668 * feedback.
1669 */
1670 const bool disable_xfb_packing;
1671
1672 /**
1673 * If true, this driver has transform feedback enabled. The transform
1674 * feedback code usually requires at least some packing be done even
1675 * when varying packing is disabled; fortunately, where transform feedback
1676 * requires packing it is safe to override the disabled setting. See
1677 * is_varying_packing_safe().
1678 */
1679 const bool xfb_enabled;
1680
1681 const bool enhanced_layouts_enabled;
1682
1683 /**
1684 * Enum representing the order in which varyings are packed within a
1685 * packing class.
1686 *
1687 * Currently we pack vec4's first, then vec2's, then scalar values, then
1688 * vec3's. This order ensures that the only vectors that are at risk of
1689 * having to be "double parked" (split between two adjacent varying slots)
1690 * are the vec3's.
1691 */
1692 enum packing_order_enum {
1693 PACKING_ORDER_VEC4,
1694 PACKING_ORDER_VEC2,
1695 PACKING_ORDER_SCALAR,
1696 PACKING_ORDER_VEC3,
1697 };
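/* As a rough illustration of the ordering above: packing a vec4, a vec2,
 * two floats and a vec3 that share a packing class would fill slots as
 *
 *   slot N:   [ vec4.x vec4.y vec4.z vec4.w ]
 *   slot N+1: [ vec2.x vec2.y f0     f1     ]
 *   slot N+2: [ vec3.x vec3.y vec3.z ...    ]
 *
 * so only the trailing vec3 is ever in danger of straddling a slot boundary.
 */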
1698
1699 static unsigned compute_packing_class(const ir_variable *var);
1700 static packing_order_enum compute_packing_order(const ir_variable *var);
1701 static int match_comparator(const void *x_generic, const void *y_generic);
1702 static int xfb_comparator(const void *x_generic, const void *y_generic);
1703 static int not_xfb_comparator(const void *x_generic, const void *y_generic);
1704
1705 /**
1706 * Structure recording the relationship between a single producer output
1707 * and a single consumer input.
1708 */
1709 struct match {
1710 /**
1711 * Packing class for this varying, computed by compute_packing_class().
1712 */
1713 unsigned packing_class;
1714
1715 /**
1716 * Packing order for this varying, computed by compute_packing_order().
1717 */
1718 packing_order_enum packing_order;
1719 unsigned num_components;
1720
1721 /**
1722 * The output variable in the producer stage.
1723 */
1724 ir_variable *producer_var;
1725
1726 /**
1727 * The input variable in the consumer stage.
1728 */
1729 ir_variable *consumer_var;
1730
1731 /**
1732 * The location which has been assigned for this varying. This is
1733 * expressed in multiples of a float, with the first generic varying
1734 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1735 * value 0.
1736 */
1737 unsigned generic_location;
1738 } *matches;
1739
1740 /**
1741 * The number of elements in the \c matches array that are currently in
1742 * use.
1743 */
1744 unsigned num_matches;
1745
1746 /**
1747 * The number of elements that were set aside for the \c matches array when
1748 * it was allocated.
1749 */
1750 unsigned matches_capacity;
1751
1752 gl_shader_stage producer_stage;
1753 gl_shader_stage consumer_stage;
1754 };
1755
1756 } /* anonymous namespace */
1757
varying_matches(bool disable_varying_packing,bool disable_xfb_packing,bool xfb_enabled,bool enhanced_layouts_enabled,gl_shader_stage producer_stage,gl_shader_stage consumer_stage)1758 varying_matches::varying_matches(bool disable_varying_packing,
1759 bool disable_xfb_packing,
1760 bool xfb_enabled,
1761 bool enhanced_layouts_enabled,
1762 gl_shader_stage producer_stage,
1763 gl_shader_stage consumer_stage)
1764 : disable_varying_packing(disable_varying_packing),
1765 disable_xfb_packing(disable_xfb_packing),
1766 xfb_enabled(xfb_enabled),
1767 enhanced_layouts_enabled(enhanced_layouts_enabled),
1768 producer_stage(producer_stage),
1769 consumer_stage(consumer_stage)
1770 {
1771 /* Note: this initial capacity is rather arbitrarily chosen to be large
1772 * enough for many cases without wasting an unreasonable amount of space.
1773 * varying_matches::record() will resize the array if there are more than
1774 * this number of varyings.
1775 */
1776 this->matches_capacity = 8;
1777 this->matches = (match *)
1778 malloc(sizeof(*this->matches) * this->matches_capacity);
1779 this->num_matches = 0;
1780 }
1781
1782
~varying_matches()1783 varying_matches::~varying_matches()
1784 {
1785 free(this->matches);
1786 }
1787
1788
1789 /**
1790 * Packing is always safe on individual arrays, structures, and matrices. It
1791 * is also safe if the varying is only used for transform feedback.
1792 */
1793 bool
is_varying_packing_safe(const glsl_type * type,const ir_variable * var) const1794 varying_matches::is_varying_packing_safe(const glsl_type *type,
1795 const ir_variable *var) const
1796 {
1797 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1798 consumer_stage == MESA_SHADER_TESS_CTRL ||
1799 producer_stage == MESA_SHADER_TESS_CTRL)
1800 return false;
1801
1802 return xfb_enabled && (type->is_array() || type->is_struct() ||
1803 type->is_matrix() || var->data.is_xfb_only);
1804 }
1805
1806
1807 /**
1808 * Record the given producer/consumer variable pair in the list of variables
1809 * that should later be assigned locations.
1810 *
1811 * It is permissible for \c consumer_var to be NULL (this happens if a
1812 * variable is output by the producer and consumed by transform feedback, but
1813 * not consumed by the consumer).
1814 *
1815 * If \c producer_var has already been paired up with a consumer_var, or
1816 * producer_var is part of fixed pipeline functionality (and hence already has
1817 * a location assigned), this function has no effect.
1818 *
1819 * Note: as a side effect this function may change the interpolation type of
1820 * \c producer_var, but only when the change couldn't possibly affect
1821 * rendering.
1822 */
1823 void
record(ir_variable * producer_var,ir_variable * consumer_var)1824 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1825 {
1826 assert(producer_var != NULL || consumer_var != NULL);
1827
1828 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1829 producer_var->data.explicit_location)) ||
1830 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1831 consumer_var->data.explicit_location))) {
1832 /* Either a location already exists for this variable (since it is part
1833 * of fixed functionality), or it has already been recorded as part of a
1834 * previous match.
1835 */
1836 return;
1837 }
1838
1839 bool needs_flat_qualifier = consumer_var == NULL &&
1840 (producer_var->type->contains_integer() ||
1841 producer_var->type->contains_double());
1842
1843 if (!disable_varying_packing &&
1844 (!disable_xfb_packing || producer_var == NULL || !producer_var->data.is_xfb) &&
1845 (needs_flat_qualifier ||
1846 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1847 /* Since this varying is not being consumed by the fragment shader, its
1848 * interpolation type cannot possibly affect rendering.
1849 * Alternatively, this variable is non-flat and is (or contains) an integer
1850 * or a double.
1851 * If the consumer stage is unknown, don't modify the interpolation
1852 * type as it could affect rendering later with separate shaders.
1853 *
1854 * lower_packed_varyings requires all integer varyings to be flat,
1855 * regardless of where they appear. We can trivially satisfy that
1856 * requirement by changing the interpolation type to flat here.
1857 */
1858 if (producer_var) {
1859 producer_var->data.centroid = false;
1860 producer_var->data.sample = false;
1861 producer_var->data.interpolation = INTERP_MODE_FLAT;
1862 }
1863
1864 if (consumer_var) {
1865 consumer_var->data.centroid = false;
1866 consumer_var->data.sample = false;
1867 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1868 }
1869 }
1870
1871 if (this->num_matches == this->matches_capacity) {
1872 this->matches_capacity *= 2;
1873 this->matches = (match *)
1874 realloc(this->matches,
1875 sizeof(*this->matches) * this->matches_capacity);
1876 }
1877
1878 /* We must use the consumer to compute the packing class because in GL4.4+
1879 * there is no guarantee interpolation qualifiers will match across stages.
1880 *
1881 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1882 *
1883 * "The type and presence of interpolation qualifiers of variables with
1884 * the same name declared in all linked shaders for the same cross-stage
1885 * interface must match, otherwise the link command will fail.
1886 *
1887 * When comparing an output from one stage to an input of a subsequent
1888 * stage, the input and output don't match if their interpolation
1889 * qualifiers (or lack thereof) are not the same."
1890 *
1891 * This text was also in at least revision 7 of the 4.40 spec but is no
1892 * longer in revision 9 and not in the 4.50 spec.
1893 */
1894 const ir_variable *const var = (consumer_var != NULL)
1895 ? consumer_var : producer_var;
1896 const gl_shader_stage stage = (consumer_var != NULL)
1897 ? consumer_stage : producer_stage;
1898 const glsl_type *type = get_varying_type(var, stage);
1899
1900 if (producer_var && consumer_var &&
1901 consumer_var->data.must_be_shader_input) {
1902 producer_var->data.must_be_shader_input = 1;
1903 }
1904
1905 this->matches[this->num_matches].packing_class
1906 = this->compute_packing_class(var);
1907 this->matches[this->num_matches].packing_order
1908 = this->compute_packing_order(var);
1909 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1910 (this->disable_xfb_packing && var->data.is_xfb) ||
1911 var->data.must_be_shader_input) {
1912 unsigned slots = type->count_attribute_slots(false);
1913 this->matches[this->num_matches].num_components = slots * 4;
1914 } else {
1915 this->matches[this->num_matches].num_components
1916 = type->component_slots();
1917 }
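/* For example: a vec3[2] varying that cannot be packed takes
 * count_attribute_slots() == 2 slots, i.e. 8 components, whereas the packed
 * path above would only count its component_slots() == 6 components.
 */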
1918
1919 this->matches[this->num_matches].producer_var = producer_var;
1920 this->matches[this->num_matches].consumer_var = consumer_var;
1921 this->num_matches++;
1922 if (producer_var)
1923 producer_var->data.is_unmatched_generic_inout = 0;
1924 if (consumer_var)
1925 consumer_var->data.is_unmatched_generic_inout = 0;
1926 }
1927
1928
1929 /**
1930 * Choose locations for all of the variable matches that were previously
1931 * passed to varying_matches::record().
1932 * \param components returns array[slot] of number of components used
1933 * per slot (1, 2, 3 or 4)
1934 * \param reserved_slots bitmask indicating which varying slots are already
1935 * allocated
1936 * \return number of slots (4-element vectors) allocated
1937 */
1938 unsigned
assign_locations(struct gl_shader_program * prog,uint8_t components[],uint64_t reserved_slots)1939 varying_matches::assign_locations(struct gl_shader_program *prog,
1940 uint8_t components[],
1941 uint64_t reserved_slots)
1942 {
1943 /* If packing has been disabled then we cannot safely sort the varyings by
1944 * class as it may mean we are using a version of OpenGL where
1945 * interpolation qualifiers are not guaranteed to match across
1946 * shaders; sorting in this case could result in mismatching shader
1947 * interfaces.
1948 * When packing is disabled the sort orders varyings used by transform
1949 * feedback first, but also depends on the *undefined behaviour* of qsort to
1950 * reverse the order of the varyings. See: xfb_comparator().
1951 *
1952 * If packing is only disabled for xfb varyings (mutually exclusive with
1953 * disable_varying_packing), we then group varyings depending on if they
1954 * are captured for transform feedback. The same *undefined behaviour* is
1955 * taken advantage of.
1956 */
1957 if (this->disable_varying_packing) {
1958 /* Only sort varyings that are only used by transform feedback. */
1959 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1960 &varying_matches::xfb_comparator);
1961 } else if (this->disable_xfb_packing) {
1962 /* Only sort varyings that are NOT used by transform feedback. */
1963 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1964 &varying_matches::not_xfb_comparator);
1965 } else {
1966 /* Sort varying matches into an order that makes them easy to pack. */
1967 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1968 &varying_matches::match_comparator);
1969 }
1970
1971 unsigned generic_location = 0;
1972 unsigned generic_patch_location = MAX_VARYING*4;
1973 bool previous_var_xfb = false;
1974 bool previous_var_xfb_only = false;
1975 unsigned previous_packing_class = ~0u;
1976
1977 /* For transform feedback separate mode, we know the number of attributes
1978 * is <= the number of buffers. So packing isn't critical. In fact,
1979 * packing vec3 attributes can cause trouble because splitting a vec3
1980 * effectively creates an additional transform feedback output. The
1981 * extra TFB output may exceed device driver limits.
1982 */
1983 const bool dont_pack_vec3 =
1984 (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
1985 prog->TransformFeedback.NumVarying > 0);
1986
1987 for (unsigned i = 0; i < this->num_matches; i++) {
1988 unsigned *location = &generic_location;
1989 const ir_variable *var;
1990 const glsl_type *type;
1991 bool is_vertex_input = false;
1992
1993 if (matches[i].consumer_var) {
1994 var = matches[i].consumer_var;
1995 type = get_varying_type(var, consumer_stage);
1996 if (consumer_stage == MESA_SHADER_VERTEX)
1997 is_vertex_input = true;
1998 } else {
1999 var = matches[i].producer_var;
2000 type = get_varying_type(var, producer_stage);
2001 }
2002
2003 if (var->data.patch)
2004 location = &generic_patch_location;
2005
2006 /* Advance to the next slot if this varying has a different packing
2007 * class than the previous one, and we're not already on a slot
2008 * boundary.
2009 *
2010 * Also advance if varying packing is disabled for transform feedback,
2011 * and previous or current varying is used for transform feedback.
2012 *
2013 * Also advance to the next slot if packing is disabled. This makes sure
2014 * we don't assign two varyings the same location, which is possible
2015 * because we still pack individual arrays, records and matrices even
2016 * when packing is disabled. Note we don't advance to the next slot if
2017 * we can pack varyings together that are only used for transform
2018 * feedback.
2019 */
2020 if (var->data.must_be_shader_input ||
2021 (this->disable_xfb_packing &&
2022 (previous_var_xfb || var->data.is_xfb)) ||
2023 (this->disable_varying_packing &&
2024 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
2025 (previous_packing_class != this->matches[i].packing_class) ||
2026 (this->matches[i].packing_order == PACKING_ORDER_VEC3 &&
2027 dont_pack_vec3)) {
2028 *location = ALIGN(*location, 4);
2029 }
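/* ALIGN(*location, 4) rounds the component location up to the next vec4
 * boundary; e.g. locations 5..7 all become 8, while a location of 8 is left
 * unchanged.
 */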
2030
2031 previous_var_xfb = var->data.is_xfb;
2032 previous_var_xfb_only = var->data.is_xfb_only;
2033 previous_packing_class = this->matches[i].packing_class;
2034
2035 /* The number of components taken up by this variable. For vertex shader
2036 * inputs, we use the number of slots * 4, as they have different
2037 * counting rules.
2038 */
2039 unsigned num_components = is_vertex_input ?
2040 type->count_attribute_slots(is_vertex_input) * 4 :
2041 this->matches[i].num_components;
2042
2043 /* The last slot for this variable, inclusive. */
2044 unsigned slot_end = *location + num_components - 1;
2045
2046 /* FIXME: We could be smarter in the code below and loop back over
2047 * trying to fill any locations that we skipped because we couldn't pack
2048 * the varying around varyings with explicit locations. For now just let
2049 * the user hit the linking error if we run out of room and suggest they
2050 * use explicit locations.
2051 */
2052 while (slot_end < MAX_VARYING * 4u) {
2053 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
2054 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
2055
2056 assert(slots > 0);
2057
2058 if ((reserved_slots & slot_mask) == 0) {
2059 break;
2060 }
2061
2062 *location = ALIGN(*location + 1, 4);
2063 slot_end = *location + num_components - 1;
2064 }
2065
2066 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
2067 linker_error(prog, "insufficient contiguous locations available for "
2068 "%s it is possible an array or struct could not be "
2069 "packed between varyings with explicit locations. Try "
2070 "using an explicit location for arrays and structs.",
2071 var->name);
2072 }
2073
2074 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
2075 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
2076 components[j] = 4;
2077 components[slot_end / 4u] = (slot_end & 3) + 1;
2078 }
2079
2080 this->matches[i].generic_location = *location;
2081
2082 *location = slot_end + 1;
2083 }
2084
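/* generic_location counts individual float components; convert to the
 * number of whole vec4 slots used, rounding up.
 */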
2085 return (generic_location + 3) / 4;
2086 }
2087
2088
2089 /**
2090 * Update the producer and consumer shaders to reflect the locations
2091 * assignments that were made by varying_matches::assign_locations().
2092 */
2093 void
store_locations() const2094 varying_matches::store_locations() const
2095 {
2096 /* Check if the location needs to be packed with lower_packed_varyings() or if
2097 * we can just use ARB_enhanced_layouts packing.
2098 */
2099 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
2100 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
2101
2102 for (unsigned i = 0; i < this->num_matches; i++) {
2103 ir_variable *producer_var = this->matches[i].producer_var;
2104 ir_variable *consumer_var = this->matches[i].consumer_var;
2105 unsigned generic_location = this->matches[i].generic_location;
2106 unsigned slot = generic_location / 4;
2107 unsigned offset = generic_location % 4;
2108
2109 if (producer_var) {
2110 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
2111 producer_var->data.location_frac = offset;
2112 }
2113
2114 if (consumer_var) {
2115 assert(consumer_var->data.location == -1);
2116 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
2117 consumer_var->data.location_frac = offset;
2118 }
2119
2120 /* Find locations suitable for native packing via
2121 * ARB_enhanced_layouts.
2122 */
2123 if (producer_var && consumer_var) {
2124 if (enhanced_layouts_enabled) {
2125 const glsl_type *type =
2126 get_varying_type(producer_var, producer_stage);
2127 if (type->is_array() || type->is_matrix() || type->is_struct() ||
2128 type->is_64bit()) {
2129 unsigned comp_slots = type->component_slots() + offset;
2130 unsigned slots = comp_slots / 4;
2131 if (comp_slots % 4)
2132 slots += 1;
2133
2134 for (unsigned j = 0; j < slots; j++) {
2135 pack_loc[slot + j] = true;
2136 }
2137 } else if (offset + type->vector_elements > 4) {
2138 pack_loc[slot] = true;
2139 pack_loc[slot + 1] = true;
2140 } else {
2141 loc_type[slot][offset] = type;
2142 }
2143 }
2144 }
2145 }
2146
2147 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
2148 * suitable.
2149 */
2150 if (enhanced_layouts_enabled) {
2151 for (unsigned i = 0; i < this->num_matches; i++) {
2152 ir_variable *producer_var = this->matches[i].producer_var;
2153 ir_variable *consumer_var = this->matches[i].consumer_var;
2154 unsigned generic_location = this->matches[i].generic_location;
2155 unsigned slot = generic_location / 4;
2156
2157 if (pack_loc[slot] || !producer_var || !consumer_var)
2158 continue;
2159
2160 const glsl_type *type =
2161 get_varying_type(producer_var, producer_stage);
2162 bool type_match = true;
2163 for (unsigned j = 0; j < 4; j++) {
2164 if (loc_type[slot][j]) {
2165 if (type->base_type != loc_type[slot][j]->base_type)
2166 type_match = false;
2167 }
2168 }
2169
2170 if (type_match) {
2171 producer_var->data.explicit_location = 1;
2172 consumer_var->data.explicit_location = 1;
2173 producer_var->data.explicit_component = 1;
2174 consumer_var->data.explicit_component = 1;
2175 }
2176 }
2177 }
2178 }
2179
2180
2181 /**
2182 * Compute the "packing class" of the given varying. This is an unsigned
2183 * integer with the property that two variables in the same packing class can
2184 * be safely packed into the same vec4.
2185 */
2186 unsigned
compute_packing_class(const ir_variable * var)2187 varying_matches::compute_packing_class(const ir_variable *var)
2188 {
2189 /* Without help from the back-end, there is no way to pack together
2190 * variables with different interpolation types, because
2191 * lower_packed_varyings must choose exactly one interpolation type for
2192 * each packed varying it creates.
2193 *
2194 * However, we can safely pack together floats, ints, and uints, because:
2195 *
2196 * - varyings of base type "int" and "uint" must use the "flat"
2197 * interpolation type, which can only occur in GLSL 1.30 and above.
2198 *
2199 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
2200 * can store flat floats as ints without losing any information (using
2201 * the ir_unop_bitcast_* opcodes).
2202 *
2203 * Therefore, the packing class depends only on the interpolation type.
2204 */
2205 const unsigned interp = var->is_interpolation_flat()
2206 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
2207
2208 assert(interp < (1 << 3));
2209
2210 const unsigned packing_class = (interp << 0) |
2211 (var->data.centroid << 3) |
2212 (var->data.sample << 4) |
2213 (var->data.patch << 5) |
2214 (var->data.must_be_shader_input << 6);
2215
2216 return packing_class;
2217 }
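/* The resulting class is therefore a small bitfield, roughly:
 *
 *   bits 2..0  interpolation mode (forced to INTERP_MODE_FLAT when
 *              is_interpolation_flat() is true)
 *   bit  3     centroid
 *   bit  4     sample
 *   bit  5     patch
 *   bit  6     must_be_shader_input
 *
 * Two varyings are packed into the same slot only when all of these bits
 * match.
 */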
2218
2219
2220 /**
2221 * Compute the "packing order" of the given varying. This is a sort key we
2222 * use to determine when to attempt to pack the given varying relative to
2223 * other varyings in the same packing class.
2224 */
2225 varying_matches::packing_order_enum
compute_packing_order(const ir_variable * var)2226 varying_matches::compute_packing_order(const ir_variable *var)
2227 {
2228 const glsl_type *element_type = var->type;
2229
2230 while (element_type->is_array()) {
2231 element_type = element_type->fields.array;
2232 }
2233
2234 switch (element_type->component_slots() % 4) {
2235 case 1: return PACKING_ORDER_SCALAR;
2236 case 2: return PACKING_ORDER_VEC2;
2237 case 3: return PACKING_ORDER_VEC3;
2238 case 0: return PACKING_ORDER_VEC4;
2239 default:
2240 assert(!"Unexpected value of vector_elements");
2241 return PACKING_ORDER_VEC4;
2242 }
2243 }
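/* A couple of illustrative cases: a float[3] array has element type float
 * (1 component slot), so it sorts as PACKING_ORDER_SCALAR; a vec3[2] sorts
 * as PACKING_ORDER_VEC3; and a mat2 (4 component slots) sorts as
 * PACKING_ORDER_VEC4.
 */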
2244
2245
2246 /**
2247 * Comparison function passed to qsort() to sort varyings by packing_class and
2248 * then by packing_order.
2249 */
2250 int
match_comparator(const void * x_generic,const void * y_generic)2251 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
2252 {
2253 const match *x = (const match *) x_generic;
2254 const match *y = (const match *) y_generic;
2255
2256 if (x->packing_class != y->packing_class)
2257 return x->packing_class - y->packing_class;
2258 return x->packing_order - y->packing_order;
2259 }
2260
2261
2262 /**
2263 * Comparison function passed to qsort() to sort varyings used only by
2264 * transform feedback when packing of other varyings is disabled.
2265 */
2266 int
xfb_comparator(const void * x_generic,const void * y_generic)2267 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
2268 {
2269 const match *x = (const match *) x_generic;
2270
2271 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
2272 return match_comparator(x_generic, y_generic);
2273
2274 /* FIXME: When the comparator returns 0 it means the elements being
2275 * compared are equivalent. However the qsort documentation says:
2276 *
2277 * "The order of equivalent elements is undefined."
2278 *
2279 * In practice the sort ends up reversing the order of the varyings, which
2280 * means locations are also assigned in this reversed order, and that happens
2281 * to be what we want. This is also what happens in
2282 * varying_matches::match_comparator().
2283 */
2284 return 0;
2285 }
2286
2287
2288 /**
2289 * Comparison function passed to qsort() to sort varyings NOT used by
2290 * transform feedback when packing of xfb varyings is disabled.
2291 */
2292 int
not_xfb_comparator(const void * x_generic,const void * y_generic)2293 varying_matches::not_xfb_comparator(const void *x_generic, const void *y_generic)
2294 {
2295 const match *x = (const match *) x_generic;
2296
2297 if (x->producer_var != NULL && !x->producer_var->data.is_xfb)
2298 return match_comparator(x_generic, y_generic);
2299
2300 /* FIXME: When the comparator returns 0 it means the elements being
2301 * compared are equivalent. However the qsort documentation says:
2302 *
2303 * "The order of equivalent elements is undefined."
2304 *
2305 * In practice the sort ends up reversing the order of the varyings, which
2306 * means locations are also assigned in this reversed order, and that happens
2307 * to be what we want. This is also what happens in
2308 * varying_matches::match_comparator().
2309 */
2310 return 0;
2311 }
2312
2313
2314 /**
2315 * Is the given variable a varying variable to be counted against the
2316 * limit in ctx->Const.MaxVarying?
2317 * This includes variables such as texcoords, colors and generic
2318 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
2319 */
2320 static bool
var_counts_against_varying_limit(gl_shader_stage stage,const ir_variable * var)2321 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
2322 {
2323 /* Only fragment shaders will take a varying variable as an input */
2324 if (stage == MESA_SHADER_FRAGMENT &&
2325 var->data.mode == ir_var_shader_in) {
2326 switch (var->data.location) {
2327 case VARYING_SLOT_POS:
2328 case VARYING_SLOT_FACE:
2329 case VARYING_SLOT_PNTC:
2330 return false;
2331 default:
2332 return true;
2333 }
2334 }
2335 return false;
2336 }
2337
2338
2339 /**
2340 * Visitor class that generates tfeedback_candidate structs describing all
2341 * possible targets of transform feedback.
2342 *
2343 * tfeedback_candidate structs are stored in the hash table
2344 * tfeedback_candidates, which is passed to the constructor. This hash table
2345 * maps varying names to instances of the tfeedback_candidate struct.
2346 */
2347 class tfeedback_candidate_generator : public program_resource_visitor
2348 {
2349 public:
tfeedback_candidate_generator(void * mem_ctx,hash_table * tfeedback_candidates,gl_shader_stage stage)2350 tfeedback_candidate_generator(void *mem_ctx,
2351 hash_table *tfeedback_candidates,
2352 gl_shader_stage stage)
2353 : mem_ctx(mem_ctx),
2354 tfeedback_candidates(tfeedback_candidates),
2355 stage(stage),
2356 toplevel_var(NULL),
2357 varying_floats(0)
2358 {
2359 }
2360
process(ir_variable * var)2361 void process(ir_variable *var)
2362 {
2363 /* All named varying interface blocks should be flattened by now */
2364 assert(!var->is_interface_instance());
2365 assert(var->data.mode == ir_var_shader_out);
2366
2367 this->toplevel_var = var;
2368 this->varying_floats = 0;
2369 const glsl_type *t =
2370 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
2371 if (!var->data.patch && stage == MESA_SHADER_TESS_CTRL) {
2372 assert(t->is_array());
2373 t = t->fields.array;
2374 }
2375 program_resource_visitor::process(var, t, false);
2376 }
2377
2378 private:
visit_field(const glsl_type * type,const char * name,bool,const glsl_type *,const enum glsl_interface_packing,bool)2379 virtual void visit_field(const glsl_type *type, const char *name,
2380 bool /* row_major */,
2381 const glsl_type * /* record_type */,
2382 const enum glsl_interface_packing,
2383 bool /* last_field */)
2384 {
2385 assert(!type->without_array()->is_struct());
2386 assert(!type->without_array()->is_interface());
2387
2388 tfeedback_candidate *candidate
2389 = rzalloc(this->mem_ctx, tfeedback_candidate);
2390 candidate->toplevel_var = this->toplevel_var;
2391 candidate->type = type;
2392 candidate->offset = this->varying_floats;
2393 _mesa_hash_table_insert(this->tfeedback_candidates,
2394 ralloc_strdup(this->mem_ctx, name),
2395 candidate);
2396 this->varying_floats += type->component_slots();
2397 }
2398
2399 /**
2400 * Memory context used to allocate hash table keys and values.
2401 */
2402 void * const mem_ctx;
2403
2404 /**
2405 * Hash table in which tfeedback_candidate objects should be stored.
2406 */
2407 hash_table * const tfeedback_candidates;
2408
2409 gl_shader_stage stage;
2410
2411 /**
2412 * Pointer to the toplevel variable that is being traversed.
2413 */
2414 ir_variable *toplevel_var;
2415
2416 /**
2417 * Total number of varying floats that have been visited so far. This is
2418 * used to determine the offset to each varying within the toplevel
2419 * variable.
2420 */
2421 unsigned varying_floats;
2422 };
2423
2424
2425 namespace linker {
2426
2427 void
populate_consumer_input_sets(void * mem_ctx,exec_list * ir,hash_table * consumer_inputs,hash_table * consumer_interface_inputs,ir_variable * consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])2428 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
2429 hash_table *consumer_inputs,
2430 hash_table *consumer_interface_inputs,
2431 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2432 {
2433 memset(consumer_inputs_with_locations,
2434 0,
2435 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
2436
2437 foreach_in_list(ir_instruction, node, ir) {
2438 ir_variable *const input_var = node->as_variable();
2439
2440 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
2441 /* All interface blocks should have been lowered by this point */
2442 assert(!input_var->type->is_interface());
2443
2444 if (input_var->data.explicit_location) {
2445 /* assign_varying_locations only cares about finding the
2446 * ir_variable at the start of a contiguous location block.
2447 *
2448 * - For !producer, consumer_inputs_with_locations isn't used.
2449 *
2450 * - For !consumer, consumer_inputs_with_locations is empty.
2451 *
2452 * For consumer && producer, if you were trying to set some
2453 * ir_variable to the middle of a location block on the other side
2454 * of producer/consumer, cross_validate_outputs_to_inputs() should
2455 * be link-erroring due to either type mismatch or location
2456 * overlaps. If the variables do match up, then they've got a
2457 * matching data.location and you only looked at
2458 * consumer_inputs_with_locations[var->data.location], not any
2459 * following entries for the array/structure.
2460 */
2461 consumer_inputs_with_locations[input_var->data.location] =
2462 input_var;
2463 } else if (input_var->get_interface_type() != NULL) {
2464 char *const iface_field_name =
2465 ralloc_asprintf(mem_ctx, "%s.%s",
2466 input_var->get_interface_type()->without_array()->name,
2467 input_var->name);
2468 _mesa_hash_table_insert(consumer_interface_inputs,
2469 iface_field_name, input_var);
2470 } else {
2471 _mesa_hash_table_insert(consumer_inputs,
2472 ralloc_strdup(mem_ctx, input_var->name),
2473 input_var);
2474 }
2475 }
2476 }
2477 }
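/* For example (hypothetical declarations): a consumer input coming from an
 * unnamed interface block "in Blk { vec4 v; };" is keyed in
 * consumer_interface_inputs as "Blk.v", while an ordinary input
 * "in vec4 c;" is keyed in consumer_inputs simply as "c".
 */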
2478
2479 /**
2480 * Find a variable from the consumer that "matches" the specified variable
2481 *
2482 * This function only finds inputs with names that match. There is no
2483 * validation (here) that the types, etc. are compatible.
2484 */
2485 ir_variable *
get_matching_input(void * mem_ctx,const ir_variable * output_var,hash_table * consumer_inputs,hash_table * consumer_interface_inputs,ir_variable * consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])2486 get_matching_input(void *mem_ctx,
2487 const ir_variable *output_var,
2488 hash_table *consumer_inputs,
2489 hash_table *consumer_interface_inputs,
2490 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2491 {
2492 ir_variable *input_var;
2493
2494 if (output_var->data.explicit_location) {
2495 input_var = consumer_inputs_with_locations[output_var->data.location];
2496 } else if (output_var->get_interface_type() != NULL) {
2497 char *const iface_field_name =
2498 ralloc_asprintf(mem_ctx, "%s.%s",
2499 output_var->get_interface_type()->without_array()->name,
2500 output_var->name);
2501 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
2502 input_var = entry ? (ir_variable *) entry->data : NULL;
2503 } else {
2504 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
2505 input_var = entry ? (ir_variable *) entry->data : NULL;
2506 }
2507
2508 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2509 ? NULL : input_var;
2510 }
2511
2512 }
2513
2514 static int
io_variable_cmp(const void * _a,const void * _b)2515 io_variable_cmp(const void *_a, const void *_b)
2516 {
2517 const ir_variable *const a = *(const ir_variable **) _a;
2518 const ir_variable *const b = *(const ir_variable **) _b;
2519
2520 if (a->data.explicit_location && b->data.explicit_location)
2521 return b->data.location - a->data.location;
2522
2523 if (a->data.explicit_location && !b->data.explicit_location)
2524 return 1;
2525
2526 if (!a->data.explicit_location && b->data.explicit_location)
2527 return -1;
2528
2529 return -strcmp(a->name, b->name);
2530 }
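/* The comparator above sorts into reverse canonical order (explicit-location
 * variables last, by descending location, preceded by the remaining variables
 * in reverse name order); combined with the push_head() loop in
 * canonicalize_shader_io() below, the net effect is that explicit-location
 * variables end up first in the IR, ordered by increasing location, followed
 * by the remaining variables in name order.
 */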
2531
2532 /**
2533 * Sort the shader IO variables into canonical order
2534 */
2535 static void
canonicalize_shader_io(exec_list * ir,enum ir_variable_mode io_mode)2536 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2537 {
2538 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2539 unsigned num_variables = 0;
2540
2541 foreach_in_list(ir_instruction, node, ir) {
2542 ir_variable *const var = node->as_variable();
2543
2544 if (var == NULL || var->data.mode != io_mode)
2545 continue;
2546
2547 /* If we have already encountered more I/O variables than could
2548 * successfully link, bail.
2549 */
2550 if (num_variables == ARRAY_SIZE(var_table))
2551 return;
2552
2553 var_table[num_variables++] = var;
2554 }
2555
2556 if (num_variables == 0)
2557 return;
2558
2559 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2560 * we're going to push the variables on to the IR list as a stack, so we
2561 * want the last variable (in canonical order) to be first in the list.
2562 */
2563 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2564
2565 /* Remove the variable from its current location in the IR, and put it at
2566 * the front.
2567 */
2568 for (unsigned i = 0; i < num_variables; i++) {
2569 var_table[i]->remove();
2570 ir->push_head(var_table[i]);
2571 }
2572 }
2573
2574 /**
2575 * Generate a bitfield map of the explicit locations for shader varyings.
2576 *
2577 * Note: For Tessellation shaders we are sitting right on the limits of the
2578 * 64 bit map. Per-vertex and per-patch both have separate location domains
2579 * with a max of MAX_VARYING.
2580 */
2581 static uint64_t
reserved_varying_slot(struct gl_linked_shader * stage,ir_variable_mode io_mode)2582 reserved_varying_slot(struct gl_linked_shader *stage,
2583 ir_variable_mode io_mode)
2584 {
2585 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2586 /* Avoid an overflow of the returned value */
2587 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2588
2589 uint64_t slots = 0;
2590 int var_slot;
2591
2592 if (!stage)
2593 return slots;
2594
2595 foreach_in_list(ir_instruction, node, stage->ir) {
2596 ir_variable *const var = node->as_variable();
2597
2598 if (var == NULL || var->data.mode != io_mode ||
2599 !var->data.explicit_location ||
2600 var->data.location < VARYING_SLOT_VAR0)
2601 continue;
2602
2603 var_slot = var->data.location - VARYING_SLOT_VAR0;
2604
2605 unsigned num_elements = get_varying_type(var, stage->Stage)
2606 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2607 stage->Stage == MESA_SHADER_VERTEX);
2608 for (unsigned i = 0; i < num_elements; i++) {
2609 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2610 slots |= UINT64_C(1) << var_slot;
2611 var_slot += 1;
2612 }
2613 }
2614
2615 return slots;
2616 }
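/* For example (hypothetical shader): an output declared as
 * "layout(location = 1) out vec4 a[2];" occupies generic slots 1 and 2
 * (VARYING_SLOT_VAR1 and VARYING_SLOT_VAR2), so this function would return
 * a mask of 0x6 for it.
 */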
2617
2618
2619 /**
2620 * Assign locations for all variables that are produced in one pipeline stage
2621 * (the "producer") and consumed in the next stage (the "consumer").
2622 *
2623 * Variables produced by the producer may also be consumed by transform
2624 * feedback.
2625 *
2626 * \param num_tfeedback_decls is the number of declarations indicating
2627 * variables that may be consumed by transform feedback.
2628 *
2629 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2630 * representing the result of parsing the strings passed to
2631 * glTransformFeedbackVaryings(). assign_location() will be called for
2632 * each of these objects that matches one of the outputs of the
2633 * producer.
2634 *
2635 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2636 * be NULL. In this case, varying locations are assigned solely based on the
2637 * requirements of transform feedback.
2638 */
2639 static bool
assign_varying_locations(struct gl_context * ctx,void * mem_ctx,struct gl_shader_program * prog,gl_linked_shader * producer,gl_linked_shader * consumer,unsigned num_tfeedback_decls,tfeedback_decl * tfeedback_decls,const uint64_t reserved_slots)2640 assign_varying_locations(struct gl_context *ctx,
2641 void *mem_ctx,
2642 struct gl_shader_program *prog,
2643 gl_linked_shader *producer,
2644 gl_linked_shader *consumer,
2645 unsigned num_tfeedback_decls,
2646 tfeedback_decl *tfeedback_decls,
2647 const uint64_t reserved_slots)
2648 {
2649 /* Tessellation shaders treat inputs and outputs as shared memory and can
2650 * access inputs and outputs of other invocations.
2651 * Therefore, they can't be lowered to temps easily (and definitely not
2652 * efficiently).
2653 */
2654 bool unpackable_tess =
2655 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2656 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2657 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2658
2659 /* Transform feedback code assumes varying arrays are packed, so if the
2660 * driver has disabled varying packing, make sure to at least enable
2661 * packing required by transform feedback. See below for exception.
2662 */
2663 bool xfb_enabled =
2664 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2665
2666 /* Some drivers actually require packing to be explicitly disabled
2667 * for varyings used by transform feedback.
2668 */
2669 bool disable_xfb_packing =
2670 ctx->Const.DisableTransformFeedbackPacking;
2671
2672 /* Disable packing on outward facing interfaces for SSO because in ES we
2673 * need to retain the unpacked varying information for draw time
2674 * validation.
2675 *
2676 * Packing is still enabled on individual arrays, structs, and matrices as
2677 * these are required by the transform feedback code and it is still safe
2678 * to do so. We also enable packing when a varying is only used for
2679 * transform feedback and it's not an SSO.
2680 */
2681 bool disable_varying_packing =
2682 ctx->Const.DisableVaryingPacking || unpackable_tess;
2683 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2684 disable_varying_packing = true;
2685
2686 varying_matches matches(disable_varying_packing,
2687 disable_xfb_packing,
2688 xfb_enabled,
2689 ctx->Extensions.ARB_enhanced_layouts,
2690 producer ? producer->Stage : MESA_SHADER_NONE,
2691 consumer ? consumer->Stage : MESA_SHADER_NONE);
2692 void *hash_table_ctx = ralloc_context(NULL);
2693 hash_table *tfeedback_candidates =
2694 _mesa_hash_table_create(hash_table_ctx, _mesa_hash_string,
2695 _mesa_key_string_equal);
2696 hash_table *consumer_inputs =
2697 _mesa_hash_table_create(hash_table_ctx, _mesa_hash_string,
2698 _mesa_key_string_equal);
2699 hash_table *consumer_interface_inputs =
2700 _mesa_hash_table_create(hash_table_ctx, _mesa_hash_string,
2701 _mesa_key_string_equal);
2702 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2703 NULL,
2704 };
2705
2706 unsigned consumer_vertices = 0;
2707 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2708 consumer_vertices = prog->Geom.VerticesIn;
2709
2710 /* Operate in a total of four passes.
2711 *
2712 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2713 * that inputs / outputs of separable shaders will be assigned
2714 * predictable locations regardless of the order in which declarations
2715 * appeared in the shader source.
2716 *
2717 * 2. Assign locations for any matching inputs and outputs.
2718 *
2719 * 3. Mark output variables in the producer that do not have locations as
2720 * not being outputs. This lets the optimizer eliminate them.
2721 *
2722 * 4. Mark input variables in the consumer that do not have locations as
2723 * not being inputs. This lets the optimizer eliminate them.
2724 */
2725 if (consumer)
2726 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2727
2728 if (producer)
2729 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2730
2731 if (consumer)
2732 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2733 consumer_inputs,
2734 consumer_interface_inputs,
2735 consumer_inputs_with_locations);
2736
2737 if (producer) {
2738 foreach_in_list(ir_instruction, node, producer->ir) {
2739 ir_variable *const output_var = node->as_variable();
2740
2741 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2742 continue;
2743
2744 /* Only geometry shaders can use non-zero streams */
2745 assert(output_var->data.stream == 0 ||
2746 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2747 producer->Stage == MESA_SHADER_GEOMETRY));
2748
2749 if (num_tfeedback_decls > 0) {
2750 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates, producer->Stage);
2751 /* From OpenGL 4.6 (Core Profile) spec, section 11.1.2.1
2752 * ("Vertex Shader Variables / Output Variables")
2753 *
2754 * "Each program object can specify a set of output variables from
2755 * one shader to be recorded in transform feedback mode (see
2756 * section 13.3). The variables that can be recorded are those
2757 * emitted by the first active shader, in order, from the
2758 * following list:
2759 *
2760 * * geometry shader
2761 * * tessellation evaluation shader
2762 * * tessellation control shader
2763 * * vertex shader"
2764 *
2765 * But on OpenGL ES 3.2, section 11.1.2.1 ("Vertex Shader
2766 * Variables / Output Variables") tessellation control shader is
2767 * not included in the stages list.
2768 */
2769 if (!prog->IsES || producer->Stage != MESA_SHADER_TESS_CTRL) {
2770 g.process(output_var);
2771 }
2772 }
2773
2774 ir_variable *const input_var =
2775 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2776 consumer_interface_inputs,
2777 consumer_inputs_with_locations);
2778
2779 /* If a matching input variable was found, add this output (and the
2780 * input) to the set. If this is a separable program and there is no
2781 * consumer stage, add the output.
2782 *
2783 * Always add TCS outputs. They are shared by all invocations
2784 * within a patch and can be used as shared memory.
2785 */
2786 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2787 producer->Stage == MESA_SHADER_TESS_CTRL) {
2788 matches.record(output_var, input_var);
2789 }
2790
2791 /* Only stream 0 outputs can be consumed in the next stage */
2792 if (input_var && output_var->data.stream != 0) {
2793 linker_error(prog, "output %s is assigned to stream=%d but "
2794 "is linked to an input, which requires stream=0",
2795 output_var->name, output_var->data.stream);
2796 ralloc_free(hash_table_ctx);
2797 return false;
2798 }
2799 }
2800 } else {
2801 /* If there's no producer stage, then this must be a separable program.
2802 * For example, we may have a program that has just a fragment shader.
2803 * Later this program will be used with some arbitrary vertex (or
2804 * geometry) shader program. This means that locations must be assigned
2805 * for all the inputs.
2806 */
2807 foreach_in_list(ir_instruction, node, consumer->ir) {
2808 ir_variable *const input_var = node->as_variable();
2809 if (input_var && input_var->data.mode == ir_var_shader_in) {
2810 matches.record(NULL, input_var);
2811 }
2812 }
2813 }
2814
2815 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2816 if (!tfeedback_decls[i].is_varying())
2817 continue;
2818
2819 const tfeedback_candidate *matched_candidate
2820 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2821
2822 if (matched_candidate == NULL) {
2823 ralloc_free(hash_table_ctx);
2824 return false;
2825 }
2826
2827 /* There are two situations where a new output varying is needed:
2828 *
2829 * - If varying packing is disabled for xfb and the current declaration
2830 * is not aligned within the top level varying (e.g. vec3_arr[1]).
2831 *
2832 * - If a builtin variable needs to be copied to a new variable
2833 * before its content is modified by another lowering pass (e.g.
2834 * \c gl_Position is transformed by \c nir_lower_viewport_transform).
2835 */
2836 const unsigned dmul =
2837 matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
2838 const bool lowered =
2839 (disable_xfb_packing &&
2840 !tfeedback_decls[i].is_aligned(dmul, matched_candidate->offset)) ||
2841 (matched_candidate->toplevel_var->data.explicit_location &&
2842 matched_candidate->toplevel_var->data.location < VARYING_SLOT_VAR0 &&
2843 (!consumer || consumer->Stage == MESA_SHADER_FRAGMENT) &&
2844 (ctx->Const.ShaderCompilerOptions[producer->Stage].LowerBuiltinVariablesXfb &
2845 BITFIELD_BIT(matched_candidate->toplevel_var->data.location)));
2846
2847 if (lowered) {
2848 ir_variable *new_var;
2849 tfeedback_candidate *new_candidate = NULL;
2850
2851 new_var = lower_xfb_varying(mem_ctx, producer, tfeedback_decls[i].name());
2852 if (new_var == NULL) {
2853 ralloc_free(hash_table_ctx);
2854 return false;
2855 }
2856
2857 /* Create new candidate and replace matched_candidate */
2858 new_candidate = rzalloc(mem_ctx, tfeedback_candidate);
2859 new_candidate->toplevel_var = new_var;
2860 new_candidate->toplevel_var->data.is_unmatched_generic_inout = 1;
2861 new_candidate->type = new_var->type;
2862 new_candidate->offset = 0;
2863 _mesa_hash_table_insert(tfeedback_candidates,
2864 ralloc_strdup(mem_ctx, new_var->name),
2865 new_candidate);
2866
2867 tfeedback_decls[i].set_lowered_candidate(new_candidate);
2868 matched_candidate = new_candidate;
2869 }
2870
2871 /* Mark as xfb varying */
2872 matched_candidate->toplevel_var->data.is_xfb = 1;
2873
2874 /* Mark xfb varyings as always active */
2875 matched_candidate->toplevel_var->data.always_active_io = 1;
2876
2877 /* Mark any corresponding inputs as always active also. We must do this
2878 * because we have a NIR pass that lowers vectors to scalars and another
2879 * that removes unused varyings.
2880 * We don't split varyings marked as always active because there is no
2881 * point in doing so. This means we need to mark both sides of the
2882 * interface as always active; otherwise we will have a mismatch and
2883 * start removing things we shouldn't.
2884 */
2885 ir_variable *const input_var =
2886 linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
2887 consumer_inputs,
2888 consumer_interface_inputs,
2889 consumer_inputs_with_locations);
2890 if (input_var) {
2891 input_var->data.is_xfb = 1;
2892 input_var->data.always_active_io = 1;
2893 }
2894
2895 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2896 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2897 matches.record(matched_candidate->toplevel_var, NULL);
2898 }
2899 }
2900
2901 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2902 const unsigned slots_used = matches.assign_locations(
2903 prog, components, reserved_slots);
2904 matches.store_locations();
2905
2906 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2907 if (tfeedback_decls[i].is_varying()) {
2908 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2909 ralloc_free(hash_table_ctx);
2910 return false;
2911 }
2912 }
2913 }
2914 ralloc_free(hash_table_ctx);
2915
2916 if (consumer && producer) {
2917 foreach_in_list(ir_instruction, node, consumer->ir) {
2918 ir_variable *const var = node->as_variable();
2919
2920 if (var && var->data.mode == ir_var_shader_in &&
2921 var->data.is_unmatched_generic_inout) {
2922 if (!prog->IsES && prog->data->Version <= 120) {
2923 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2924 *
2925 * Only those varying variables used (i.e. read) in
2926 * the fragment shader executable must be written to
2927 * by the vertex shader executable; declaring
2928 * superfluous varying variables in a vertex shader is
2929 * permissible.
2930 *
2931 * We interpret this text as meaning that the VS must
2932 * write the variable for the FS to read it. See
2933 * "glsl1-varying read but not written" in piglit.
2934 */
2935 linker_error(prog, "%s shader varying %s not written "
2936 "by %s shader\n.",
2937 _mesa_shader_stage_to_string(consumer->Stage),
2938 var->name,
2939 _mesa_shader_stage_to_string(producer->Stage));
2940 } else {
2941 linker_warning(prog, "%s shader varying %s not written "
2942 "by %s shader\n.",
2943 _mesa_shader_stage_to_string(consumer->Stage),
2944 var->name,
2945 _mesa_shader_stage_to_string(producer->Stage));
2946 }
2947 }
2948 }
2949
2950 /* Now that validation is done it's safe to remove unused varyings. As
2951 * we have both a producer and consumer it's safe to remove unused
2952 * varyings even if the program is an SSO because the stages are being
2953 * linked together, i.e. we have a multi-stage SSO.
2954 */
2955 remove_unused_shader_inputs_and_outputs(false, producer,
2956 ir_var_shader_out);
2957 remove_unused_shader_inputs_and_outputs(false, consumer,
2958 ir_var_shader_in);
2959 }
2960
2961 if (producer) {
2962 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2963 0, producer, disable_varying_packing,
2964 disable_xfb_packing, xfb_enabled);
2965 }
2966
2967 if (consumer) {
2968 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2969 consumer_vertices, consumer, disable_varying_packing,
2970 disable_xfb_packing, xfb_enabled);
2971 }
2972
2973 return true;
2974 }
2975
2976 static bool
check_against_output_limit(struct gl_context * ctx,struct gl_shader_program * prog,gl_linked_shader * producer,unsigned num_explicit_locations)2977 check_against_output_limit(struct gl_context *ctx,
2978 struct gl_shader_program *prog,
2979 gl_linked_shader *producer,
2980 unsigned num_explicit_locations)
2981 {
2982 unsigned output_vectors = num_explicit_locations;
2983
2984 foreach_in_list(ir_instruction, node, producer->ir) {
2985 ir_variable *const var = node->as_variable();
2986
2987 if (var && !var->data.explicit_location &&
2988 var->data.mode == ir_var_shader_out &&
2989 var_counts_against_varying_limit(producer->Stage, var)) {
2990 /* outputs for fragment shader can't be doubles */
2991 output_vectors += var->type->count_attribute_slots(false);
2992 }
2993 }
2994
2995 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2996 unsigned max_output_components =
2997 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2998
2999 const unsigned output_components = output_vectors * 4;
3000 if (output_components > max_output_components) {
3001 if (ctx->API == API_OPENGLES2 || prog->IsES)
3002 linker_error(prog, "%s shader uses too many output vectors "
3003 "(%u > %u)\n",
3004 _mesa_shader_stage_to_string(producer->Stage),
3005 output_vectors,
3006 max_output_components / 4);
3007 else
3008 linker_error(prog, "%s shader uses too many output components "
3009 "(%u > %u)\n",
3010 _mesa_shader_stage_to_string(producer->Stage),
3011 output_components,
3012 max_output_components);
3013
3014 return false;
3015 }
3016
3017 return true;
3018 }
3019
3020 static bool
check_against_input_limit(struct gl_context * ctx,struct gl_shader_program * prog,gl_linked_shader * consumer,unsigned num_explicit_locations)3021 check_against_input_limit(struct gl_context *ctx,
3022 struct gl_shader_program *prog,
3023 gl_linked_shader *consumer,
3024 unsigned num_explicit_locations)
3025 {
3026 unsigned input_vectors = num_explicit_locations;
3027
3028 foreach_in_list(ir_instruction, node, consumer->ir) {
3029 ir_variable *const var = node->as_variable();
3030
3031 if (var && !var->data.explicit_location &&
3032 var->data.mode == ir_var_shader_in &&
3033 var_counts_against_varying_limit(consumer->Stage, var)) {
3034 /* vertex inputs don't count against the varying limit */
3035 input_vectors += var->type->count_attribute_slots(false);
3036 }
3037 }
3038
3039 assert(consumer->Stage != MESA_SHADER_VERTEX);
3040 unsigned max_input_components =
3041 ctx->Const.Program[consumer->Stage].MaxInputComponents;
3042
3043 const unsigned input_components = input_vectors * 4;
3044 if (input_components > max_input_components) {
3045 if (ctx->API == API_OPENGLES2 || prog->IsES)
3046 linker_error(prog, "%s shader uses too many input vectors "
3047 "(%u > %u)\n",
3048 _mesa_shader_stage_to_string(consumer->Stage),
3049 input_vectors,
3050 max_input_components / 4);
3051 else
3052 linker_error(prog, "%s shader uses too many input components "
3053 "(%u > %u)\n",
3054 _mesa_shader_stage_to_string(consumer->Stage),
3055 input_components,
3056 max_input_components);
3057
3058 return false;
3059 }
3060
3061 return true;
3062 }
3063
3064 bool
link_varyings(struct gl_shader_program * prog,unsigned first,unsigned last,struct gl_context * ctx,void * mem_ctx)3065 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
3066 struct gl_context *ctx, void *mem_ctx)
3067 {
3068 bool has_xfb_qualifiers = false;
3069 unsigned num_tfeedback_decls = 0;
3070 char **varying_names = NULL;
3071 tfeedback_decl *tfeedback_decls = NULL;
3072
3073 /* From the ARB_enhanced_layouts spec:
3074 *
3075 * "If the shader used to record output variables for transform feedback
3076 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
3077 * qualifiers, the values specified by TransformFeedbackVaryings are
3078 * ignored, and the set of variables captured for transform feedback is
3079 * instead derived from the specified layout qualifiers."
3080 */
3081 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
3082 /* Find last stage before fragment shader */
3083 if (prog->_LinkedShaders[i]) {
3084 has_xfb_qualifiers =
3085 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
3086 prog, &num_tfeedback_decls,
3087 &varying_names);
3088 break;
3089 }
3090 }
3091
3092 if (!has_xfb_qualifiers) {
3093 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
3094 varying_names = prog->TransformFeedback.VaryingNames;
3095 }
3096
3097 if (num_tfeedback_decls != 0) {
3098 /* From GL_EXT_transform_feedback:
3099 * A program will fail to link if:
3100 *
3101 * * the <count> specified by TransformFeedbackVaryingsEXT is
3102 * non-zero, but the program object has no vertex or geometry
3103 * shader;
3104 */
3105 if (first >= MESA_SHADER_FRAGMENT) {
3106 linker_error(prog, "Transform feedback varyings specified, but "
3107 "no vertex, tessellation, or geometry shader is "
3108 "present.\n");
3109 return false;
3110 }
3111
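      /* Parse the varying names into tfeedback_decl records that can later
       * be matched against the producer's output variables.
       */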
3112 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
3113 num_tfeedback_decls);
3114 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
3115 varying_names, tfeedback_decls))
3116 return false;
3117 }
3118
3119 /* If there is no fragment shader, we still need to set up transform feedback.
3120 *
3121 * For SSO we also need to assign output locations. We assign them here
3122 * because it must be done for both single-stage and multi-stage
3123 * programs.
3124 */
3125 if (last < MESA_SHADER_FRAGMENT &&
3126 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
3127 const uint64_t reserved_out_slots =
3128 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
3129 if (!assign_varying_locations(ctx, mem_ctx, prog,
3130 prog->_LinkedShaders[last], NULL,
3131 num_tfeedback_decls, tfeedback_decls,
3132 reserved_out_slots))
3133 return false;
3134 }
3135
3136 if (last <= MESA_SHADER_FRAGMENT) {
3137 /* Remove unused varyings from the first/last stage unless SSO */
3138 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
3139 prog->_LinkedShaders[first],
3140 ir_var_shader_in);
3141 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
3142 prog->_LinkedShaders[last],
3143 ir_var_shader_out);
3144
3145 /* If the program is made up of only a single stage */
3146 if (first == last) {
3147 gl_linked_shader *const sh = prog->_LinkedShaders[last];
3148
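      /* Eliminate unused built-in varyings on both sides of the single
       * stage: first treating it as the consumer (its inputs), then as the
       * producer (its outputs), while preserving anything captured by
       * transform feedback.
       */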
3149 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
3150 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
3151 tfeedback_decls);
3152
3153 if (prog->SeparateShader) {
3154 const uint64_t reserved_slots =
3155 reserved_varying_slot(sh, ir_var_shader_in);
3156
3157 /* Assign input locations for SSO, output locations are already
3158 * assigned.
3159 */
3160 if (!assign_varying_locations(ctx, mem_ctx, prog,
3161 NULL /* producer */,
3162 sh /* consumer */,
3163 0 /* num_tfeedback_decls */,
3164 NULL /* tfeedback_decls */,
3165 reserved_slots))
3166 return false;
3167 }
3168 } else {
3169 /* Linking the stages in the opposite order (from fragment to vertex)
3170 * ensures that inter-shader outputs written to in an earlier stage
3171 * are eliminated if they are (transitively) not used in a later
3172 * stage.
3173 */
3174 int next = last;
3175 for (int i = next - 1; i >= 0; i--) {
3176 if (prog->_LinkedShaders[i] == NULL && i != 0)
3177 continue;
3178
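         /* Note that sh_i may be NULL here when i == 0 (e.g. a separable
          * program with no vertex shader); assign_varying_locations is then
          * called with a NULL producer and only assigns the consumer's
          * input locations.
          */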
3179 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
3180 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
3181
3182 const uint64_t reserved_out_slots =
3183 reserved_varying_slot(sh_i, ir_var_shader_out);
3184 const uint64_t reserved_in_slots =
3185 reserved_varying_slot(sh_next, ir_var_shader_in);
3186
3187 do_dead_builtin_varyings(ctx, sh_i, sh_next,
3188 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
3189 tfeedback_decls);
3190
3191 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
3192 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
3193 tfeedback_decls,
3194 reserved_out_slots | reserved_in_slots))
3195 return false;
3196
3197 /* This must be done after all dead varyings are eliminated. */
3198 if (sh_i != NULL) {
3199 unsigned slots_used = util_bitcount64(reserved_out_slots);
3200 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
3201 return false;
3202 }
3203 }
3204
3205 unsigned slots_used = util_bitcount64(reserved_in_slots);
3206 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
3207 return false;
3208
3209 next = i;
3210 }
3211 }
3212 }
3213
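   /* Store the computed transform feedback info for the captured varyings
    * on the program object.
    */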
3214 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
3215 has_xfb_qualifiers, mem_ctx))
3216 return false;
3217
3218 return true;
3219 }
3220