/*
 * Copyright © 2015 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "st_nir.h"

#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_context.h"

#include "program/program.h"
#include "program/prog_statevars.h"
#include "program/prog_parameter.h"
#include "program/ir_to_mesa.h"
#include "main/context.h"
#include "main/mtypes.h"
#include "main/errors.h"
#include "main/glspirv.h"
#include "main/shaderapi.h"
#include "main/uniforms.h"

#include "main/shaderobj.h"
#include "st_context.h"
#include "st_program.h"
#include "st_shader_cache.h"

#include "compiler/nir/nir.h"
#include "compiler/glsl_types.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/glsl/gl_nir.h"
#include "compiler/glsl/gl_nir_linker.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/linker_util.h"
#include "compiler/glsl/string_to_uint_map.h"

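/* Measure a type in attribute slots (vec4s); used below when advancing the
 * sampler and image unit counters in st_nir_assign_uniform_locations().
 */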
static int
type_size(const struct glsl_type *type)
{
   return type->count_attribute_slots(false);
}

/* Depending on PIPE_CAP_TGSI_TEXCOORD (st->needs_texcoord_semantic) we
 * may need to fix up varying slots so the glsl->nir path is aligned
 * with the anything->tgsi->nir path.
 */
static void
st_nir_fixup_varying_slots(struct st_context *st, nir_shader *shader,
                           nir_variable_mode mode)
{
   if (st->needs_texcoord_semantic)
      return;

   /* This is called from finalize, but we don't want to do this adjustment twice. */
   assert(!st->allow_st_finalize_nir_twice);

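   /* TEX0..TEX7 get remapped onto VAR0..VAR7 and PNTC onto VAR8 below, so
    * generic varyings are shifted up by 9 slots to stay clear of those
    * remapped locations.
    */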
   nir_foreach_variable_with_modes(var, shader, mode) {
      if (var->data.location >= VARYING_SLOT_VAR0 && var->data.location < VARYING_SLOT_PATCH0) {
         var->data.location += 9;
      } else if (var->data.location == VARYING_SLOT_PNTC) {
         var->data.location = VARYING_SLOT_VAR8;
      } else if ((var->data.location >= VARYING_SLOT_TEX0) &&
               (var->data.location <= VARYING_SLOT_TEX7)) {
         var->data.location += VARYING_SLOT_VAR0 - VARYING_SLOT_TEX0;
      }
   }
}

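/* Re-gather shader info from the NIR and mirror it into the gl_program,
 * keeping the name/label strings that belong to the gl_program.
 */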
static void
st_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}

/* Input location assignment for VS inputs must be handled specially, so
 * that it is aligned with st's vbo state.
 * (This isn't the case with, for example, FS inputs, which only need to
 * agree on varying-slot with the VS outputs.)
 */
void
st_nir_assign_vs_in_locations(struct nir_shader *nir)
{
   if (nir->info.stage != MESA_SHADER_VERTEX || nir->info.io_lowered)
      return;

   nir->num_inputs = util_bitcount64(nir->info.inputs_read);

   bool removed_inputs = false;

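   /* Inputs are compacted by counting the used slots below each variable's
    * own location. For example, with inputs_read = 0b1011 (locations 0, 1
    * and 3 in use), the input at location 3 gets driver_location
    * util_bitcount64(0b1011 & 0b0111) = 2, so used inputs pack densely.
    */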
   nir_foreach_shader_in_variable_safe(var, nir) {
      /* NIR already assigns dual-slot inputs to two locations so all we have
       * to do is compact everything down.
       */
      if (nir->info.inputs_read & BITFIELD64_BIT(var->data.location)) {
         var->data.driver_location =
            util_bitcount64(nir->info.inputs_read &
                              BITFIELD64_MASK(var->data.location));
      } else {
         /* Convert unused input variables to shader_temp (with no
          * initialization), to avoid confusing drivers looking through the
          * inputs array and expecting to find inputs with a driver_location
          * set.
          */
         var->data.mode = nir_var_shader_temp;
         removed_inputs = true;
      }
   }

   /* Re-lower global vars, to deal with any dead VS inputs. */
   if (removed_inputs)
      NIR_PASS_V(nir, nir_lower_global_vars_to_local);
}

static int
st_nir_lookup_parameter_index(struct gl_program *prog, nir_variable *var)
{
   struct gl_program_parameter_list *params = prog->Parameters;

   /* Look up the first parameter whose uniform storage index matches the
    * variable's location.
    */
   for (unsigned i = 0; i < params->NumParameters; i++) {
      int index = params->Parameters[i].MainUniformStorageIndex;
      if (index == var->data.location)
         return i;
   }

   /* TODO: Handle this fallback for SPIR-V.  We need this for GLSL e.g. in
    * dEQP-GLES2.functional.uniform_api.random.3
    */

   /* Is there a better way to do this?  If we have something like:
    *
    *    struct S {
    *           float f;
    *           vec4 v;
    *    };
    *    uniform S color;
    *
    * Then what we get in prog->Parameters looks like:
    *
    *    0: Name=color.f, Type=6, DataType=1406, Size=1
    *    1: Name=color.v, Type=6, DataType=8b52, Size=4
    *
    * So the name doesn't match up and _mesa_lookup_parameter_index()
    * fails.  In this case just find the first matching "color.*".
    *
    * Note that for arrays you could end up with color[n].f, for example.
    *
    * glsl_to_tgsi works slightly differently in this regard.  It is
    * emitting something more low level, so it just translates the
    * params list 1:1 to CONST[] regs.  Going from GLSL IR to TGSI,
    * it just calculates the additional offset of struct field members
    * in glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) or
    * glsl_to_tgsi_visitor::visit(ir_dereference_array *ir).  It never
    * needs to work backwards to get the base var location from the
    * param list, which already has them separated out.
    */
   if (!prog->sh.data->spirv) {
      int namelen = strlen(var->name);
      for (unsigned i = 0; i < params->NumParameters; i++) {
         struct gl_program_parameter *p = &params->Parameters[i];
         if ((strncmp(p->Name, var->name, namelen) == 0) &&
             ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
            return i;
         }
      }
   }

   return -1;
}

static void
st_nir_assign_uniform_locations(struct gl_context *ctx,
                                struct gl_program *prog,
                                nir_shader *nir)
{
   int shaderidx = 0;
   int imageidx = 0;

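   /* Samplers and images get locations from their own contiguous counters;
    * all other uniforms get a location in the constant parameter list.
    */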
   nir_foreach_uniform_variable(uniform, nir) {
      int loc;

      const struct glsl_type *type = glsl_without_array(uniform->type);
      if (!uniform->data.bindless && (type->is_sampler() || type->is_image())) {
         if (type->is_sampler()) {
            loc = shaderidx;
            shaderidx += type_size(uniform->type);
         } else {
            loc = imageidx;
            imageidx += type_size(uniform->type);
         }
      } else if (uniform->state_slots) {
         const gl_state_index16 *const stateTokens = uniform->state_slots[0].tokens;
         /* This state reference has already been set up by ir_to_mesa, but
          * we'll get the same index back here.
          */

         unsigned comps;
         if (glsl_type_is_struct_or_ifc(type)) {
            comps = 4;
         } else {
            comps = glsl_get_vector_elements(type);
         }

         if (ctx->Const.PackedDriverUniformStorage) {
            loc = _mesa_add_sized_state_reference(prog->Parameters,
                                                  stateTokens, comps, false);
            loc = prog->Parameters->Parameters[loc].ValueOffset;
         } else {
            loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
         }
      } else {
         loc = st_nir_lookup_parameter_index(prog, uniform);

         /* We need to check that loc is not -1 here before accessing the
          * array. It can be negative for example when we have a struct that
          * only contains opaque types.
          */
         if (loc >= 0 && ctx->Const.PackedDriverUniformStorage) {
            loc = prog->Parameters->Parameters[loc].ValueOffset;
         }
      }

      uniform->data.driver_location = loc;
   }
}

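/* Generic NIR optimization loop, run to a fixed point: the pass list is
 * repeated until no pass reports progress.
 */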
void
st_nir_opts(nir_shader *nir)
{
   bool progress;

   do {
      progress = false;

      NIR_PASS_V(nir, nir_lower_vars_to_ssa);

      /* Linking deals with unused inputs/outputs, but here we can remove
       * things local to the shader in the hopes that we can clean up other
       * things. This pass will also remove variables with only stores, so we
       * might be able to make progress after it.
       */
      NIR_PASS(progress, nir, nir_remove_dead_variables,
               nir_var_function_temp | nir_var_shader_temp |
               nir_var_mem_shared,
               NULL);

      NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
      NIR_PASS(progress, nir, nir_opt_dead_write_vars);

      if (nir->options->lower_to_scalar) {
         NIR_PASS_V(nir, nir_lower_alu_to_scalar,
                    nir->options->lower_to_scalar_filter, NULL);
         NIR_PASS_V(nir, nir_lower_phis_to_scalar, false);
      }

      NIR_PASS_V(nir, nir_lower_alu);
      NIR_PASS_V(nir, nir_lower_pack);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_dce);
      if (nir_opt_trivial_continues(nir)) {
         progress = true;
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_dce);
      }
      NIR_PASS(progress, nir, nir_opt_if, false);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);

      NIR_PASS(progress, nir, nir_opt_phi_precision);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      if (!nir->info.flrp_lowered) {
         unsigned lower_flrp =
            (nir->options->lower_flrp16 ? 16 : 0) |
            (nir->options->lower_flrp32 ? 32 : 0) |
            (nir->options->lower_flrp64 ? 64 : 0);

         if (lower_flrp) {
            bool lower_flrp_progress = false;

            NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
                     lower_flrp,
                     false /* always_precise */);
            if (lower_flrp_progress) {
               NIR_PASS(progress, nir,
                        nir_opt_constant_folding);
               progress = true;
            }
         }

         /* Nothing should rematerialize any flrps, so we only need to do this
          * lowering once.
          */
         nir->info.flrp_lowered = true;
      }

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations) {
         NIR_PASS(progress, nir, nir_opt_loop_unroll);
      }
   } while (progress);
}

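/* Size/alignment callback for lowering shared variables to explicit
 * offsets; this matches std430-style vector rules, where a vec3 is
 * aligned like a vec4 and booleans take four bytes.
 */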
static void
shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size * (length == 3 ? 4 : length);
}

/* First third of converting glsl_to_nir: this leaves things in a pre-
 * nir_lower_io state, so that shader variants can more easily insert/
 * replace variables, etc.
 */
static void
st_nir_preprocess(struct st_context *st, struct gl_program *prog,
                  struct gl_shader_program *shader_program,
                  gl_shader_stage stage)
{
   struct pipe_screen *screen = st->screen;
   const nir_shader_compiler_options *options =
      st->ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;
   assert(options);
   nir_shader *nir = prog->nir;

   /* Set the next shader stage hint for VS and TES. */
   if (!nir->info.separate_shader &&
       (nir->info.stage == MESA_SHADER_VERTEX ||
        nir->info.stage == MESA_SHADER_TESS_EVAL)) {

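      /* prev_stages covers this stage and everything before it, so the
       * lowest bit left in linked_stages is the next enabled stage.
       */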
      unsigned prev_stages = (1 << (prog->info.stage + 1)) - 1;
      unsigned stages_mask =
         ~prev_stages & shader_program->data->linked_stages;

      nir->info.next_stage = stages_mask ?
         (gl_shader_stage) u_bit_scan(&stages_mask) : MESA_SHADER_FRAGMENT;
   } else {
      nir->info.next_stage = MESA_SHADER_FRAGMENT;
   }

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
   if (!st->ctx->SoftFP64 && ((nir->info.bit_sizes_int | nir->info.bit_sizes_float) & 64) &&
       (options->lower_doubles_options & nir_lower_fp64_full_software) != 0) {
      st->ctx->SoftFP64 = glsl_float64_funcs_to_nir(st->ctx, options);
   }

   /* ES has strict SSO validation rules for shader IO matching so we can't
    * remove dead IO until the resource list has been built. Here we skip
    * removing them until later. This will potentially make the IO lowering
    * calls below do a little extra work but should otherwise have no impact.
    */
   if (!_mesa_is_gles(st->ctx) || !nir->info.separate_shader) {
      nir_variable_mode mask = nir_var_shader_in | nir_var_shader_out;
      nir_remove_dead_variables(nir, mask, NULL);
   }

   if (options->lower_all_io_to_temps ||
       nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, true);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT ||
              !screen->get_param(screen, PIPE_CAP_TGSI_CAN_READ_OUTPUTS)) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, false);
   }

   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   if (options->lower_to_scalar) {
      NIR_PASS_V(nir, nir_lower_alu_to_scalar,
                 options->lower_to_scalar_filter, NULL);
   }

   /* before buffers and vars_to_ssa */
   NIR_PASS_V(nir, gl_nir_lower_images, true);

   /* TODO: Change GLSL to not lower shared memory. */
   if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
       shader_program->data->spirv) {
      NIR_PASS_V(prog->nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared, shared_type_info);
      NIR_PASS_V(prog->nir, nir_lower_explicit_io,
                 nir_var_mem_shared, nir_address_format_32bit_offset);
   }

   /* Do a round of constant folding to clean up address calculations */
   NIR_PASS_V(nir, nir_opt_constant_folding);
}

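/* nir_foreach_dest/nir_foreach_src callbacks for filter_64_bit_instr
 * below: flag the instruction and stop iterating (by returning false) as
 * soon as a 64-bit destination or source is found.
 */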
static bool
dest_is_64bit(nir_dest *dest, void *state)
{
   bool *lower = (bool *)state;
   if (dest && (nir_dest_bit_size(*dest) == 64)) {
      *lower = true;
      return false;
   }
   return true;
}

static bool
src_is_64bit(nir_src *src, void *state)
{
   bool *lower = (bool *)state;
   if (src && (nir_src_bit_size(*src) == 64)) {
      *lower = true;
      return false;
   }
   return true;
}

static bool
filter_64_bit_instr(const nir_instr *const_instr, UNUSED const void *data)
{
   bool lower = false;
   /* lower_alu_to_scalar requires the nir_instr to be const, but
    * nir_foreach_* doesn't have const variants, so do the ugly const_cast
    * here. */
   nir_instr *instr = const_cast<nir_instr *>(const_instr);

   nir_foreach_dest(instr, dest_is_64bit, &lower);
   if (lower)
      return true;
   nir_foreach_src(instr, src_is_64bit, &lower);
   return lower;
}

/* Second third of converting glsl_to_nir. This creates uniforms, gathers
 * info on varyings, etc after NIR link time opts have been applied.
 */
static char *
st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
                         struct gl_shader_program *shader_program)
{
   nir_shader *nir = prog->nir;
   struct pipe_screen *screen = st->screen;

   /* Make a pass over the IR to add state references for any built-in
    * uniforms that are used.  This has to be done now (during linking).
    * Code generation doesn't happen until the first time this shader is
    * used for rendering.  Waiting until then to generate the parameters is
    * too late.  At that point, the values for the built-in uniforms won't
    * get sent to the shader.
    */
   nir_foreach_uniform_variable(var, nir) {
      const nir_state_slot *const slots = var->state_slots;
      if (slots != NULL) {
         const struct glsl_type *type = glsl_without_array(var->type);
         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            unsigned comps;
            if (glsl_type_is_struct_or_ifc(type)) {
               comps = _mesa_program_state_value_size(slots[i].tokens);
            } else {
               comps = glsl_get_vector_elements(type);
            }

            if (st->ctx->Const.PackedDriverUniformStorage) {
               _mesa_add_sized_state_reference(prog->Parameters,
                                               slots[i].tokens,
                                               comps, false);
            } else {
               _mesa_add_state_reference(prog->Parameters,
                                         slots[i].tokens);
            }
         }
      }
   }

   /* Avoid reallocation of the program parameter list, because the uniform
    * storage is only associated with the original parameter list.
    * This should be enough for Bitmap and DrawPixels constants.
    */
   _mesa_ensure_and_associate_uniform_storage(st->ctx, shader_program, prog, 16);

   st_set_prog_affected_state_flags(prog);

   /* None of the builtins being lowered here can be produced by SPIR-V.  See
    * _mesa_builtin_uniform_desc. Also drivers that support packed uniform
    * storage don't need to lower builtins.
    */
   if (!shader_program->data->spirv &&
       !st->ctx->Const.PackedDriverUniformStorage) {
      /* At this point, array uniforms have been split into separate
       * nir_variable structs where possible. This codepath can't handle
       * dynamic array indexing, however, so all indirect uniform derefs must
       * be eliminated beforehand to avoid trying to lower one of those
       * builtins.
       */
      NIR_PASS_V(nir, nir_lower_indirect_builtin_uniform_derefs);
      NIR_PASS_V(nir, st_nir_lower_builtin);
   }

   if (!screen->get_param(screen, PIPE_CAP_NIR_ATOMICS_AS_DEREF))
      NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);

   NIR_PASS_V(nir, nir_opt_intrinsics);
   NIR_PASS_V(nir, nir_opt_fragdepth);

   /* Lower 64-bit ops. */
   if (nir->options->lower_int64_options ||
       nir->options->lower_doubles_options) {
      bool lowered_64bit_ops = false;
      bool revectorize = false;

      /* nir_lower_doubles is not prepared for vector ops, so if the backend
       * hasn't requested lower_to_scalar, scalarize all 64-bit ops here and
       * try to re-vectorize them afterwards.
       */
      if (!nir->options->lower_to_scalar) {
         NIR_PASS(revectorize, nir, nir_lower_alu_to_scalar, filter_64_bit_instr, nullptr);
         NIR_PASS(revectorize, nir, nir_lower_phis_to_scalar, false);
      }

      if (nir->options->lower_doubles_options) {
         NIR_PASS(lowered_64bit_ops, nir, nir_lower_doubles,
                  st->ctx->SoftFP64, nir->options->lower_doubles_options);
      }
      if (nir->options->lower_int64_options)
         NIR_PASS(lowered_64bit_ops, nir, nir_lower_int64);

      if (revectorize)
         NIR_PASS_V(nir, nir_opt_vectorize, nullptr, nullptr);

      if (revectorize || lowered_64bit_ops)
         st_nir_opts(nir);
   }

   nir_variable_mode mask =
      nir_var_shader_in | nir_var_shader_out | nir_var_function_temp;
   nir_remove_dead_variables(nir, mask, NULL);

   if (!st->has_hw_atomics && !screen->get_param(screen, PIPE_CAP_NIR_ATOMICS_AS_DEREF))
      NIR_PASS_V(nir, nir_lower_atomics_to_ssbo);

   st_finalize_nir_before_variants(nir);

   char *msg = NULL;
   if (st->allow_st_finalize_nir_twice)
      msg = st_finalize_nir(st, prog, shader_program, nir, true, true);

   if (st->ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("\n");
      _mesa_log("NIR IR for linked %s program %d:\n",
                _mesa_shader_stage_to_string(prog->info.stage),
                shader_program->Name);
      nir_print_shader(nir, _mesa_get_log_file());
      _mesa_log("\n\n");
   }

   return msg;
}

static void
st_nir_vectorize_io(nir_shader *producer, nir_shader *consumer)
{
   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);

   if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
      /* Calling lower_io_to_vector creates output variable writes with
       * write-masks.  We only support these for TCS outputs, so for other
       * stages, we need to call nir_lower_io_to_temporaries to get rid of
       * them.  This, in turn, creates temporary variables and extra
       * copy_deref intrinsics that we need to clean up.
       */
      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(producer), true, false);
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(producer, nir_split_var_copies);
      NIR_PASS_V(producer, nir_lower_var_copies);
   }

   /* Undef scalar store_deref intrinsics are not ignored by nir_lower_io,
    * so they must be removed before that. These passes remove them.
    */
   NIR_PASS_V(producer, nir_lower_vars_to_ssa);
   NIR_PASS_V(producer, nir_opt_undef);
   NIR_PASS_V(producer, nir_opt_dce);
}

static void
st_nir_link_shaders(nir_shader *producer, nir_shader *consumer)
{
   if (producer->options->lower_to_scalar) {
      NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
   }

   nir_lower_io_arrays_to_elements(producer, consumer);

   st_nir_opts(producer);
   st_nir_opts(consumer);

   if (nir_link_opt_varyings(producer, consumer))
      st_nir_opts(consumer);

   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
   NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);

   if (nir_remove_unused_varyings(producer, consumer)) {
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(consumer, nir_lower_global_vars_to_local);

      st_nir_opts(producer);
      st_nir_opts(consumer);

      /* Optimizations can cause varyings to become unused.
       * nir_compact_varyings() depends on all dead varyings being removed so
       * we need to call nir_remove_dead_variables() again here.
       */
      NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out,
                 NULL);
      NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in,
                 NULL);
   }

   nir_link_varying_precision(producer, consumer);
}

static void
st_lower_patch_vertices_in(struct gl_shader_program *shader_prog)
{
   struct gl_linked_shader *linked_tcs =
      shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
   struct gl_linked_shader *linked_tes =
      shader_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];

   /* If we have a TCS and TES linked together, lower TES patch vertices. */
   if (linked_tcs && linked_tes) {
      nir_shader *tcs_nir = linked_tcs->Program->nir;
      nir_shader *tes_nir = linked_tes->Program->nir;

      /* The TES input vertex count is the TCS output vertex count, so lower
       * TES gl_PatchVerticesIn to a constant.
       */
      uint32_t tes_patch_verts = tcs_nir->info.tess.tcs_vertices_out;
      NIR_PASS_V(tes_nir, nir_lower_patch_vertices, tes_patch_verts, NULL);
   }
}

extern "C" {

void
st_nir_lower_wpos_ytransform(struct nir_shader *nir,
                             struct gl_program *prog,
                             struct pipe_screen *pscreen)
{
   if (nir->info.stage != MESA_SHADER_FRAGMENT)
      return;

   static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
      STATE_FB_WPOS_Y_TRANSFORM
   };
   nir_lower_wpos_ytransform_options wpos_options = { { 0 } };

   memcpy(wpos_options.state_tokens, wposTransformState,
          sizeof(wpos_options.state_tokens));
   wpos_options.fs_coord_origin_upper_left =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT);
   wpos_options.fs_coord_origin_lower_left =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
   wpos_options.fs_coord_pixel_center_integer =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
   wpos_options.fs_coord_pixel_center_half_integer =
      pscreen->get_param(pscreen,
                         PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER);

   if (nir_lower_wpos_ytransform(nir, &wpos_options)) {
      nir_validate_shader(nir, "after nir_lower_wpos_ytransform");
      _mesa_add_state_reference(prog->Parameters, wposTransformState);
   }

   static const gl_state_index16 pntcTransformState[STATE_LENGTH] = {
      STATE_FB_PNTC_Y_TRANSFORM
   };

   if (nir_lower_pntc_ytransform(nir, &pntcTransformState)) {
      _mesa_add_state_reference(prog->Parameters, pntcTransformState);
   }
}

bool
st_link_nir(struct gl_context *ctx,
            struct gl_shader_program *shader_program)
{
   struct st_context *st = st_context(ctx);
   struct gl_linked_shader *linked_shader[MESA_SHADER_STAGES];
   unsigned num_shaders = 0;

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (shader_program->_LinkedShaders[i])
         linked_shader[num_shaders++] = shader_program->_LinkedShaders[i];
   }

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_linked_shader *shader = linked_shader[i];
      const nir_shader_compiler_options *options =
         st->ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions;
      struct gl_program *prog = shader->Program;
      struct st_program *stp = (struct st_program *)prog;

      _mesa_copy_linked_program_data(shader_program, shader);

      assert(!prog->nir);
      stp->shader_program = shader_program;
      stp->state.type = PIPE_SHADER_IR_NIR;

      /* Parameters will be filled during NIR linking. */
      prog->Parameters = _mesa_new_parameter_list();

      if (shader_program->data->spirv) {
         prog->nir = _mesa_spirv_to_nir(ctx, shader_program, shader->Stage, options);
      } else {
         validate_ir_tree(shader->ir);

         if (ctx->_Shader->Flags & GLSL_DUMP) {
            _mesa_log("\n");
            _mesa_log("GLSL IR for linked %s program %d:\n",
                      _mesa_shader_stage_to_string(shader->Stage),
                      shader_program->Name);
            _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
            _mesa_log("\n\n");
         }

         prog->nir = glsl_to_nir(st->ctx, shader_program, shader->Stage, options);
      }

      st_nir_preprocess(st, prog, shader_program, shader->Stage);

      if (options->lower_to_scalar) {
         NIR_PASS_V(shader->Program->nir, nir_lower_load_const_to_scalar);
      }
   }

   st_lower_patch_vertices_in(shader_program);

   /* Linking the stages in the opposite order (from fragment to vertex)
    * ensures that inter-shader outputs written to in an earlier stage
    * are eliminated if they are (transitively) not used in a later
    * stage.
    */
   for (int i = num_shaders - 2; i >= 0; i--) {
      st_nir_link_shaders(linked_shader[i]->Program->nir,
                          linked_shader[i + 1]->Program->nir);
   }

   /* Linking shaders also optimizes them. Separate shaders, compute shaders
    * and shaders with a fixed-func VS or FS that don't need linking are
    * optimized here.
    */
   if (num_shaders == 1)
      st_nir_opts(linked_shader[0]->Program->nir);

   if (shader_program->data->spirv) {
      static const gl_nir_linker_options opts = {
         true /* fill_parameters */
      };
      if (!gl_nir_link_spirv(ctx, shader_program, &opts))
         return GL_FALSE;
   } else {
      if (!gl_nir_link_glsl(ctx, shader_program))
         return GL_FALSE;
   }

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_program *prog = linked_shader[i]->Program;
      prog->ExternalSamplersUsed = gl_external_samplers(prog);
      _mesa_update_shader_textures_used(shader_program, prog);
   }

   nir_build_program_resource_list(ctx, shader_program,
                                   shader_program->data->spirv);

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_linked_shader *shader = linked_shader[i];
      nir_shader *nir = shader->Program->nir;

      /* Don't infer ACCESS_NON_READABLE so that Program->sh.ImageAccess is
       * correct: https://gitlab.freedesktop.org/mesa/mesa/-/issues/3278
       */
      nir_opt_access_options opt_access_options;
      opt_access_options.is_vulkan = false;
      opt_access_options.infer_non_readable = false;
      NIR_PASS_V(nir, nir_opt_access, &opt_access_options);

      /* This needs to run after the initial pass of nir_lower_vars_to_ssa,
       * so that the buffer indices are constants in NIR where they were
       * constants in GLSL. */
      NIR_PASS_V(nir, gl_nir_lower_buffers, shader_program);

      /* Remap the locations to slots so those requiring two slots will
       * occupy two locations. For instance, if we have in the IR code a
       * dvec3 attr0 in location 0 and vec4 attr1 in location 1, in NIR attr0
       * will use locations/slots 0 and 1, and attr1 will use location/slot 2.
       */
      if (nir->info.stage == MESA_SHADER_VERTEX && !shader_program->data->spirv)
         nir_remap_dual_slot_attributes(nir, &shader->Program->DualSlotInputs);

      NIR_PASS_V(nir, st_nir_lower_wpos_ytransform, shader->Program,
                 st->screen);

      NIR_PASS_V(nir, nir_lower_system_values);
      NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);

      NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);

      st_shader_gather_info(nir, shader->Program);
      if (shader->Stage == MESA_SHADER_VERTEX) {
         /* NIR expands dual-slot inputs out to two locations.  We need to
          * compact things back down to GL-style single-slot inputs to avoid
          * confusing the state tracker.
          */
         shader->Program->info.inputs_read =
            nir_get_single_slot_attribs_mask(nir->info.inputs_read,
                                             shader->Program->DualSlotInputs);
      }

      if (i >= 1) {
         struct gl_program *prev_shader = linked_shader[i - 1]->Program;

         /* We can't use nir_compact_varyings with transform feedback, since
          * the pipe_stream_output->output_register field is based on the
          * pre-compacted driver_locations.
          */
         if (!(prev_shader->sh.LinkedTransformFeedback &&
               prev_shader->sh.LinkedTransformFeedback->NumVarying > 0))
            nir_compact_varyings(prev_shader->nir,
                                 nir, ctx->API != API_OPENGL_COMPAT);

         if (ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions->vectorize_io)
            st_nir_vectorize_io(prev_shader->nir, nir);
      }
   }

   struct shader_info *prev_info = NULL;

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_linked_shader *shader = linked_shader[i];
      struct shader_info *info = &shader->Program->nir->info;

      char *msg = st_glsl_to_nir_post_opts(st, shader->Program, shader_program);
      if (msg) {
         linker_error(shader_program, msg);
         break;
      }

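      /* With unify_interfaces set, each side's IO bitmask is ORed into the
       * other so producer and consumer report the same interface; the
       * dedicated tess-level varyings are left out of this.
       */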
      if (prev_info &&
          ctx->Const.ShaderCompilerOptions[shader->Stage].NirOptions->unify_interfaces) {
         prev_info->outputs_written |= info->inputs_read &
            ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
         info->inputs_read |= prev_info->outputs_written &
            ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);

         prev_info->patch_outputs_written |= info->patch_inputs_read;
         info->patch_inputs_read |= prev_info->patch_outputs_written;
      }
      prev_info = info;
   }

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_linked_shader *shader = linked_shader[i];
      struct gl_program *prog = shader->Program;
      struct st_program *stp = st_program(prog);

      /* Make sure that prog->info is in sync with nir->info, but st/mesa
       * expects some of the values to be from before lowering.
       */
      shader_info old_info = prog->info;
      prog->info = prog->nir->info;
      prog->info.name = old_info.name;
      prog->info.label = old_info.label;
      prog->info.num_ssbos = old_info.num_ssbos;
      prog->info.num_ubos = old_info.num_ubos;
      prog->info.num_abos = old_info.num_abos;
      if (prog->info.stage == MESA_SHADER_VERTEX)
         prog->info.inputs_read = old_info.inputs_read;

      /* Initialize st_vertex_program members. */
      if (shader->Stage == MESA_SHADER_VERTEX)
         st_prepare_vertex_program(stp, NULL);

      /* Get pipe_stream_output_info. */
      if (shader->Stage == MESA_SHADER_VERTEX ||
          shader->Stage == MESA_SHADER_TESS_EVAL ||
          shader->Stage == MESA_SHADER_GEOMETRY)
         st_translate_stream_output_info(prog);

      st_store_ir_in_disk_cache(st, prog, true);

      st_release_variants(st, stp);
      st_finalize_program(st, prog);

      /* The GLSL IR won't be needed anymore. */
      ralloc_free(shader->ir);
      shader->ir = NULL;
   }

   return true;
}

void
st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
{
   if (nir->info.stage == MESA_SHADER_VERTEX) {
      nir_assign_io_var_locations(nir, nir_var_shader_out,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, nir, nir_var_shader_out);
   } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
              nir->info.stage == MESA_SHADER_TESS_CTRL ||
              nir->info.stage == MESA_SHADER_TESS_EVAL) {
      nir_assign_io_var_locations(nir, nir_var_shader_in,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, nir, nir_var_shader_in);

      nir_assign_io_var_locations(nir, nir_var_shader_out,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, nir, nir_var_shader_out);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      nir_assign_io_var_locations(nir, nir_var_shader_in,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, nir, nir_var_shader_in);
      nir_assign_io_var_locations(nir, nir_var_shader_out,
                                  &nir->num_outputs,
                                  nir->info.stage);
   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
       /* TODO? */
   } else {
      unreachable("invalid shader type");
   }
}

void
st_nir_lower_samplers(struct pipe_screen *screen, nir_shader *nir,
                      struct gl_shader_program *shader_program,
                      struct gl_program *prog)
{
   if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
      NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
   else
      NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);

   if (prog) {
      BITSET_COPY(prog->info.textures_used, nir->info.textures_used);
      BITSET_COPY(prog->info.textures_used_by_txf, nir->info.textures_used_by_txf);
      prog->info.images_used = nir->info.images_used;
   }
}

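/* Type-size callbacks for nir_lower_io on uniforms: packed driver storage
 * is measured in dword slots, unpacked storage in vec4 slots.
 */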
static int
st_packed_uniforms_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_dword_slots(type, bindless);
}

static int
st_unpacked_uniforms_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_vec4_slots(type, false, bindless);
}

void
st_nir_lower_uniforms(struct st_context *st, nir_shader *nir)
{
   if (st->ctx->Const.PackedDriverUniformStorage) {
      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
                 st_packed_uniforms_type_size,
                 (nir_lower_io_options)0);
   } else {
      NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
                 st_unpacked_uniforms_type_size,
                 (nir_lower_io_options)0);
   }

   if (nir->options->lower_uniforms_to_ubo)
      NIR_PASS_V(nir, nir_lower_uniforms_to_ubo,
                 st->ctx->Const.PackedDriverUniformStorage,
                 !st->ctx->Const.NativeIntegers);
}

/* Last third of preparing nir from glsl, which happens after shader
 * variant lowering.
 */
char *
st_finalize_nir(struct st_context *st, struct gl_program *prog,
                struct gl_shader_program *shader_program,
                nir_shader *nir, bool finalize_by_driver,
                bool is_before_variants)
{
   struct pipe_screen *screen = st->screen;

   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);

   if (st->lower_rect_tex) {
      struct nir_lower_tex_options opts = { 0 };

      opts.lower_rect = true;

      NIR_PASS_V(nir, nir_lower_tex, &opts);
   }

   st_nir_assign_varying_locations(st, nir);
   st_nir_assign_uniform_locations(st->ctx, prog, nir);

   /* Set num_uniforms in number of attribute slots (vec4s) */
   nir->num_uniforms = DIV_ROUND_UP(prog->Parameters->NumParameterValues, 4);

   st_nir_lower_uniforms(st, nir);

   if (is_before_variants && nir->options->lower_uniforms_to_ubo) {
      /* This must be done after uniforms are lowered to UBO and all
       * nir_var_uniform variables are removed from NIR to prevent conflicts
       * between state parameter merging and shader variant generation.
       */
      _mesa_optimize_state_parameters(&st->ctx->Const, prog->Parameters);
   }

   st_nir_lower_samplers(screen, nir, shader_program, prog);
   if (!screen->get_param(screen, PIPE_CAP_NIR_IMAGES_AS_DEREF))
      NIR_PASS_V(nir, gl_nir_lower_images, false);

   char *msg = NULL;
   if (finalize_by_driver && screen->finalize_nir)
      msg = screen->finalize_nir(screen, nir);

   return msg;
}

} /* extern "C" */