• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2015 Red Hat
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  */
23 
24 #include "st_nir.h"
25 
26 #include "pipe/p_defines.h"
27 #include "pipe/p_screen.h"
28 #include "pipe/p_context.h"
29 
30 #include "program/program.h"
31 #include "program/prog_statevars.h"
32 #include "program/prog_parameter.h"
33 #include "program/ir_to_mesa.h"
34 #include "main/mtypes.h"
35 #include "main/errors.h"
36 #include "main/shaderapi.h"
37 #include "main/uniforms.h"
38 
39 #include "st_context.h"
40 #include "st_program.h"
41 
42 #include "compiler/nir/nir.h"
43 #include "compiler/glsl_types.h"
44 #include "compiler/glsl/glsl_to_nir.h"
45 #include "compiler/glsl/ir.h"
46 #include "compiler/glsl/string_to_uint_map.h"
47 
48 
49 static int
type_size(const struct glsl_type * type)50 type_size(const struct glsl_type *type)
51 {
52    return type->count_attribute_slots(false);
53 }
54 
55 /* Depending on PIPE_CAP_TGSI_TEXCOORD (st->needs_texcoord_semantic) we
56  * may need to fix up varying slots so the glsl->nir path is aligned
57  * with the anything->tgsi->nir path.
58  */
59 static void
st_nir_fixup_varying_slots(struct st_context * st,struct exec_list * var_list)60 st_nir_fixup_varying_slots(struct st_context *st, struct exec_list *var_list)
61 {
62    if (st->needs_texcoord_semantic)
63       return;
64 
65    nir_foreach_variable(var, var_list) {
66       if (var->data.location >= VARYING_SLOT_VAR0) {
67          var->data.location += 9;
68       } else if ((var->data.location >= VARYING_SLOT_TEX0) &&
69                (var->data.location <= VARYING_SLOT_TEX7)) {
70          var->data.location += VARYING_SLOT_VAR0 - VARYING_SLOT_TEX0;
71       }
72    }
73 }
74 
/* input location assignment for VS inputs must be handled specially, so
 * that it is aligned w/ st's vbo state.
 * (This isn't the case with, for ex, FS inputs, which only need to agree
 * on varying-slot w/ the VS outputs)
 */
static void
st_nir_assign_vs_in_locations(struct gl_program *prog, nir_shader *nir)
{
   unsigned attr, num_inputs = 0;
   /* Maps VERT_ATTRIB_* slot -> compacted driver location (~0 == unused). */
   unsigned input_to_index[VERT_ATTRIB_MAX] = {0};

   /* TODO de-duplicate w/ similar code in st_translate_vertex_program()? */
   for (attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
      if ((prog->info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
         input_to_index[attr] = num_inputs;
         num_inputs++;
         if ((prog->info.double_inputs_read & BITFIELD64_BIT(attr)) != 0) {
            /* add placeholder for second part of a double attribute */
            num_inputs++;
         }
      } else {
         input_to_index[attr] = ~0;
      }
   }

   /* bit of a hack, mirroring st_translate_vertex_program */
   input_to_index[VERT_ATTRIB_EDGEFLAG] = num_inputs;

   /* Recount only the inputs that actually get a driver_location. */
   nir->num_inputs = 0;
   nir_foreach_variable_safe(var, &nir->inputs) {
      attr = var->data.location;
      assert(attr < ARRAY_SIZE(input_to_index));

      if (input_to_index[attr] != ~0u) {
         var->data.driver_location = input_to_index[attr];
         nir->num_inputs++;
      } else {
         /* Move unused input variables to the globals list (with no
          * initialization), to avoid confusing drivers looking through the
          * inputs array and expecting to find inputs with a driver_location
          * set.
          */
         exec_node_remove(&var->node);
         var->data.mode = nir_var_global;
         exec_list_push_tail(&nir->globals, &var->node);
      }
   }
}
123 
/* Assign driver_locations for every variable in var_list, accumulating the
 * total slot count into *size.  Handles per-vertex arrays (tess/geom I/O)
 * and component-packed varyings that share a location.
 */
static void
st_nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                            gl_shader_stage stage)
{
   unsigned location = 0;
   /* Remembers the driver_location handed out for each varying slot so
    * later component-packed variables at the same slot can re-use it.
    */
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs = 0;        /* bitmask of non-patch slots seen */
   uint32_t processed_patch_locs = 0;  /* bitmask of patch slots seen */

   nir_foreach_variable(var, var_list) {

      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage)) {
         /* Strip the implicit outer per-vertex array so sizing below uses
          * the per-vertex type.
          */
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      /* Patch and non-patch varyings live in separate location spaces, so
       * track "already processed" separately for each.  The tess-level and
       * bounding-box built-ins are excluded from the patch bitmask since
       * they are not PATCH0-relative.
       */
      bool processed = false;
      if (var->data.patch &&
          var->data.location != VARYING_SLOT_TESS_LEVEL_INNER &&
          var->data.location != VARYING_SLOT_TESS_LEVEL_OUTER &&
          var->data.location != VARYING_SLOT_BOUNDING_BOX0 &&
          var->data.location != VARYING_SLOT_BOUNDING_BOX1) {
         unsigned patch_loc = var->data.location - VARYING_SLOT_PATCH0;
         if (processed_patch_locs & (1 << patch_loc))
            processed = true;

         processed_patch_locs |= (1 << patch_loc);
      } else {
         if (processed_locs & ((uint64_t)1 << var->data.location))
            processed = true;

         processed_locs |= ((uint64_t)1 << var->data.location);
      }

      /* Because component packing allows varyings to share the same location
       * we may have already have processed this location.
       */
      if (processed && var->data.location >= VARYING_SLOT_VAR0) {
         var->data.driver_location = assigned_locations[var->data.location];
         *size += type_size(type);
         continue;
      }

      assigned_locations[var->data.location] = location;
      var->data.driver_location = location;
      location += type_size(type);
   }

   *size += location;
}
175 
176 static int
st_nir_lookup_parameter_index(const struct gl_program_parameter_list * params,const char * name)177 st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
178                               const char *name)
179 {
180    int loc = _mesa_lookup_parameter_index(params, name);
181 
182    /* is there a better way to do this?  If we have something like:
183     *
184     *    struct S {
185     *           float f;
186     *           vec4 v;
187     *    };
188     *    uniform S color;
189     *
190     * Then what we get in prog->Parameters looks like:
191     *
192     *    0: Name=color.f, Type=6, DataType=1406, Size=1
193     *    1: Name=color.v, Type=6, DataType=8b52, Size=4
194     *
195     * So the name doesn't match up and _mesa_lookup_parameter_index()
196     * fails.  In this case just find the first matching "color.*"..
197     *
198     * Note for arrays you could end up w/ color[n].f, for example.
199     *
200     * glsl_to_tgsi works slightly differently in this regard.  It is
201     * emitting something more low level, so it just translates the
202     * params list 1:1 to CONST[] regs.  Going from GLSL IR to TGSI,
203     * it just calculates the additional offset of struct field members
204     * in glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) or
205     * glsl_to_tgsi_visitor::visit(ir_dereference_array *ir).  It never
206     * needs to work backwards to get base var loc from the param-list
207     * which already has them separated out.
208     */
209    if (loc < 0) {
210       int namelen = strlen(name);
211       for (unsigned i = 0; i < params->NumParameters; i++) {
212          struct gl_program_parameter *p = &params->Parameters[i];
213          if ((strncmp(p->Name, name, namelen) == 0) &&
214              ((p->Name[namelen] == '.') || (p->Name[namelen] == '['))) {
215             loc = i;
216             break;
217          }
218       }
219    }
220 
221    return loc;
222 }
223 
/* Assign driver_locations for the uniform variables in uniform_list and
 * record the total uniform extent (in type_size() units) into *size.
 * Samplers/images get sequential per-kind indices; built-in "gl_*"
 * uniforms are resolved via state references; everything else is looked
 * up in prog->Parameters.
 */
static void
st_nir_assign_uniform_locations(struct gl_program *prog,
                                struct gl_shader_program *shader_program,
                                struct exec_list *uniform_list, unsigned *size)
{
   int max = 0;
   int shaderidx = 0;  /* next sampler index */
   int imageidx = 0;   /* next image index */

   nir_foreach_variable(uniform, uniform_list) {
      int loc;

      /*
       * UBO's have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if ((uniform->data.mode == nir_var_uniform || uniform->data.mode == nir_var_shader_storage) &&
          uniform->interface_type != NULL)
         continue;

      if (uniform->type->is_sampler() || uniform->type->is_image()) {
         unsigned val = 0;
         bool found = shader_program->UniformHash->get(val, uniform->name);
         if (uniform->type->is_sampler())
            loc = shaderidx++;
         else
            loc = imageidx++;
         assert(found);
         (void) found; /* silence unused var warning */
         /* this ensure that nir_lower_samplers looks at the correct
          * shader_program->UniformStorage[location]:
          */
         uniform->data.location = val;
      } else if (strncmp(uniform->name, "gl_", 3) == 0) {
         const gl_state_index *const stateTokens = (gl_state_index *)uniform->state_slots[0].tokens;
         /* This state reference has already been setup by ir_to_mesa, but we'll
          * get the same index back here.
          */
         loc = _mesa_add_state_reference(prog->Parameters, stateTokens);
      } else {
         /* Ordinary user uniform: find it (or a struct/array member prefix
          * match) in the program's parameter list.
          */
         loc = st_nir_lookup_parameter_index(prog->Parameters, uniform->name);
      }

      uniform->data.driver_location = loc;

      max = MAX2(max, loc + type_size(uniform->type));
   }
   *size = max;
}
273 
/* Run the standard NIR optimization loop until no pass reports progress.
 * Pass order matters: cleanup passes (copy-prop/DCE) follow the passes
 * that create dead code.
 */
static void
st_nir_opts(nir_shader *nir)
{
   bool progress;
   do {
      progress = false;

      NIR_PASS_V(nir, nir_lower_64bit_pack);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_dce);
      if (nir_opt_trivial_continues(nir)) {
         /* Removing trivial continues opens up fresh copy-prop/DCE
          * opportunities, so re-run them immediately.
          */
         progress = true;
         NIR_PASS(progress, nir, nir_copy_prop);
         NIR_PASS(progress, nir, nir_opt_dce);
      }
      NIR_PASS(progress, nir, nir_opt_if);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_peephole_select, 8);

      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      /* Only unroll loops when the backend's options permit it. */
      if (nir->options->max_unroll_iterations) {
         NIR_PASS(progress, nir, nir_opt_loop_unroll, (nir_variable_mode)0);
      }
   } while (progress);
}
305 
306 /* First third of converting glsl_to_nir.. this leaves things in a pre-
307  * nir_lower_io state, so that shader variants can more easily insert/
308  * replace variables, etc.
309  */
310 static nir_shader *
st_glsl_to_nir(struct st_context * st,struct gl_program * prog,struct gl_shader_program * shader_program,gl_shader_stage stage)311 st_glsl_to_nir(struct st_context *st, struct gl_program *prog,
312                struct gl_shader_program *shader_program,
313                gl_shader_stage stage)
314 {
315    struct pipe_screen *pscreen = st->pipe->screen;
316    enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
317    const nir_shader_compiler_options *options;
318 
319    assert(pscreen->get_compiler_options);   /* drivers using NIR must implement this */
320 
321    options = (const nir_shader_compiler_options *)
322       pscreen->get_compiler_options(pscreen, PIPE_SHADER_IR_NIR, ptarget);
323    assert(options);
324 
325    if (prog->nir)
326       return prog->nir;
327 
328    nir_shader *nir = glsl_to_nir(shader_program, stage, options);
329 
330    st_nir_opts(nir);
331 
332    return nir;
333 }
334 
/* Second third of converting glsl_to_nir. This creates uniforms, gathers
 * info on varyings, etc after NIR link time opts have been applied.
 */
static void
st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
                         struct gl_shader_program *shader_program)
{
   nir_shader *nir = prog->nir;

   /* Make a pass over the IR to add state references for any built-in
    * uniforms that are used.  This has to be done now (during linking).
    * Code generation doesn't happen until the first time this shader is
    * used for rendering.  Waiting until then to generate the parameters is
    * too late.  At that point, the values for the built-in uniforms won't
    * get sent to the shader.
    */
   nir_foreach_variable(var, &nir->uniforms) {
      if (strncmp(var->name, "gl_", 3) == 0) {
         const nir_state_slot *const slots = var->state_slots;
         assert(var->state_slots != NULL);

         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *)slots[i].tokens);
         }
      }
   }

   /* Avoid reallocation of the program parameter list, because the uniform
    * storage is only associated with the original parameter list.
    * This should be enough for Bitmap and DrawPixels constants.
    */
   _mesa_reserve_parameter_storage(prog->Parameters, 8);

   /* This has to be done last.  Any operation the can cause
    * prog->ParameterValues to get reallocated (e.g., anything that adds a
    * program constant) has to happen before creating this linkage.
    */
   _mesa_associate_uniform_storage(st->ctx, shader_program, prog, true);

   st_set_prog_affected_state_flags(prog);

   /* Lower built-in uniform references and atomic counters now that the
    * parameter list is final.
    */
   NIR_PASS_V(nir, st_nir_lower_builtin);
   NIR_PASS_V(nir, nir_lower_atomics, shader_program);

   /* Optional debug dump of the linked NIR (GLSL_DUMP env/flag). */
   if (st->ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("\n");
      _mesa_log("NIR IR for linked %s program %d:\n",
             _mesa_shader_stage_to_string(prog->info.stage),
             shader_program->Name);
      nir_print_shader(nir, _mesa_get_log_file());
      _mesa_log("\n\n");
   }
}
389 
390 /* TODO any better helper somewhere to sort a list? */
391 
392 static void
insert_sorted(struct exec_list * var_list,nir_variable * new_var)393 insert_sorted(struct exec_list *var_list, nir_variable *new_var)
394 {
395    nir_foreach_variable(var, var_list) {
396       if (var->data.location > new_var->data.location) {
397          exec_node_insert_node_before(&var->node, &new_var->node);
398          return;
399       }
400    }
401    exec_list_push_tail(var_list, &new_var->node);
402 }
403 
404 static void
sort_varyings(struct exec_list * var_list)405 sort_varyings(struct exec_list *var_list)
406 {
407    struct exec_list new_list;
408    exec_list_make_empty(&new_list);
409    nir_foreach_variable_safe(var, var_list) {
410       exec_node_remove(&var->node);
411       insert_sorted(&new_list, var);
412    }
413    exec_list_move_nodes_to(&new_list, var_list);
414 }
415 
/* Attach @shader_program and the NIR shader to the stage-specific
 * st_*_program wrapper around @prog.  Note the compute program stores the
 * IR in differently-named fields (tgsi.ir_type / tgsi.prog) than the other
 * stages (tgsi.type / tgsi.ir.nir).
 */
static void
set_st_program(struct gl_program *prog,
               struct gl_shader_program *shader_program,
               nir_shader *nir)
{
   struct st_vertex_program *stvp;
   struct st_common_program *stp;
   struct st_fragment_program *stfp;
   struct st_compute_program *stcp;

   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX:
      stvp = (struct st_vertex_program *)prog;
      stvp->shader_program = shader_program;
      stvp->tgsi.type = PIPE_SHADER_IR_NIR;
      stvp->tgsi.ir.nir = nir;
      break;
   case MESA_SHADER_GEOMETRY:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
      /* GS/TCS/TES all share the common program struct. */
      stp = (struct st_common_program *)prog;
      stp->shader_program = shader_program;
      stp->tgsi.type = PIPE_SHADER_IR_NIR;
      stp->tgsi.ir.nir = nir;
      break;
   case MESA_SHADER_FRAGMENT:
      stfp = (struct st_fragment_program *)prog;
      stfp->shader_program = shader_program;
      stfp->tgsi.type = PIPE_SHADER_IR_NIR;
      stfp->tgsi.ir.nir = nir;
      break;
   case MESA_SHADER_COMPUTE:
      stcp = (struct st_compute_program *)prog;
      stcp->shader_program = shader_program;
      stcp->tgsi.ir_type = PIPE_SHADER_IR_NIR;
      stcp->tgsi.prog = nir;
      break;
   default:
      unreachable("unknown shader stage");
   }
}
457 
/* Convert one linked GLSL shader stage to NIR: builds the parameter list,
 * runs glsl_to_nir, attaches the result to the gl_program, and applies the
 * initial variable-lowering passes.
 */
static void
st_nir_get_mesa_program(struct gl_context *ctx,
                        struct gl_shader_program *shader_program,
                        struct gl_linked_shader *shader)
{
   struct st_context *st = st_context(ctx);
   struct gl_program *prog;

   validate_ir_tree(shader->ir);

   prog = shader->Program;

   prog->Parameters = _mesa_new_parameter_list();

   _mesa_copy_linked_program_data(shader_program, shader);
   _mesa_generate_parameters_list_for_uniforms(ctx, shader_program, shader,
                                               prog->Parameters);

   /* Optional debug dump of the GLSL IR before NIR conversion. */
   if (ctx->_Shader->Flags & GLSL_DUMP) {
      _mesa_log("\n");
      _mesa_log("GLSL IR for linked %s program %d:\n",
             _mesa_shader_stage_to_string(shader->Stage),
             shader_program->Name);
      _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
      _mesa_log("\n\n");
   }

   prog->ExternalSamplersUsed = gl_external_samplers(prog);
   _mesa_update_shader_textures_used(shader_program, prog);

   nir_shader *nir = st_glsl_to_nir(st, prog, shader_program, shader->Stage);

   set_st_program(prog, shader_program, nir);
   prog->nir = nir;

   /* Tess stages are skipped here: their per-vertex I/O arrays can't be
    * lowered to temporaries.
    */
   if (nir->info.stage != MESA_SHADER_TESS_CTRL &&
       nir->info.stage != MESA_SHADER_TESS_EVAL) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir),
                 true, true);
   }
   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);
}
503 
/* Cross-stage link a producer/consumer pair: lower I/O arrays to elements,
 * strip dead cross-stage variables, and re-optimize both shaders if any
 * varyings were removed.
 */
static void
st_nir_link_shaders(nir_shader **producer, nir_shader **consumer)
{
   nir_lower_io_arrays_to_elements(*producer, *consumer);

   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);

   if (nir_remove_unused_varyings(*producer, *consumer)) {
      /* Demoted varyings become globals; pull them down to locals first. */
      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       *
       * TODO: radeonsi shouldn't need to do this, however LLVM isn't
       * currently smart enough to handle indirects without causing excess
       * spilling causing the gpu to hang.
       *
       * See the following thread for more details of the problem:
       * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
       */
      nir_variable_mode indirect_mask = nir_var_local;

      NIR_PASS_V(*producer, nir_lower_indirect_derefs, indirect_mask);
      NIR_PASS_V(*consumer, nir_lower_indirect_derefs, indirect_mask);

      st_nir_opts(*producer);
      st_nir_opts(*consumer);
   }
}
536 
537 extern "C" {
538 
/* Link all stages of @shader_program through the NIR path: convert each
 * stage to NIR, cross-stage link and optimize, apply FS-specific fixups,
 * then finalize uniforms and notify the driver.  Returns false if the
 * driver rejects any stage.
 */
bool
st_link_nir(struct gl_context *ctx,
            struct gl_shader_program *shader_program)
{
   struct st_context *st = st_context(ctx);

   /* Determine first and last stage. */
   unsigned first = MESA_SHADER_STAGES;
   unsigned last = 0;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (!shader_program->_LinkedShaders[i])
         continue;
      if (first == MESA_SHADER_STAGES)
         first = i;
      last = i;
   }

   /* Pass 1: convert each stage to NIR and scalarize its cross-stage I/O
    * (inputs unless it's the first stage, outputs unless it's the last).
    */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      st_nir_get_mesa_program(ctx, shader_program, shader);

      nir_variable_mode mask = (nir_variable_mode) 0;
      if (i != first)
         mask = (nir_variable_mode)(mask | nir_var_shader_in);

      if (i != last)
         mask = (nir_variable_mode)(mask | nir_var_shader_out);

      nir_shader *nir = shader->Program->nir;
      nir_lower_io_to_scalar_early(nir, mask);
      st_nir_opts(nir);
   }

   /* Linking the stages in the opposite order (from fragment to vertex)
    * ensures that inter-shader outputs written to in an earlier stage
    * are eliminated if they are (transitively) not used in a later
    * stage.
    */
   int next = last;
   for (int i = next - 1; i >= 0; i--) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      st_nir_link_shaders(&shader->Program->nir,
                          &shader_program->_LinkedShaders[next]->Program->nir);
      next = i;
   }

   /* Pass 2 (forward order): per-stage fixups, info gathering, and varying
    * compaction between adjacent stages.
    */
   int prev = -1;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      nir_shader *nir = shader->Program->nir;

      /* fragment shaders may need : */
      if (nir->info.stage == MESA_SHADER_FRAGMENT) {
         static const gl_state_index wposTransformState[STATE_LENGTH] = {
            STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM
         };
         nir_lower_wpos_ytransform_options wpos_options = { { 0 } };
         struct pipe_screen *pscreen = st->pipe->screen;

         /* Query the driver's window-position conventions so gl_FragCoord
          * can be transformed to match.
          */
         memcpy(wpos_options.state_tokens, wposTransformState,
                sizeof(wpos_options.state_tokens));
         wpos_options.fs_coord_origin_upper_left =
            pscreen->get_param(pscreen,
                               PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT);
         wpos_options.fs_coord_origin_lower_left =
            pscreen->get_param(pscreen,
                               PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
         wpos_options.fs_coord_pixel_center_integer =
            pscreen->get_param(pscreen,
                               PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
         wpos_options.fs_coord_pixel_center_half_integer =
            pscreen->get_param(pscreen,
                               PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER);

         if (nir_lower_wpos_ytransform(nir, &wpos_options)) {
            nir_validate_shader(nir);
            _mesa_add_state_reference(shader->Program->Parameters,
                                      wposTransformState);
         }
      }

      NIR_PASS_V(nir, nir_lower_system_values);

      nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
      shader->Program->info = nir->info;

      if (prev != -1) {
         /* Varying packing across the prev->current interface; compat
          * profiles keep fixed locations (see the API check).
          */
         nir_compact_varyings(shader_program->_LinkedShaders[prev]->Program->nir,
                              nir, ctx->API != API_OPENGL_COMPAT);
      }
      prev = i;
   }

   /* Pass 3: finalize uniforms/parameters and hand each stage to the
    * driver; failure releases the program and aborts the link.
    */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_linked_shader *shader = shader_program->_LinkedShaders[i];
      if (shader == NULL)
         continue;

      st_glsl_to_nir_post_opts(st, shader->Program, shader_program);

      assert(shader->Program);
      if (!ctx->Driver.ProgramStringNotify(ctx,
                                           _mesa_shader_stage_to_program(i),
                                           shader->Program)) {
         _mesa_reference_program(ctx, &shader->Program, NULL);
         return false;
      }
   }

   return true;
}
659 
/* Last third of preparing nir from glsl, which happens after shader
 * variant lowering.
 *
 * Assigns driver locations for inputs/outputs/uniforms and runs the final
 * sampler/atomic lowering passes.
 */
void
st_finalize_nir(struct st_context *st, struct gl_program *prog,
                struct gl_shader_program *shader_program, nir_shader *nir)
{
   struct pipe_screen *screen = st->pipe->screen;

   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_var_copies);
   /* Tess stages keep their I/O arrays (per-vertex indexing). */
   if (nir->info.stage != MESA_SHADER_TESS_CTRL &&
       nir->info.stage != MESA_SHADER_TESS_EVAL)
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects);

   if (nir->info.stage == MESA_SHADER_VERTEX) {
      /* Needs special handling so drvloc matches the vbo state: */
      st_nir_assign_vs_in_locations(prog, nir);
      /* Re-lower global vars, to deal with any dead VS inputs. */
      NIR_PASS_V(nir, nir_lower_global_vars_to_local);

      sort_varyings(&nir->outputs);
      st_nir_assign_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
              nir->info.stage == MESA_SHADER_TESS_CTRL ||
              nir->info.stage == MESA_SHADER_TESS_EVAL) {
      /* Inner stages: both inputs and outputs are varyings. */
      sort_varyings(&nir->inputs);
      st_nir_assign_var_locations(&nir->inputs,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);

      sort_varyings(&nir->outputs);
      st_nir_assign_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      /* FS outputs (render targets) need no sorting/slot fixup. */
      sort_varyings(&nir->inputs);
      st_nir_assign_var_locations(&nir->inputs,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);
      st_nir_assign_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
       /* TODO? */
   } else {
      unreachable("invalid shader type for tgsi bypass\n");
   }

   NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
         st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);

   st_nir_assign_uniform_locations(prog, shader_program,
                                   &nir->uniforms, &nir->num_uniforms);

   /* Prefer the deref-based sampler lowering when the driver supports it. */
   if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
      NIR_PASS_V(nir, nir_lower_samplers_as_deref, shader_program);
   else
      NIR_PASS_V(nir, nir_lower_samplers, shader_program);
}
726 
727 } /* extern "C" */
728