/*
 * Copyright (C) 2015 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_debug.h"
#include "util/u_math.h"

#include "ir3_compiler.h"
#include "ir3_nir.h"
#include "ir3_shader.h"


nir_def *
ir3_get_driver_ubo(nir_builder *b, struct ir3_driver_ubo *ubo)
{
   /* Pick a UBO index to use as our constant data.  Skip UBO 0 since that's
    * reserved for gallium's cb0.
    */
   if (ubo->idx == -1) {
      if (b->shader->info.num_ubos == 0)
         b->shader->info.num_ubos++;
      ubo->idx = b->shader->info.num_ubos++;
   } else {
      assert(ubo->idx != 0);
      /* Binning shaders share ir3_driver_ubo definitions but not shader info */
      b->shader->info.num_ubos = MAX2(b->shader->info.num_ubos, ubo->idx + 1);
   }

   return nir_imm_int(b, ubo->idx);
}

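/* Usage sketch for the driver-UBO helpers: a driver-internal UBO is declared
 * once with idx = -1 and then loaded from while building NIR, e.g.
 *
 *    struct ir3_driver_ubo my_ubo = { .idx = -1 };   // hypothetical
 *    nir_def *val = ir3_load_driver_ubo(b, 4, &my_ubo, 0);
 *
 * The first use allocates the UBO index (skipping UBO 0, which is reserved
 * for gallium's cb0), and ubo->size grows to cover all loaded ranges.
 */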
nir_def *
ir3_load_driver_ubo(nir_builder *b, unsigned components,
                    struct ir3_driver_ubo *ubo,
                    unsigned offset)
{
   ubo->size = MAX2(ubo->size, offset + components);

   return nir_load_ubo(b, components, 32, ir3_get_driver_ubo(b, ubo),
                       nir_imm_int(b, offset * sizeof(uint32_t)),
                       .align_mul = 16,
                       .align_offset = (offset % 4) * sizeof(uint32_t),
                       .range_base = offset * sizeof(uint32_t),
                       .range = components * sizeof(uint32_t));
}

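/* As ir3_load_driver_ubo(), but with a dynamic offset in vec4 units added on
 * top of a constant base in dwords: the byte offset is offset * 16 + base * 4.
 * ubo->size and the load's range are grown conservatively to cover the whole
 * indirectly-addressable window.
 */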
nir_def *
ir3_load_driver_ubo_indirect(nir_builder *b, unsigned components,
                             struct ir3_driver_ubo *ubo,
                             unsigned base, nir_def *offset,
                             unsigned range)
{
   ubo->size = MAX2(ubo->size, base + components + range * 4);

   return nir_load_ubo(b, components, 32, ir3_get_driver_ubo(b, ubo),
                       nir_iadd(b, nir_imul24(b, offset, nir_imm_int(b, 16)),
                                nir_imm_int(b, base * sizeof(uint32_t))),
                       .align_mul = 16,
                       .align_offset = (base % 4) * sizeof(uint32_t),
                       .range_base = base * sizeof(uint32_t),
                       .range = components * sizeof(uint32_t) +
                        (range - 1) * 16);
}

static bool
ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                             unsigned bit_size, unsigned num_components,
                             nir_intrinsic_instr *low,
                             nir_intrinsic_instr *high, void *data)
{
   struct ir3_compiler *compiler = data;
   unsigned byte_size = bit_size / 8;

   /* Don't vectorize load_ssbo's that we could otherwise lower to isam,
    * as the tex cache benefit outweighs the benefit of vectorizing.
    */
   if ((low->intrinsic == nir_intrinsic_load_ssbo) &&
       (nir_intrinsic_access(low) & ACCESS_CAN_REORDER) &&
       compiler->has_isam_ssbo) {
      return false;
   }

   if (low->intrinsic != nir_intrinsic_load_ubo) {
      return bit_size <= 32 && align_mul >= byte_size &&
         align_offset % byte_size == 0 &&
         num_components <= 4;
   }

   assert(bit_size >= 8);
   if (bit_size != 32)
      return false;

   int size = num_components * byte_size;

   /* Don't care about alignment past vec4. */
   assert(util_is_power_of_two_nonzero(align_mul));
   align_mul = MIN2(align_mul, 16);
   align_offset &= 15;

   /* Our offset alignment should always be at least 4 bytes */
   if (align_mul < 4)
      return false;

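   /* The combined load must stay within one vec4 (16-byte) slot for every
    * runtime offset the alignment allows.  E.g. with align_mul = 4 and
    * align_offset = 0 the load could start at byte 12 of a slot, so anything
    * wider than 4 bytes might straddle a slot boundary.
    */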
   unsigned worst_start_offset = 16 - align_mul + align_offset;
   if (worst_start_offset + size > 16)
      return false;

   return true;
}

#define OPT(nir, pass, ...)                                                    \
   ({                                                                          \
      bool this_progress = false;                                              \
      NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);                       \
      this_progress;                                                           \
   })

#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
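
/* OPT() runs a pass and evaluates to whether it made progress, so results can
 * be accumulated with |=; OPT_V() runs a pass whose progress is ignored.
 * Typical use (sketch):
 *
 *    progress |= OPT(s, nir_opt_dce);
 */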

void
ir3_optimize_loop(struct ir3_compiler *compiler, nir_shader *s)
{
   MESA_TRACE_FUNC();

   bool progress;
   unsigned lower_flrp = (s->options->lower_flrp16 ? 16 : 0) |
                         (s->options->lower_flrp32 ? 32 : 0) |
                         (s->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;

      OPT_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
      progress |= OPT(s, nir_lower_phis_to_scalar, false);

      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_deref);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);

      progress |= OPT(s, nir_opt_find_array_copies);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_opt_dead_write_vars);

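      /* Optional global code motion, selected with the GCM debug option:
       * 1 runs nir_opt_gcm with value numbering, 2 runs it without.
       */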
      static int gcm = -1;
      if (gcm == -1)
         gcm = debug_get_num_option("GCM", 0);
      if (gcm == 1)
         progress |= OPT(s, nir_opt_gcm, true);
      else if (gcm == 2)
         progress |= OPT(s, nir_opt_gcm, false);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      /* NOTE: GS lowering inserts an output var with varying slot that
       * is larger than VARYING_SLOT_MAX (ie. GS_VERTEX_FLAGS_IR3),
       * which triggers asserts in nir_shader_gather_info().  To work
       * around that skip lowering phi precision for GS.
       *
       * Calling nir_shader_gather_info() late also seems to cause
       * problems for tess lowering; for now, since we only enable
       * fp16/int16 for frag and compute, skip phi precision lowering
       * for other stages.
       */
      if ((s->info.stage == MESA_SHADER_FRAGMENT) ||
          (s->info.stage == MESA_SHADER_COMPUTE) ||
          (s->info.stage == MESA_SHADER_KERNEL)) {
         progress |= OPT(s, nir_opt_phi_precision);
      }
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_lower_alu);
      progress |= OPT(s, nir_lower_pack);
      progress |= OPT(s, nir_opt_constant_folding);

      static const nir_opt_offsets_options offset_options = {
         /* How large an offset we can encode in the instr's immediate field.
          */
         .uniform_max = (1 << 9) - 1,

         /* STL/LDL have 13b for offset with MSB being a sign bit, but this opt
          * doesn't deal with negative offsets.
          */
         .shared_max = (1 << 12) - 1,

         .buffer_max = ~0,
      };
      progress |= OPT(s, nir_opt_offsets, &offset_options);

      nir_load_store_vectorize_options vectorize_opts = {
         .modes = nir_var_mem_ubo | nir_var_mem_ssbo,
         .callback = ir3_nir_should_vectorize_mem,
         .robust_modes = compiler->options.robust_buffer_access2 ?
               nir_var_mem_ubo | nir_var_mem_ssbo : 0,
         .cb_data = compiler,
      };
      progress |= OPT(s, nir_opt_load_store_vectorize, &vectorize_opts);

      if (lower_flrp != 0) {
         if (OPT(s, nir_lower_flrp, lower_flrp, false /* always_precise */)) {
            OPT(s, nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_loop)) {
         progress |= true;
         /* If nir_opt_loop makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
      progress |= OPT(s, nir_opt_loop_unroll);
      progress |= OPT(s, nir_lower_64bit_phis);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   } while (progress);

   OPT(s, nir_lower_var_copies);
}

static bool
should_split_wrmask(const nir_instr *instr, const void *data)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   switch (intr->intrinsic) {
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      return true;
   default:
      return false;
   }
}

static bool
ir3_nir_lower_ssbo_size_filter(const nir_instr *instr, const void *data)
{
   return instr->type == nir_instr_type_intrinsic &&
          nir_instr_as_intrinsic(instr)->intrinsic ==
             nir_intrinsic_get_ssbo_size;
}

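/* The backend's native get_ssbo_size result is in units of 2^shift bytes
 * (see the per-gen callers in ir3_nir_post_finalize()), so rewrite users of
 * the intrinsic to shift the result left into the byte count NIR expects.
 */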
static nir_def *
ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data)
{
   uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   return nir_ishl_imm(b, &intr->def, ssbo_size_to_bytes_shift);
}

static bool
ir3_nir_lower_ssbo_size(nir_shader *s, uint8_t ssbo_size_to_bytes_shift)
{
   return nir_shader_lower_instructions(s, ir3_nir_lower_ssbo_size_filter,
                                        ir3_nir_lower_ssbo_size_instr,
                                        &ssbo_size_to_bytes_shift);
}

void
ir3_nir_lower_io_to_temporaries(nir_shader *s)
{
   /* Outputs consumed by the VPC, VS inputs, and FS outputs are all handled
    * by the hardware pre-loading registers at the beginning and then reading
    * them at the end, so we can't access them indirectly except through
    * normal register-indirect accesses, and therefore ir3 doesn't support
    * indirect accesses on those. Other i/o is lowered in ir3_nir_lower_tess,
    * and indirects work just fine for those. GS outputs may be consumed by
    * VPC, but have their own lowering in ir3_nir_lower_gs() which does
    * something similar to nir_lower_io_to_temporaries so we shouldn't need
    * to lower them.
    *
    * Note: this might be a little inefficient for VS or TES outputs when the
    * next stage isn't an FS, but it probably doesn't make sense to depend on
    * the next stage before variant creation.
    *
    * TODO: for gallium, mesa/st also does some redundant lowering, including
    * running this pass for GS inputs/outputs which we don't want but not
    * including TES outputs or FS inputs which we do need. We should probably
    * stop doing that once we're sure all drivers are doing their own
    * indirect i/o lowering.
    */
   bool lower_input = s->info.stage == MESA_SHADER_VERTEX ||
                      s->info.stage == MESA_SHADER_FRAGMENT;
   bool lower_output = s->info.stage != MESA_SHADER_TESS_CTRL &&
                       s->info.stage != MESA_SHADER_GEOMETRY;
   if (lower_input || lower_output) {
      NIR_PASS_V(s, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(s),
                 lower_output, lower_input);

      /* nir_lower_io_to_temporaries() creates global variables and copy
       * instructions which need to be cleaned up.
       */
      NIR_PASS_V(s, nir_split_var_copies);
      NIR_PASS_V(s, nir_lower_var_copies);
      NIR_PASS_V(s, nir_lower_global_vars_to_local);
   }

   /* Regardless of the above, we need to lower indirect references to
    * compact variables such as clip/cull distances because due to how
    * TCS<->TES IO works we cannot handle indirect accesses that "straddle"
    * vec4 components. nir_lower_indirect_derefs has a special case for
    * compact variables, so it will actually lower them even though we pass
    * in 0 modes.
    *
    * Using temporaries would be slightly better but
    * nir_lower_io_to_temporaries currently doesn't support TCS i/o.
    */
   NIR_PASS_V(s, nir_lower_indirect_derefs, 0, UINT32_MAX);
}

/**
 * Inserts an add of 0.5 to floating point array index values in texture
 * coordinates.  The hardware floors the array index, so the +0.5 turns that
 * into round-to-nearest layer selection.
 */
static bool
ir3_nir_lower_array_sampler_cb(struct nir_builder *b, nir_instr *instr, void *_data)
{
   if (instr->type != nir_instr_type_tex)
      return false;

   nir_tex_instr *tex = nir_instr_as_tex(instr);
   if (!tex->is_array || tex->op == nir_texop_lod)
      return false;

   int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   if (coord_idx == -1 ||
       nir_tex_instr_src_type(tex, coord_idx) != nir_type_float)
      return false;

   b->cursor = nir_before_instr(&tex->instr);

   unsigned ncomp = tex->coord_components;
   nir_def *src = tex->src[coord_idx].src.ssa;

   assume(ncomp >= 1);
   nir_def *ai = nir_channel(b, src, ncomp - 1);
   ai = nir_fadd_imm(b, ai, 0.5);
   nir_src_rewrite(&tex->src[coord_idx].src,
                   nir_vector_insert_imm(b, src, ai, ncomp - 1));
   return true;
}

static bool
ir3_nir_lower_array_sampler(nir_shader *shader)
{
   return nir_shader_instructions_pass(
      shader, ir3_nir_lower_array_sampler_cb,
      nir_metadata_block_index | nir_metadata_dominance, NULL);
}

void
ir3_finalize_nir(struct ir3_compiler *compiler, nir_shader *s)
{
   MESA_TRACE_FUNC();

   struct nir_lower_tex_options tex_options = {
      .lower_rect = 0,
      .lower_tg4_offsets = true,
      .lower_invalid_implicit_lod = true,
      .lower_index_to_offset = true,
   };

   if (compiler->gen >= 4) {
      /* a4xx seems to have *no* sam.p */
      tex_options.lower_txp = ~0; /* lower all txp */
   } else {
      /* a3xx just needs to avoid sam.p for 3d tex */
      tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
   }

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   if (s->info.stage == MESA_SHADER_GEOMETRY)
      NIR_PASS_V(s, ir3_nir_lower_gs);

   NIR_PASS_V(s, nir_lower_frexp);
   NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);

   OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);

   OPT_V(s, nir_lower_tex, &tex_options);
   OPT_V(s, nir_lower_load_const_to_scalar);

   if (compiler->array_index_add_half)
      OPT_V(s, ir3_nir_lower_array_sampler);

   OPT_V(s, nir_lower_is_helper_invocation);

   ir3_optimize_loop(compiler, s);

   /* do idiv lowering after first opt loop to get a chance to propagate
    * constants for divide by immed power-of-two:
    */
   nir_lower_idiv_options idiv_options = {
      .allow_fp16 = true,
   };
   bool idiv_progress = OPT(s, nir_opt_idiv_const, 8);
   idiv_progress |= OPT(s, nir_lower_idiv, &idiv_options);

   if (idiv_progress)
      ir3_optimize_loop(compiler, s);

   OPT_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   /* st_program.c's parameter list optimization requires that future nir
    * variants don't reallocate the uniform storage, so we have to remove
    * uniforms that occupy storage.  But we don't want to remove samplers,
    * because they're needed for YUV variant lowering.
    */
   nir_foreach_uniform_variable_safe (var, s) {
      if (var->data.mode == nir_var_uniform &&
          (glsl_type_get_image_count(var->type) ||
           glsl_type_get_sampler_count(var->type)))
         continue;

      exec_node_remove(&var->node);
   }
   nir_validate_shader(s, "after uniform var removal");

   nir_sweep(s);
}

static bool
lower_subgroup_id_filter(const nir_instr *instr, const void *unused)
{
   (void)unused;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   return intr->intrinsic == nir_intrinsic_load_subgroup_invocation ||
          intr->intrinsic == nir_intrinsic_load_subgroup_id ||
          intr->intrinsic == nir_intrinsic_load_num_subgroups;
}

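/* Subgroups are carved out of the linear local invocation index in order,
 * and the subgroup size is a power of two, so:
 *
 *    subgroup_invocation = local_invocation_index & (subgroup_size - 1)
 *    subgroup_id         = local_invocation_index >> subgroup_id_shift
 *    num_subgroups       = 1 + ((workgroup_size - 1) >> subgroup_id_shift)
 *
 * where the last line is just DIV_ROUND_UP(workgroup_size, subgroup_size).
 */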
static nir_def *
lower_subgroup_id(nir_builder *b, nir_instr *instr, void *unused)
{
   (void)unused;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation) {
      return nir_iand(
         b, nir_load_local_invocation_index(b),
         nir_iadd_imm(b, nir_load_subgroup_size(b), -1));
   } else if (intr->intrinsic == nir_intrinsic_load_subgroup_id) {
      return nir_ishr(b, nir_load_local_invocation_index(b),
                      nir_load_subgroup_id_shift_ir3(b));
   } else {
      assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);
      /* If the workgroup size is constant,
       * nir_lower_compute_system_values() will replace local_size with a
       * constant so this can mostly be constant folded away.
       */
      nir_def *local_size = nir_load_workgroup_size(b);
      nir_def *size =
         nir_imul24(b, nir_channel(b, local_size, 0),
                    nir_imul24(b, nir_channel(b, local_size, 1),
                               nir_channel(b, local_size, 2)));
      nir_def *one = nir_imm_int(b, 1);
      return nir_iadd(b, one,
                      nir_ishr(b, nir_isub(b, size, one),
                               nir_load_subgroup_id_shift_ir3(b)));
   }
}

static bool
ir3_nir_lower_subgroup_id_cs(nir_shader *shader)
{
   return nir_shader_lower_instructions(shader, lower_subgroup_id_filter,
                                        lower_subgroup_id, NULL);
}

/**
 * Late passes that need to be done after pscreen->finalize_nir()
 */
void
ir3_nir_post_finalize(struct ir3_shader *shader)
{
   struct nir_shader *s = shader->nir;
   struct ir3_compiler *compiler = shader->compiler;

   MESA_TRACE_FUNC();

   NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
              ir3_glsl_type_size, nir_lower_io_lower_64bit_to_32);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      /* NOTE: lower load_barycentric_at_sample first, since it
       * produces load_barycentric_at_offset:
       */
      NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_sample);
      NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_offset);
      NIR_PASS_V(s, ir3_nir_move_varying_inputs);
      NIR_PASS_V(s, nir_lower_fb_read);
      NIR_PASS_V(s, ir3_nir_lower_layer_id);
   }

   if (compiler->gen >= 6 && s->info.stage == MESA_SHADER_FRAGMENT &&
       !(ir3_shader_debug & IR3_DBG_NOFP16)) {
      /* Lower FS mediump inputs to 16-bit. If you declared it mediump, you
       * probably want 16-bit instructions (and have set
       * mediump/RelaxedPrecision on most of the rest of the shader's
       * instructions).  If we don't lower it in NIR, then comparisons of the
       * results of mediump ALU ops with the mediump input will happen in highp,
       * causing extra conversions (and, incidentally, causing
       * dEQP-GLES2.functional.shaders.algorithm.rgb_to_hsl_fragment on ANGLE to
       * fail).
       *
       * However, we can't do flat inputs because flat.b doesn't have the
       * destination type for how to downconvert the
       * 32-bit-in-the-varyings-interpolator value. (also, even if it did, watch
       * out for how gl_nir_lower_packed_varyings packs all flat-interpolated
       * things together as ivec4s, so when we lower a formerly-float input
       * you'd end up with an incorrect f2f16(i2i32(load_input())) instead of
       * load_input).
       */
      uint64_t mediump_varyings = 0;
      nir_foreach_shader_in_variable(var, s) {
         if ((var->data.precision == GLSL_PRECISION_MEDIUM ||
              var->data.precision == GLSL_PRECISION_LOW) &&
             var->data.interpolation != INTERP_MODE_FLAT) {
            mediump_varyings |= BITFIELD64_BIT(var->data.location);
         }
      }

      if (mediump_varyings) {
         NIR_PASS_V(s, nir_lower_mediump_io,
                  nir_var_shader_in,
                  mediump_varyings,
                  false);
      }

      /* This should come after input lowering, to opportunistically lower
       * non-mediump outputs.
       */
      NIR_PASS_V(s, nir_lower_mediump_io, nir_var_shader_out, 0, false);
   }

   {
      /* If the API-facing subgroup size is forced to a particular value, lower
       * it here. Beyond this point nir_intrinsic_load_subgroup_size will return
       * the "real" subgroup size.
       */
      unsigned subgroup_size = 0, max_subgroup_size = 0;
      switch (shader->options.api_wavesize) {
      case IR3_SINGLE_ONLY:
         subgroup_size = max_subgroup_size = compiler->threadsize_base;
         break;
      case IR3_DOUBLE_ONLY:
         subgroup_size = max_subgroup_size = compiler->threadsize_base * 2;
         break;
      case IR3_SINGLE_OR_DOUBLE:
         /* For vertex stages, we know the wavesize will never be doubled.
          * Lower subgroup_size here, to avoid having to deal with it when
          * translating from NIR. Otherwise use the "real" wavesize obtained as
          * a driver param.
          */
         if (s->info.stage != MESA_SHADER_COMPUTE &&
             s->info.stage != MESA_SHADER_FRAGMENT) {
            subgroup_size = max_subgroup_size = compiler->threadsize_base;
         } else {
            subgroup_size = 0;
            max_subgroup_size = compiler->threadsize_base * 2;
         }
         break;
      }

      nir_lower_subgroups_options options = {
            .subgroup_size = subgroup_size,
            .ballot_bit_size = 32,
            .ballot_components = max_subgroup_size / 32,
            .lower_to_scalar = true,
            .lower_vote_eq = true,
            .lower_vote_bool_eq = true,
            .lower_subgroup_masks = true,
            .lower_read_invocation_to_cond = true,
            .lower_shuffle = true,
            .lower_relative_shuffle = true,
            .lower_inverse_ballot = true,
      };

      if (!((s->info.stage == MESA_SHADER_COMPUTE) ||
            (s->info.stage == MESA_SHADER_KERNEL) ||
            compiler->has_getfiberid)) {
         options.subgroup_size = 1;
         options.lower_vote_trivial = true;
      }

      OPT(s, nir_lower_subgroups, &options);
   }

   if ((s->info.stage == MESA_SHADER_COMPUTE) ||
       (s->info.stage == MESA_SHADER_KERNEL)) {
      bool progress = false;
      NIR_PASS(progress, s, ir3_nir_lower_subgroup_id_cs);

      /* ir3_nir_lower_subgroup_id_cs creates extra compute intrinsics which
       * we need to lower again.
       */
      if (progress)
         NIR_PASS_V(s, nir_lower_compute_system_values, NULL);
   }

   /* we cannot ensure that ir3_finalize_nir() is only called once, so
    * we also need to do any run-once workarounds here:
    */
   OPT_V(s, ir3_nir_apply_trig_workarounds);

   const nir_lower_image_options lower_image_opts = {
      .lower_cube_size = true,
      .lower_image_samples_to_one = true
   };
   NIR_PASS_V(s, nir_lower_image, &lower_image_opts);

   const nir_lower_idiv_options lower_idiv_options = {
      .allow_fp16 = true,
   };
   NIR_PASS_V(s, nir_lower_idiv, &lower_idiv_options); /* idiv generated by cube lowering */

   /* The resinfo opcode returns the size in dwords on a4xx */
   if (compiler->gen == 4)
      OPT_V(s, ir3_nir_lower_ssbo_size, 2);

   /* The resinfo opcode we have for getting the SSBO size on a6xx returns a
    * byte length divided by IBO_0_FMT, while the NIR intrinsic coming in is a
    * number of bytes. Switch things so the NIR intrinsic in our backend means
    * dwords.
    */
   if (compiler->gen >= 6)
      OPT_V(s, ir3_nir_lower_ssbo_size, compiler->options.storage_16bit ? 1 : 2);

   ir3_optimize_loop(compiler, s);
}

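/* Clip plane lowering for the vertex pipeline has to happen in the last
 * geometry stage, i.e. the one whose outputs actually reach the rasterizer:
 */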
static bool
lower_ucp_vs(struct ir3_shader_variant *so)
{
   if (!so->key.ucp_enables)
      return false;

   gl_shader_stage last_geom_stage;

   if (so->key.has_gs) {
      last_geom_stage = MESA_SHADER_GEOMETRY;
   } else if (so->key.tessellation) {
      last_geom_stage = MESA_SHADER_TESS_EVAL;
   } else {
      last_geom_stage = MESA_SHADER_VERTEX;
   }

   return so->type == last_geom_stage;
}

void
ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s)
{
   MESA_TRACE_FUNC();

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   bool progress = false;

   NIR_PASS_V(s, nir_lower_io_to_scalar, nir_var_mem_ssbo, NULL, NULL);

   if (so->key.has_gs || so->key.tessellation) {
      switch (so->type) {
      case MESA_SHADER_VERTEX:
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
                    so->key.tessellation);
         progress = true;
         break;
      case MESA_SHADER_TESS_CTRL:
         NIR_PASS_V(s, nir_lower_io_to_scalar,
                     nir_var_shader_in | nir_var_shader_out, NULL, NULL);
         NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
         progress = true;
         break;
      case MESA_SHADER_TESS_EVAL:
         NIR_PASS_V(s, ir3_nir_lower_tess_eval, so, so->key.tessellation);
         if (so->key.has_gs)
            NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
                       so->key.tessellation);
         progress = true;
         break;
      case MESA_SHADER_GEOMETRY:
         NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
         progress = true;
         break;
      default:
         break;
      }
   }

   /* Note that it is intentional to use the VS lowering pass for GS, since we
    * lower GS into something that looks more like a VS in ir3_nir_lower_gs():
    */
   if (lower_ucp_vs(so)) {
      progress |= OPT(s, nir_lower_clip_vs, so->key.ucp_enables, false, true, NULL);
   } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
      if (so->key.ucp_enables && !so->compiler->has_clip_cull)
         progress |= OPT(s, nir_lower_clip_fs, so->key.ucp_enables, true);
   }

   /* Move large constant variables to the constants attached to the NIR
    * shader, which we will upload in the immediates range.  This generates
    * amuls, so we need to clean those up after.
    *
    * Passing no size_align, we would get packed values, which if we end up
    * having to load with LDC would result in extra reads to unpack from
    * straddling loads.  Align everything to vec4 to avoid that, though we
    * could theoretically do better.
    */
   OPT_V(s, nir_opt_large_constants, glsl_get_vec4_size_align_bytes,
         32 /* bytes */);
   OPT_V(s, ir3_nir_lower_load_constant, so);

   /* Lower large temporaries to scratch, which in Qualcomm terms is private
    * memory, to avoid excess register pressure. This should happen after
    * nir_opt_large_constants, because loading from a UBO is much, much less
    * expensive.
    */
   if (so->compiler->has_pvtmem) {
      progress |= OPT(s, nir_lower_vars_to_scratch, nir_var_function_temp,
                      16 * 16 /* bytes */, glsl_get_natural_size_align_bytes);
   }

   /* Lower scratch writemasks */
   progress |= OPT(s, nir_lower_wrmasks, should_split_wrmask, s);

   if (OPT(s, nir_lower_locals_to_regs, 1)) {
      progress = true;

      /* Split 64b registers into two 32b ones. */
      OPT_V(s, ir3_nir_lower_64b_regs);
   }

   progress |= OPT(s, ir3_nir_lower_wide_load_store);
   progress |= OPT(s, ir3_nir_lower_64b_global);
   progress |= OPT(s, ir3_nir_lower_64b_intrinsics);
   progress |= OPT(s, ir3_nir_lower_64b_undef);
   progress |= OPT(s, nir_lower_int64);

   /* Cleanup code leftover from lowering passes before opt_preamble */
   if (progress) {
      progress |= OPT(s, nir_opt_constant_folding);
   }

   OPT(s, ir3_nir_opt_subgroups, so);

   if (so->compiler->load_shader_consts_via_preamble)
      progress |= OPT(s, ir3_nir_lower_driver_params_to_ubo, so);

   /* Do the preamble before analysing UBO ranges, because it's usually
    * higher-value and because it can result in eliminating some indirect UBO
    * accesses where otherwise we'd have to push the whole range. However we
    * have to lower the preamble after UBO lowering so that UBO lowering can
    * insert instructions in the preamble to push UBOs.
    */
   if (so->compiler->has_preamble &&
       !(ir3_shader_debug & IR3_DBG_NOPREAMBLE))
      progress |= OPT(s, ir3_nir_opt_preamble, so);

   if (so->compiler->load_shader_consts_via_preamble)
      progress |= OPT(s, ir3_nir_lower_driver_params_to_ubo, so);

   /* TODO: ldg.k might also work on a6xx */
   if (so->compiler->gen >= 7)
      progress |= OPT(s, ir3_nir_lower_const_global_loads, so);

   if (!so->binning_pass)
      OPT_V(s, ir3_nir_analyze_ubo_ranges, so);

   progress |= OPT(s, ir3_nir_lower_ubo_loads, so);

   if (so->shader_options.push_consts_type == IR3_PUSH_CONSTS_SHARED_PREAMBLE)
      progress |= OPT(s, ir3_nir_lower_push_consts_to_preamble, so);

   progress |= OPT(s, ir3_nir_lower_preamble, so);

   OPT_V(s, nir_lower_amul, ir3_glsl_type_size);

   /* UBO offset lowering has to come after we've decided what will
    * be left as load_ubo
    */
   if (so->compiler->gen >= 6)
      progress |= OPT(s, nir_lower_ubo_vec4);

   OPT_V(s, ir3_nir_lower_io_offsets);

   if (progress)
      ir3_optimize_loop(so->compiler, s);

   /* Fixup indirect load_uniform's which end up with a const base offset
    * which is too large to encode.  Do this late(ish) so we actually
    * can differentiate indirect vs non-indirect.
    */
   if (OPT(s, ir3_nir_fixup_load_uniform))
      ir3_optimize_loop(so->compiler, s);

   /* Do late algebraic optimization to turn add(a, neg(b)) back into
    * subs, then the mandatory cleanup after algebraic.  Note that it may
    * produce fnegs, and if so then we need to keep running to squash
    * fneg(fneg(a)).
    */
   bool more_late_algebraic = true;
   while (more_late_algebraic) {
      more_late_algebraic = OPT(s, nir_opt_algebraic_late);
      if (!more_late_algebraic && so->compiler->gen >= 5) {
         /* Lowers texture operations that have only f2f16 or u2u16 called on
          * them to have a 16-bit destination.  Also, lower 16-bit texture
          * coordinates that had been upconverted to 32-bits just for the
          * sampler to just be 16-bit texture sources.
          */
         struct nir_fold_tex_srcs_options fold_srcs_options = {
            .sampler_dims = ~0,
            .src_types = (1 << nir_tex_src_coord) |
                         (1 << nir_tex_src_lod) |
                         (1 << nir_tex_src_bias) |
                         (1 << nir_tex_src_offset) |
                         (1 << nir_tex_src_comparator) |
                         (1 << nir_tex_src_min_lod) |
                         (1 << nir_tex_src_ms_index) |
                         (1 << nir_tex_src_ddx) |
                         (1 << nir_tex_src_ddy),
         };
         struct nir_fold_16bit_tex_image_options fold_16bit_options = {
            .rounding_mode = nir_rounding_mode_rtz,
            .fold_tex_dest_types = nir_type_float,
            /* blob dumps have no half regs on pixel 2's ldib or stib, so only enable for a6xx+. */
            .fold_image_dest_types = so->compiler->gen >= 6 ?
                                        nir_type_float | nir_type_uint | nir_type_int : 0,
            .fold_image_store_data = so->compiler->gen >= 6,
            .fold_srcs_options_count = 1,
            .fold_srcs_options = &fold_srcs_options,
         };
         OPT(s, nir_fold_16bit_tex_image, &fold_16bit_options);
      }
      OPT_V(s, nir_opt_constant_folding);
      OPT_V(s, nir_copy_prop);
      OPT_V(s, nir_opt_dce);
      OPT_V(s, nir_opt_cse);
   }

   OPT_V(s, nir_opt_sink, nir_move_const_undef);

   if (ir3_shader_debug & IR3_DBG_DISASM) {
      mesa_logi("----------------------");
      nir_log_shaderi(s);
      mesa_logi("----------------------");
   }

   nir_sweep(s);

   /* Binning pass variants re-use the const_state of the corresponding
    * draw pass shader, so that same const emit can be re-used for both
    * passes:
    */
   if (!so->binning_pass)
      ir3_setup_const_state(s, so, ir3_const_state(so));
}

bool
ir3_get_driver_param_info(const nir_shader *shader, nir_intrinsic_instr *intr,
                          struct driver_param_info *param_info)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_load_base_workgroup_id:
      param_info->offset = IR3_DP_BASE_GROUP_X;
      break;
   case nir_intrinsic_load_num_workgroups:
      param_info->offset = IR3_DP_NUM_WORK_GROUPS_X;
      break;
   case nir_intrinsic_load_workgroup_size:
      param_info->offset = IR3_DP_LOCAL_GROUP_SIZE_X;
      break;
   case nir_intrinsic_load_subgroup_size:
      assert(shader->info.stage == MESA_SHADER_COMPUTE ||
             shader->info.stage == MESA_SHADER_FRAGMENT);
      if (shader->info.stage == MESA_SHADER_COMPUTE) {
         param_info->offset = IR3_DP_CS_SUBGROUP_SIZE;
      } else {
         param_info->offset = IR3_DP_FS_SUBGROUP_SIZE;
      }
      break;
   case nir_intrinsic_load_subgroup_id_shift_ir3:
      param_info->offset = IR3_DP_SUBGROUP_ID_SHIFT;
      break;
   case nir_intrinsic_load_work_dim:
      param_info->offset = IR3_DP_WORK_DIM;
      break;
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
      param_info->offset = IR3_DP_VTXID_BASE;
      break;
   case nir_intrinsic_load_is_indexed_draw:
      param_info->offset = IR3_DP_IS_INDEXED_DRAW;
      break;
   case nir_intrinsic_load_draw_id:
      param_info->offset = IR3_DP_DRAWID;
      break;
   case nir_intrinsic_load_base_instance:
      param_info->offset = IR3_DP_INSTID_BASE;
      break;
   case nir_intrinsic_load_user_clip_plane: {
      uint32_t idx = nir_intrinsic_ucp_id(intr);
      param_info->offset = IR3_DP_UCP0_X + 4 * idx;
      break;
   }
   case nir_intrinsic_load_tess_level_outer_default:
      param_info->offset = IR3_DP_HS_DEFAULT_OUTER_LEVEL_X;
      break;
   case nir_intrinsic_load_tess_level_inner_default:
      param_info->offset = IR3_DP_HS_DEFAULT_INNER_LEVEL_X;
      break;
   case nir_intrinsic_load_frag_size_ir3:
      param_info->offset = IR3_DP_FS_FRAG_SIZE;
      break;
   case nir_intrinsic_load_frag_offset_ir3:
      param_info->offset = IR3_DP_FS_FRAG_OFFSET;
      break;
   case nir_intrinsic_load_frag_invocation_count:
      param_info->offset = IR3_DP_FS_FRAG_INVOCATION_COUNT;
      break;
   default:
      return false;
   }

   return true;
}

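/* Scan the shader for intrinsics that consume driver-supplied constants, to
 * size the image-dims table (used on a5xx) and the driver-param range.
 */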
static void
ir3_nir_scan_driver_consts(struct ir3_compiler *compiler, nir_shader *shader, struct ir3_const_state *layout)
{
   nir_foreach_function (function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block (block, function->impl) {
         nir_foreach_instr (instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            unsigned idx;

            switch (intr->intrinsic) {
            case nir_intrinsic_image_atomic:
            case nir_intrinsic_image_atomic_swap:
            case nir_intrinsic_image_load:
            case nir_intrinsic_image_store:
            case nir_intrinsic_image_size:
               /* a4xx gets these supplied by the hw directly (maybe CP?) */
               if (compiler->gen == 5 &&
                   !(intr->intrinsic == nir_intrinsic_image_load &&
                     !(nir_intrinsic_access(intr) & ACCESS_COHERENT))) {
                  idx = nir_src_as_uint(intr->src[0]);
                  if (layout->image_dims.mask & (1 << idx))
                     break;
                  layout->image_dims.mask |= (1 << idx);
                  layout->image_dims.off[idx] = layout->image_dims.count;
                  layout->image_dims.count += 3; /* three const per */
               }
               break;
            default:
               break;
            }

            struct driver_param_info param_info;
            if (ir3_get_driver_param_info(shader, intr, &param_info)) {
               layout->num_driver_params =
                  MAX2(layout->num_driver_params,
                       param_info.offset + nir_intrinsic_dest_components(intr));
            }
         }
      }
   }

   /* TODO: Provide a spot somewhere to safely upload unwanted values, and a way
    * to determine if they're wanted or not. For now we always make the whole
    * driver param range available, since the driver will always instruct the
    * hardware to upload these.
    */
   if (!compiler->has_shared_regfile &&
         shader->info.stage == MESA_SHADER_COMPUTE) {
      layout->num_driver_params =
         MAX2(layout->num_driver_params, IR3_DP_WORKGROUP_ID_Z + 1);
   }
}

/* Sets up the variant-dependent constant state for the ir3_shader.  Note
 * that it is also used from ir3_nir_analyze_ubo_ranges() to figure out the
 * maximum number of driver params that would eventually be used, to leave
 * space for this function to allocate the driver params.
 */
void
ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
                      struct ir3_const_state *const_state)
{
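   /* Const file layout, in allocation order (each section present only when
    * used): reserved user consts, lowered UBO ranges, preamble, global,
    * UBO pointer table (gen<6), image dims (a5xx), kernel params,
    * driver params, TFBO addresses (gen<5), primitive param/map, immediates.
    */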
   struct ir3_compiler *compiler = v->compiler;

   memset(&const_state->offsets, ~0, sizeof(const_state->offsets));

   ir3_nir_scan_driver_consts(compiler, nir, const_state);

   if ((compiler->gen < 5) && (v->stream_output.num_outputs > 0)) {
      const_state->num_driver_params =
         MAX2(const_state->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
   }

   const_state->num_ubos = nir->info.num_ubos;

   assert((const_state->ubo_state.size % 16) == 0);
   unsigned constoff = v->shader_options.num_reserved_user_consts +
      const_state->ubo_state.size / 16 +
      const_state->preamble_size +
      const_state->global_size;
   unsigned ptrsz = ir3_pointer_size(compiler);

   if (const_state->num_ubos > 0 && compiler->gen < 6) {
      const_state->offsets.ubo = constoff;
      constoff += align(const_state->num_ubos * ptrsz, 4) / 4;
   }

   if (const_state->image_dims.count > 0) {
      unsigned cnt = const_state->image_dims.count;
      const_state->offsets.image_dims = constoff;
      constoff += align(cnt, 4) / 4;
   }

   if (v->type == MESA_SHADER_KERNEL) {
      const_state->offsets.kernel_params = constoff;
      constoff += align(v->cs.req_input_mem, 4) / 4;
   }

   if (const_state->num_driver_params > 0) {
      /* num_driver_params in dwords.  we only need to align to vec4s for the
       * common case of immediate constant uploads, but for indirect dispatch
       * the constants may also be indirect and so we have to align the area in
       * const space to that requirement.
       */
      const_state->num_driver_params = align(const_state->num_driver_params, 4);
      unsigned upload_unit = 1;
      if (v->type == MESA_SHADER_COMPUTE ||
          (const_state->num_driver_params >= IR3_DP_VTXID_BASE)) {
         upload_unit = compiler->const_upload_unit;
      }

      /* offset cannot be 0 for vs params loaded by CP_DRAW_INDIRECT_MULTI */
      if (v->type == MESA_SHADER_VERTEX && compiler->gen >= 6)
         constoff = MAX2(constoff, 1);
      constoff = align(constoff, upload_unit);
      const_state->offsets.driver_param = constoff;

      constoff += align(const_state->num_driver_params / 4, upload_unit);
   }

   if ((v->type == MESA_SHADER_VERTEX) && (compiler->gen < 5) &&
       v->stream_output.num_outputs > 0) {
      const_state->offsets.tfbo = constoff;
      constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
   }

   if (!compiler->load_shader_consts_via_preamble) {
      switch (v->type) {
      case MESA_SHADER_TESS_CTRL:
      case MESA_SHADER_TESS_EVAL:
         const_state->offsets.primitive_param = constoff;
         constoff += 2;

         const_state->offsets.primitive_map = constoff;
         break;
      case MESA_SHADER_GEOMETRY:
         const_state->offsets.primitive_param = constoff;
         constoff += 1;

         const_state->offsets.primitive_map = constoff;
         break;
      default:
         break;
      }
   }

   switch (v->type) {
   case MESA_SHADER_VERTEX:
      const_state->offsets.primitive_param = constoff;
      constoff += 1;
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
      constoff += DIV_ROUND_UP(v->input_size, 4);
      break;
   case MESA_SHADER_GEOMETRY:
      constoff += DIV_ROUND_UP(v->input_size, 4);
      break;
   default:
      break;
   }

   const_state->offsets.immediate = constoff;

   assert(constoff <= ir3_max_const(v));
}