1 /*
2  * Copyright © 2015 Rob Clark <robclark@freedesktop.org>
3  * SPDX-License-Identifier: MIT
4  *
5  * Authors:
6  *    Rob Clark <robclark@freedesktop.org>
7  */
8 
9 #include "util/u_debug.h"
10 #include "util/u_math.h"
11 
12 #include "ir3_compiler.h"
13 #include "ir3_nir.h"
14 #include "ir3_shader.h"
15 
16 /* For use by binning_pass shaders, where const_state is const but is expected
17  * to already be set up from compiling the corresponding non-binning variant.
18  */
19 nir_def *
20 ir3_get_shared_driver_ubo(nir_builder *b, const struct ir3_driver_ubo *ubo)
21 {
22    assert(ubo->idx > 0);
23 
24    /* Binning shaders share the ir3_driver_ubo definitions but not the shader info */
25    b->shader->info.num_ubos = MAX2(b->shader->info.num_ubos, ubo->idx + 1);
26    return nir_imm_int(b, ubo->idx);
27 }
28 
29 nir_def *
30 ir3_get_driver_ubo(nir_builder *b, struct ir3_driver_ubo *ubo)
31 {
32    /* Pick a UBO index to use as our constant data.  Skip UBO 0 since that's
33     * reserved for gallium's cb0.
34     */
35    if (ubo->idx == -1) {
36       if (b->shader->info.num_ubos == 0)
37          b->shader->info.num_ubos++;
38       ubo->idx = b->shader->info.num_ubos++;
39       return nir_imm_int(b, ubo->idx);
40    }
41 
42    return ir3_get_shared_driver_ubo(b, ubo);
43 }
44 
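/* Returns the UBO index used for driver-internal constant data. For binning
 * variants the index was already assigned while compiling the non-binning
 * variant, so only the shared (read-only const_state) path is taken there.
 */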
45 nir_def *
46 ir3_get_driver_consts_ubo(nir_builder *b, struct ir3_shader_variant *v)
47 {
48    if (v->binning_pass)
49       return ir3_get_shared_driver_ubo(b, &ir3_const_state(v)->consts_ubo);
50    return ir3_get_driver_ubo(b, &ir3_const_state_mut(v)->consts_ubo);
51 }
52 
53 static const struct glsl_type *
54 get_driver_ubo_type(const struct ir3_driver_ubo *ubo)
55 {
56    return glsl_array_type(glsl_uint_type(), ubo->size, 0);
57 }
58 
59 /* Create or update the size of a driver-ubo: */
60 void
61 ir3_update_driver_ubo(nir_shader *nir, const struct ir3_driver_ubo *ubo, const char *name)
62 {
63    if (ubo->idx < 0)
64       return;
65 
66 
67    nir_foreach_variable_in_shader(var, nir) {
68       if (var->data.mode != nir_var_mem_ubo)
69          continue;
70       if (var->data.binding != ubo->idx)
71          continue;
72 
73       /* UBO already exists, make sure it is big enough: */
74       if (glsl_array_size(var->type) < ubo->size)
75          var->type = get_driver_ubo_type(ubo);
76    }
77 
78    /* UBO variable does not exist yet, so create it: */
79    nir_variable *var =
80       nir_variable_create(nir, nir_var_mem_ubo, get_driver_ubo_type(ubo), name);
81    var->data.driver_location = ubo->idx;
82 }
83 
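/* Helper for the driver-UBO loads below: "offset" is in dwords, and the load
 * is described to NIR as vec4 (16-byte) aligned (align_mul = 16), with
 * align_offset derived from the dword offset within the vec4.
 */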
84 static nir_def *
85 load_driver_ubo(nir_builder *b, unsigned components, nir_def *ubo, unsigned offset)
86 {
87    return nir_load_ubo(b, components, 32, ubo,
88                        nir_imm_int(b, offset * sizeof(uint32_t)),
89                        .align_mul = 16,
90                        .align_offset = (offset % 4) * sizeof(uint32_t),
91                        .range_base = offset * sizeof(uint32_t),
92                        .range = components * sizeof(uint32_t));
93 }
94 
95 /* For use by binning_pass shaders, where const_state is const but is expected
96  * to already be set up from compiling the corresponding non-binning variant.
97  */
98 nir_def *
99 ir3_load_shared_driver_ubo(nir_builder *b, unsigned components,
100                            const struct ir3_driver_ubo *ubo,
101                            unsigned offset)
102 {
103    assert(ubo->size >= MAX2(ubo->size, offset + components));
104 
105    return load_driver_ubo(b, components, ir3_get_shared_driver_ubo(b, ubo), offset);
106 }
107 
108 nir_def *
109 ir3_load_driver_ubo(nir_builder *b, unsigned components,
110                     struct ir3_driver_ubo *ubo,
111                     unsigned offset)
112 {
113    ubo->size = MAX2(ubo->size, offset + components);
114 
115    return load_driver_ubo(b, components, ir3_get_driver_ubo(b, ubo), offset);
116 }
117 
118 nir_def *
119 ir3_load_driver_ubo_indirect(nir_builder *b, unsigned components,
120                              struct ir3_driver_ubo *ubo,
121                              unsigned base, nir_def *offset,
122                              unsigned range)
123 {
124    assert(range > 0);
125    ubo->size = MAX2(ubo->size, base + components + (range - 1) * 4);
126 
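   /* Note: the indirect "offset" appears to be in vec4 (16-byte) units, which
    * would explain both the scale by 16 here and the range growing by 16
    * bytes per indirect step.
    */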
127    return nir_load_ubo(b, components, 32, ir3_get_driver_ubo(b, ubo),
128                        nir_iadd(b, nir_imul24(b, offset, nir_imm_int(b, 16)),
129                                 nir_imm_int(b, base * sizeof(uint32_t))),
130                        .align_mul = 16,
131                        .align_offset = (base % 4) * sizeof(uint32_t),
132                        .range_base = base * sizeof(uint32_t),
133                        .range = components * sizeof(uint32_t) +
134                         (range - 1) * 16);
135 }
136 
137 static bool
138 ir3_nir_should_scalarize_mem(const nir_instr *instr, const void *data)
139 {
140    const struct ir3_compiler *compiler = data;
141    const nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
142 
143    /* Scalarize load_ssbo's that we could otherwise lower to isam,
144     * as the tex cache benefit outweighs the benefit of vectorizing.
145     * Don't do this if (vectorized) isam.v is supported.
146     */
147    if ((intrin->intrinsic == nir_intrinsic_load_ssbo) &&
148        (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER) &&
149        compiler->has_isam_ssbo && !compiler->has_isam_v) {
150       return true;
151    }
152 
153    if ((intrin->intrinsic == nir_intrinsic_load_ssbo &&
154         intrin->def.bit_size == 8) ||
155        (intrin->intrinsic == nir_intrinsic_store_ssbo &&
156         intrin->src[0].ssa->bit_size == 8)) {
157       return true;
158    }
159 
160    return false;
161 }
162 
163 static bool
164 ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
165                              unsigned bit_size, unsigned num_components,
166                              int64_t hole_size, nir_intrinsic_instr *low,
167                              nir_intrinsic_instr *high, void *data)
168 {
169    if (hole_size > 0 || !nir_num_components_valid(num_components))
170       return false;
171 
172    struct ir3_compiler *compiler = data;
173    unsigned byte_size = bit_size / 8;
174 
175    if (low->intrinsic == nir_intrinsic_load_const_ir3)
176       return bit_size <= 32 && num_components <= 4;
177 
178    if (low->intrinsic == nir_intrinsic_store_const_ir3)
179       return bit_size == 32 && num_components <= 4;
180 
181    /* Don't vectorize load_ssbo's that we could otherwise lower to isam,
182     * as the tex cache benefit outweighs the benefit of vectorizing. If we
183     * support isam.v, we can vectorize this though.
184     */
185    if ((low->intrinsic == nir_intrinsic_load_ssbo) &&
186        (nir_intrinsic_access(low) & ACCESS_CAN_REORDER) &&
187        compiler->has_isam_ssbo && !compiler->has_isam_v) {
188       return false;
189    }
190 
191    if (low->intrinsic != nir_intrinsic_load_ubo) {
192       return bit_size <= 32 && align_mul >= byte_size &&
193          align_offset % byte_size == 0 &&
194          num_components <= 4;
195    }
196 
197    assert(bit_size >= 8);
198    if (bit_size != 32)
199       return false;
200 
201    int size = num_components * byte_size;
202 
203    /* Don't care about alignment past vec4. */
204    assert(util_is_power_of_two_nonzero(align_mul));
205    align_mul = MIN2(align_mul, 16);
206    align_offset &= 15;
207 
208    /* Our offset alignment should always be at least 4 bytes */
209    if (align_mul < 4)
210       return false;
211 
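   /* Worked example: with align_mul = 4 and align_offset = 0 the load could
    * start at byte 12 of a vec4, so only a single 32-bit component is
    * guaranteed not to straddle a vec4 boundary.
    */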
212    unsigned worst_start_offset = 16 - align_mul + align_offset;
213    if (worst_start_offset + size > 16)
214       return false;
215 
216    return true;
217 }
218 
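/* Promote 8-bit subgroup and integer ALU operations to 16 bits; 8-bit values
 * are handled through 16-bit half-registers on this hardware (see the note in
 * ir3_get_variable_size_align_bytes() below), so native 8-bit ops are avoided
 * for these cases.
 */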
219 static unsigned
220 ir3_lower_bit_size(const nir_instr *instr, UNUSED void *data)
221 {
222    if (instr->type == nir_instr_type_intrinsic) {
223       nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
224       switch (intrinsic->intrinsic) {
225       case nir_intrinsic_exclusive_scan:
226       case nir_intrinsic_inclusive_scan:
227       case nir_intrinsic_quad_broadcast:
228       case nir_intrinsic_quad_swap_diagonal:
229       case nir_intrinsic_quad_swap_horizontal:
230       case nir_intrinsic_quad_swap_vertical:
231       case nir_intrinsic_reduce:
232          return intrinsic->def.bit_size == 8 ? 16 : 0;
233       default:
234          break;
235       }
236    }
237 
238    if (instr->type == nir_instr_type_alu) {
239       nir_alu_instr *alu = nir_instr_as_alu(instr);
240       switch (alu->op) {
241       case nir_op_iabs:
242       case nir_op_iadd_sat:
243       case nir_op_imax:
244       case nir_op_imin:
245       case nir_op_ineg:
246       case nir_op_ishl:
247       case nir_op_ishr:
248       case nir_op_isub_sat:
249       case nir_op_uadd_sat:
250       case nir_op_umax:
251       case nir_op_umin:
252       case nir_op_ushr:
253          return alu->def.bit_size == 8 ? 16 : 0;
254       case nir_op_ieq:
255       case nir_op_ige:
256       case nir_op_ilt:
257       case nir_op_ine:
258       case nir_op_uge:
259       case nir_op_ult:
260          return nir_src_bit_size(alu->src[0].src) == 8 ? 16 : 0;
261       default:
262          break;
263       }
264    }
265 
266    return 0;
267 }
268 
269 static void
270 ir3_get_variable_size_align_bytes(const glsl_type *type, unsigned *size, unsigned *align)
271 {
272    switch (type->base_type) {
273    case GLSL_TYPE_ARRAY:
274    case GLSL_TYPE_INTERFACE:
275    case GLSL_TYPE_STRUCT:
276       glsl_size_align_handle_array_and_structs(type, ir3_get_variable_size_align_bytes,
277                                                size, align);
278       break;
279    case GLSL_TYPE_UINT8:
280    case GLSL_TYPE_INT8:
281       /* 8-bit values are handled through 16-bit half-registers, so the resulting size
282        * and alignment values have to be doubled to reflect the actual variable size
283        * requirement.
284        */
285       *size = 2 * glsl_get_components(type);
286       *align = 2;
287       break;
288    default:
289       glsl_get_natural_size_align_bytes(type, size, align);
290       break;
291    }
292 }
293 
294 #define OPT(nir, pass, ...)                                                    \
295    ({                                                                          \
296       bool this_progress = false;                                              \
297       NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);                       \
298       this_progress;                                                           \
299    })
300 
301 #define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
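/* OPT() runs a pass and returns whether it made progress, e.g.
 * "progress |= OPT(s, nir_opt_dce);". OPT_V() runs a pass for its side effects
 * only, discarding the progress result.
 */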
302 
303 bool
304 ir3_optimize_loop(struct ir3_compiler *compiler,
305                   const struct ir3_shader_nir_options *options,
306                   nir_shader *s)
307 {
308    MESA_TRACE_FUNC();
309 
310    bool progress;
311    bool did_progress = false;
312    unsigned lower_flrp = (s->options->lower_flrp16 ? 16 : 0) |
313                          (s->options->lower_flrp32 ? 32 : 0) |
314                          (s->options->lower_flrp64 ? 64 : 0);
315 
316    do {
317       progress = false;
318 
319       OPT_V(s, nir_lower_vars_to_ssa);
320       progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
321       progress |= OPT(s, nir_lower_phis_to_scalar, false);
322 
323       progress |= OPT(s, nir_copy_prop);
324       progress |= OPT(s, nir_opt_deref);
325       progress |= OPT(s, nir_opt_dce);
326       progress |= OPT(s, nir_opt_cse);
327 
328       progress |= OPT(s, nir_opt_find_array_copies);
329       progress |= OPT(s, nir_opt_copy_prop_vars);
330       progress |= OPT(s, nir_opt_dead_write_vars);
331 
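      /* Debug knob: setting the GCM environment variable enables nir_opt_gcm
       * here, with GCM=1 and GCM=2 selecting the two values of its boolean
       * argument.
       */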
332       static int gcm = -1;
333       if (gcm == -1)
334          gcm = debug_get_num_option("GCM", 0);
335       if (gcm == 1)
336          progress |= OPT(s, nir_opt_gcm, true);
337       else if (gcm == 2)
338          progress |= OPT(s, nir_opt_gcm, false);
339       progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
340       progress |= OPT(s, nir_opt_intrinsics);
341       /* NOTE: GS lowering inserts an output var with a varying slot that
342        * is larger than VARYING_SLOT_MAX (i.e. GS_VERTEX_FLAGS_IR3),
343        * which triggers asserts in nir_shader_gather_info().  To work
344        * around that skip lowering phi precision for GS.
345        *
346        * Calling nir_shader_gather_info() late also seems to cause
347        * problems for tess lowering; for now, since we only enable
348        * fp16/int16 for frag and compute, skip phi precision lowering
349        * for other stages.
350        */
351       if ((s->info.stage == MESA_SHADER_FRAGMENT) ||
352           (s->info.stage == MESA_SHADER_COMPUTE) ||
353           (s->info.stage == MESA_SHADER_KERNEL)) {
354          progress |= OPT(s, nir_opt_phi_precision);
355       }
356       progress |= OPT(s, nir_opt_algebraic);
357       progress |= OPT(s, nir_lower_alu);
358       progress |= OPT(s, nir_lower_pack);
359       progress |= OPT(s, nir_lower_bit_size, ir3_lower_bit_size, NULL);
360       progress |= OPT(s, nir_opt_constant_folding);
361 
362       const nir_opt_offsets_options offset_options = {
363          /* How large an offset we can encode in the instr's immediate field.
364           */
365          .uniform_max = (1 << 9) - 1,
366 
367          /* STL/LDL have 13b for offset with MSB being a sign bit, but this opt
368           * doesn't deal with negative offsets.
369           */
370          .shared_max = (1 << 12) - 1,
371 
372          .buffer_max = 0,
373          .max_offset_cb = ir3_nir_max_imm_offset,
374          .max_offset_data = compiler,
375          .allow_offset_wrap = true,
376       };
377       progress |= OPT(s, nir_opt_offsets, &offset_options);
378 
379       nir_load_store_vectorize_options vectorize_opts = {
380          .modes = nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_uniform,
381          .callback = ir3_nir_should_vectorize_mem,
382          .robust_modes = options->robust_modes,
383          .cb_data = compiler,
384       };
385       progress |= OPT(s, nir_opt_load_store_vectorize, &vectorize_opts);
386 
387       if (lower_flrp != 0) {
388          if (OPT(s, nir_lower_flrp, lower_flrp, false /* always_precise */)) {
389             OPT(s, nir_opt_constant_folding);
390             progress = true;
391          }
392 
393          /* Nothing should rematerialize any flrps, so we only
394           * need to do this lowering once.
395           */
396          lower_flrp = 0;
397       }
398 
399       progress |= OPT(s, nir_opt_dead_cf);
400       if (OPT(s, nir_opt_loop)) {
401          progress |= true;
402          /* If nir_opt_loop makes progress, then we need to clean
403           * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
404           * to make progress.
405           */
406          OPT(s, nir_copy_prop);
407          OPT(s, nir_opt_dce);
408       }
409       progress |= OPT(s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
410       progress |= OPT(s, nir_opt_loop_unroll);
411       progress |= OPT(s, nir_opt_remove_phis);
412       progress |= OPT(s, nir_opt_undef);
413       did_progress |= progress;
414    } while (progress);
415 
416    OPT(s, nir_lower_var_copies);
417    return did_progress;
418 }
419 
420 static bool
421 should_split_wrmask(const nir_instr *instr, const void *data)
422 {
423    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
424 
425    switch (intr->intrinsic) {
426    case nir_intrinsic_store_ssbo:
427    case nir_intrinsic_store_shared:
428    case nir_intrinsic_store_global:
429    case nir_intrinsic_store_scratch:
430       return true;
431    default:
432       return false;
433    }
434 }
435 
436 static bool
437 ir3_nir_lower_ssbo_size_filter(const nir_instr *instr, const void *data)
438 {
439    return instr->type == nir_instr_type_intrinsic &&
440           nir_instr_as_intrinsic(instr)->intrinsic ==
441              nir_intrinsic_get_ssbo_size;
442 }
443 
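/* Rewrites get_ssbo_size results into bytes: the hardware query returns the
 * size in larger units (dwords, or half-dwords when storage_16bit is used),
 * so the result is left-shifted by ssbo_size_to_bytes_shift; see the callers
 * in ir3_nir_post_finalize().
 */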
444 static nir_def *
445 ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data)
446 {
447    uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
448    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
449    return nir_ishl_imm(b, &intr->def, ssbo_size_to_bytes_shift);
450 }
451 
452 static bool
453 ir3_nir_lower_ssbo_size(nir_shader *s, uint8_t ssbo_size_to_bytes_shift)
454 {
455    return nir_shader_lower_instructions(s, ir3_nir_lower_ssbo_size_filter,
456                                         ir3_nir_lower_ssbo_size_instr,
457                                         &ssbo_size_to_bytes_shift);
458 }
459 
460 void
461 ir3_nir_lower_io_to_temporaries(nir_shader *s)
462 {
463    /* Outputs consumed by the VPC, VS inputs, and FS outputs are all handled
464     * by the hardware pre-loading registers at the beginning and then reading
465     * them at the end, so we can't access them indirectly except through
466     * normal register-indirect accesses, and therefore ir3 doesn't support
467     * indirect accesses on those. Other i/o is lowered in ir3_nir_lower_tess,
468     * and indirects work just fine for those. GS outputs may be consumed by
469     * VPC, but have their own lowering in ir3_nir_lower_gs() which does
470     * something similar to nir_lower_io_to_temporaries so we shouldn't need
471     * to lower them.
472     *
473     * Note: this might be a little inefficient for VS or TES outputs when
474     * the next stage isn't an FS, but it probably doesn't make sense to
475     * depend on the next stage before variant creation.
476     *
477     * TODO: for gallium, mesa/st also does some redundant lowering, including
478     * running this pass for GS inputs/outputs which we don't want but not
479     * including TES outputs or FS inputs which we do need. We should probably
480     * stop doing that once we're sure all drivers are doing their own
481     * indirect i/o lowering.
482     */
483    bool lower_input = s->info.stage == MESA_SHADER_VERTEX ||
484                       s->info.stage == MESA_SHADER_FRAGMENT;
485    bool lower_output = s->info.stage != MESA_SHADER_TESS_CTRL &&
486                        s->info.stage != MESA_SHADER_GEOMETRY;
487    if (lower_input || lower_output) {
488       NIR_PASS_V(s, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(s),
489                  lower_output, lower_input);
490 
491       /* nir_lower_io_to_temporaries() creates global variables and copy
492        * instructions which need to be cleaned up.
493        */
494       NIR_PASS_V(s, nir_split_var_copies);
495       NIR_PASS_V(s, nir_lower_var_copies);
496       NIR_PASS_V(s, nir_lower_global_vars_to_local);
497    }
498 
499    /* Regardless of the above, we need to lower indirect references to
500     * compact variables such as clip/cull distances because due to how
501     * TCS<->TES IO works we cannot handle indirect accesses that "straddle"
502     * vec4 components. nir_lower_indirect_derefs has a special case for
503     * compact variables, so it will actually lower them even though we pass
504     * in 0 modes.
505     *
506     * Using temporaries would be slightly better but
507     * nir_lower_io_to_temporaries currently doesn't support TCS i/o.
508     */
509    NIR_PASS_V(s, nir_lower_indirect_derefs, 0, UINT32_MAX);
510 }
511 
512 /**
513  * Inserts an add of 0.5 to floating point array index values in texture coordinates.
514  */
515 static bool
516 ir3_nir_lower_array_sampler_cb(struct nir_builder *b, nir_instr *instr, void *_data)
517 {
518    if (instr->type != nir_instr_type_tex)
519       return false;
520 
521    nir_tex_instr *tex = nir_instr_as_tex(instr);
522    if (!tex->is_array || tex->op == nir_texop_lod)
523       return false;
524 
525    int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
526    if (coord_idx == -1 ||
527        nir_tex_instr_src_type(tex, coord_idx) != nir_type_float)
528       return false;
529 
530    b->cursor = nir_before_instr(&tex->instr);
531 
532    unsigned ncomp = tex->coord_components;
533    nir_def *src = tex->src[coord_idx].src.ssa;
534 
535    assume(ncomp >= 1);
536    nir_def *ai = nir_channel(b, src, ncomp - 1);
537    ai = nir_fadd_imm(b, ai, 0.5);
538    nir_src_rewrite(&tex->src[coord_idx].src,
539                    nir_vector_insert_imm(b, src, ai, ncomp - 1));
540    return true;
541 }
542 
543 static bool
544 ir3_nir_lower_array_sampler(nir_shader *shader)
545 {
546    return nir_shader_instructions_pass(
547       shader, ir3_nir_lower_array_sampler_cb,
548       nir_metadata_control_flow, NULL);
549 }
550 
551 void
552 ir3_finalize_nir(struct ir3_compiler *compiler,
553                  const struct ir3_shader_nir_options *options,
554                  nir_shader *s)
555 {
556    MESA_TRACE_FUNC();
557 
558    struct nir_lower_tex_options tex_options = {
559       .lower_rect = 0,
560       .lower_tg4_offsets = true,
561       .lower_invalid_implicit_lod = true,
562       .lower_index_to_offset = true,
563    };
564 
565    if (compiler->gen >= 4) {
566       /* a4xx seems to have *no* sam.p */
567       tex_options.lower_txp = ~0; /* lower all txp */
568    } else {
569       /* a3xx just needs to avoid sam.p for 3d tex */
570       tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
571    }
572 
573    if (ir3_shader_debug & IR3_DBG_DISASM) {
574       mesa_logi("----------------------");
575       nir_log_shaderi(s);
576       mesa_logi("----------------------");
577    }
578 
579    if (s->info.stage == MESA_SHADER_GEOMETRY)
580       NIR_PASS_V(s, ir3_nir_lower_gs);
581 
582    NIR_PASS_V(s, nir_lower_frexp);
583    NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);
584 
585    OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);
586 
587    OPT_V(s, nir_lower_tex, &tex_options);
588    OPT_V(s, nir_lower_load_const_to_scalar);
589 
590    if (compiler->array_index_add_half)
591       OPT_V(s, ir3_nir_lower_array_sampler);
592 
593    OPT_V(s, nir_lower_is_helper_invocation);
594 
595    ir3_optimize_loop(compiler, options, s);
596 
597    /* do idiv lowering after first opt loop to get a chance to propagate
598     * constants for divide by immed power-of-two:
599     */
600    nir_lower_idiv_options idiv_options = {
601       .allow_fp16 = true,
602    };
603    bool idiv_progress = OPT(s, nir_opt_idiv_const, 8);
604    idiv_progress |= OPT(s, nir_lower_idiv, &idiv_options);
605 
606    if (idiv_progress)
607       ir3_optimize_loop(compiler, options, s);
608 
609    OPT_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
610 
611    if (ir3_shader_debug & IR3_DBG_DISASM) {
612       mesa_logi("----------------------");
613       nir_log_shaderi(s);
614       mesa_logi("----------------------");
615    }
616 
617    /* st_program.c's parameter list optimization requires that future nir
618     * variants don't reallocate the uniform storage, so we have to remove
619     * uniforms that occupy storage.  But we don't want to remove samplers,
620     * because they're needed for YUV variant lowering.
621     */
622    nir_foreach_uniform_variable_safe (var, s) {
623       if (var->data.mode == nir_var_uniform &&
624           (glsl_type_get_image_count(var->type) ||
625            glsl_type_get_sampler_count(var->type)))
626          continue;
627 
628       exec_node_remove(&var->node);
629    }
630    nir_validate_shader(s, "after uniform var removal");
631 
632    nir_sweep(s);
633 }
634 
635 static bool
636 lower_subgroup_id_filter(const nir_instr *instr, const void *unused)
637 {
638    (void)unused;
639 
640    if (instr->type != nir_instr_type_intrinsic)
641       return false;
642 
643    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
644    return intr->intrinsic == nir_intrinsic_load_subgroup_invocation ||
645           intr->intrinsic == nir_intrinsic_load_subgroup_id ||
646           intr->intrinsic == nir_intrinsic_load_num_subgroups;
647 }
648 
649 static nir_def *
650 lower_subgroup_id(nir_builder *b, nir_instr *instr, void *_shader)
651 {
652    struct ir3_shader *shader = _shader;
653 
654    /* Vulkan allows implementations to tile workgroup invocations even when
655     * subgroup operations are involved, which is implied by this Note:
656     *
657     *    "There is no direct relationship between SubgroupLocalInvocationId and
658     *    LocalInvocationId or LocalInvocationIndex."
659     *
660     * However there is no way to get SubgroupId directly, so we have to use
661     * LocalInvocationIndex here. This means that whenever we do this lowering we
662     * have to force linear dispatch to make sure that the relation between
663     * SubgroupId/SubgroupLocalInvocationId and LocalInvocationIndex is what we
664     * expect, unless the shader forces us to do the quad layout in which case we
665     * have to use the tiled layout.
666     */
667    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
668    if (intr->intrinsic == nir_intrinsic_load_subgroup_id &&
669        shader->nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
670       /* We have to manually figure out which subgroup we're in using the
671        * tiling. The tiling is 4x4, unless one of the dimensions is not a
672        * multiple of 4 in which case it drops to 2.
673        */
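      /* Worked example (assuming a 16x16 workgroup and a subgroup size of 64):
       * both shifts are 2, so each tile is 4x4 = 16 invocations and tile_id
       * counts tiles in row-major order; subgroup_id_shift is then 6, so four
       * consecutive tiles (64 invocations) map to each subgroup.
       */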
674       nir_def *local_size = nir_load_workgroup_size(b);
675       nir_def *local_size_x = nir_channel(b, local_size, 0);
676       nir_def *local_size_y = nir_channel(b, local_size, 1);
677       /* Calculate the shift from invocation to tile index for x and y */
678       nir_def *x_shift = nir_bcsel(b,
679                                    nir_ieq_imm(b,
680                                                nir_iand_imm(b, local_size_x, 3),
681                                                0),
682                                    nir_imm_int(b, 2), nir_imm_int(b, 1));
683       nir_def *y_shift = nir_bcsel(b,
684                                    nir_ieq_imm(b,
685                                                nir_iand_imm(b, local_size_y, 3),
686                                                0),
687                                    nir_imm_int(b, 2), nir_imm_int(b, 1));
688       nir_def *id = nir_load_local_invocation_id(b);
689       nir_def *id_x = nir_channel(b, id, 0);
690       nir_def *id_y = nir_channel(b, id, 1);
691       /* Calculate which tile we're in */
692       nir_def *tile_id =
693          nir_iadd(b, nir_imul24(b, nir_ishr(b, id_y, y_shift),
694                                 nir_ishr(b, local_size_x, x_shift)),
695                   nir_ishr(b, id_x, x_shift));
696       /* Finally calculate the subgroup id */
697       return nir_ishr(b, tile_id, nir_isub(b,
698                                            nir_load_subgroup_id_shift_ir3(b),
699                                            nir_iadd(b, x_shift, y_shift)));
700    }
701 
702    /* Just use getfiberid if we have to use tiling */
703    if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation &&
704        shader->nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
705       return NULL;
706    }
707 
708 
709    if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation) {
710       shader->cs.force_linear_dispatch = true;
711       return nir_iand(
712          b, nir_load_local_invocation_index(b),
713          nir_iadd_imm(b, nir_load_subgroup_size(b), -1));
714    } else if (intr->intrinsic == nir_intrinsic_load_subgroup_id) {
715       shader->cs.force_linear_dispatch = true;
716       return nir_ishr(b, nir_load_local_invocation_index(b),
717                       nir_load_subgroup_id_shift_ir3(b));
718    } else {
719       assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);
720       /* If the workgroup size is constant,
721        * nir_lower_compute_system_values() will replace local_size with a
722        * constant so this can mostly be constant folded away.
723        */
724       nir_def *local_size = nir_load_workgroup_size(b);
725       nir_def *size =
726          nir_imul24(b, nir_channel(b, local_size, 0),
727                     nir_imul24(b, nir_channel(b, local_size, 1),
728                                nir_channel(b, local_size, 2)));
729       nir_def *one = nir_imm_int(b, 1);
730       return nir_iadd(b, one,
731                       nir_ishr(b, nir_isub(b, size, one),
732                                nir_load_subgroup_id_shift_ir3(b)));
733    }
734 }
735 
736 static bool
737 ir3_nir_lower_subgroup_id_cs(nir_shader *nir, struct ir3_shader *shader)
738 {
739    return nir_shader_lower_instructions(nir, lower_subgroup_id_filter,
740                                         lower_subgroup_id, shader);
741 }
742 
743 /**
744  * Late passes that need to be done after pscreen->finalize_nir()
745  */
746 void
747 ir3_nir_post_finalize(struct ir3_shader *shader)
748 {
749    struct nir_shader *s = shader->nir;
750    struct ir3_compiler *compiler = shader->compiler;
751 
752    MESA_TRACE_FUNC();
753 
754    NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
755               ir3_glsl_type_size, nir_lower_io_lower_64bit_to_32 |
756               nir_lower_io_use_interpolated_input_intrinsics);
757 
758    if (s->info.stage == MESA_SHADER_FRAGMENT) {
759       /* NOTE: lower load_barycentric_at_sample first, since it
760        * produces load_barycentric_at_offset:
761        */
762       NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_sample);
763       NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_offset);
764       NIR_PASS_V(s, ir3_nir_move_varying_inputs);
765       NIR_PASS_V(s, nir_lower_fb_read);
766       NIR_PASS_V(s, ir3_nir_lower_layer_id);
767       NIR_PASS_V(s, ir3_nir_lower_frag_shading_rate);
768    }
769 
770    if (s->info.stage == MESA_SHADER_VERTEX || s->info.stage == MESA_SHADER_GEOMETRY) {
771       NIR_PASS_V(s, ir3_nir_lower_primitive_shading_rate);
772    }
773 
774    if (compiler->gen >= 6 && s->info.stage == MESA_SHADER_FRAGMENT &&
775        !(ir3_shader_debug & IR3_DBG_NOFP16)) {
776       /* Lower FS mediump inputs to 16-bit. If you declared it mediump, you
777        * probably want 16-bit instructions (and have set
778        * mediump/RelaxedPrecision on most of the rest of the shader's
779        * instructions).  If we don't lower it in NIR, then comparisons of the
780        * results of mediump ALU ops with the mediump input will happen in highp,
781        * causing extra conversions (and, incidentally, causing
782        * dEQP-GLES2.functional.shaders.algorithm.rgb_to_hsl_fragment on ANGLE to
783        * fail)
784        *
785        * However, we can't do flat inputs because flat.b doesn't have the
786        * destination type for how to downconvert the
787        * 32-bit-in-the-varyings-interpolator value. (also, even if it did, watch
788        * out for how gl_nir_lower_packed_varyings packs all flat-interpolated
789        * things together as ivec4s, so when we lower a formerly-float input
790        * you'd end up with an incorrect f2f16(i2i32(load_input())) instead of
791        * load_input).
792        */
793       uint64_t mediump_varyings = 0;
794       nir_foreach_shader_in_variable(var, s) {
795          if ((var->data.precision == GLSL_PRECISION_MEDIUM ||
796               var->data.precision == GLSL_PRECISION_LOW) &&
797              var->data.interpolation != INTERP_MODE_FLAT) {
798             mediump_varyings |= BITFIELD64_BIT(var->data.location);
799          }
800       }
801 
802       if (mediump_varyings) {
803          NIR_PASS_V(s, nir_lower_mediump_io,
804                   nir_var_shader_in,
805                   mediump_varyings,
806                   false);
807       }
808 
809       /* This should come after input lowering, to opportunistically lower non-mediump outputs. */
810       NIR_PASS_V(s, nir_lower_mediump_io, nir_var_shader_out, 0, false);
811    }
812 
813    {
814       /* If the API-facing subgroup size is forced to a particular value, lower
815        * it here. Beyond this point nir_intrinsic_load_subgroup_size will return
816        * the "real" subgroup size.
817        */
818       unsigned subgroup_size = 0, max_subgroup_size = 0;
819       ir3_shader_get_subgroup_size(compiler, &shader->options, s->info.stage,
820                                    &subgroup_size, &max_subgroup_size);
821 
822       nir_lower_subgroups_options options = {
823             .subgroup_size = subgroup_size,
824             .ballot_bit_size = 32,
825             .ballot_components = max_subgroup_size / 32,
826             .lower_to_scalar = true,
827             .lower_vote_eq = true,
828             .lower_vote_bool_eq = true,
829             .lower_subgroup_masks = true,
830             .lower_read_invocation_to_cond = true,
831             .lower_shuffle = !compiler->has_shfl,
832             .lower_relative_shuffle = !compiler->has_shfl,
833             .lower_rotate_to_shuffle = !compiler->has_shfl,
834             .lower_rotate_clustered_to_shuffle = true,
835             .lower_inverse_ballot = true,
836             .lower_reduce = true,
837             .filter = ir3_nir_lower_subgroups_filter,
838             .filter_data = compiler,
839       };
840 
841       if (!((s->info.stage == MESA_SHADER_COMPUTE) ||
842             (s->info.stage == MESA_SHADER_KERNEL) ||
843             compiler->has_getfiberid)) {
844          options.subgroup_size = 1;
845          options.lower_vote_trivial = true;
846       }
847 
848       OPT(s, nir_lower_subgroups, &options);
849       OPT(s, ir3_nir_lower_shuffle, shader);
850    }
851 
852    if ((s->info.stage == MESA_SHADER_COMPUTE) ||
853        (s->info.stage == MESA_SHADER_KERNEL)) {
854       bool progress = false;
855       NIR_PASS(progress, s, ir3_nir_lower_subgroup_id_cs, shader);
856 
857       if (s->info.derivative_group == DERIVATIVE_GROUP_LINEAR)
858          shader->cs.force_linear_dispatch = true;
859 
860       /* ir3_nir_lower_subgroup_id_cs creates extra compute intrinsics which
861        * we need to lower again.
862        */
863       if (progress)
864          NIR_PASS_V(s, nir_lower_compute_system_values, NULL);
865    }
866 
867    /* we cannot ensure that ir3_finalize_nir() is only called once, so
868     * we also need to do any run-once workarounds here:
869     */
870    OPT_V(s, ir3_nir_apply_trig_workarounds);
871 
872    const nir_lower_image_options lower_image_opts = {
873       .lower_cube_size = true,
874       .lower_image_samples_to_one = true
875    };
876    NIR_PASS_V(s, nir_lower_image, &lower_image_opts);
877 
878    const nir_lower_idiv_options lower_idiv_options = {
879       .allow_fp16 = true,
880    };
881    NIR_PASS_V(s, nir_lower_idiv, &lower_idiv_options); /* idiv generated by cube lowering */
882 
883 
884    /* The resinfo opcode returns the size in dwords on a4xx */
885    if (compiler->gen == 4)
886       OPT_V(s, ir3_nir_lower_ssbo_size, 2);
887 
888    /* The resinfo opcode we have for getting the SSBO size on a6xx returns a
889     * byte length divided by IBO_0_FMT, while the NIR intrinsic coming in is a
890     * number of bytes. Switch things so the NIR intrinsic in our backend means
891     * dwords.
892     */
893    if (compiler->gen >= 6)
894       OPT_V(s, ir3_nir_lower_ssbo_size, compiler->options.storage_16bit ? 1 : 2);
895 
896    ir3_optimize_loop(compiler, &shader->options.nir_options, s);
897 }
898 
899 static bool
900 lower_ucp_vs(struct ir3_shader_variant *so)
901 {
902    if (!so->key.ucp_enables)
903       return false;
904 
905    gl_shader_stage last_geom_stage;
906 
907    if (so->key.has_gs) {
908       last_geom_stage = MESA_SHADER_GEOMETRY;
909    } else if (so->key.tessellation) {
910       last_geom_stage = MESA_SHADER_TESS_EVAL;
911    } else {
912       last_geom_stage = MESA_SHADER_VERTEX;
913    }
914 
915    return so->type == last_geom_stage;
916 }
917 
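/* The binning pass only needs outputs that affect primitive position and
 * visibility (position, point size, clip distances, viewport); everything
 * else is stripped from the binning variant below.
 */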
918 static bool
919 output_slot_used_for_binning(gl_varying_slot slot)
920 {
921    return slot == VARYING_SLOT_POS || slot == VARYING_SLOT_PSIZ ||
922           slot == VARYING_SLOT_CLIP_DIST0 || slot == VARYING_SLOT_CLIP_DIST1 ||
923           slot == VARYING_SLOT_VIEWPORT;
924 }
925 
926 static bool
927 remove_nonbinning_output(nir_builder *b, nir_intrinsic_instr *intr, void *data)
928 {
929    if (intr->intrinsic != nir_intrinsic_store_output &&
930        intr->intrinsic != nir_intrinsic_store_per_view_output)
931       return false;
932 
933    nir_io_semantics io = nir_intrinsic_io_semantics(intr);
934 
935    if (output_slot_used_for_binning(io.location))
936       return false;
937 
938    nir_instr_remove(&intr->instr);
939    return true;
940 }
941 
942 static bool
943 lower_binning(nir_shader *s)
944 {
945    return nir_shader_intrinsics_pass(s, remove_nonbinning_output,
946                                      nir_metadata_control_flow, NULL);
947 }
948 
949 nir_mem_access_size_align
950 ir3_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
951                  uint8_t bit_size, uint32_t align,
952                  uint32_t align_offset, bool offset_is_const,
953                  enum gl_access_qualifier access, const void *cb_data)
954 {
955    align = nir_combined_align(align, align_offset);
956    assert(util_is_power_of_two_nonzero(align));
957 
958    /* If we're only aligned to 1 byte, use 8-bit loads. If we're only
959     * aligned to 2 bytes, use 16-bit loads, unless we needed 8-bit loads due to
960     * the size.
961     */
962    if ((bytes & 1) || (align == 1))
963       bit_size = 8;
964    else if ((bytes & 2) || (align == 2))
965       bit_size = 16;
966    else if (bit_size >= 32)
967       bit_size = 32;
968 
969    if (intrin == nir_intrinsic_load_ubo)
970       bit_size = 32;
971 
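   /* Example: a 12-byte load with 4-byte alignment keeps bit_size = 32 and is
    * split into MIN2(12 / 4, 4) = 3 components.
    */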
972    return (nir_mem_access_size_align){
973       .num_components = MAX2(1, MIN2(bytes / (bit_size / 8), 4)),
974       .bit_size = bit_size,
975       .align = bit_size / 8,
976       .shift = nir_mem_access_shift_method_scalar,
977    };
978 }
979 
980 static bool
981 atomic_supported(const nir_instr *instr, const void *data)
982 {
983    /* No atomic 64b arithmetic is supported in A7XX so far */
984    return nir_instr_as_intrinsic(instr)->def.bit_size != 64;
985 }
986 
987 void
988 ir3_nir_lower_variant(struct ir3_shader_variant *so,
989                       const struct ir3_shader_nir_options *options,
990                       nir_shader *s)
991 {
992    MESA_TRACE_FUNC();
993 
994    if (ir3_shader_debug & IR3_DBG_DISASM) {
995       mesa_logi("----------------------");
996       nir_log_shaderi(s);
997       mesa_logi("----------------------");
998    }
999 
1000    bool progress = false;
1001 
1002    progress |= OPT(s, nir_lower_io_to_scalar, nir_var_mem_ssbo,
1003                    ir3_nir_should_scalarize_mem, so->compiler);
1004 
1005    if (so->key.has_gs || so->key.tessellation) {
1006       switch (so->type) {
1007       case MESA_SHADER_VERTEX:
1008          NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
1009                     so->key.tessellation);
1010          progress = true;
1011          break;
1012       case MESA_SHADER_TESS_CTRL:
1013          NIR_PASS_V(s, nir_lower_io_to_scalar,
1014                      nir_var_shader_in | nir_var_shader_out, NULL, NULL);
1015          NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
1016          NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
1017          progress = true;
1018          break;
1019       case MESA_SHADER_TESS_EVAL:
1020          NIR_PASS_V(s, ir3_nir_lower_tess_eval, so, so->key.tessellation);
1021          if (so->key.has_gs)
1022             NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
1023                        so->key.tessellation);
1024          progress = true;
1025          break;
1026       case MESA_SHADER_GEOMETRY:
1027          NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
1028          progress = true;
1029          break;
1030       default:
1031          break;
1032       }
1033    }
1034 
1035    /* Note that it is intentional to use the VS lowering pass for GS, since we
1036     * lower GS into something that looks more like a VS in ir3_nir_lower_gs():
1037     */
1038    if (lower_ucp_vs(so)) {
1039       progress |= OPT(s, nir_lower_clip_vs, so->key.ucp_enables, false, true, NULL);
1040    } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
1041       if (so->key.ucp_enables && !so->compiler->has_clip_cull)
1042          progress |= OPT(s, nir_lower_clip_fs, so->key.ucp_enables, true, true);
1043    }
1044 
1045    if (so->binning_pass) {
1046       if (OPT(s, lower_binning)) {
1047          progress = true;
1048 
1049          /* outputs_written has changed. */
1050          nir_shader_gather_info(s, nir_shader_get_entrypoint(s));
1051       }
1052    }
1053 
1054    /* Move large constant variables to the constants attached to the NIR
1055     * shader, which we will upload in the immediates range.  This generates
1056     * amuls, so we need to clean those up after.
1057     *
1058     * Passing no size_align, we would get packed values, which if we end up
1059     * having to load with LDC would result in extra reads to unpack from
1060     * straddling loads.  Align everything to vec4 to avoid that, though we
1061     * could theoretically do better.
1062     */
1063    OPT_V(s, nir_opt_large_constants, glsl_get_vec4_size_align_bytes,
1064          32 /* bytes */);
1065    progress |= OPT(s, ir3_nir_lower_load_constant, so);
1066 
1067    /* Lower large temporaries to scratch, which in Qualcomm terms is private
1068     * memory, to avoid excess register pressure. This should happen after
1069     * nir_opt_large_constants, because loading from a UBO is much, much less
1070     * expensive.
1071     */
1072    if (so->compiler->has_pvtmem) {
1073       progress |= OPT(s, nir_lower_vars_to_scratch, nir_var_function_temp,
1074                       16 * 16 /* bytes */,
1075                       ir3_get_variable_size_align_bytes, glsl_get_natural_size_align_bytes);
1076    }
1077 
1078    /* Lower scratch writemasks */
1079    progress |= OPT(s, nir_lower_wrmasks, should_split_wrmask, s);
1080    progress |= OPT(s, nir_lower_atomics, atomic_supported);
1081 
1082    if (OPT(s, nir_lower_locals_to_regs, 1)) {
1083       progress = true;
1084 
1085       /* Split 64b registers into two 32b ones. */
1086       OPT_V(s, ir3_nir_lower_64b_regs);
1087    }
1088 
1089    nir_lower_mem_access_bit_sizes_options mem_bit_size_options = {
1090       .modes = nir_var_mem_constant | nir_var_mem_ubo |
1091                nir_var_mem_global | nir_var_mem_shared |
1092                nir_var_function_temp | nir_var_mem_ssbo,
1093       .callback = ir3_mem_access_size_align,
1094    };
1095 
1096    progress |= OPT(s, nir_lower_mem_access_bit_sizes, &mem_bit_size_options);
1097    progress |= OPT(s, ir3_nir_lower_64b_global);
1098    progress |= OPT(s, ir3_nir_lower_64b_undef);
1099    progress |= OPT(s, nir_lower_int64);
1100    progress |= OPT(s, ir3_nir_lower_64b_intrinsics);
1101    progress |= OPT(s, nir_lower_64bit_phis);
1102 
1103    /* Cleanup code leftover from lowering passes before opt_preamble */
1104    if (progress) {
1105       progress |= OPT(s, nir_opt_constant_folding);
1106    }
1107 
1108    progress |= OPT(s, ir3_nir_opt_subgroups, so);
1109 
1110    if (so->compiler->load_shader_consts_via_preamble)
1111       progress |= OPT(s, ir3_nir_lower_driver_params_to_ubo, so);
1112 
1113    if (!so->binning_pass) {
1114       ir3_setup_const_state(s, so, ir3_const_state_mut(so));
1115    }
1116 
1117    /* Do the preamble before analysing UBO ranges, because it's usually
1118     * higher-value and because it can result in eliminating some indirect UBO
1119     * accesses where otherwise we'd have to push the whole range. However we
1120     * have to lower the preamble after UBO lowering so that UBO lowering can
1121     * insert instructions in the preamble to push UBOs.
1122     */
1123    if (so->compiler->has_preamble &&
1124        !(ir3_shader_debug & IR3_DBG_NOPREAMBLE))
1125       progress |= OPT(s, ir3_nir_opt_preamble, so);
1126 
1127    if (so->compiler->load_shader_consts_via_preamble)
1128       progress |= OPT(s, ir3_nir_lower_driver_params_to_ubo, so);
1129 
1130    /* TODO: ldg.k might also work on a6xx */
1131    if (so->compiler->gen >= 7)
1132       progress |= OPT(s, ir3_nir_lower_const_global_loads, so);
1133 
1134    if (!so->binning_pass)
1135       OPT_V(s, ir3_nir_analyze_ubo_ranges, so);
1136 
1137    progress |= OPT(s, ir3_nir_lower_ubo_loads, so);
1138 
1139    if (so->compiler->gen >= 7 &&
1140        !(ir3_shader_debug & (IR3_DBG_NOPREAMBLE | IR3_DBG_NODESCPREFETCH)))
1141       progress |= OPT(s, ir3_nir_opt_prefetch_descriptors, so);
1142 
1143    if (so->shader_options.push_consts_type == IR3_PUSH_CONSTS_SHARED_PREAMBLE)
1144       progress |= OPT(s, ir3_nir_lower_push_consts_to_preamble, so);
1145 
1146    progress |= OPT(s, ir3_nir_lower_preamble, so);
1147 
1148    progress |= OPT(s, nir_lower_amul, ir3_glsl_type_size);
1149 
1150    /* UBO offset lowering has to come after we've decided what will
1151     * be left as load_ubo
1152     */
1153    if (so->compiler->gen >= 6)
1154       progress |= OPT(s, nir_lower_ubo_vec4);
1155 
1156    progress |= OPT(s, ir3_nir_lower_io_offsets);
1157 
1158    if (!so->binning_pass) {
1159       ir3_const_alloc_all_reserved_space(&ir3_const_state_mut(so)->allocs);
1160    }
1161 
1162    if (progress)
1163       ir3_optimize_loop(so->compiler, options, s);
1164 
1165    /* verify that progress is always set */
1166    assert(!ir3_optimize_loop(so->compiler, options, s));
1167 
1168    /* Fixup indirect load_const_ir3's which end up with a const base offset
1169     * which is too large to encode.  Do this late(ish) so we actually
1170     * can differentiate indirect vs non-indirect.
1171     */
1172    if (OPT(s, ir3_nir_fixup_load_const_ir3))
1173       ir3_optimize_loop(so->compiler, options, s);
1174 
1175    /* Do late algebraic optimization to turn add(a, neg(b)) back into
1176     * subs, then the mandatory cleanup after algebraic.  Note that it may
1177     * produce fnegs, and if so then we need to keep running to squash
1178     * fneg(fneg(a)).
1179     */
1180    bool more_late_algebraic = true;
1181    while (more_late_algebraic) {
1182       more_late_algebraic = OPT(s, nir_opt_algebraic_late);
1183       if (!more_late_algebraic && so->compiler->gen >= 5) {
1184          /* Lowers texture operations that have only f2f16 or u2u16 called on
1185           * them to have a 16-bit destination.  Also, lower 16-bit texture
1186           * coordinates that had been upconverted to 32-bits just for the
1187           * sampler to just be 16-bit texture sources.
1188           */
1189          struct nir_opt_tex_srcs_options opt_srcs_options = {
1190             .sampler_dims = ~0,
1191             .src_types = (1 << nir_tex_src_coord) |
1192                          (1 << nir_tex_src_lod) |
1193                          (1 << nir_tex_src_bias) |
1194                          (1 << nir_tex_src_offset) |
1195                          (1 << nir_tex_src_comparator) |
1196                          (1 << nir_tex_src_min_lod) |
1197                          (1 << nir_tex_src_ms_index) |
1198                          (1 << nir_tex_src_ddx) |
1199                          (1 << nir_tex_src_ddy),
1200          };
1201          struct nir_opt_16bit_tex_image_options opt_16bit_options = {
1202             .rounding_mode = nir_rounding_mode_rtz,
1203             .opt_tex_dest_types = nir_type_float,
1204             /* blob dumps have no half regs on pixel 2's ldib or stib, so only enable for a6xx+. */
1205             .opt_image_dest_types = so->compiler->gen >= 6 ?
1206                                         nir_type_float | nir_type_uint | nir_type_int : 0,
1207             .opt_image_store_data = so->compiler->gen >= 6,
1208             .opt_srcs_options_count = 1,
1209             .opt_srcs_options = &opt_srcs_options,
1210          };
1211          OPT(s, nir_opt_16bit_tex_image, &opt_16bit_options);
1212       }
1213       OPT_V(s, nir_opt_constant_folding);
1214       OPT_V(s, nir_copy_prop);
1215       OPT_V(s, nir_opt_dce);
1216       OPT_V(s, nir_opt_cse);
1217    }
1218 
1219    OPT_V(s, nir_opt_sink, nir_move_const_undef);
1220 
1221    if (ir3_shader_debug & IR3_DBG_DISASM) {
1222       mesa_logi("----------------------");
1223       nir_log_shaderi(s);
1224       mesa_logi("----------------------");
1225    }
1226 
1227    nir_sweep(s);
1228 }
1229 
1230 bool
1231 ir3_get_driver_param_info(const nir_shader *shader, nir_intrinsic_instr *intr,
1232                           struct driver_param_info *param_info)
1233 {
1234    switch (intr->intrinsic) {
1235    case nir_intrinsic_load_base_workgroup_id:
1236       param_info->offset = IR3_DP_CS(base_group_x);
1237       break;
1238    case nir_intrinsic_load_num_workgroups:
1239       param_info->offset = IR3_DP_CS(num_work_groups_x);
1240       break;
1241    case nir_intrinsic_load_workgroup_size:
1242       param_info->offset = IR3_DP_CS(local_group_size_x);
1243       break;
1244    case nir_intrinsic_load_subgroup_size:
1245       if (shader->info.stage == MESA_SHADER_COMPUTE) {
1246          param_info->offset = IR3_DP_CS(subgroup_size);
1247       } else if (shader->info.stage == MESA_SHADER_FRAGMENT) {
1248          param_info->offset = IR3_DP_FS(subgroup_size);
1249       } else {
1250          return false;
1251       }
1252       break;
1253    case nir_intrinsic_load_subgroup_id_shift_ir3:
1254       param_info->offset = IR3_DP_CS(subgroup_id_shift);
1255       break;
1256    case nir_intrinsic_load_work_dim:
1257       param_info->offset = IR3_DP_CS(work_dim);
1258       break;
1259    case nir_intrinsic_load_base_vertex:
1260    case nir_intrinsic_load_first_vertex:
1261       param_info->offset = IR3_DP_VS(vtxid_base);
1262       break;
1263    case nir_intrinsic_load_is_indexed_draw:
1264       param_info->offset = IR3_DP_VS(is_indexed_draw);
1265       break;
1266    case nir_intrinsic_load_draw_id:
1267       param_info->offset = IR3_DP_VS(draw_id);
1268       break;
1269    case nir_intrinsic_load_base_instance:
1270       param_info->offset = IR3_DP_VS(instid_base);
1271       break;
1272    case nir_intrinsic_load_user_clip_plane: {
1273       uint32_t idx = nir_intrinsic_ucp_id(intr);
1274       param_info->offset = IR3_DP_VS(ucp[0].x) + 4 * idx;
1275       break;
1276    }
1277    case nir_intrinsic_load_tess_level_outer_default:
1278       param_info->offset = IR3_DP_TCS(default_outer_level_x);
1279       break;
1280    case nir_intrinsic_load_tess_level_inner_default:
1281       param_info->offset = IR3_DP_TCS(default_inner_level_x);
1282       break;
1283    case nir_intrinsic_load_frag_size_ir3:
1284       param_info->offset = IR3_DP_FS(frag_size);
1285       break;
1286    case nir_intrinsic_load_frag_offset_ir3:
1287       param_info->offset = IR3_DP_FS(frag_offset);
1288       break;
1289    case nir_intrinsic_load_frag_invocation_count:
1290       param_info->offset = IR3_DP_FS(frag_invocation_count);
1291       break;
1292    default:
1293       return false;
1294    }
1295 
1296    return true;
1297 }
1298 
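/* Scan the shader for intrinsics that read driver params and return how many
 * driver-param dwords are needed (the largest param offset plus the number of
 * components read). Optionally also records which images need their
 * dimensions uploaded as constants on gens that lack the hardware query.
 */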
1299 uint32_t
1300 ir3_nir_scan_driver_consts(struct ir3_compiler *compiler, nir_shader *shader,
1301                            struct ir3_const_image_dims *image_dims)
1302 {
1303    uint32_t num_driver_params = 0;
1304    nir_foreach_function (function, shader) {
1305       if (!function->impl)
1306          continue;
1307 
1308       nir_foreach_block (block, function->impl) {
1309          nir_foreach_instr (instr, block) {
1310             if (instr->type != nir_instr_type_intrinsic)
1311                continue;
1312 
1313             nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1314             unsigned idx;
1315 
1316             if (image_dims) {
1317                switch (intr->intrinsic) {
1318                case nir_intrinsic_image_atomic:
1319                case nir_intrinsic_image_atomic_swap:
1320                case nir_intrinsic_image_load:
1321                case nir_intrinsic_image_store:
1322                case nir_intrinsic_image_size:
1323                   /* a4xx gets these supplied by the hw directly (maybe CP?) */
1324                   if (compiler->gen == 5 &&
1325                      !(intr->intrinsic == nir_intrinsic_image_load &&
1326                         !(nir_intrinsic_access(intr) & ACCESS_COHERENT))) {
1327                      idx = nir_src_as_uint(intr->src[0]);
1328                      if (image_dims->mask & (1 << idx))
1329                         break;
1330                      image_dims->mask |= (1 << idx);
1331                      image_dims->off[idx] = image_dims->count;
1332                      image_dims->count += 3; /* three const per */
1333                   }
1334                   break;
1335                default:
1336                   break;
1337                }
1338             }
1339 
1340             struct driver_param_info param_info;
1341             if (ir3_get_driver_param_info(shader, intr, &param_info)) {
1342                num_driver_params =
1343                   MAX2(num_driver_params,
1344                        param_info.offset + nir_intrinsic_dest_components(intr));
1345             }
1346          }
1347       }
1348    }
1349 
1350    /* TODO: Provide a spot somewhere to safely upload unwanted values, and a way
1351     * to determine if they're wanted or not. For now we always make the whole
1352     * driver param range available, since the driver will always instruct the
1353     * hardware to upload these.
1354     */
1355    if (!compiler->has_shared_regfile &&
1356          shader->info.stage == MESA_SHADER_COMPUTE) {
1357       num_driver_params =
1358          MAX2(num_driver_params, IR3_DP_CS(workgroup_id_z) + 1);
1359    }
1360 
1361    return num_driver_params;
1362 }
1363 
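/* Editor's note, illustration only: the returned value is a high-water mark
 * in dwords over all driver-param loads.  A vertex shader that reads
 * gl_DrawID (one dword at IR3_DP_VS(draw_id)) and user clip plane 1 (four
 * dwords starting at IR3_DP_VS(ucp[0].x) + 4) would, for instance, yield
 *    MAX2(IR3_DP_VS(draw_id) + 1, IR3_DP_VS(ucp[0].x) + 4 + 4).
 */
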
1364 void
1365 ir3_const_alloc(struct ir3_const_allocations *const_alloc,
1366                 enum ir3_const_alloc_type type, uint32_t size_vec4,
1367                 uint32_t align_vec4)
1368 {
1369    struct ir3_const_allocation *alloc = &const_alloc->consts[type];
1370    assert(alloc->size_vec4 == 0);
1371 
1372    const_alloc->max_const_offset_vec4 =
1373       align(const_alloc->max_const_offset_vec4, align_vec4);
1374    alloc->size_vec4 = size_vec4;
1375    alloc->offset_vec4 = const_alloc->max_const_offset_vec4;
1376    const_alloc->max_const_offset_vec4 += size_vec4;
1377 }
1378 
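/* Editor's sketch, not part of the driver: a worked example of the bump
 * allocator above.  Starting from a zeroed ir3_const_allocations, the second
 * allocation's alignment rounds the running cursor up before its offset is
 * recorded.
 */
static inline void
example_ir3_const_alloc(void)
{
   struct ir3_const_allocations ca = {0};

   /* 3 vec4 with alignment 1: placed at offset 0, cursor advances to 3 */
   ir3_const_alloc(&ca, IR3_CONST_ALLOC_DRIVER_PARAMS, 3, 1);
   assert(ca.consts[IR3_CONST_ALLOC_DRIVER_PARAMS].offset_vec4 == 0);

   /* 2 vec4 with alignment 4: the cursor is rounded from 3 up to 4 first */
   ir3_const_alloc(&ca, IR3_CONST_ALLOC_IMAGE_DIMS, 2, 4);
   assert(ca.consts[IR3_CONST_ALLOC_IMAGE_DIMS].offset_vec4 == 4);
   assert(ca.max_const_offset_vec4 == 6);
}
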
1379 void
1380 ir3_const_reserve_space(struct ir3_const_allocations *const_alloc,
1381                         enum ir3_const_alloc_type type, uint32_t size_vec4,
1382                         uint32_t align_vec4)
1383 {
1384    struct ir3_const_allocation *alloc = &const_alloc->consts[type];
1385    assert(alloc->size_vec4 == 0 && alloc->reserved_size_vec4 == 0);
1386 
1387    alloc->reserved_size_vec4 = size_vec4;
1388    alloc->reserved_align_vec4 = align_vec4;
1389    /* Be pessimistic here and assume the worst case alignment is needed */
1390    const_alloc->reserved_vec4 += size_vec4 + align_vec4 - 1;
1391 }
1392 
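/* Editor's note, illustration only: reserving 2 vec4 with a vec4 alignment of
 * 4 charges 2 + 4 - 1 = 5 vec4 against reserved_vec4, since the cursor
 * position at realization time is unknown and may need up to (align - 1)
 * vec4 of padding.
 */
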
1393 void
1394 ir3_const_free_reserved_space(struct ir3_const_allocations *const_alloc,
1395                               enum ir3_const_alloc_type type)
1396 {
1397    struct ir3_const_allocation *alloc = &const_alloc->consts[type];
1398    assert(const_alloc->reserved_vec4 >= alloc->reserved_size_vec4);
1399 
1400    const_alloc->reserved_vec4 -=
1401       alloc->reserved_size_vec4 + alloc->reserved_align_vec4 - 1;
1402    alloc->reserved_size_vec4 = 0;
1403 }
1404 
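/* Editor's note, illustration only: freeing a reservation returns exactly what
 * ir3_const_reserve_space charged, i.e. size + align - 1 vec4 (5 vec4 in the
 * example above), bringing reserved_vec4 back down accordingly.
 */
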
1405 void
1406 ir3_const_alloc_all_reserved_space(struct ir3_const_allocations *const_alloc)
1407 {
1408    for (int i = 0; i < IR3_CONST_ALLOC_MAX; i++) {
1409       if (const_alloc->consts[i].reserved_size_vec4 > 0) {
1410          ir3_const_alloc(const_alloc, i,
1411                          const_alloc->consts[i].reserved_size_vec4,
1412                          const_alloc->consts[i].reserved_align_vec4);
1413          const_alloc->consts[i].reserved_size_vec4 = 0;
1414       }
1415    }
1416    const_alloc->reserved_vec4 = 0;
1417 }
1418 
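/* Editor's sketch, not part of the driver: the reserve/realize lifecycle
 * implemented by the helpers above.  Worst-case space is reserved early
 * (step 2 of the flow described further below), later lowerings allocate from
 * the remaining budget, and every outstanding reservation is finally turned
 * into a real allocation.
 */
static inline void
example_reserved_lifecycle(void)
{
   struct ir3_const_allocations ca = {0};

   /* reserve 1 vec4 for primitive params; the budget is charged pessimistically */
   ir3_const_reserve_space(&ca, IR3_CONST_ALLOC_PRIMITIVE_PARAM, 1, 1);
   assert(ca.reserved_vec4 == 1);

   /* ... other lowerings may call ir3_const_alloc() here ... */

   /* realize all pending reservations at their recorded alignment */
   ir3_const_alloc_all_reserved_space(&ca);
   assert(ca.reserved_vec4 == 0);
   assert(ca.consts[IR3_CONST_ALLOC_PRIMITIVE_PARAM].size_vec4 == 1);
}
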
1419 void
1420 ir3_alloc_driver_params(struct ir3_const_allocations *const_alloc,
1421                         uint32_t *num_driver_params,
1422                         struct ir3_compiler *compiler,
1423                         gl_shader_stage shader_stage)
1424 {
1425    if (*num_driver_params == 0)
1426       return;
1427 
1428    /* num_driver_params is in dwords.  We only need to align to vec4s for the
1429     * common case of immediate constant uploads, but for indirect dispatch
1430     * the constants may also be loaded indirectly, so the area in const space
1431     * has to satisfy that alignment requirement as well.
1432     */
1433    *num_driver_params = align(*num_driver_params, 4);
1434    unsigned upload_unit = 1;
1435    if (shader_stage == MESA_SHADER_COMPUTE ||
1436        (*num_driver_params >= IR3_DP_VS(vtxid_base))) {
1437       upload_unit = compiler->const_upload_unit;
1438    }
1439 
1440    /* offset cannot be 0 for vs params loaded by CP_DRAW_INDIRECT_MULTI */
1441    if (shader_stage == MESA_SHADER_VERTEX && compiler->gen >= 6)
1442       const_alloc->max_const_offset_vec4 =
1443          MAX2(const_alloc->max_const_offset_vec4, 1);
1444 
1445    uint32_t driver_params_size_vec4 =
1446       align(*num_driver_params / 4, upload_unit);
1447    ir3_const_alloc(const_alloc, IR3_CONST_ALLOC_DRIVER_PARAMS,
1448                    driver_params_size_vec4, upload_unit);
1449 }
1450 
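/* Editor's note, illustration only: with 10 driver-param dwords and, purely
 * for the sake of the example, an upload_unit of 4 vec4 on the compute /
 * indirect path:
 *    align(10, 4)              = 12 dwords
 *    12 / 4                    = 3 vec4
 *    align(3, upload_unit = 4) = 4 vec4 allocated as IR3_CONST_ALLOC_DRIVER_PARAMS
 */
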
1451 /* Sets up the variant-dependent constant state for the ir3_shader.
1452  * The const allocation flow is as follows:
1453  * 1) Turnip/Freedreno allocates the consts required by the corresponding API,
1454  *    e.g. push consts, inline uniforms, etc., then passes ir3_const_allocations
1455  *    into IR3.
1456  * 2) ir3_setup_const_state allocates or reserves consts whose size is non-negotiable.
1457  * 3) IR3 lowerings afterwards allocate from the free space that is left.
1458  * 4) Offsets are finally assigned to the consts reserved in step 2).
1459  */
1460 void
1461 ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
1462                       struct ir3_const_state *const_state)
1463 {
1464    struct ir3_compiler *compiler = v->compiler;
1465    unsigned ptrsz = ir3_pointer_size(compiler);
1466 
1467    const_state->num_driver_params =
1468       ir3_nir_scan_driver_consts(compiler, nir, &const_state->image_dims);
1469 
1470    if ((compiler->gen < 5) && (v->stream_output.num_outputs > 0)) {
1471       const_state->num_driver_params =
1472          MAX2(const_state->num_driver_params, IR3_DP_VS(vtxcnt_max) + 1);
1473    }
1474 
1475    const_state->num_ubos = nir->info.num_ubos;
1476 
1477    assert((const_state->ubo_state.size % 16) == 0);
1478 
1479    /* IR3_CONST_ALLOC_DRIVER_PARAMS could have been allocated earlier. */
1480    if (const_state->allocs.consts[IR3_CONST_ALLOC_DRIVER_PARAMS].size_vec4 == 0) {
1481       ir3_alloc_driver_params(&const_state->allocs,
1482                               &const_state->num_driver_params, compiler,
1483                               v->type);
1484    }
1485 
1486    if (const_state->image_dims.count > 0) {
1487       ir3_const_reserve_space(&const_state->allocs, IR3_CONST_ALLOC_IMAGE_DIMS,
1488                               align(const_state->image_dims.count, 4) / 4, 1);
1489    }
1490 
1491    if (v->type == MESA_SHADER_KERNEL && v->cs.req_input_mem) {
1492       ir3_const_reserve_space(&const_state->allocs,
1493                               IR3_CONST_ALLOC_KERNEL_PARAMS,
1494                               align(v->cs.req_input_mem, 4) / 4, 1);
1495    }
1496 
1497    if ((v->type == MESA_SHADER_VERTEX) && (compiler->gen < 5) &&
1498        v->stream_output.num_outputs > 0) {
1499       ir3_const_reserve_space(&const_state->allocs, IR3_CONST_ALLOC_TFBO,
1500                               align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4, 1);
1501    }
1502 
1503    if (!compiler->load_shader_consts_via_preamble) {
1504       switch (v->type) {
1505       case MESA_SHADER_TESS_CTRL:
1506       case MESA_SHADER_TESS_EVAL:
1507          ir3_const_reserve_space(&const_state->allocs,
1508                                  IR3_CONST_ALLOC_PRIMITIVE_PARAM, 2, 1);
1509          break;
1510       case MESA_SHADER_GEOMETRY:
1511          ir3_const_reserve_space(&const_state->allocs,
1512                                  IR3_CONST_ALLOC_PRIMITIVE_PARAM, 1, 1);
1513          break;
1514       default:
1515          break;
1516       }
1517    }
1518 
1519    if (v->type == MESA_SHADER_VERTEX) {
1520       ir3_const_reserve_space(&const_state->allocs,
1521                               IR3_CONST_ALLOC_PRIMITIVE_PARAM, 1, 1);
1522    }
1523 
1524    if ((v->type == MESA_SHADER_TESS_CTRL || v->type == MESA_SHADER_TESS_EVAL ||
1525         v->type == MESA_SHADER_GEOMETRY)) {
1526       ir3_const_reserve_space(&const_state->allocs,
1527                               IR3_CONST_ALLOC_PRIMITIVE_MAP,
1528                               DIV_ROUND_UP(v->input_size, 4), 1);
1529    }
1530 
1531    assert(const_state->allocs.max_const_offset_vec4 <= ir3_max_const(v));
1532 }
1533 
1534 uint32_t
1535 ir3_const_state_get_free_space(const struct ir3_shader_variant *v,
1536                                const struct ir3_const_state *const_state,
1537                                uint32_t align_vec4)
1538 {
1539    uint32_t aligned_offset_vec4 =
1540       align(const_state->allocs.max_const_offset_vec4, align_vec4);
1541    uint32_t free_space_vec4 = ir3_max_const(v) - aligned_offset_vec4 -
1542                               const_state->allocs.reserved_vec4;
1543    return free_space_vec4;
1544 }
1545
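/* Editor's note, illustration only: with a made-up limit of
 * ir3_max_const(v) == 256 vec4, max_const_offset_vec4 == 10,
 * reserved_vec4 == 5 and a requested alignment of 4:
 *    align(10, 4) = 12
 *    256 - 12 - 5 = 239 vec4 of free space
 */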