1 /*
2 * Copyright © 2015 Rob Clark <robclark@freedesktop.org>
3 * SPDX-License-Identifier: MIT
4 *
5 * Authors:
6 * Rob Clark <robclark@freedesktop.org>
7 */
8
9 #include "util/u_debug.h"
10 #include "util/u_math.h"
11
12 #include "ir3_compiler.h"
13 #include "ir3_nir.h"
14 #include "ir3_shader.h"
15
16 /* For use by binning_pass shaders, where const_state is const but is expected
17 * to have already been set up when we compiled the corresponding non-binning variant.
18 */
19 nir_def *
20 ir3_get_shared_driver_ubo(nir_builder *b, const struct ir3_driver_ubo *ubo)
21 {
22 assert(ubo->idx > 0);
23
24 /* The binning shader shares ir3_driver_ubo definitions but not shader info */
25 b->shader->info.num_ubos = MAX2(b->shader->info.num_ubos, ubo->idx + 1);
26 return nir_imm_int(b, ubo->idx);
27 }
28
29 nir_def *
30 ir3_get_driver_ubo(nir_builder *b, struct ir3_driver_ubo *ubo)
31 {
32 /* Pick a UBO index to use as our constant data. Skip UBO 0 since that's
33 * reserved for gallium's cb0.
34 */
35 if (ubo->idx == -1) {
36 if (b->shader->info.num_ubos == 0)
37 b->shader->info.num_ubos++;
38 ubo->idx = b->shader->info.num_ubos++;
39 return nir_imm_int(b, ubo->idx);
40 }
41
42 return ir3_get_shared_driver_ubo(b, ubo);
43 }
44
45 nir_def *
46 ir3_get_driver_consts_ubo(nir_builder *b, struct ir3_shader_variant *v)
47 {
48 if (v->binning_pass)
49 return ir3_get_shared_driver_ubo(b, &ir3_const_state(v)->consts_ubo);
50 return ir3_get_driver_ubo(b, &ir3_const_state_mut(v)->consts_ubo);
51 }
52
53 static const struct glsl_type *
54 get_driver_ubo_type(const struct ir3_driver_ubo *ubo)
55 {
56 return glsl_array_type(glsl_uint_type(), ubo->size, 0);
57 }
58
59 /* Create or update the size of a driver-ubo: */
60 void
61 ir3_update_driver_ubo(nir_shader *nir, const struct ir3_driver_ubo *ubo, const char *name)
62 {
63 if (ubo->idx < 0)
64 return;
65
66
67 nir_foreach_variable_in_shader(var, nir) {
68 if (var->data.mode != nir_var_mem_ubo)
69 continue;
70 if (var->data.binding != ubo->idx)
71 continue;
72
73 /* UBO already exists, make sure it is big enough: */
74 if (glsl_array_size(var->type) < ubo->size)
75 var->type = get_driver_ubo_type(ubo);
return;
76 }
77
78 /* UBO variable does not exist yet, so create it: */
79 nir_variable *var =
80 nir_variable_create(nir, nir_var_mem_ubo, get_driver_ubo_type(ubo), name);
81 var->data.driver_location = ubo->idx;
82 }
83
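/* Note: `offset` below is in dwords; it is converted to a byte offset for
 * nir_load_ubo, and the alignment info tells NIR which dword of a 16-byte
 * aligned vec4 the load starts at. Worked example (illustrative numbers
 * only): offset = 6 dwords gives a byte offset of 24, align_mul = 16 and
 * align_offset = (6 % 4) * 4 = 8, i.e. the third dword of its vec4.
 */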
84 static nir_def *
85 load_driver_ubo(nir_builder *b, unsigned components, nir_def *ubo, unsigned offset)
86 {
87 return nir_load_ubo(b, components, 32, ubo,
88 nir_imm_int(b, offset * sizeof(uint32_t)),
89 .align_mul = 16,
90 .align_offset = (offset % 4) * sizeof(uint32_t),
91 .range_base = offset * sizeof(uint32_t),
92 .range = components * sizeof(uint32_t));
93 }
94
95 /* For use by binning_pass shaders, where const_state is const but is expected
96 * to have already been set up when we compiled the corresponding non-binning variant.
97 */
98 nir_def *
99 ir3_load_shared_driver_ubo(nir_builder *b, unsigned components,
100 const struct ir3_driver_ubo *ubo,
101 unsigned offset)
102 {
103 assert(ubo->size >= offset + components);
104
105 return load_driver_ubo(b, components, ir3_get_shared_driver_ubo(b, ubo), offset);
106 }
107
108 nir_def *
109 ir3_load_driver_ubo(nir_builder *b, unsigned components,
110 struct ir3_driver_ubo *ubo,
111 unsigned offset)
112 {
113 ubo->size = MAX2(ubo->size, offset + components);
114
115 return load_driver_ubo(b, components, ir3_get_driver_ubo(b, ubo), offset);
116 }
117
118 nir_def *
119 ir3_load_driver_ubo_indirect(nir_builder *b, unsigned components,
120 struct ir3_driver_ubo *ubo,
121 unsigned base, nir_def *offset,
122 unsigned range)
123 {
124 assert(range > 0);
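/* `offset` indexes vec4s (it is scaled by 16 bytes below), so an indirect
 * access with `range` possible vec4 slots covers (range - 1) * 4 dwords
 * beyond `base` plus the `components` actually loaded.
 */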
125 ubo->size = MAX2(ubo->size, base + components + (range - 1) * 4);
126
127 return nir_load_ubo(b, components, 32, ir3_get_driver_ubo(b, ubo),
128 nir_iadd(b, nir_imul24(b, offset, nir_imm_int(b, 16)),
129 nir_imm_int(b, base * sizeof(uint32_t))),
130 .align_mul = 16,
131 .align_offset = (base % 4) * sizeof(uint32_t),
132 .range_base = base * sizeof(uint32_t),
133 .range = components * sizeof(uint32_t) +
134 (range - 1) * 16);
135 }
136
137 static bool
138 ir3_nir_should_scalarize_mem(const nir_instr *instr, const void *data)
139 {
140 const struct ir3_compiler *compiler = data;
141 const nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
142
143 /* Scalarize load_ssbo's that we could otherwise lower to isam,
144 * as the tex cache benefit outweighs the benefit of vectorizing.
145 * Don't do this if (vectorized) isam.v is supported.
146 */
147 if ((intrin->intrinsic == nir_intrinsic_load_ssbo) &&
148 (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER) &&
149 compiler->has_isam_ssbo && !compiler->has_isam_v) {
150 return true;
151 }
152
153 if ((intrin->intrinsic == nir_intrinsic_load_ssbo &&
154 intrin->def.bit_size == 8) ||
155 (intrin->intrinsic == nir_intrinsic_store_ssbo &&
156 intrin->src[0].ssa->bit_size == 8)) {
157 return true;
158 }
159
160 return false;
161 }
162
163 static bool
164 ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
165 unsigned bit_size, unsigned num_components,
166 int64_t hole_size, nir_intrinsic_instr *low,
167 nir_intrinsic_instr *high, void *data)
168 {
169 if (hole_size > 0 || !nir_num_components_valid(num_components))
170 return false;
171
172 struct ir3_compiler *compiler = data;
173 unsigned byte_size = bit_size / 8;
174
175 if (low->intrinsic == nir_intrinsic_load_const_ir3)
176 return bit_size <= 32 && num_components <= 4;
177
178 if (low->intrinsic == nir_intrinsic_store_const_ir3)
179 return bit_size == 32 && num_components <= 4;
180
181 /* Don't vectorize load_ssbo's that we could otherwise lower to isam,
182 * as the tex cache benefit outweighs the benefit of vectorizing. If we
183 * support isam.v, we can vectorize this though.
184 */
185 if ((low->intrinsic == nir_intrinsic_load_ssbo) &&
186 (nir_intrinsic_access(low) & ACCESS_CAN_REORDER) &&
187 compiler->has_isam_ssbo && !compiler->has_isam_v) {
188 return false;
189 }
190
191 if (low->intrinsic != nir_intrinsic_load_ubo) {
192 return bit_size <= 32 && align_mul >= byte_size &&
193 align_offset % byte_size == 0 &&
194 num_components <= 4;
195 }
196
197 assert(bit_size >= 8);
198 if (bit_size != 32)
199 return false;
200
201 int size = num_components * byte_size;
202
203 /* Don't care about alignment past vec4. */
204 assert(util_is_power_of_two_nonzero(align_mul));
205 align_mul = MIN2(align_mul, 16);
206 align_offset &= 15;
207
208 /* Our offset alignment should always be at least 4 bytes */
209 if (align_mul < 4)
210 return false;
211
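/* Reject the merged access if it could straddle a vec4 (16 byte) boundary.
 * Worked example (illustrative numbers only): align_mul = 4, align_offset = 0
 * and a 16-byte vec4 load gives worst_start_offset = 16 - 4 + 0 = 12, and
 * 12 + 16 > 16, so the merge is rejected; with align_mul = 16 it would be
 * accepted.
 */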
212 unsigned worst_start_offset = 16 - align_mul + align_offset;
213 if (worst_start_offset + size > 16)
214 return false;
215
216 return true;
217 }
218
219 static unsigned
220 ir3_lower_bit_size(const nir_instr *instr, UNUSED void *data)
221 {
222 if (instr->type == nir_instr_type_intrinsic) {
223 nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
224 switch (intrinsic->intrinsic) {
225 case nir_intrinsic_exclusive_scan:
226 case nir_intrinsic_inclusive_scan:
227 case nir_intrinsic_quad_broadcast:
228 case nir_intrinsic_quad_swap_diagonal:
229 case nir_intrinsic_quad_swap_horizontal:
230 case nir_intrinsic_quad_swap_vertical:
231 case nir_intrinsic_reduce:
232 return intrinsic->def.bit_size == 8 ? 16 : 0;
233 default:
234 break;
235 }
236 }
237
238 if (instr->type == nir_instr_type_alu) {
239 nir_alu_instr *alu = nir_instr_as_alu(instr);
240 switch (alu->op) {
241 case nir_op_iabs:
242 case nir_op_iadd_sat:
243 case nir_op_imax:
244 case nir_op_imin:
245 case nir_op_ineg:
246 case nir_op_ishl:
247 case nir_op_ishr:
248 case nir_op_isub_sat:
249 case nir_op_uadd_sat:
250 case nir_op_umax:
251 case nir_op_umin:
252 case nir_op_ushr:
253 return alu->def.bit_size == 8 ? 16 : 0;
254 case nir_op_ieq:
255 case nir_op_ige:
256 case nir_op_ilt:
257 case nir_op_ine:
258 case nir_op_uge:
259 case nir_op_ult:
260 return nir_src_bit_size(alu->src[0].src) == 8 ? 16 : 0;
261 default:
262 break;
263 }
264 }
265
266 return 0;
267 }
268
269 static void
270 ir3_get_variable_size_align_bytes(const glsl_type *type, unsigned *size, unsigned *align)
271 {
272 switch (type->base_type) {
273 case GLSL_TYPE_ARRAY:
274 case GLSL_TYPE_INTERFACE:
275 case GLSL_TYPE_STRUCT:
276 glsl_size_align_handle_array_and_structs(type, ir3_get_variable_size_align_bytes,
277 size, align);
278 break;
279 case GLSL_TYPE_UINT8:
280 case GLSL_TYPE_INT8:
281 /* 8-bit values are handled through 16-bit half-registers, so the resulting size
282 * and alignment value has to be doubled to reflect the actual variable size
283 * requirement.
284 */
285 *size = 2 * glsl_get_components(type);
286 *align = 2;
287 break;
288 default:
289 glsl_get_natural_size_align_bytes(type, size, align);
290 break;
291 }
292 }
293
294 #define OPT(nir, pass, ...) \
295 ({ \
296 bool this_progress = false; \
297 NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
298 this_progress; \
299 })
300
301 #define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
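/* OPT() runs a pass via NIR_PASS and evaluates to whether the pass reported
 * progress; OPT_V() wraps NIR_PASS_V for passes run without tracking progress.
 */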
302
303 bool
304 ir3_optimize_loop(struct ir3_compiler *compiler,
305 const struct ir3_shader_nir_options *options,
306 nir_shader *s)
307 {
308 MESA_TRACE_FUNC();
309
310 bool progress;
311 bool did_progress = false;
312 unsigned lower_flrp = (s->options->lower_flrp16 ? 16 : 0) |
313 (s->options->lower_flrp32 ? 32 : 0) |
314 (s->options->lower_flrp64 ? 64 : 0);
315
316 do {
317 progress = false;
318
319 OPT_V(s, nir_lower_vars_to_ssa);
320 progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
321 progress |= OPT(s, nir_lower_phis_to_scalar, false);
322
323 progress |= OPT(s, nir_copy_prop);
324 progress |= OPT(s, nir_opt_deref);
325 progress |= OPT(s, nir_opt_dce);
326 progress |= OPT(s, nir_opt_cse);
327
328 progress |= OPT(s, nir_opt_find_array_copies);
329 progress |= OPT(s, nir_opt_copy_prop_vars);
330 progress |= OPT(s, nir_opt_dead_write_vars);
331 progress |= OPT(s, nir_split_struct_vars, nir_var_function_temp);
332
333 static int gcm = -1;
334 if (gcm == -1)
335 gcm = debug_get_num_option("GCM", 0);
336 if (gcm == 1)
337 progress |= OPT(s, nir_opt_gcm, true);
338 else if (gcm == 2)
339 progress |= OPT(s, nir_opt_gcm, false);
340 progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
341 progress |= OPT(s, nir_opt_intrinsics);
342 /* NOTE: GS lowering inserts an output var with a varying slot that
343 * is larger than VARYING_SLOT_MAX (i.e. GS_VERTEX_FLAGS_IR3),
344 * which triggers asserts in nir_shader_gather_info(). To work
345 * around that, skip lowering phi precision for GS.
346 *
347 * Calling nir_shader_gather_info() late also seems to cause
348 * problems for tess lowering, so for now, since we only enable
349 * fp16/int16 for frag and compute, skip phi precision lowering
350 * for other stages.
351 */
352 if ((s->info.stage == MESA_SHADER_FRAGMENT) ||
353 (s->info.stage == MESA_SHADER_COMPUTE) ||
354 (s->info.stage == MESA_SHADER_KERNEL)) {
355 progress |= OPT(s, nir_opt_phi_precision);
356 }
357 progress |= OPT(s, nir_opt_algebraic);
358 progress |= OPT(s, nir_lower_alu);
359 progress |= OPT(s, nir_lower_pack);
360 progress |= OPT(s, nir_lower_bit_size, ir3_lower_bit_size, NULL);
361 progress |= OPT(s, nir_opt_constant_folding);
362
363 const nir_opt_offsets_options offset_options = {
364 /* How large an offset we can encode in the instr's immediate field.
365 */
366 .uniform_max = (1 << 9) - 1,
367
368 /* STL/LDL have 13b for offset with MSB being a sign bit, but this opt
369 * doesn't deal with negative offsets.
370 */
371 .shared_max = (1 << 12) - 1,
372
373 .buffer_max = 0,
374 .max_offset_cb = ir3_nir_max_imm_offset,
375 .max_offset_data = compiler,
376 .allow_offset_wrap = true,
377 };
378 progress |= OPT(s, nir_opt_offsets, &offset_options);
379
380 nir_load_store_vectorize_options vectorize_opts = {
381 .modes = nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_uniform,
382 .callback = ir3_nir_should_vectorize_mem,
383 .robust_modes = options->robust_modes,
384 .cb_data = compiler,
385 };
386 progress |= OPT(s, nir_opt_load_store_vectorize, &vectorize_opts);
387
388 if (lower_flrp != 0) {
389 if (OPT(s, nir_lower_flrp, lower_flrp, false /* always_precise */)) {
390 OPT(s, nir_opt_constant_folding);
391 progress = true;
392 }
393
394 /* Nothing should rematerialize any flrps, so we only
395 * need to do this lowering once.
396 */
397 lower_flrp = 0;
398 }
399
400 progress |= OPT(s, nir_opt_dead_cf);
401 if (OPT(s, nir_opt_loop)) {
402 progress |= true;
403 /* If nir_opt_loop makes progress, then we need to clean
404 * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
405 * to make progress.
406 */
407 OPT(s, nir_copy_prop);
408 OPT(s, nir_opt_dce);
409 }
410 progress |= OPT(s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
411 progress |= OPT(s, nir_opt_loop_unroll);
412 progress |= OPT(s, nir_opt_remove_phis);
413 progress |= OPT(s, nir_opt_undef);
414 did_progress |= progress;
415 } while (progress);
416
417 OPT(s, nir_lower_var_copies);
418 return did_progress;
419 }
420
421 static bool
422 should_split_wrmask(const nir_instr *instr, const void *data)
423 {
424 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
425
426 switch (intr->intrinsic) {
427 case nir_intrinsic_store_ssbo:
428 case nir_intrinsic_store_shared:
429 case nir_intrinsic_store_global:
430 case nir_intrinsic_store_scratch:
431 return true;
432 default:
433 return false;
434 }
435 }
436
437 static bool
438 ir3_nir_lower_ssbo_size_filter(const nir_instr *instr, const void *data)
439 {
440 return instr->type == nir_instr_type_intrinsic &&
441 nir_instr_as_intrinsic(instr)->intrinsic ==
442 nir_intrinsic_get_ssbo_size;
443 }
444
445 static nir_def *
446 ir3_nir_lower_ssbo_size_instr(nir_builder *b, nir_instr *instr, void *data)
447 {
448 uint8_t ssbo_size_to_bytes_shift = *(uint8_t *) data;
449 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
450 return nir_ishl_imm(b, &intr->def, ssbo_size_to_bytes_shift);
451 }
452
453 static bool
454 ir3_nir_lower_ssbo_size(nir_shader *s, uint8_t ssbo_size_to_bytes_shift)
455 {
456 return nir_shader_lower_instructions(s, ir3_nir_lower_ssbo_size_filter,
457 ir3_nir_lower_ssbo_size_instr,
458 &ssbo_size_to_bytes_shift);
459 }
460
461 void
462 ir3_nir_lower_io_to_temporaries(nir_shader *s)
463 {
464 /* Outputs consumed by the VPC, VS inputs, and FS outputs are all handled
465 * by the hardware pre-loading registers at the beginning and then reading
466 * them at the end, so we can't access them indirectly except through
467 * normal register-indirect accesses, and therefore ir3 doesn't support
468 * indirect accesses on those. Other i/o is lowered in ir3_nir_lower_tess,
469 * and indirects work just fine for those. GS outputs may be consumed by
470 * VPC, but have their own lowering in ir3_nir_lower_gs() which does
471 * something similar to nir_lower_io_to_temporaries so we shouldn't need
472 * to lower them.
473 *
474 * Note: this might be a little inefficient for VS or TES outputs when the
475 * next stage isn't an FS, but it probably doesn't make sense to depend on
476 * the next stage before variant creation.
477 *
478 * TODO: for gallium, mesa/st also does some redundant lowering, including
479 * running this pass for GS inputs/outputs which we don't want but not
480 * including TES outputs or FS inputs which we do need. We should probably
481 * stop doing that once we're sure all drivers are doing their own
482 * indirect i/o lowering.
483 */
484 bool lower_input = s->info.stage == MESA_SHADER_VERTEX ||
485 s->info.stage == MESA_SHADER_FRAGMENT;
486 bool lower_output = s->info.stage != MESA_SHADER_TESS_CTRL &&
487 s->info.stage != MESA_SHADER_GEOMETRY;
488 if (lower_input || lower_output) {
489 NIR_PASS_V(s, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(s),
490 lower_output, lower_input);
491
492 /* nir_lower_io_to_temporaries() creates global variables and copy
493 * instructions which need to be cleaned up.
494 */
495 NIR_PASS_V(s, nir_split_var_copies);
496 NIR_PASS_V(s, nir_lower_var_copies);
497 NIR_PASS_V(s, nir_lower_global_vars_to_local);
498 }
499
500 /* Regardless of the above, we need to lower indirect references to
501 * compact variables such as clip/cull distances because due to how
502 * TCS<->TES IO works we cannot handle indirect accesses that "straddle"
503 * vec4 components. nir_lower_indirect_derefs has a special case for
504 * compact variables, so it will actually lower them even though we pass
505 * in 0 modes.
506 *
507 * Using temporaries would be slightly better but
508 * nir_lower_io_to_temporaries currently doesn't support TCS i/o.
509 */
510 NIR_PASS_V(s, nir_lower_indirect_derefs, 0, UINT32_MAX);
511 }
512
513 /**
514 * Inserts an add of 0.5 to floating point array index values in texture coordinates.
515 */
516 static bool
517 ir3_nir_lower_array_sampler_cb(struct nir_builder *b, nir_instr *instr, void *_data)
518 {
519 if (instr->type != nir_instr_type_tex)
520 return false;
521
522 nir_tex_instr *tex = nir_instr_as_tex(instr);
523 if (!tex->is_array || tex->op == nir_texop_lod)
524 return false;
525
526 int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
527 if (coord_idx == -1 ||
528 nir_tex_instr_src_type(tex, coord_idx) != nir_type_float)
529 return false;
530
531 b->cursor = nir_before_instr(&tex->instr);
532
533 unsigned ncomp = tex->coord_components;
534 nir_def *src = tex->src[coord_idx].src.ssa;
535
536 assume(ncomp >= 1);
537 nir_def *ai = nir_channel(b, src, ncomp - 1);
538 ai = nir_fadd_imm(b, ai, 0.5);
539 nir_src_rewrite(&tex->src[coord_idx].src,
540 nir_vector_insert_imm(b, src, ai, ncomp - 1));
541 return true;
542 }
543
544 static bool
545 ir3_nir_lower_array_sampler(nir_shader *shader)
546 {
547 return nir_shader_instructions_pass(
548 shader, ir3_nir_lower_array_sampler_cb,
549 nir_metadata_control_flow, NULL);
550 }
551
552 void
553 ir3_finalize_nir(struct ir3_compiler *compiler,
554 const struct ir3_shader_nir_options *options,
555 nir_shader *s)
556 {
557 MESA_TRACE_FUNC();
558
559 struct nir_lower_tex_options tex_options = {
560 .lower_rect = 0,
561 .lower_tg4_offsets = true,
562 .lower_invalid_implicit_lod = true,
563 .lower_index_to_offset = true,
564 };
565
566 if (compiler->gen >= 4) {
567 /* a4xx seems to have *no* sam.p */
568 tex_options.lower_txp = ~0; /* lower all txp */
569 } else {
570 /* a3xx just needs to avoid sam.p for 3d tex */
571 tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
572 }
573
574 if (ir3_shader_debug & IR3_DBG_DISASM) {
575 mesa_logi("----------------------");
576 nir_log_shaderi(s);
577 mesa_logi("----------------------");
578 }
579
580 if (s->info.stage == MESA_SHADER_GEOMETRY)
581 NIR_PASS_V(s, ir3_nir_lower_gs);
582
583 NIR_PASS_V(s, nir_lower_frexp);
584 NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);
585
586 OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);
587
588 OPT_V(s, nir_lower_tex, &tex_options);
589 OPT_V(s, nir_lower_load_const_to_scalar);
590
591 if (compiler->array_index_add_half)
592 OPT_V(s, ir3_nir_lower_array_sampler);
593
594 OPT_V(s, nir_lower_is_helper_invocation);
595
596 ir3_optimize_loop(compiler, options, s);
597
598 /* do idiv lowering after first opt loop to get a chance to propagate
599 * constants for divide by immed power-of-two:
600 */
601 nir_lower_idiv_options idiv_options = {
602 .allow_fp16 = true,
603 };
604 bool idiv_progress = OPT(s, nir_opt_idiv_const, 8);
605 idiv_progress |= OPT(s, nir_lower_idiv, &idiv_options);
606
607 if (idiv_progress)
608 ir3_optimize_loop(compiler, options, s);
609
610 OPT_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
611
612 if (ir3_shader_debug & IR3_DBG_DISASM) {
613 mesa_logi("----------------------");
614 nir_log_shaderi(s);
615 mesa_logi("----------------------");
616 }
617
618 /* st_program.c's parameter list optimization requires that future nir
619 * variants don't reallocate the uniform storage, so we have to remove
620 * uniforms that occupy storage. But we don't want to remove samplers,
621 * because they're needed for YUV variant lowering.
622 */
623 nir_foreach_uniform_variable_safe (var, s) {
624 if (var->data.mode == nir_var_uniform &&
625 (glsl_type_get_image_count(var->type) ||
626 glsl_type_get_sampler_count(var->type)))
627 continue;
628
629 exec_node_remove(&var->node);
630 }
631 nir_validate_shader(s, "after uniform var removal");
632
633 nir_sweep(s);
634 }
635
636 static bool
637 lower_subgroup_id_filter(const nir_instr *instr, const void *unused)
638 {
639 (void)unused;
640
641 if (instr->type != nir_instr_type_intrinsic)
642 return false;
643
644 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
645 return intr->intrinsic == nir_intrinsic_load_subgroup_invocation ||
646 intr->intrinsic == nir_intrinsic_load_subgroup_id ||
647 intr->intrinsic == nir_intrinsic_load_num_subgroups;
648 }
649
650 static nir_def *
651 lower_subgroup_id(nir_builder *b, nir_instr *instr, void *_shader)
652 {
653 struct ir3_shader *shader = _shader;
654
655 /* Vulkan allows implementations to tile workgroup invocations even when
656 * subgroup operations are involved, which is implied by this Note:
657 *
658 * "There is no direct relationship between SubgroupLocalInvocationId and
659 * LocalInvocationId or LocalInvocationIndex."
660 *
661 * However there is no way to get SubgroupId directly, so we have to use
662 * LocalInvocationIndex here. This means that whenever we do this lowering we
663 * have to force linear dispatch to make sure that the relation between
664 * SubgroupId/SubgroupLocalInvocationId and LocalInvocationIndex is what we
665 * expect, unless the shader forces us to do the quad layout in which case we
666 * have to use the tiled layout.
667 */
668 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
669 if (intr->intrinsic == nir_intrinsic_load_subgroup_id &&
670 shader->nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
671 /* We have to manually figure out which subgroup we're in using the
672 * tiling. The tiling is 4x4, unless one of the dimensions is not a
673 * multiple of 4 in which case it drops to 2.
674 */
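/* Worked example (illustrative numbers, assuming subgroup_id_shift is log2 of
 * the subgroup size, e.g. 6 for a wave of 64): with an 8x6x1 workgroup,
 * x_shift = 2 (8 is a multiple of 4) and y_shift = 1 (6 is not), so tiles are
 * 4x2 = 8 invocations and there are 8 >> 2 = 2 tiles per row. Invocation
 * (5, 3) lands in tile_id = (3 >> 1) * 2 + (5 >> 2) = 3, and its subgroup id
 * is 3 >> (6 - 3) = 0.
 */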
675 nir_def *local_size = nir_load_workgroup_size(b);
676 nir_def *local_size_x = nir_channel(b, local_size, 0);
677 nir_def *local_size_y = nir_channel(b, local_size, 1);
678 /* Calculate the shift from invocation to tile index for x and y */
679 nir_def *x_shift = nir_bcsel(b,
680 nir_ieq_imm(b,
681 nir_iand_imm(b, local_size_x, 3),
682 0),
683 nir_imm_int(b, 2), nir_imm_int(b, 1));
684 nir_def *y_shift = nir_bcsel(b,
685 nir_ieq_imm(b,
686 nir_iand_imm(b, local_size_y, 3),
687 0),
688 nir_imm_int(b, 2), nir_imm_int(b, 1));
689 nir_def *id = nir_load_local_invocation_id(b);
690 nir_def *id_x = nir_channel(b, id, 0);
691 nir_def *id_y = nir_channel(b, id, 1);
692 /* Calculate which tile we're in */
693 nir_def *tile_id =
694 nir_iadd(b, nir_imul24(b, nir_ishr(b, id_y, y_shift),
695 nir_ishr(b, local_size_x, x_shift)),
696 nir_ishr(b, id_x, x_shift));
697 /* Finally calculate the subgroup id */
698 return nir_ishr(b, tile_id, nir_isub(b,
699 nir_load_subgroup_id_shift_ir3(b),
700 nir_iadd(b, x_shift, y_shift)));
701 }
702
703 /* Just use getfiberid if we have to use tiling */
704 if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation &&
705 shader->nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
706 return NULL;
707 }
708
709
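/* With linear dispatch, LocalInvocationIndex enumerates invocations in
 * subgroup order, so the low log2(subgroup_size) bits give the subgroup
 * invocation and the remaining high bits give the subgroup id (subgroup
 * sizes are powers of two here).
 */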
710 if (intr->intrinsic == nir_intrinsic_load_subgroup_invocation) {
711 shader->cs.force_linear_dispatch = true;
712 return nir_iand(
713 b, nir_load_local_invocation_index(b),
714 nir_iadd_imm(b, nir_load_subgroup_size(b), -1));
715 } else if (intr->intrinsic == nir_intrinsic_load_subgroup_id) {
716 shader->cs.force_linear_dispatch = true;
717 return nir_ishr(b, nir_load_local_invocation_index(b),
718 nir_load_subgroup_id_shift_ir3(b));
719 } else {
720 assert(intr->intrinsic == nir_intrinsic_load_num_subgroups);
721 /* If the workgroup size is constant,
722 * nir_lower_compute_system_values() will replace local_size with a
723 * constant so this can mostly be constant folded away.
724 */
725 nir_def *local_size = nir_load_workgroup_size(b);
726 nir_def *size =
727 nir_imul24(b, nir_channel(b, local_size, 0),
728 nir_imul24(b, nir_channel(b, local_size, 1),
729 nir_channel(b, local_size, 2)));
730 nir_def *one = nir_imm_int(b, 1);
731 return nir_iadd(b, one,
732 nir_ishr(b, nir_isub(b, size, one),
733 nir_load_subgroup_id_shift_ir3(b)));
734 }
735 }
736
737 static bool
738 ir3_nir_lower_subgroup_id_cs(nir_shader *nir, struct ir3_shader *shader)
739 {
740 return nir_shader_lower_instructions(nir, lower_subgroup_id_filter,
741 lower_subgroup_id, shader);
742 }
743
744 /**
745 * Late passes that need to be done after pscreen->finalize_nir()
746 */
747 void
748 ir3_nir_post_finalize(struct ir3_shader *shader)
749 {
750 struct nir_shader *s = shader->nir;
751 struct ir3_compiler *compiler = shader->compiler;
752
753 MESA_TRACE_FUNC();
754
755 NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
756 ir3_glsl_type_size, nir_lower_io_lower_64bit_to_32 |
757 nir_lower_io_use_interpolated_input_intrinsics);
758
759 if (s->info.stage == MESA_SHADER_FRAGMENT) {
760 /* NOTE: lower load_barycentric_at_sample first, since it
761 * produces load_barycentric_at_offset:
762 */
763 NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_sample);
764 NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_offset);
765 NIR_PASS_V(s, ir3_nir_move_varying_inputs);
766 NIR_PASS_V(s, nir_lower_fb_read);
767 NIR_PASS_V(s, ir3_nir_lower_layer_id);
768 NIR_PASS_V(s, ir3_nir_lower_frag_shading_rate);
769 }
770
771 if (s->info.stage == MESA_SHADER_VERTEX || s->info.stage == MESA_SHADER_GEOMETRY) {
772 NIR_PASS_V(s, ir3_nir_lower_primitive_shading_rate);
773 }
774
775 if (compiler->gen >= 6 && s->info.stage == MESA_SHADER_FRAGMENT &&
776 !(ir3_shader_debug & IR3_DBG_NOFP16)) {
777 /* Lower FS mediump inputs to 16-bit. If you declared it mediump, you
778 * probably want 16-bit instructions (and have set
779 * mediump/RelaxedPrecision on most of the rest of the shader's
780 * instructions). If we don't lower it in NIR, then comparisons of the
781 * results of mediump ALU ops with the mediump input will happen in highp,
782 * causing extra conversions (and, incidentally, causing
783 * dEQP-GLES2.functional.shaders.algorithm.rgb_to_hsl_fragment on ANGLE to
784 * fail)
785 *
786 * However, we can't do flat inputs because flat.b doesn't have the
787 * destination type for how to downconvert the
788 * 32-bit-in-the-varyings-interpolator value. (also, even if it did, watch
789 * out for how gl_nir_lower_packed_varyings packs all flat-interpolated
790 * things together as ivec4s, so when we lower a formerly-float input
791 * you'd end up with an incorrect f2f16(i2i32(load_input())) instead of
792 * load_input).
793 */
794 uint64_t mediump_varyings = 0;
795 nir_foreach_shader_in_variable(var, s) {
796 if ((var->data.precision == GLSL_PRECISION_MEDIUM ||
797 var->data.precision == GLSL_PRECISION_LOW) &&
798 var->data.interpolation != INTERP_MODE_FLAT) {
799 mediump_varyings |= BITFIELD64_BIT(var->data.location);
800 }
801 }
802
803 if (mediump_varyings) {
804 NIR_PASS_V(s, nir_lower_mediump_io,
805 nir_var_shader_in,
806 mediump_varyings,
807 false);
808 }
809
810 /* This should come after input lowering, to opportunistically lower non-mediump outputs. */
811 NIR_PASS_V(s, nir_lower_mediump_io, nir_var_shader_out, 0, false);
812 }
813
814 {
815 /* If the API-facing subgroup size is forced to a particular value, lower
816 * it here. Beyond this point nir_intrinsic_load_subgroup_size will return
817 * the "real" subgroup size.
818 */
819 unsigned subgroup_size = 0, max_subgroup_size = 0;
820 ir3_shader_get_subgroup_size(compiler, &shader->options, s->info.stage,
821 &subgroup_size, &max_subgroup_size);
822
823 nir_lower_subgroups_options options = {
824 .subgroup_size = subgroup_size,
825 .ballot_bit_size = 32,
826 .ballot_components = max_subgroup_size / 32,
827 .lower_to_scalar = true,
828 .lower_vote_eq = true,
829 .lower_vote_bool_eq = true,
830 .lower_subgroup_masks = true,
831 .lower_read_invocation_to_cond = true,
832 .lower_shuffle = !compiler->has_shfl,
833 .lower_relative_shuffle = !compiler->has_shfl,
834 .lower_rotate_to_shuffle = !compiler->has_shfl,
835 .lower_rotate_clustered_to_shuffle = true,
836 .lower_inverse_ballot = true,
837 .lower_reduce = true,
838 .filter = ir3_nir_lower_subgroups_filter,
839 .filter_data = compiler,
840 };
841
842 if (!((s->info.stage == MESA_SHADER_COMPUTE) ||
843 (s->info.stage == MESA_SHADER_KERNEL) ||
844 compiler->has_getfiberid)) {
845 options.subgroup_size = 1;
846 options.lower_vote_trivial = true;
847 }
848
849 OPT(s, nir_lower_subgroups, &options);
850 OPT(s, ir3_nir_lower_shuffle, shader);
851 }
852
853 if ((s->info.stage == MESA_SHADER_COMPUTE) ||
854 (s->info.stage == MESA_SHADER_KERNEL)) {
855 bool progress = false;
856 NIR_PASS(progress, s, ir3_nir_lower_subgroup_id_cs, shader);
857
858 if (s->info.derivative_group == DERIVATIVE_GROUP_LINEAR)
859 shader->cs.force_linear_dispatch = true;
860
861 /* ir3_nir_lower_subgroup_id_cs creates extra compute intrinsics which
862 * we need to lower again.
863 */
864 if (progress)
865 NIR_PASS_V(s, nir_lower_compute_system_values, NULL);
866 }
867
868 /* we cannot ensure that ir3_finalize_nir() is only called once, so
869 * we also need to do any run-once workarounds here:
870 */
871 OPT_V(s, ir3_nir_apply_trig_workarounds);
872
873 const nir_lower_image_options lower_image_opts = {
874 .lower_cube_size = true,
875 .lower_image_samples_to_one = true
876 };
877 NIR_PASS_V(s, nir_lower_image, &lower_image_opts);
878
879 const nir_lower_idiv_options lower_idiv_options = {
880 .allow_fp16 = true,
881 };
882 NIR_PASS_V(s, nir_lower_idiv, &lower_idiv_options); /* idiv generated by cube lowering */
883
884
885 /* The resinfo opcode returns the size in dwords on a4xx */
886 if (compiler->gen == 4)
887 OPT_V(s, ir3_nir_lower_ssbo_size, 2);
888
889 /* The resinfo opcode we have for getting the SSBO size on a6xx returns a
890 * byte length divided by IBO_0_FMT, while the NIR intrinsic coming in is a
891 * number of bytes. Switch things so the NIR intrinsic in our backend means
892 * dwords.
893 */
894 if (compiler->gen >= 6)
895 OPT_V(s, ir3_nir_lower_ssbo_size, compiler->options.storage_16bit ? 1 : 2);
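/* ir3_nir_lower_ssbo_size() shifts the value the hw reports left by the given
 * amount to get back to bytes: 2 when the hw reports dwords, 1 when
 * storage_16bit makes it report 16-bit units.
 */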
896
897 ir3_optimize_loop(compiler, &shader->options.nir_options, s);
898 }
899
900 static bool
901 lower_ucp_vs(struct ir3_shader_variant *so)
902 {
903 if (!so->key.ucp_enables)
904 return false;
905
906 gl_shader_stage last_geom_stage;
907
908 if (so->key.has_gs) {
909 last_geom_stage = MESA_SHADER_GEOMETRY;
910 } else if (so->key.tessellation) {
911 last_geom_stage = MESA_SHADER_TESS_EVAL;
912 } else {
913 last_geom_stage = MESA_SHADER_VERTEX;
914 }
915
916 return so->type == last_geom_stage;
917 }
918
919 static bool
920 output_slot_used_for_binning(gl_varying_slot slot)
921 {
922 return slot == VARYING_SLOT_POS || slot == VARYING_SLOT_PSIZ ||
923 slot == VARYING_SLOT_CLIP_DIST0 || slot == VARYING_SLOT_CLIP_DIST1 ||
924 slot == VARYING_SLOT_VIEWPORT;
925 }
926
927 static bool
928 remove_nonbinning_output(nir_builder *b, nir_intrinsic_instr *intr, void *data)
929 {
930 if (intr->intrinsic != nir_intrinsic_store_output &&
931 intr->intrinsic != nir_intrinsic_store_per_view_output)
932 return false;
933
934 nir_io_semantics io = nir_intrinsic_io_semantics(intr);
935
936 if (output_slot_used_for_binning(io.location))
937 return false;
938
939 nir_instr_remove(&intr->instr);
940 return true;
941 }
942
943 static bool
944 lower_binning(nir_shader *s)
945 {
946 return nir_shader_intrinsics_pass(s, remove_nonbinning_output,
947 nir_metadata_control_flow, NULL);
948 }
949
950 nir_mem_access_size_align
951 ir3_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
952 uint8_t bit_size, uint32_t align,
953 uint32_t align_offset, bool offset_is_const,
954 enum gl_access_qualifier access, const void *cb_data)
955 {
956 align = nir_combined_align(align, align_offset);
957 assert(util_is_power_of_two_nonzero(align));
958
959 /* If we're only aligned to 1 byte, use 8-bit loads. If we're only
960 * aligned to 2 bytes, use 16-bit loads, unless we needed 8-bit loads due to
961 * the size.
962 */
963 if ((bytes & 1) || (align == 1))
964 bit_size = 8;
965 else if ((bytes & 2) || (align == 2))
966 bit_size = 16;
967 else if (bit_size >= 32)
968 bit_size = 32;
969
970 if (intrin == nir_intrinsic_load_ubo)
971 bit_size = 32;
972
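/* Examples (illustrative numbers only): bytes = 6 with align = 2 yields
 * bit_size = 16 and MIN2(6 / 2, 4) = 3 components; bytes = 16 with align >= 4
 * and a 32-bit source type yields a 32-bit vec4.
 */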
973 return (nir_mem_access_size_align){
974 .num_components = MAX2(1, MIN2(bytes / (bit_size / 8), 4)),
975 .bit_size = bit_size,
976 .align = bit_size / 8,
977 .shift = nir_mem_access_shift_method_scalar,
978 };
979 }
980
981 static bool
982 atomic_supported(const nir_instr *instr, const void *data)
983 {
984 /* No atomic 64b arithmetic is supported in A7XX so far */
985 return nir_instr_as_intrinsic(instr)->def.bit_size != 64;
986 }
987
988 void
989 ir3_nir_lower_variant(struct ir3_shader_variant *so,
990 const struct ir3_shader_nir_options *options,
991 nir_shader *s)
992 {
993 MESA_TRACE_FUNC();
994
995 if (ir3_shader_debug & IR3_DBG_DISASM) {
996 mesa_logi("----------------------");
997 nir_log_shaderi(s);
998 mesa_logi("----------------------");
999 }
1000
1001 bool progress = false;
1002
1003 progress |= OPT(s, nir_lower_io_to_scalar, nir_var_mem_ssbo,
1004 ir3_nir_should_scalarize_mem, so->compiler);
1005
1006 if (so->key.has_gs || so->key.tessellation) {
1007 switch (so->type) {
1008 case MESA_SHADER_VERTEX:
1009 NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
1010 so->key.tessellation);
1011 progress = true;
1012 break;
1013 case MESA_SHADER_TESS_CTRL:
1014 NIR_PASS_V(s, nir_lower_io_to_scalar,
1015 nir_var_shader_in | nir_var_shader_out, NULL, NULL);
1016 NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
1017 NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
1018 progress = true;
1019 break;
1020 case MESA_SHADER_TESS_EVAL:
1021 NIR_PASS_V(s, ir3_nir_lower_tess_eval, so, so->key.tessellation);
1022 if (so->key.has_gs)
1023 NIR_PASS_V(s, ir3_nir_lower_to_explicit_output, so,
1024 so->key.tessellation);
1025 progress = true;
1026 break;
1027 case MESA_SHADER_GEOMETRY:
1028 NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so);
1029 progress = true;
1030 break;
1031 default:
1032 break;
1033 }
1034 }
1035
1036 /* Note that it is intentional to use the VS lowering pass for GS, since we
1037 * lower GS into something that looks more like a VS in ir3_nir_lower_gs():
1038 */
1039 if (lower_ucp_vs(so)) {
1040 progress |= OPT(s, nir_lower_clip_vs, so->key.ucp_enables, false, true, NULL);
1041 } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
1042 if (so->key.ucp_enables && !so->compiler->has_clip_cull)
1043 progress |= OPT(s, nir_lower_clip_fs, so->key.ucp_enables, true, true);
1044 }
1045
1046 if (so->binning_pass) {
1047 if (OPT(s, lower_binning)) {
1048 progress = true;
1049
1050 /* outputs_written has changed. */
1051 nir_shader_gather_info(s, nir_shader_get_entrypoint(s));
1052 }
1053 }
1054
1055 /* Move large constant variables to the constants attached to the NIR
1056 * shader, which we will upload in the immediates range. This generates
1057 * amuls, so we need to clean those up after.
1058 *
1059 * Passing no size_align, we would get packed values, which if we end up
1060 * having to load with LDC would result in extra reads to unpack from
1061 * straddling loads. Align everything to vec4 to avoid that, though we
1062 * could theoretically do better.
1063 */
1064 OPT_V(s, nir_opt_large_constants, glsl_get_vec4_size_align_bytes,
1065 32 /* bytes */);
1066 progress |= OPT(s, ir3_nir_lower_load_constant, so);
1067
1068 /* Lower large temporaries to scratch, which in Qualcomm terms is private
1069 * memory, to avoid excess register pressure. This should happen after
1070 * nir_opt_large_constants, because loading from a UBO is much, much less
1071 * expensive.
1072 */
1073 if (so->compiler->has_pvtmem) {
1074 progress |= OPT(s, nir_lower_vars_to_scratch, nir_var_function_temp,
1075 16 * 16 /* bytes */,
1076 ir3_get_variable_size_align_bytes, glsl_get_natural_size_align_bytes);
1077 }
1078
1079 /* Lower scratch writemasks */
1080 progress |= OPT(s, nir_lower_wrmasks, should_split_wrmask, s);
1081 progress |= OPT(s, nir_lower_atomics, atomic_supported);
1082
1083 if (OPT(s, nir_lower_locals_to_regs, 1)) {
1084 progress = true;
1085
1086 /* Split 64b registers into two 32b ones. */
1087 OPT_V(s, ir3_nir_lower_64b_regs);
1088 }
1089
1090 nir_lower_mem_access_bit_sizes_options mem_bit_size_options = {
1091 .modes = nir_var_mem_constant | nir_var_mem_ubo |
1092 nir_var_mem_global | nir_var_mem_shared |
1093 nir_var_function_temp | nir_var_mem_ssbo,
1094 .callback = ir3_mem_access_size_align,
1095 };
1096
1097 progress |= OPT(s, nir_lower_mem_access_bit_sizes, &mem_bit_size_options);
1098 progress |= OPT(s, ir3_nir_lower_64b_global);
1099 progress |= OPT(s, ir3_nir_lower_64b_undef);
1100 progress |= OPT(s, nir_lower_int64);
1101 progress |= OPT(s, ir3_nir_lower_64b_intrinsics);
1102 progress |= OPT(s, nir_lower_64bit_phis);
1103
1104 /* Cleanup code leftover from lowering passes before opt_preamble */
1105 if (progress) {
1106 progress |= OPT(s, nir_opt_constant_folding);
1107 }
1108
1109 progress |= OPT(s, ir3_nir_opt_subgroups, so);
1110
1111 if (so->compiler->load_shader_consts_via_preamble)
1112 progress |= OPT(s, ir3_nir_lower_driver_params_to_ubo, so);
1113
1114 if (!so->binning_pass) {
1115 ir3_setup_const_state(s, so, ir3_const_state_mut(so));
1116 }
1117
1118 /* Do the preamble before analysing UBO ranges, because it's usually
1119 * higher-value and because it can result in eliminating some indirect UBO
1120 * accesses where otherwise we'd have to push the whole range. However we
1121 * have to lower the preamble after UBO lowering so that UBO lowering can
1122 * insert instructions in the preamble to push UBOs.
1123 */
1124 if (so->compiler->has_preamble &&
1125 !(ir3_shader_debug & IR3_DBG_NOPREAMBLE))
1126 progress |= OPT(s, ir3_nir_opt_preamble, so);
1127
1128 if (so->compiler->load_shader_consts_via_preamble)
1129 progress |= OPT(s, ir3_nir_lower_driver_params_to_ubo, so);
1130
1131 /* TODO: ldg.k might also work on a6xx */
1132 if (so->compiler->gen >= 7)
1133 progress |= OPT(s, ir3_nir_lower_const_global_loads, so);
1134
1135 if (!so->binning_pass)
1136 OPT_V(s, ir3_nir_analyze_ubo_ranges, so);
1137
1138 progress |= OPT(s, ir3_nir_lower_ubo_loads, so);
1139
1140 if (so->compiler->gen >= 7 &&
1141 !(ir3_shader_debug & (IR3_DBG_NOPREAMBLE | IR3_DBG_NODESCPREFETCH)))
1142 progress |= OPT(s, ir3_nir_opt_prefetch_descriptors, so);
1143
1144 if (so->shader_options.push_consts_type == IR3_PUSH_CONSTS_SHARED_PREAMBLE)
1145 progress |= OPT(s, ir3_nir_lower_push_consts_to_preamble, so);
1146
1147 progress |= OPT(s, ir3_nir_lower_preamble, so);
1148
1149 progress |= OPT(s, nir_lower_amul, ir3_glsl_type_size);
1150
1151 /* UBO offset lowering has to come after we've decided what will
1152 * be left as load_ubo
1153 */
1154 if (so->compiler->gen >= 6)
1155 progress |= OPT(s, nir_lower_ubo_vec4);
1156
1157 progress |= OPT(s, ir3_nir_lower_io_offsets);
1158
1159 if (!so->binning_pass) {
1160 ir3_const_alloc_all_reserved_space(&ir3_const_state_mut(so)->allocs);
1161 }
1162
1163 if (progress)
1164 ir3_optimize_loop(so->compiler, options, s);
1165
1166 /* verify that progress is always set */
1167 assert(!ir3_optimize_loop(so->compiler, options, s));
1168
1169 /* Fixup indirect load_const_ir3's which end up with a const base offset
1170 * which is too large to encode. Do this late(ish) so we actually
1171 * can differentiate indirect vs non-indirect.
1172 */
1173 if (OPT(s, ir3_nir_fixup_load_const_ir3))
1174 ir3_optimize_loop(so->compiler, options, s);
1175
1176 /* Do late algebraic optimization to turn add(a, neg(b)) back into
1177 * subs, then the mandatory cleanup after algebraic. Note that it may
1178 * produce fnegs, and if so then we need to keep running to squash
1179 * fneg(fneg(a)).
1180 */
1181 bool more_late_algebraic = true;
1182 while (more_late_algebraic) {
1183 more_late_algebraic = OPT(s, nir_opt_algebraic_late);
1184 if (!more_late_algebraic && so->compiler->gen >= 5) {
1185 /* Lowers texture operations that have only f2f16 or u2u16 called on
1186 * them to have a 16-bit destination. Also, lower 16-bit texture
1187 * coordinates that had been upconverted to 32-bits just for the
1188 * sampler to just be 16-bit texture sources.
1189 */
1190 struct nir_opt_tex_srcs_options opt_srcs_options = {
1191 .sampler_dims = ~0,
1192 .src_types = (1 << nir_tex_src_coord) |
1193 (1 << nir_tex_src_lod) |
1194 (1 << nir_tex_src_bias) |
1195 (1 << nir_tex_src_offset) |
1196 (1 << nir_tex_src_comparator) |
1197 (1 << nir_tex_src_min_lod) |
1198 (1 << nir_tex_src_ms_index) |
1199 (1 << nir_tex_src_ddx) |
1200 (1 << nir_tex_src_ddy),
1201 };
1202 struct nir_opt_16bit_tex_image_options opt_16bit_options = {
1203 .rounding_mode = nir_rounding_mode_rtz,
1204 .opt_tex_dest_types = nir_type_float,
1205 /* blob dumps have no half regs on pixel 2's ldib or stib, so only enable for a6xx+. */
1206 .opt_image_dest_types = so->compiler->gen >= 6 ?
1207 nir_type_float | nir_type_uint | nir_type_int : 0,
1208 .opt_image_store_data = so->compiler->gen >= 6,
1209 .opt_srcs_options_count = 1,
1210 .opt_srcs_options = &opt_srcs_options,
1211 };
1212 OPT(s, nir_opt_16bit_tex_image, &opt_16bit_options);
1213 }
1214 OPT_V(s, nir_opt_constant_folding);
1215 OPT_V(s, nir_copy_prop);
1216 OPT_V(s, nir_opt_dce);
1217 OPT_V(s, nir_opt_cse);
1218 }
1219
1220 OPT_V(s, nir_opt_sink, nir_move_const_undef);
1221
1222 if (ir3_shader_debug & IR3_DBG_DISASM) {
1223 mesa_logi("----------------------");
1224 nir_log_shaderi(s);
1225 mesa_logi("----------------------");
1226 }
1227
1228 nir_sweep(s);
1229 }
1230
1231 bool
1232 ir3_get_driver_param_info(const nir_shader *shader, nir_intrinsic_instr *intr,
1233 struct driver_param_info *param_info)
1234 {
1235 switch (intr->intrinsic) {
1236 case nir_intrinsic_load_base_workgroup_id:
1237 param_info->offset = IR3_DP_CS(base_group_x);
1238 break;
1239 case nir_intrinsic_load_num_workgroups:
1240 param_info->offset = IR3_DP_CS(num_work_groups_x);
1241 break;
1242 case nir_intrinsic_load_workgroup_size:
1243 param_info->offset = IR3_DP_CS(local_group_size_x);
1244 break;
1245 case nir_intrinsic_load_subgroup_size:
1246 if (shader->info.stage == MESA_SHADER_COMPUTE) {
1247 param_info->offset = IR3_DP_CS(subgroup_size);
1248 } else if (shader->info.stage == MESA_SHADER_FRAGMENT) {
1249 param_info->offset = IR3_DP_FS(subgroup_size);
1250 } else {
1251 return false;
1252 }
1253 break;
1254 case nir_intrinsic_load_subgroup_id_shift_ir3:
1255 param_info->offset = IR3_DP_CS(subgroup_id_shift);
1256 break;
1257 case nir_intrinsic_load_work_dim:
1258 param_info->offset = IR3_DP_CS(work_dim);
1259 break;
1260 case nir_intrinsic_load_base_vertex:
1261 case nir_intrinsic_load_first_vertex:
1262 param_info->offset = IR3_DP_VS(vtxid_base);
1263 break;
1264 case nir_intrinsic_load_is_indexed_draw:
1265 param_info->offset = IR3_DP_VS(is_indexed_draw);
1266 break;
1267 case nir_intrinsic_load_draw_id:
1268 param_info->offset = IR3_DP_VS(draw_id);
1269 break;
1270 case nir_intrinsic_load_base_instance:
1271 param_info->offset = IR3_DP_VS(instid_base);
1272 break;
1273 case nir_intrinsic_load_user_clip_plane: {
1274 uint32_t idx = nir_intrinsic_ucp_id(intr);
1275 param_info->offset = IR3_DP_VS(ucp[0].x) + 4 * idx;
1276 break;
1277 }
1278 case nir_intrinsic_load_tess_level_outer_default:
1279 param_info->offset = IR3_DP_TCS(default_outer_level_x);
1280 break;
1281 case nir_intrinsic_load_tess_level_inner_default:
1282 param_info->offset = IR3_DP_TCS(default_inner_level_x);
1283 break;
1284 case nir_intrinsic_load_frag_size_ir3:
1285 param_info->offset = IR3_DP_FS(frag_size);
1286 break;
1287 case nir_intrinsic_load_frag_offset_ir3:
1288 param_info->offset = IR3_DP_FS(frag_offset);
1289 break;
1290 case nir_intrinsic_load_frag_invocation_count:
1291 param_info->offset = IR3_DP_FS(frag_invocation_count);
1292 break;
1293 default:
1294 return false;
1295 }
1296
1297 return true;
1298 }
1299
1300 uint32_t
1301 ir3_nir_scan_driver_consts(struct ir3_compiler *compiler, nir_shader *shader,
1302 struct ir3_const_image_dims *image_dims)
1303 {
1304 uint32_t num_driver_params = 0;
1305 nir_foreach_function (function, shader) {
1306 if (!function->impl)
1307 continue;
1308
1309 nir_foreach_block (block, function->impl) {
1310 nir_foreach_instr (instr, block) {
1311 if (instr->type != nir_instr_type_intrinsic)
1312 continue;
1313
1314 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1315 unsigned idx;
1316
1317 if (image_dims) {
1318 switch (intr->intrinsic) {
1319 case nir_intrinsic_image_atomic:
1320 case nir_intrinsic_image_atomic_swap:
1321 case nir_intrinsic_image_load:
1322 case nir_intrinsic_image_store:
1323 case nir_intrinsic_image_size:
1324 /* a4xx gets these supplied by the hw directly (maybe CP?) */
1325 if (compiler->gen == 5 &&
1326 !(intr->intrinsic == nir_intrinsic_image_load &&
1327 !(nir_intrinsic_access(intr) & ACCESS_COHERENT))) {
1328 idx = nir_src_as_uint(intr->src[0]);
1329 if (image_dims->mask & (1 << idx))
1330 break;
1331 image_dims->mask |= (1 << idx);
1332 image_dims->off[idx] = image_dims->count;
1333 image_dims->count += 3; /* three const per */
1334 }
1335 break;
1336 default:
1337 break;
1338 }
1339 }
1340
1341 struct driver_param_info param_info;
1342 if (ir3_get_driver_param_info(shader, intr, &param_info)) {
1343 num_driver_params =
1344 MAX2(num_driver_params,
1345 param_info.offset + nir_intrinsic_dest_components(intr));
1346 }
1347 }
1348 }
1349 }
1350
1351 /* TODO: Provide a spot somewhere to safely upload unwanted values, and a way
1352 * to determine if they're wanted or not. For now we always make the whole
1353 * driver param range available, since the driver will always instruct the
1354 * hardware to upload these.
1355 */
1356 if (!compiler->has_shared_regfile &&
1357 shader->info.stage == MESA_SHADER_COMPUTE) {
1358 num_driver_params =
1359 MAX2(num_driver_params, IR3_DP_CS(workgroup_id_z) + 1);
1360 }
1361
1362 return num_driver_params;
1363 }
1364
1365 void
1366 ir3_const_alloc(struct ir3_const_allocations *const_alloc,
1367 enum ir3_const_alloc_type type, uint32_t size_vec4,
1368 uint32_t align_vec4)
1369 {
1370 struct ir3_const_allocation *alloc = &const_alloc->consts[type];
1371 assert(alloc->size_vec4 == 0);
1372
1373 const_alloc->max_const_offset_vec4 =
1374 align(const_alloc->max_const_offset_vec4, align_vec4);
1375 alloc->size_vec4 = size_vec4;
1376 alloc->offset_vec4 = const_alloc->max_const_offset_vec4;
1377 const_alloc->max_const_offset_vec4 += size_vec4;
1378 }
1379
1380 void
1381 ir3_const_reserve_space(struct ir3_const_allocations *const_alloc,
1382 enum ir3_const_alloc_type type, uint32_t size_vec4,
1383 uint32_t align_vec4)
1384 {
1385 struct ir3_const_allocation *alloc = &const_alloc->consts[type];
1386 assert(alloc->size_vec4 == 0 && alloc->reserved_size_vec4 == 0);
1387
1388 alloc->reserved_size_vec4 = size_vec4;
1389 alloc->reserved_align_vec4 = align_vec4;
1390 /* Be pessimistic here and assume the worst case alignment is needed */
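/* e.g. reserving 2 vec4 with align_vec4 = 4 sets aside 2 + 4 - 1 = 5 vec4 of
 * budget, since up to 3 vec4 of padding may be needed when the allocation is
 * finally placed.
 */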
1391 const_alloc->reserved_vec4 += size_vec4 + align_vec4 - 1;
1392 }
1393
1394 void
1395 ir3_const_free_reserved_space(struct ir3_const_allocations *const_alloc,
1396 enum ir3_const_alloc_type type)
1397 {
1398 struct ir3_const_allocation *alloc = &const_alloc->consts[type];
1399 assert(const_alloc->reserved_vec4 >= alloc->reserved_size_vec4);
1400
1401 const_alloc->reserved_vec4 -=
1402 alloc->reserved_size_vec4 + alloc->reserved_align_vec4 - 1;
1403 alloc->reserved_size_vec4 = 0;
1404 }
1405
1406 void
1407 ir3_const_alloc_all_reserved_space(struct ir3_const_allocations *const_alloc)
1408 {
1409 for (int i = 0; i < IR3_CONST_ALLOC_MAX; i++) {
1410 if (const_alloc->consts[i].reserved_size_vec4 > 0) {
1411 ir3_const_alloc(const_alloc, i,
1412 const_alloc->consts[i].reserved_size_vec4,
1413 const_alloc->consts[i].reserved_align_vec4);
1414 const_alloc->consts[i].reserved_size_vec4 = 0;
1415 }
1416 }
1417 const_alloc->reserved_vec4 = 0;
1418 }
1419
1420 void
1421 ir3_alloc_driver_params(struct ir3_const_allocations *const_alloc,
1422 uint32_t *num_driver_params,
1423 struct ir3_compiler *compiler,
1424 gl_shader_stage shader_stage)
1425 {
1426 if (*num_driver_params == 0)
1427 return;
1428
1429 /* num_driver_params in dwords. we only need to align to vec4s for the
1430 * common case of immediate constant uploads, but for indirect dispatch
1431 * the constants may also be indirect and so we have to align the area in
1432 * const space to that requirement.
1433 */
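/* Worked example (illustrative numbers only): 10 driver-param dwords round up
 * to 12 dwords = 3 vec4, which is then padded to a multiple of the compiler's
 * const_upload_unit (e.g. 4 vec4 if the unit is 4).
 */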
1434 *num_driver_params = align(*num_driver_params, 4);
1435 unsigned upload_unit = 1;
1436 if (shader_stage == MESA_SHADER_COMPUTE ||
1437 (*num_driver_params >= IR3_DP_VS(vtxid_base))) {
1438 upload_unit = compiler->const_upload_unit;
1439 }
1440
1441 /* offset cannot be 0 for vs params loaded by CP_DRAW_INDIRECT_MULTI */
1442 if (shader_stage == MESA_SHADER_VERTEX && compiler->gen >= 6)
1443 const_alloc->max_const_offset_vec4 =
1444 MAX2(const_alloc->max_const_offset_vec4, 1);
1445
1446 uint32_t driver_params_size_vec4 =
1447 align(*num_driver_params / 4, upload_unit);
1448 ir3_const_alloc(const_alloc, IR3_CONST_ALLOC_DRIVER_PARAMS,
1449 driver_params_size_vec4, upload_unit);
1450 }
1451
1452 /* Sets up the variant-dependent constant state for the ir3_shader.
1453 * The consts allocation flow is as follows:
1454 * 1) Turnip/Freedreno allocates consts required by corresponding API,
1455 * e.g. push const, inline uniforms, etc. Then passes ir3_const_allocations
1456 * into IR3.
1457 * 2) ir3_setup_const_state pre-allocates consts with non-negotiable size.
1458 * 3) IR3 lowerings afterwards allocate from the free space left.
1459 * 4) Allocate offsets for consts from step 2)
1460 */
1461 void
1462 ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
1463 struct ir3_const_state *const_state)
1464 {
1465 struct ir3_compiler *compiler = v->compiler;
1466 unsigned ptrsz = ir3_pointer_size(compiler);
1467
1468 const_state->num_driver_params =
1469 ir3_nir_scan_driver_consts(compiler, nir, &const_state->image_dims);
1470
1471 if ((compiler->gen < 5) && (v->stream_output.num_outputs > 0)) {
1472 const_state->num_driver_params =
1473 MAX2(const_state->num_driver_params, IR3_DP_VS(vtxcnt_max) + 1);
1474 }
1475
1476 const_state->num_ubos = nir->info.num_ubos;
1477
1478 assert((const_state->ubo_state.size % 16) == 0);
1479
1480 /* IR3_CONST_ALLOC_DRIVER_PARAMS could have been allocated earlier. */
1481 if (const_state->allocs.consts[IR3_CONST_ALLOC_DRIVER_PARAMS].size_vec4 == 0) {
1482 ir3_alloc_driver_params(&const_state->allocs,
1483 &const_state->num_driver_params, compiler,
1484 v->type);
1485 }
1486
1487 if (const_state->image_dims.count > 0) {
1488 ir3_const_reserve_space(&const_state->allocs, IR3_CONST_ALLOC_IMAGE_DIMS,
1489 align(const_state->image_dims.count, 4) / 4, 1);
1490 }
1491
1492 if (v->type == MESA_SHADER_KERNEL && v->cs.req_input_mem) {
1493 ir3_const_reserve_space(&const_state->allocs,
1494 IR3_CONST_ALLOC_KERNEL_PARAMS,
1495 align(v->cs.req_input_mem, 4) / 4, 1);
1496 }
1497
1498 if ((v->type == MESA_SHADER_VERTEX) && (compiler->gen < 5) &&
1499 v->stream_output.num_outputs > 0) {
1500 ir3_const_reserve_space(&const_state->allocs, IR3_CONST_ALLOC_TFBO,
1501 align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4, 1);
1502 }
1503
1504 if (!compiler->load_shader_consts_via_preamble) {
1505 switch (v->type) {
1506 case MESA_SHADER_TESS_CTRL:
1507 case MESA_SHADER_TESS_EVAL:
1508 ir3_const_reserve_space(&const_state->allocs,
1509 IR3_CONST_ALLOC_PRIMITIVE_PARAM, 2, 1);
1510 break;
1511 case MESA_SHADER_GEOMETRY:
1512 ir3_const_reserve_space(&const_state->allocs,
1513 IR3_CONST_ALLOC_PRIMITIVE_PARAM, 1, 1);
1514 break;
1515 default:
1516 break;
1517 }
1518 }
1519
1520 if (v->type == MESA_SHADER_VERTEX) {
1521 ir3_const_reserve_space(&const_state->allocs,
1522 IR3_CONST_ALLOC_PRIMITIVE_PARAM, 1, 1);
1523 }
1524
1525 if ((v->type == MESA_SHADER_TESS_CTRL || v->type == MESA_SHADER_TESS_EVAL ||
1526 v->type == MESA_SHADER_GEOMETRY)) {
1527 ir3_const_reserve_space(&const_state->allocs,
1528 IR3_CONST_ALLOC_PRIMITIVE_MAP,
1529 DIV_ROUND_UP(v->input_size, 4), 1);
1530 }
1531
1532 assert(const_state->allocs.max_const_offset_vec4 <= ir3_max_const(v));
1533 }
1534
1535 uint32_t
1536 ir3_const_state_get_free_space(const struct ir3_shader_variant *v,
1537 const struct ir3_const_state *const_state,
1538 uint32_t align_vec4)
1539 {
1540 uint32_t aligned_offset_vec4 =
1541 align(const_state->allocs.max_const_offset_vec4, align_vec4);
1542 uint32_t free_space_vec4 = ir3_max_const(v) - aligned_offset_vec4 -
1543 const_state->allocs.reserved_vec4;
1544 free_space_vec4 = ROUND_DOWN_TO(free_space_vec4, align_vec4);
1545 return free_space_vec4;
1546 }
1547