1 /*
2  * Copyright © 2021 Valve Corporation
3  * SPDX-License-Identifier: MIT
4  */
5 
6 #include "ir3_compiler.h"
7 #include "ir3_nir.h"
8 #include "nir_instr_set.h"
9 
10 /* Preamble optimization happens in two parts: first we generate the preamble
11  * using the generic NIR pass, then we set up the preamble sequence and inline
12  * the preamble into the main shader if there was a preamble. The first part
13  * should happen before UBO lowering, because we want to prefer more complex
14  * expressions over UBO loads, but the second part has to happen after UBO
15  * lowering because it may add copy instructions to the preamble.
16  */
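/* In this file, ir3_nir_opt_preamble() implements the first part (it wraps
 * nir_opt_preamble() with ir3-specific size/cost callbacks) and
 * ir3_nir_lower_preamble() implements the second, emitting the preamble
 * sequence and inlining the preamble function into the main shader.
 */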
17 
18 static void
19 def_size(nir_def *def, unsigned *size, unsigned *align)
20 {
21    unsigned bit_size = def->bit_size == 1 ? 32 : def->bit_size;
22    /* Due to the implicit const file promotion we want to expand 16-bit values
23     * to 32-bit so that the truncation in the main shader can hopefully be
24     * folded into the use.
25     */
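   /* Sizes are counted in 32-bit scalar slots: e.g. a vec4 of 32-bit values
    * is 4 slots, a vec2 of 64-bit values is 4 slots, and a vec3 of 16-bit
    * (or 1-bit) values is still 3 slots because of the promotion above.
    */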
26    *size = DIV_ROUND_UP(bit_size, 32) * def->num_components;
27    *align = 1;
28 }
29 
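/* Helpers for instr_cost(): the modifier-like ops below are treated as free
 * when every use can absorb them -- float abs/neg and narrowing conversions
 * on float-typed ALU sources, or a bitwise-not on the cat2 logic ops listed
 * in all_uses_bit() (see ir3_cat2_absneg()).  allow_src2 is false for fabs,
 * presumably because abs, unlike neg, cannot be folded into the third source
 * of three-source (cat3) instructions.
 */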
30 static bool
31 all_uses_float(nir_def *def, bool allow_src2)
32 {
33    nir_foreach_use_including_if (use, def) {
34       if (nir_src_is_if(use))
35          return false;
36 
37       nir_instr *use_instr = nir_src_parent_instr(use);
38       if (use_instr->type != nir_instr_type_alu)
39          return false;
40       nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
41       unsigned src_index = ~0;
42       for (unsigned i = 0; i < nir_op_infos[use_alu->op].num_inputs; i++) {
43          if (&use_alu->src[i].src == use) {
44             src_index = i;
45             break;
46          }
47       }
48 
49       assert(src_index != ~0);
50       nir_alu_type src_type =
51          nir_alu_type_get_base_type(nir_op_infos[use_alu->op].input_types[src_index]);
52 
53       if (src_type != nir_type_float || (src_index == 2 && !allow_src2))
54          return false;
55    }
56 
57    return true;
58 }
59 
60 static bool
61 all_uses_bit(nir_def *def)
62 {
63    nir_foreach_use_including_if (use, def) {
64       if (nir_src_is_if(use))
65          return false;
66 
67       nir_instr *use_instr = nir_src_parent_instr(use);
68       if (use_instr->type != nir_instr_type_alu)
69          return false;
70       nir_alu_instr *use_alu = nir_instr_as_alu(use_instr);
71 
72       /* See ir3_cat2_absneg() */
73       switch (use_alu->op) {
74       case nir_op_iand:
75       case nir_op_ior:
76       case nir_op_inot:
77       case nir_op_ixor:
78       case nir_op_bitfield_reverse:
79       case nir_op_ufind_msb:
80       case nir_op_ifind_msb:
81       case nir_op_find_lsb:
82       case nir_op_ishl:
83       case nir_op_ushr:
84       case nir_op_ishr:
85       case nir_op_bit_count:
86          continue;
87       default:
88          return false;
89       }
90    }
91 
92    return true;
93 }
94 
95 static float
96 instr_cost(nir_instr *instr, const void *data)
97 {
98    /* We'll assume wave64 here for simplicity and assume normal cat1-cat3 ops
99     * take 1 (normalized) cycle.
100     *
101     * See https://gitlab.freedesktop.org/freedreno/freedreno/-/wikis/A6xx-SP
102     *
103     * TODO: assume wave128 on fragment/compute shaders?
104     */
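   /* With these weights a scalar cat1-cat3 ALU op costs 1, a vec4 frcp costs
    * 16 and a texture fetch costs 8; nir_opt_preamble() compares the cost of
    * recomputing a value against rewrite_cost() below when deciding what is
    * worth hoisting.
    */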
105 
106    switch (instr->type) {
107    case nir_instr_type_alu: {
108       nir_alu_instr *alu = nir_instr_as_alu(instr);
109       unsigned components = alu->def.num_components;
110       switch (alu->op) {
111       /* cat4 */
112       case nir_op_frcp:
113       case nir_op_fsqrt:
114       case nir_op_frsq:
115       case nir_op_flog2:
116       case nir_op_fexp2:
117       case nir_op_fsin:
118       case nir_op_fcos:
119          return 4 * components;
120 
121       /* Instructions that become src modifiers. Note for conversions this is
122        * really an approximation.
123        *
124        * This prevents silly things like lifting a negate that would become a
125        * modifier.
126        */
127       case nir_op_f2f32:
128       case nir_op_f2f16:
129       case nir_op_f2fmp:
130       case nir_op_fneg:
131          return all_uses_float(&alu->def, true) ? 0 : 1 * components;
132 
133       case nir_op_fabs:
134          return all_uses_float(&alu->def, false) ? 0 : 1 * components;
135 
136       case nir_op_inot:
137          return all_uses_bit(&alu->def) ? 0 : 1 * components;
138 
139       /* Instructions that become vector split/collect */
140       case nir_op_vec2:
141       case nir_op_vec3:
142       case nir_op_vec4:
143       case nir_op_mov:
144          return 0;
145 
146       /* cat1-cat3 */
147       default:
148          return 1 * components;
149       }
150       break;
151    }
152 
153    case nir_instr_type_tex:
154       /* cat5 */
155       return 8;
156 
157    case nir_instr_type_intrinsic: {
158       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
159       switch (intrin->intrinsic) {
160       case nir_intrinsic_load_ubo: {
161          /* If the UBO and offset are constant, then UBO lowering should do a
162           * better job trying to lower this, and opt_preamble shouldn't try to
163           * duplicate it. However if it has a non-constant offset then we can
164           * avoid setting up a0.x etc. in the main shader and potentially have
165           * to push less.
166           */
167          bool const_ubo = nir_src_is_const(intrin->src[0]);
168          if (!const_ubo) {
169             nir_intrinsic_instr *rsrc = ir3_bindless_resource(intrin->src[0]);
170             if (rsrc)
171                const_ubo = nir_src_is_const(rsrc->src[0]);
172          }
173 
174          if (const_ubo && nir_src_is_const(intrin->src[1]))
175             return 0;
176 
177          /* TODO: get actual numbers for ldc */
178          return 8;
179       }
180 
181       case nir_intrinsic_load_ssbo:
182       case nir_intrinsic_load_ssbo_ir3:
183       case nir_intrinsic_get_ssbo_size:
184       case nir_intrinsic_image_load:
185       case nir_intrinsic_bindless_image_load:
186          /* cat5/isam */
187          return 8;
188 
189       /* By default assume it's a sysval or something */
190       default:
191          return 0;
192       }
193    }
194 
195    case nir_instr_type_phi:
196       /* Although we can often coalesce phis, the cost of a phi is a proxy for
197        * the cost of the if-else statement... If all phis are moved, then the
198        * branches move too. So this needs to have a nonzero cost, even if we're
199        * optimistic about coalescing.
200        *
201        * Value chosen empirically. On Rob's shader-db, cost of 2 performs better
202        * across the board than a cost of 1. Values greater than 2 do not seem to
203        * have any change, so sticking with 2.
204        */
205       return 2;
206 
207    default:
208       return 0;
209    }
210 }
211 
212 static float
213 rewrite_cost(nir_def *def, const void *data)
214 {
215    /* We always have to expand booleans */
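   /* Booleans live in the const file as 32-bit values, so every component
    * needs an i2b in the main shader when it is reloaded (see
    * ir3_nir_lower_preamble() below).
    */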
216    if (def->bit_size == 1)
217       return def->num_components;
218 
219    bool mov_needed = false;
220    nir_foreach_use (use, def) {
221       nir_instr *parent_instr = nir_src_parent_instr(use);
222       if (parent_instr->type != nir_instr_type_alu) {
223          mov_needed = true;
224          break;
225       } else {
226          nir_alu_instr *alu = nir_instr_as_alu(parent_instr);
227          if (alu->op == nir_op_vec2 ||
228              alu->op == nir_op_vec3 ||
229              alu->op == nir_op_vec4 ||
230              alu->op == nir_op_mov) {
231             mov_needed = true;
232             break;
233          } else {
234             /* Assume for non-moves that the const is folded into the src */
235          }
236       }
237    }
238 
239    return mov_needed ? def->num_components : 0;
240 }
241 
242 static bool
243 avoid_instr(const nir_instr *instr, const void *data)
244 {
245    if (instr->type != nir_instr_type_intrinsic)
246       return false;
247 
248    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
249 
250    return intrin->intrinsic == nir_intrinsic_bindless_resource_ir3;
251 }
252 
253 static bool
254 set_speculate(nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *_)
255 {
256    switch (intr->intrinsic) {
257    /* These instructions go through bounds-checked hardware descriptors so
258     * should be safe to speculate.
259     *
260     * TODO: This isn't necessarily true in Vulkan, where descriptors don't need
261     * to be filled out and bindless descriptor offsets aren't bounds checked.
262     * We may need to plumb this information through from turnip for correctness
263     * to avoid regressing freedreno codegen.
264     */
265    case nir_intrinsic_load_ubo:
266    case nir_intrinsic_load_ubo_vec4:
267    case nir_intrinsic_image_load:
268    case nir_intrinsic_image_samples_identical:
269    case nir_intrinsic_bindless_image_load:
270    case nir_intrinsic_load_ssbo:
271    case nir_intrinsic_load_ssbo_ir3:
272       nir_intrinsic_set_access(intr, nir_intrinsic_access(intr) |
273                                      ACCESS_CAN_SPECULATE);
274       return true;
275 
276    default:
277       return false;
278    }
279 }
280 
281 bool
282 ir3_nir_opt_preamble(nir_shader *nir, struct ir3_shader_variant *v)
283 {
284    unsigned max_size;
285    if (v->binning_pass) {
286       const struct ir3_const_state *const_state = ir3_const_state(v);
287       max_size =
288          const_state->allocs.consts[IR3_CONST_ALLOC_PREAMBLE].size_vec4 * 4;
289    } else {
290       const struct ir3_const_state *const_state = ir3_const_state(v);
291       max_size = ir3_const_state_get_free_space(
292                     v, const_state, v->compiler->const_upload_unit) * 4;
293    }
294 
295    if (max_size == 0)
296       return false;
297 
298    bool progress = nir_shader_intrinsics_pass(nir, set_speculate,
299                                               nir_metadata_control_flow, NULL);
300 
301    nir_opt_preamble_options options = {
302       .drawid_uniform = true,
303       .subgroup_size_uniform = true,
304       .load_workgroup_size_allowed = true,
305       .def_size = def_size,
306       .preamble_storage_size = max_size,
307       .instr_cost_cb = instr_cost,
308       .avoid_instr_cb = avoid_instr,
309       .rewrite_cost_cb = rewrite_cost,
310    };
311 
312    unsigned size = 0;
313    progress |= nir_opt_preamble(nir, &options, &size);
314 
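   /* "size" is reported in the same 32-bit scalar slots as def_size(), so
    * convert it to vec4 units and round up to the const upload granularity
    * before reserving space in the const file.
    */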
315    if (!v->binning_pass) {
316       uint32_t preamble_size_vec4 =
317          align(DIV_ROUND_UP(size, 4), v->compiler->const_upload_unit);
318       ir3_const_alloc(&ir3_const_state_mut(v)->allocs, IR3_CONST_ALLOC_PREAMBLE,
319                       preamble_size_vec4, v->compiler->const_upload_unit);
320    }
321 
322    return progress;
323 }
324 
325 /* This isn't nearly as comprehensive as what's done in nir_opt_preamble, but in
326  * various use-cases we need to hoist definitions into preambles outside of
327  * opt_preamble. Currently we only handle a few uncomplicated intrinsics.
328  */
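/* "Rematerializable" here means the def can be recomputed from scratch in the
 * preamble using only constants, UBO loads that are either speculatable or in
 * the top-level block, bindless resource handles, ALU ops on such values, and
 * values that were already stored to the preamble (when preamble_defs is
 * provided).
 */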
329 bool
330 ir3_def_is_rematerializable_for_preamble(nir_def *def,
331                                          nir_def **preamble_defs)
332 {
333    switch (def->parent_instr->type) {
334    case nir_instr_type_load_const:
335       return true;
336    case nir_instr_type_intrinsic: {
337       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
338       switch (intrin->intrinsic) {
339       case nir_intrinsic_load_ubo:
340          return ir3_def_is_rematerializable_for_preamble(intrin->src[0].ssa,
341                                                          preamble_defs) &&
342             ir3_def_is_rematerializable_for_preamble(intrin->src[1].ssa,
343                                                      preamble_defs) &&
344             (def->parent_instr->block->cf_node.parent->type ==
345              nir_cf_node_function ||
346              (nir_intrinsic_access(intrin) & ACCESS_CAN_SPECULATE));
347       case nir_intrinsic_bindless_resource_ir3:
348          return ir3_def_is_rematerializable_for_preamble(intrin->src[0].ssa,
349                                                          preamble_defs);
350       case nir_intrinsic_load_preamble:
351          return !!preamble_defs;
352       default:
353          return false;
354       }
355    }
356    case nir_instr_type_alu: {
357       nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);
358       for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
359          if (!ir3_def_is_rematerializable_for_preamble(alu->src[i].src.ssa,
360                                                        preamble_defs))
361             return false;
362       }
363       return true;
364    }
365    default:
366       return false;
367    }
368 }
369 
370 static nir_def *
371 _rematerialize_def(nir_builder *b, struct hash_table *remap_ht,
372                    struct set *instr_set, nir_def **preamble_defs,
373                    nir_def *def)
374 {
375    if (_mesa_hash_table_search(remap_ht, def->parent_instr))
376       return NULL;
377 
378    switch (def->parent_instr->type) {
379    case nir_instr_type_load_const:
380       break;
381    case nir_instr_type_intrinsic: {
382       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
383       if (intrin->intrinsic == nir_intrinsic_load_preamble) {
384          _mesa_hash_table_insert(remap_ht, def,
385                                  preamble_defs[nir_intrinsic_base(intrin)]);
386          return preamble_defs[nir_intrinsic_base(intrin)];
387       } else {
388          for (unsigned i = 0; i < nir_intrinsic_infos[intrin->intrinsic].num_srcs;
389               i++)
390             _rematerialize_def(b, remap_ht, instr_set, preamble_defs,
391                                intrin->src[i].ssa);
392       }
393       break;
394    }
395    case nir_instr_type_alu: {
396       nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);
397       for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
398          _rematerialize_def(b, remap_ht, instr_set, preamble_defs,
399                             alu->src[i].src.ssa);
400       break;
401    }
402    default:
403       unreachable("should not get here");
404    }
405 
406    nir_instr *instr = nir_instr_clone_deep(b->shader, def->parent_instr,
407                                            remap_ht);
408    if (instr_set) {
409       nir_instr *other_instr =
410          nir_instr_set_add_or_rewrite(instr_set, instr, NULL);
411       if (other_instr) {
412          instr = other_instr;
413          _mesa_hash_table_insert(remap_ht, def, nir_instr_def(other_instr));
414       } else {
415          nir_builder_instr_insert(b, instr);
416       }
417    } else {
418       nir_builder_instr_insert(b, instr);
419    }
420 
421    return nir_instr_def(instr);
422 }
423 
424 /* Hoist a given definition into the preamble. If "instr_set" is non-NULL,
425  * de-duplicate the hoisted definitions, and if "preamble_defs" is non-NULL then
426  * it is used to remap load_preamble instructions back to the original
427  * definition in the preamble, if the definition uses load_preamble
428  * instructions.
429  */
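/* Typical usage (this mirrors what ir3_nir_opt_prefetch_descriptors() below
 * does):
 *
 *    b = nir_builder_at(nir_after_impl(preamble));
 *    nir_def *new_def =
 *       ir3_rematerialize_def_for_preamble(&b, def, instr_set, preamble_defs);
 */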
430 
431 nir_def *
432 ir3_rematerialize_def_for_preamble(nir_builder *b, nir_def *def,
433                                    struct set *instr_set,
434                                    nir_def **preamble_defs)
435 {
436    struct hash_table *remap_ht = _mesa_pointer_hash_table_create(NULL);
437 
438    nir_def *new_def =
439       _rematerialize_def(b, remap_ht, instr_set, preamble_defs, def);
440 
441    _mesa_hash_table_destroy(remap_ht, NULL);
442 
443    return new_def;
444 }
445 
446 
447 static void
448 get_descriptors(nir_instr *instr, nir_def **descs)
449 {
450    if (instr->type == nir_instr_type_tex) {
451       nir_tex_instr *tex = nir_instr_as_tex(instr);
452       /* TODO: handle non-bindless tex instructions. These are more complicated,
453        * because of the implicit addition in the instruction.
454        */
455       int texture_index =
456          nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);
457       int sampler_index =
458          nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle);
459       if (texture_index >= 0)
460          descs[0] = tex->src[texture_index].src.ssa;
461       if (sampler_index >= 0)
462          descs[1] = tex->src[sampler_index].src.ssa;
463    } else if (instr->type == nir_instr_type_intrinsic) {
464       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
465       switch (intrin->intrinsic) {
466       case nir_intrinsic_load_ssbo:
467       case nir_intrinsic_load_ubo:
468       case nir_intrinsic_ssbo_atomic:
469       case nir_intrinsic_ssbo_atomic_swap:
470       case nir_intrinsic_get_ssbo_size:
471       case nir_intrinsic_image_load:
472       case nir_intrinsic_bindless_image_load:
473       case nir_intrinsic_image_store:
474       case nir_intrinsic_bindless_image_store:
475       case nir_intrinsic_image_atomic:
476       case nir_intrinsic_bindless_image_atomic:
477       case nir_intrinsic_image_size:
478       case nir_intrinsic_bindless_image_size:
479          descs[0] = intrin->src[0].ssa;
480          break;
481       case nir_intrinsic_store_ssbo:
482          descs[0] = intrin->src[1].ssa;
483          break;
484       default:
485          break;
486       }
487    }
488 }
489 
490 #define MAX_PREFETCHES 32
491 
492 struct prefetches {
493    nir_def *prefetches[MAX_PREFETCHES];
494    unsigned num_prefetches;
495 };
496 
497 static bool
498 is_already_prefetched(struct prefetches *prefetches, nir_def *def)
499 {
500    for (unsigned i = 0; i < prefetches->num_prefetches; i++) {
501       if (prefetches->prefetches[i] == def)
502          return true;
503    }
504 
505    return false;
506 }
507 
508 static void
509 add_prefetch(struct prefetches *prefetches, nir_def *def)
510 {
511    assert(prefetches->num_prefetches < MAX_PREFETCHES);
512    prefetches->prefetches[prefetches->num_prefetches++] = def;
513 }
514 
515 struct prefetch_state {
516    struct prefetches tex, sampler;
517 };
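/* UBO/SSBO/image prefetches share the texture budget: the intrinsic path in
 * emit_descriptor_prefetch() tracks them in state->tex as well.
 */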
518 
519 static bool
520 emit_descriptor_prefetch(nir_builder *b, nir_instr *instr, nir_def **descs,
521                          struct prefetch_state *state)
522 {
523    if (instr->type == nir_instr_type_tex) {
524       nir_tex_instr *tex = nir_instr_as_tex(instr);
525       int sampler_index =
526          nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle);
527       int texture_index =
528          nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);
529 
530       /* For texture instructions, prefetch if at least one source hasn't been
531        * prefetched already. For example, the same sampler may be used with
532        * different textures, and we still want to prefetch the texture
533        * descriptor if we've already prefetched the sampler descriptor.
534        */
535 
536       bool tex_already_prefetched = is_already_prefetched(&state->tex, descs[0]);
537 
538       if (!tex_already_prefetched &&
539           state->tex.num_prefetches == MAX_PREFETCHES)
540          return false;
541 
542       assert(texture_index >= 0);
543       if (sampler_index >= 0) {
544          bool sampler_already_prefetched =
545             is_already_prefetched(&state->sampler, descs[1]);
546 
547          if (!sampler_already_prefetched &&
548              state->sampler.num_prefetches == MAX_PREFETCHES)
549             return false;
550 
551          if (tex_already_prefetched && sampler_already_prefetched)
552             return false;
553 
554          if (!tex_already_prefetched)
555             add_prefetch(&state->tex, descs[0]);
556          if (!sampler_already_prefetched)
557             add_prefetch(&state->sampler, descs[1]);
558 
559          nir_prefetch_sam_ir3(b, descs[0], descs[1]);
560       } else {
561          if (tex_already_prefetched)
562             return false;
563 
564          add_prefetch(&state->tex, descs[0]);
565          nir_prefetch_tex_ir3(b, descs[0]);
566       }
567    } else {
568       assert(instr->type == nir_instr_type_intrinsic);
569 
570       if (state->tex.num_prefetches == MAX_PREFETCHES)
571          return false;
572 
573       if (is_already_prefetched(&state->tex, descs[0]))
574          return false;
575 
576       add_prefetch(&state->tex, descs[0]);
577 
578       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
579       if (intrin->intrinsic == nir_intrinsic_load_ubo)
580          nir_prefetch_ubo_ir3(b, descs[0]);
581       else
582          nir_prefetch_tex_ir3(b, descs[0]);
583    }
584 
585    return true;
586 }
587 
588 static unsigned
589 get_preamble_offset(nir_def *def)
590 {
591    return nir_intrinsic_base(nir_instr_as_intrinsic(def->parent_instr));
592 }
593 
594 /* Prefetch descriptors in the preamble. This is an optimization introduced on
595  * a7xx, mainly useful when the preamble is an early preamble, and replaces the
596  * use of CP_LOAD_STATE on a6xx to prefetch descriptors in HLSQ.
597  */
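/* The pass works in three steps: collect the defs already stored to the
 * preamble (so offset math hoisted by opt_preamble can be reused), scan the
 * main shader for instructions that consume descriptors, and for each one
 * rematerialize the descriptor in the preamble and emit a
 * prefetch_{sam,tex,ubo}_ir3 intrinsic, stopping once both prefetch tables
 * are full.
 */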
598 
599 bool
600 ir3_nir_opt_prefetch_descriptors(nir_shader *nir, struct ir3_shader_variant *v)
601 {
602    const struct ir3_const_state *const_state = ir3_const_state(v);
603 
604    nir_function_impl *main = nir_shader_get_entrypoint(nir);
605    struct set *instr_set = nir_instr_set_create(NULL);
606    nir_function_impl *preamble = main->preamble ? main->preamble->impl : NULL;
607    nir_builder b;
608    bool progress = false;
609    struct prefetch_state state = {};
610 
611    nir_def **preamble_defs =
612       calloc(const_state->allocs.consts[IR3_CONST_ALLOC_PREAMBLE].size_vec4 * 4,
613              sizeof(nir_def *));
614 
615    /* Collect preamble defs. This is useful if the computation of the offset has
616     * already been hoisted to the preamble.
617     */
618    if (preamble) {
619       nir_foreach_block (block, preamble) {
620          nir_foreach_instr (instr, block) {
621             if (instr->type != nir_instr_type_intrinsic)
622                continue;
623 
624             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
625 
626             if (intrin->intrinsic != nir_intrinsic_store_preamble)
627                continue;
628 
629             assert(
630                nir_intrinsic_base(intrin) <
631                const_state->allocs.consts[IR3_CONST_ALLOC_PREAMBLE].size_vec4 * 4);
632             preamble_defs[nir_intrinsic_base(intrin)] = intrin->src[0].ssa;
633          }
634       }
635    }
636 
637    nir_foreach_block (block, main) {
638       nir_foreach_instr (instr, block) {
639          nir_def *descs[2] = { NULL, NULL };
640          nir_def *preamble_descs[2] = { NULL, NULL };
641          get_descriptors(instr, descs);
642 
643          /* We must have found at least one descriptor */
644          if (!descs[0] && !descs[1])
645             continue;
646 
647          /* The instruction itself must be hoistable.
648           * TODO: If the descriptor is statically referenced and in-bounds, then
649           * we should be able to hoist the descriptor load even if the
650           * descriptor contents aren't guaranteed. This would require more
651           * plumbing.
652           * TODO: Textures. This is broken in nir_opt_preamble at the moment and
653           * handling them would also require more plumbing.
654           */
655          if (instr->type == nir_instr_type_intrinsic &&
656              nir_intrinsic_has_access(nir_instr_as_intrinsic(instr)) &&
657              !(nir_intrinsic_access(nir_instr_as_intrinsic(instr)) &
658                ACCESS_CAN_SPECULATE) &&
659              block->cf_node.parent->type != nir_cf_node_function)
660             continue;
661 
662          /* Each descriptor must be rematerializable */
663          if (descs[0] &&
664              !ir3_def_is_rematerializable_for_preamble(descs[0], preamble_defs))
665             continue;
666          if (descs[1] &&
667              !ir3_def_is_rematerializable_for_preamble(descs[1], preamble_defs))
668             continue;
669 
670          /* If the preamble hasn't been created then this descriptor isn't a
671           * duplicate and we will definitely insert an instruction, so create
672           * the preamble if it hasn't already been created.
673           */
674          if (!preamble) {
675             preamble = nir_shader_get_preamble(nir);
676          }
677 
678          b = nir_builder_at(nir_after_impl(preamble));
679 
680          /* Materialize descriptors for the prefetch. Note that we deduplicate
681           * descriptors so that we don't blow our budget when repeatedly loading
682           * from the same descriptor, even if the calculation of the descriptor
683           * offset hasn't been CSE'd because the accesses are in different
684           * blocks. This is common because we emit the bindless_resource_ir3
685           * intrinsic right before the access.
686           */
687          for (unsigned i = 0; i < 2; i++) {
688             if (!descs[i])
689                continue;
690 
691             preamble_descs[i] =
692                ir3_rematerialize_def_for_preamble(&b, descs[i], instr_set,
693                                                   preamble_defs);
694          }
695 
696          progress |= emit_descriptor_prefetch(&b, instr, preamble_descs, &state);
697 
698          if (state.sampler.num_prefetches == MAX_PREFETCHES &&
699              state.tex.num_prefetches == MAX_PREFETCHES)
700             goto finished;
701       }
702    }
703 
704 finished:
705    nir_metadata_preserve(main, nir_metadata_all);
706    if (preamble) {
707       nir_metadata_preserve(preamble,
708                             nir_metadata_block_index |
709                             nir_metadata_dominance);
710    }
711    nir_instr_set_destroy(instr_set);
712    free(preamble_defs);
713    return progress;
714 }
715 
716 bool
717 ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v)
718 {
719    nir_function_impl *main = nir_shader_get_entrypoint(nir);
720 
721    if (!main->preamble)
722       return false;
723 
724    nir_function_impl *preamble = main->preamble->impl;
725 
726    /* First, lower load/store_preamble. */
727    const struct ir3_const_state *const_state = ir3_const_state(v);
728    unsigned preamble_base =
729       const_state->allocs.consts[IR3_CONST_ALLOC_PREAMBLE].offset_vec4 * 4;
730    unsigned preamble_size =
731       const_state->allocs.consts[IR3_CONST_ALLOC_PREAMBLE].size_vec4 * 4;
732 
733    BITSET_DECLARE(promoted_to_float, preamble_size);
734    memset(promoted_to_float, 0, sizeof(promoted_to_float));
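   /* promoted_to_float records which 16-bit slots were widened as floats on
    * the load side (f2f16) so that the matching store_preamble below widens
    * with f2f32 rather than u2u32, keeping both sides of the const file
    * round-trip consistent.  The main shader is walked first so the bitset is
    * populated before the preamble stores are lowered.
    */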
735 
736    nir_builder builder_main = nir_builder_create(main);
737    nir_builder *b = &builder_main;
738 
739    nir_foreach_block (block, main) {
740       nir_foreach_instr_safe (instr, block) {
741          if (instr->type != nir_instr_type_intrinsic)
742             continue;
743 
744          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
745          if (intrin->intrinsic != nir_intrinsic_load_preamble)
746             continue;
747 
748          nir_def *dest = &intrin->def;
749 
750          unsigned offset = preamble_base + nir_intrinsic_base(intrin);
751          b->cursor = nir_before_instr(instr);
752 
753          nir_def *new_dest = nir_load_const_ir3(
754             b, dest->num_components, 32, nir_imm_int(b, 0), .base = offset);
755 
756          if (dest->bit_size == 1) {
757             new_dest = nir_i2b(b, new_dest);
758          } else if (dest->bit_size != 32) {
759             if (all_uses_float(dest, true)) {
760                assert(dest->bit_size == 16);
761                new_dest = nir_f2f16(b, new_dest);
762                BITSET_SET(promoted_to_float, nir_intrinsic_base(intrin));
763             } else {
764                new_dest = nir_u2uN(b, new_dest, dest->bit_size);
765             }
766          }
767 
768          nir_def_rewrite_uses(dest, new_dest);
769          nir_instr_remove(instr);
770          nir_instr_free(instr);
771       }
772    }
773 
774    nir_builder builder_preamble = nir_builder_create(preamble);
775    b = &builder_preamble;
776 
777    nir_foreach_block (block, preamble) {
778       nir_foreach_instr_safe (instr, block) {
779          if (instr->type != nir_instr_type_intrinsic)
780             continue;
781 
782          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
783          if (intrin->intrinsic != nir_intrinsic_store_preamble)
784             continue;
785 
786          nir_def *src = intrin->src[0].ssa;
787          unsigned offset = preamble_base + nir_intrinsic_base(intrin);
788 
789          b->cursor = nir_before_instr(instr);
790 
791          if (src->bit_size == 1)
792             src = nir_b2i32(b, src);
793          if (src->bit_size != 32) {
794             if (BITSET_TEST(promoted_to_float, nir_intrinsic_base(intrin))) {
795                assert(src->bit_size == 16);
796                src = nir_f2f32(b, src);
797             } else {
798                src = nir_u2u32(b, src);
799             }
800          }
801 
802          nir_store_const_ir3(b, src, .base = offset);
803          nir_instr_remove(instr);
804          nir_instr_free(instr);
805       }
806    }
807 
808    /* Now, create the preamble sequence and move the preamble into the main
809     * shader:
810     *
811     * if (preamble_start_ir3()) {
812     *    if (subgroupElect()) {
813     *       preamble();
814     *       preamble_end_ir3();
815     *    }
816     * }
817     * ...
818     */
819 
820    /* @decl_regs need to stay in the first block. */
821    b->cursor = nir_after_reg_decls(main);
822 
823    nir_if *outer_if = nir_push_if(b, nir_preamble_start_ir3(b, 1));
824    {
825       nir_if *inner_if = nir_push_if(b, nir_elect_any_ir3(b, 1));
826       {
827          nir_call_instr *call = nir_call_instr_create(nir, main->preamble);
828          nir_builder_instr_insert(b, &call->instr);
829          nir_preamble_end_ir3(b);
830       }
831       nir_pop_if(b, inner_if);
832    }
833    nir_pop_if(b, outer_if);
834 
835    nir_inline_functions(nir);
836    exec_node_remove(&main->preamble->node);
837    main->preamble = NULL;
838 
839    nir_metadata_preserve(main, nir_metadata_none);
840    return true;
841 }
842