/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

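/* Computes the [start, end) byte range of a UBO/global load, rounded out to
 * whole upload units (alignment counts 16-byte units, hence alignment * 16).
 * For example, with alignment == 1, a load covering bytes 20..27
 * (offset = 20, size = 8) yields:
 *
 *    r->start = ROUND_DOWN_TO(20, 16) = 16
 *    r->end   = ALIGN(28, 16)         = 32
 */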
static inline bool
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr,
                   uint32_t alignment, struct ir3_ubo_range *r)
{
   uint32_t offset = nir_intrinsic_range_base(instr);
   uint32_t size = nir_intrinsic_range(instr);

   if (instr->intrinsic == nir_intrinsic_load_global_ir3) {
      offset *= 4;
      size *= 4;
   }

   /* If the offset is constant, the range is trivial (and NIR may not have
    * figured it out).
    */
   if (nir_src_is_const(instr->src[1])) {
      offset = nir_src_as_uint(instr->src[1]);
      if (instr->intrinsic == nir_intrinsic_load_global_ir3)
         offset *= 4;
      size = nir_intrinsic_dest_components(instr) * 4;
   }

   /* If we haven't figured out the range accessed in the UBO, bail. */
   if (size == ~0)
      return false;

   r->start = ROUND_DOWN_TO(offset, alignment * 16);
   r->end = ALIGN(offset + size, alignment * 16);

   return true;
}

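/* Fills in the ir3_ubo_info key identifying the buffer a load comes from.
 * Three cases are handled: ir3 global loads (keyed by their base pointer),
 * directly indexed UBOs, and bindless UBOs with a constant descriptor index.
 * Dynamically indexed UBOs are rejected.
 */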
static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (instr->intrinsic == nir_intrinsic_load_global_ir3) {
      ubo->global_base = instr->src[0].ssa;
      ubo->block = 0;
      ubo->bindless_base = 0;
      ubo->bindless = false;
      ubo->global = true;
      return true;
   } else if (nir_src_is_const(instr->src[0])) {
      ubo->global_base = NULL;
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      ubo->global = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->global_base = NULL;
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         ubo->global = false;
         return true;
      }
   }
   return false;
}

/**
 * Finds the given instruction's UBO load in the UBO upload plan, if any.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state,
                   struct ir3_ubo_range *r)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (!memcmp(&range->ubo, &ubo, sizeof(ubo)) && r->start >= range->start &&
          r->end <= range->end) {
         return range;
      }
   }

   return NULL;
}

/**
 * Merges together neighboring/overlapping ranges in the range plan with a
 * newly updated range.
 */
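/* For example, if the plan holds [0, 32) and [48, 64) for some UBO and the
 * range at 'index' has just grown to [0, 48), the second slot gets folded in
 * and the plan ends up with a single [0, 64) range.
 */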
static void
merge_neighbors(struct ir3_ubo_analysis_state *state, int index)
{
   struct ir3_ubo_range *a = &state->range[index];

   /* index is always the first slot that would have neighbored/overlapped with
    * the new range.
    */
   for (int i = index + 1; i < state->num_enabled; i++) {
      struct ir3_ubo_range *b = &state->range[i];
      if (memcmp(&a->ubo, &b->ubo, sizeof(a->ubo)))
         continue;

      if (a->start > b->end || a->end < b->start)
         continue;

      /* Merge B into A. */
      a->start = MIN2(a->start, b->start);
      a->end = MAX2(a->end, b->end);

      /* Swap the last enabled range into B's now unused slot */
      *b = state->range[--state->num_enabled];
   }
}

/**
 * During the first pass over the shader, makes the plan of which UBO upload
 * should include the range covering this UBO load.
 *
 * We are passed in an upload_remaining of how much space is left for us in
 * the const file, and we make sure our plan doesn't exceed that.
 */
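/* For example, with an existing plan range of [16, 48) and a new load covering
 * [32, 64), the merged range becomes [16, 64) and only the 16 newly added
 * bytes are charged against *upload_remaining.
 */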
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment,
                  uint32_t *upload_remaining)
{
   struct ir3_ubo_info ubo = {};
   if (!get_ubo_info(instr, &ubo))
      return;

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(nir, instr, alignment, &r))
      return;

   /* See if there's an existing range for this UBO we want to merge into. */
   for (int i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *plan_r = &state->range[i];
      if (memcmp(&plan_r->ubo, &ubo, sizeof(ubo)))
         continue;

      /* Don't extend existing uploads unless they're
       * neighboring/overlapping.
       */
      if (r.start > plan_r->end || r.end < plan_r->start)
         continue;

      r.start = MIN2(r.start, plan_r->start);
      r.end = MAX2(r.end, plan_r->end);

      uint32_t added = (plan_r->start - r.start) + (r.end - plan_r->end);
      if (added >= *upload_remaining)
         return;

      plan_r->start = r.start;
      plan_r->end = r.end;
      *upload_remaining -= added;

      merge_neighbors(state, i);
      return;
   }

   if (state->num_enabled == ARRAY_SIZE(state->range))
      return;

   uint32_t added = r.end - r.start;
   if (added >= *upload_remaining)
      return;

   struct ir3_ubo_range *plan_r = &state->range[state->num_enabled++];
   plan_r->ubo = ubo;
   plan_r->start = r.start;
   plan_r->end = r.end;
   *upload_remaining -= added;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset,
 * 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset,
 * 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos;
   }
}

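/* Rewrites a load_ubo/load_global_ir3 that is covered by the upload plan into
 * a load_uniform from the const file.  As a rough example (assuming the plan
 * uploaded the UBO starting at range->start == 0 to const byte offset
 * range->offset == 64), a 4-component load_ubo at byte offset 32 becomes a
 * load_uniform with a dword offset of 32 >> 2 == 8 and .base == 64 / 4 == 16.
 */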
static bool
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          const struct ir3_ubo_analysis_state *state,
                          int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(b->shader, instr, alignment, &r)) {
      if (instr->intrinsic == nir_intrinsic_load_ubo)
         track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state, &r);
   if (!range) {
      if (instr->intrinsic == nir_intrinsic_load_ubo)
         track_ubo_use(instr, b, num_ubos);
      return false;
   }

   nir_def *ubo_offset = instr->src[1].ssa;
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   nir_def *uniform_offset = ubo_offset;

   if (instr->intrinsic == nir_intrinsic_load_ubo) {
      /* UBO offset is in bytes, but uniform offset is in units of
       * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
       * offset is in units of 16 bytes, so we need to multiply by 4. And
       * also the same for the constant part of the offset:
       */
      const int shift = -2;
      nir_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
      if (new_offset) {
         uniform_offset = new_offset;
      } else {
         uniform_offset = shift > 0
                             ? nir_ishl_imm(b, ubo_offset, shift)
                             : nir_ushr_imm(b, ubo_offset, -shift);
      }
   }

   assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset.  If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away)
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_def *uniform =
      nir_load_uniform(b, instr->num_components, instr->def.bit_size,
                       uniform_offset, .base = const_offset);

   nir_def_rewrite_uses(&instr->def, uniform);

   nir_instr_remove(&instr->instr);

   return true;
}

/* This isn't nearly as comprehensive as what's done in nir_opt_preamble, but we
 * need to hoist the load_global base into the preamble. Currently the only user
 * is turnip with inline uniforms, so we can be simple and only handle a few
 * uncomplicated intrinsics.
 *
 * TODO: Fold what this pass does into opt_preamble, which will give us a better
 * heuristic for what to push and we won't need this.
 */
static bool
def_is_rematerializable(nir_def *def)
{
   switch (def->parent_instr->type) {
   case nir_instr_type_load_const:
      return true;
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo:
         return def_is_rematerializable(intrin->src[0].ssa) &&
            def_is_rematerializable(intrin->src[1].ssa);
      case nir_intrinsic_bindless_resource_ir3:
         return def_is_rematerializable(intrin->src[0].ssa);
      default:
         return false;
      }
   }
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (!def_is_rematerializable(alu->src[i].src.ssa))
            return false;
      }
      return true;
   }
   default:
      return false;
   }
}

static nir_def *
_rematerialize_def(nir_builder *b, struct hash_table *remap_ht,
                   nir_def *def)
{
   if (_mesa_hash_table_search(remap_ht, def->parent_instr))
      return NULL;

   switch (def->parent_instr->type) {
   case nir_instr_type_load_const:
      break;
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
      for (unsigned i = 0; i < nir_intrinsic_infos[intrin->intrinsic].num_srcs;
           i++)
         _rematerialize_def(b, remap_ht, intrin->src[i].ssa);
      break;
   }
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
         _rematerialize_def(b, remap_ht, alu->src[i].src.ssa);
      break;
   }
   default:
      unreachable("should not get here");
   }

   nir_instr *instr = nir_instr_clone_deep(b->shader, def->parent_instr,
                                           remap_ht);
   nir_builder_instr_insert(b, instr);
   return nir_instr_def(instr);
}

static nir_def *
rematerialize_def(nir_builder *b, nir_def *def)
{
   struct hash_table *remap_ht = _mesa_pointer_hash_table_create(NULL);

   nir_def *new_def = _rematerialize_def(b, remap_ht, def);

   _mesa_hash_table_destroy(remap_ht, NULL);

   return new_def;
}

static bool
rematerialize_load_global_bases(nir_shader *nir,
                                struct ir3_ubo_analysis_state *state)
{
   bool has_load_global = false;
   for (unsigned i = 0; i < state->num_enabled; i++) {
      if (state->range[i].ubo.global) {
         has_load_global = true;
         break;
      }
   }

   if (!has_load_global)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *range = &state->range[i];

      if (!range->ubo.global)
         continue;

      range->ubo.global_base = rematerialize_def(b, range->ubo.global_base);
   }

   return true;
}

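/* Emits preamble code that copies each planned global-memory range into the
 * const file, using copy_global_to_uniform_ir3 (ldg.k) when the destination
 * const offset fits its encoding (const_offset < 256), and falling back to a
 * plain load_global_ir3 + store_uniform_ir3 pair otherwise.
 */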
static bool
copy_global_to_uniform(nir_shader *nir, struct ir3_ubo_analysis_state *state)
{
   if (state->num_enabled == 0)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      assert(range->ubo.global);

      nir_def *base = rematerialize_def(b, range->ubo.global_base);
      unsigned start = range->start;
      if (start > (1 << 10)) {
         /* This is happening pretty late, so we need to add the offset
          * manually ourselves.
          */
         nir_def *start_val = nir_imm_int(b, start);
         nir_def *base_lo = nir_channel(b, base, 0);
         nir_def *base_hi = nir_channel(b, base, 1);
         base_lo = nir_iadd(b, base_lo, start_val);
         /* Carry out of the low 32-bit add: the sum wrapped iff it is now
          * less than the value we added.
          */
         nir_def *carry = nir_b2i32(b, nir_ult(b, base_lo, start_val));
         base_hi = nir_iadd(b, base_hi, carry);
         base = nir_vec2(b, base_lo, base_hi);
         start = 0;
      }

      unsigned size = (range->end - range->start);
      for (unsigned offset = 0; offset < size; offset += 16) {
         unsigned const_offset = range->offset / 4 + offset / 4;
         if (const_offset < 256) {
            nir_copy_global_to_uniform_ir3(b, base,
                                           .base = start + offset,
                                           .range_base = const_offset,
                                           .range = 1);
         } else {
            /* It seems that the a1.x format doesn't work, so we need to
             * decompose the ldg.k into ldg + stc.
             */
            nir_def *load =
               nir_load_global_ir3(b, 4, 32, base,
                                   nir_imm_int(b, (start + offset) / 4));
            nir_store_uniform_ir3(b, load, .base = const_offset);
         }
      }
   }

   return true;
}

static bool
copy_ubo_to_uniform(nir_shader *nir, const struct ir3_const_state *const_state,
                    bool const_data_via_cp)
{
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   if (state->num_enabled == 0)
      return false;

   if (state->num_enabled == 1 &&
       !state->range[0].ubo.bindless &&
       state->range[0].ubo.block == const_state->consts_ubo.idx &&
       const_data_via_cp)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];

      /* The constant_data UBO is pushed in a different path from normal
       * uniforms, and the state is set up earlier, so it makes more sense to
       * let the CP do it for us.
       */
      if (!range->ubo.bindless &&
          range->ubo.block == const_state->consts_ubo.idx &&
          const_data_via_cp)
         continue;

      nir_def *ubo = nir_imm_int(b, range->ubo.block);
      if (range->ubo.bindless) {
         ubo = nir_bindless_resource_ir3(b, 32, ubo,
                                         .desc_set = range->ubo.bindless_base);
      }

      /* ldc.k has a range of only 256, but there are 512 vec4 constants.
       * Therefore we may have to split a large copy in two.
       */
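      /* e.g. a 300-vec4 range is emitted as two copies of 256 and 44 vec4s. */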
      unsigned size = (range->end - range->start) / 16;
      for (unsigned offset = 0; offset < size; offset += 256) {
         nir_copy_ubo_to_uniform_ir3(b, ubo, nir_imm_int(b, range->start / 16 +
                                                         offset),
                                     .base = range->offset / 4 + offset * 4,
                                     .range = MIN2(size - offset, 256));
      }
   }

   return true;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* nir_lower_ubo_vec4 happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_vec4);

   return op == nir_intrinsic_load_ubo;
}

static bool
instr_is_load_const(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
   nir_intrinsic_op op = intrin->intrinsic;

   if (op != nir_intrinsic_load_global_ir3)
      return false;

   /* TODO handle non-aligned accesses */
   if (nir_intrinsic_align_mul(intrin) < 16 ||
       nir_intrinsic_align_offset(intrin) % 16 != 0)
      return false;

   enum gl_access_qualifier access = nir_intrinsic_access(intrin);
   return (access & ACCESS_NON_WRITEABLE) && (access & ACCESS_CAN_SPECULATE);
}

/* For now, everything we upload is accessed statically and thus will be
 * used by the shader. Once we can upload dynamically indexed data, we may
 * upload sparsely accessed arrays, at which point we probably want to
 * give priority to smaller UBOs, on the assumption that big UBOs will be
 * accessed dynamically.  Alternatively, we can track statically and
 * dynamically accessed ranges separately and upload static ranges
 * first.
 */
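/* Packs the enabled ranges back to back: each range gets the next free chunk
 * of const space at 'start' + running offset, and state->size ends up holding
 * the total number of bytes planned for upload.
 */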
static void
assign_offsets(struct ir3_ubo_analysis_state *state, unsigned start,
               unsigned max_upload)
{
   uint32_t offset = 0;
   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t range_size = state->range[i].end - state->range[i].start;

      assert(offset <= max_upload);
      state->range[i].offset = offset + start;
      assert(offset <= max_upload);
      offset += range_size;
   }
   state->size = offset;
}

/* Lowering ldg to ldg.k + const uses the same infrastructure as lowering UBO
 * loads, but must be done separately because the analysis and transform must be
 * done in the same pass and we cannot reuse the main variant analysis for the
 * binning variant.
 */
bool
ir3_nir_lower_const_global_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_compiler *compiler = v->compiler;

   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return false;

   unsigned max_upload;
   if (v->binning_pass) {
      max_upload = const_state->global_size * 16;
   } else {
      struct ir3_const_state worst_case_const_state = {
         .preamble_size = const_state->preamble_size,
      };
      ir3_setup_const_state(nir, v, &worst_case_const_state);
      max_upload = (ir3_max_const(v) - worst_case_const_state.offsets.immediate) * 16;
   }

   struct ir3_ubo_analysis_state state = {};
   uint32_t upload_remaining = max_upload;

   nir_foreach_function (function, nir) {
      if (function->impl && !function->is_preamble) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_const(instr) &&
                   def_is_rematerializable(nir_instr_as_intrinsic(instr)->src[0].ssa))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), &state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   uint32_t global_offset = v->shader_options.num_reserved_user_consts * 16;
   assign_offsets(&state, global_offset, max_upload);

   bool progress = copy_global_to_uniform(nir, &state);

   if (progress) {
      nir_foreach_function (function, nir) {
         if (function->impl) {
            if (function->is_preamble) {
               nir_metadata_preserve(
                  function->impl, nir_metadata_all);
               continue;
            }

            nir_builder builder = nir_builder_create(function->impl);
            nir_foreach_block (block, function->impl) {
               nir_foreach_instr_safe (instr, block) {
                  if (!instr_is_load_const(instr))
                     continue;
                  progress |= lower_ubo_load_to_uniform(
                     nir_instr_as_intrinsic(instr), &builder, &state, NULL,
                     compiler->const_upload_unit);
               }
            }

            nir_metadata_preserve(
               function->impl, nir_metadata_block_index | nir_metadata_dominance);
         }
      }
   }

   if (!v->binning_pass)
      const_state->global_size = DIV_ROUND_UP(state.size, 16);

   return progress;
}

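/* First pass of the UBO-to-constants lowering: walks every UBO load (skipping
 * the preamble when UBOs are pushed from there) and builds the upload plan in
 * const_state->ubo_state, bounded by the const space left over after a
 * worst-case estimate of the other const file users.
 */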
void
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->compiler;

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params.  We do this UBO-to-push-constant before the real
    * allocation of the driver params' const space, because UBO pointers can
    * be driver params but this pass usually eliminates them.
    */
   struct ir3_const_state worst_case_const_state = {
      .preamble_size = const_state->preamble_size,
      .global_size = const_state->global_size,
   };
   ir3_setup_const_state(nir, v, &worst_case_const_state);
   const uint32_t max_upload =
      (ir3_max_const(v) - worst_case_const_state.offsets.immediate) * 16;

   memset(state, 0, sizeof(*state));

   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   uint32_t upload_remaining = max_upload;
   bool push_ubos = compiler->options.push_ubo_with_preamble;

   nir_foreach_function (function, nir) {
      if (function->impl && (!push_ubos || !function->is_preamble)) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   uint32_t ubo_offset = v->shader_options.num_reserved_user_consts * 16 +
      const_state->global_size * 16;
   assign_offsets(state, ubo_offset, max_upload);
}

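/* Second pass: rewrites the planned UBO loads into load_uniform and, when
 * UBOs are pushed with the preamble, emits the preamble copies themselves.
 */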
bool
ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->compiler;
   /* For the binning pass variant, we re-use the corresponding draw-pass
    * variant's const_state and ubo state.  To make this clear, in this
    * pass it is const (read-only).
    */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   int num_ubos = 0;
   bool progress = false;
   bool has_preamble = false;
   bool push_ubos = compiler->options.push_ubo_with_preamble;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         if (function->is_preamble && push_ubos) {
            has_preamble = true;
            nir_metadata_preserve(function->impl, nir_metadata_all);
            continue;
         }
         nir_builder builder = nir_builder_create(function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (!instr_is_load_ubo(instr))
                  continue;
               progress |= lower_ubo_load_to_uniform(
                  nir_instr_as_intrinsic(instr), &builder, state, &num_ubos,
                  compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(
            function->impl, nir_metadata_block_index | nir_metadata_dominance);
      }
   }
   /* Update the num_ubos field for GL (first_ubo_is_default_ubo).  With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incremented.
    */
   if (nir->info.first_ubo_is_default_ubo && !push_ubos && !has_preamble)
      nir->info.num_ubos = num_ubos;

   if (compiler->has_preamble && push_ubos)
      progress |= copy_ubo_to_uniform(
         nir, const_state, !compiler->load_shader_consts_via_preamble);

   return progress;
}

static bool
fixup_load_uniform_filter(const nir_instr *instr, const void *arg)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   return nir_instr_as_intrinsic(instr)->intrinsic ==
          nir_intrinsic_load_uniform;
}

static nir_def *
fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* We don't need to worry about the non-indirect case: */
   if (nir_src_is_const(intr->src[0]))
      return NULL;

   const unsigned base_offset_limit = (1 << 9); /* 9 bits */
   unsigned base_offset = nir_intrinsic_base(intr);

   /* Or cases where the base offset is lower than the hw limit: */
   if (base_offset < base_offset_limit)
      return NULL;

   b->cursor = nir_before_instr(instr);

   nir_def *offset = intr->src[0].ssa;

   /* We'd like to avoid a sequence like:
    *
    *   vec4 32 ssa_18 = intrinsic load_uniform (ssa_4) (1024, 0, 0)
    *   vec4 32 ssa_19 = intrinsic load_uniform (ssa_4) (1072, 0, 0)
    *   vec4 32 ssa_20 = intrinsic load_uniform (ssa_4) (1120, 0, 0)
    *
    * from turning into a unique offset value (which requires reloading
    * a0.x for each instruction).  So instead of just adding the constant
    * base_offset to the non-const offset, be a bit more clever and only
    * extract the part that cannot be encoded.  Afterwards CSE should
    * turn the result into:
    *
    *   vec1 32 ssa_5 = load_const (1024)
    *   vec4 32 ssa_6  = iadd ssa_4, ssa_5
    *   vec4 32 ssa_18 = intrinsic load_uniform (ssa_6) (0, 0, 0)
    *   vec4 32 ssa_19 = intrinsic load_uniform (ssa_6) (48, 0, 0)
    *   vec4 32 ssa_20 = intrinsic load_uniform (ssa_6) (96, 0, 0)
    */
   unsigned new_base_offset = base_offset % base_offset_limit;

   nir_intrinsic_set_base(intr, new_base_offset);
   offset = nir_iadd_imm(b, offset, base_offset - new_base_offset);

   nir_src_rewrite(&intr->src[0], offset);

   return NIR_LOWER_INSTR_PROGRESS;
}

/**
 * For relative CONST file access, we can only encode 10b worth of fixed offset,
 * so in cases where the base offset is larger, we need to peel it out into
 * ALU instructions.
 *
 * This should run late, after constant folding has had a chance to do its
 * thing, so we can actually know if it is an indirect uniform offset or not.
 */
bool
ir3_nir_fixup_load_uniform(nir_shader *nir)
{
   return nir_shader_lower_instructions(nir, fixup_load_uniform_filter,
                                        fixup_load_uniform_instr, NULL);
}

static nir_def *
ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
{
   struct ir3_const_state *const_state = data;
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in_instr);

   unsigned num_components = instr->num_components;
   unsigned bit_size = instr->def.bit_size;
   if (instr->def.bit_size == 16) {
      /* We can't do 16b loads -- either from LDC (32-bit only in any of our
       * traces, and disasm that doesn't look like it really supports it) or
       * from the constant file (where CONSTANT_DEMOTION_ENABLE means we get
       * automatic 32b-to-16b conversions when we ask for 16b from it).
       * Instead, we'll load 32b from a UBO and unpack from there.
       */
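      /* e.g. a 3-component 16-bit load becomes a 2-component 32-bit UBO load,
       * which is bitcast back to 4 x 16-bit and trimmed to 3 components below.
       */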
      num_components = DIV_ROUND_UP(num_components, 2);
      bit_size = 32;
   }
   unsigned base = nir_intrinsic_base(instr);
   nir_def *index = ir3_get_driver_ubo(b, &const_state->consts_ubo);
   nir_def *offset =
      nir_iadd_imm(b, instr->src[0].ssa, base);

   nir_def *result =
      nir_load_ubo(b, num_components, bit_size, index, offset,
                   .align_mul = nir_intrinsic_align_mul(instr),
                   .align_offset = nir_intrinsic_align_offset(instr),
                   .range_base = base, .range = nir_intrinsic_range(instr));

   if (instr->def.bit_size == 16) {
      result = nir_bitcast_vector(b, result, 16);
      result = nir_trim_vector(b, result, instr->num_components);
   }

   return result;
}

static bool
ir3_lower_load_const_filter(const nir_instr *instr, const void *data)
{
   return (instr->type == nir_instr_type_intrinsic &&
           nir_instr_as_intrinsic(instr)->intrinsic ==
              nir_intrinsic_load_constant);
}

/* Lowers load_constant intrinsics to UBO accesses so we can run them through
 * the general "upload to const file or leave as UBO access" code.
 */
bool
ir3_nir_lower_load_constant(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);

   bool progress = nir_shader_lower_instructions(
      nir, ir3_lower_load_const_filter, ir3_nir_lower_load_const_instr,
      const_state);

   if (progress) {
      struct ir3_compiler *compiler = v->compiler;

      /* Save a copy of the NIR constant data to the variant for
       * inclusion in the final assembly.
       */
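      /* The copy is padded out to a whole number of upload units; this assumes
       * const_upload_unit is in vec4 (16-byte) units, matching its use above.
       */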
      v->constant_data_size =
         align(nir->constant_data_size,
               compiler->const_upload_unit * 4 * sizeof(uint32_t));
      v->constant_data = rzalloc_size(v, v->constant_data_size);
      memcpy(v->constant_data, nir->constant_data, nir->constant_data_size);
   }

   return progress;
}
961