/*
 * Copyright © 2019 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

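/* Computes the byte range [start, end) that this UBO (or const global) load
 * may access, rounded out to the const upload granularity (alignment is in
 * units of vec4s).  Returns false if the accessed range is unknown.
 */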
static inline bool
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr,
                   uint32_t alignment, struct ir3_ubo_range *r)
{
   uint32_t offset = nir_intrinsic_range_base(instr);
   uint32_t size = nir_intrinsic_range(instr);

   if (instr->intrinsic == nir_intrinsic_load_global_ir3) {
      offset *= 4;
      size *= 4;
   }

   /* If the offset is constant, the range is trivial (and NIR may not have
    * figured it out).
    */
   if (nir_src_is_const(instr->src[1])) {
      offset = nir_src_as_uint(instr->src[1]);
      if (instr->intrinsic == nir_intrinsic_load_global_ir3)
         offset *= 4;
      size = nir_intrinsic_dest_components(instr) * 4;
   }

   /* If we haven't figured out the range accessed in the UBO, bail. */
   if (size == ~0)
      return false;

   r->start = ROUND_DOWN_TO(offset, alignment * 16);
   r->end = ALIGN(offset + size, alignment * 16);

   return true;
}

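/* Identifies which buffer an intrinsic loads from: a global base address for
 * load_global_ir3, a directly indexed UBO block, or a bindless block with a
 * constant resource index.  Returns false if the block cannot be determined
 * at compile time.
 */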
static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (instr->intrinsic == nir_intrinsic_load_global_ir3) {
      ubo->global_base = instr->src[0].ssa;
      ubo->block = 0;
      ubo->bindless_base = 0;
      ubo->bindless = false;
      ubo->global = true;
      return true;
   } else if (nir_src_is_const(instr->src[0])) {
      ubo->global_base = NULL;
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      ubo->global = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->global_base = NULL;
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         ubo->global = false;
         return true;
      }
   }
   return false;
}

/**
 * Finds the given instruction's UBO load in the UBO upload plan, if any.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state,
                   struct ir3_ubo_range *r)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (!memcmp(&range->ubo, &ubo, sizeof(ubo)) && r->start >= range->start &&
          r->end <= range->end) {
         return range;
      }
   }

   return NULL;
}

/**
 * Merges together neighboring/overlapping ranges in the range plan with a
 * newly updated range.
 */
static void
merge_neighbors(struct ir3_ubo_analysis_state *state, int index)
{
   struct ir3_ubo_range *a = &state->range[index];

   /* index is always the first slot that would have neighbored/overlapped with
    * the new range.
    */
   for (int i = index + 1; i < state->num_enabled; i++) {
      struct ir3_ubo_range *b = &state->range[i];
      if (memcmp(&a->ubo, &b->ubo, sizeof(a->ubo)))
         continue;

      if (a->start > b->end || a->end < b->start)
         continue;

      /* Merge B into A. */
      a->start = MIN2(a->start, b->start);
      a->end = MAX2(a->end, b->end);

      /* Swap the last enabled range into B's now unused slot */
      *b = state->range[--state->num_enabled];
   }
}

/**
 * During the first pass over the shader, makes the plan of which UBO upload
 * should include the range covering this UBO load.
 *
 * We are passed in an upload_remaining of how much space is left for us in
 * the const file, and we make sure our plan doesn't exceed that.
 */
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment,
                  uint32_t *upload_remaining)
{
   struct ir3_ubo_info ubo = {};
   if (!get_ubo_info(instr, &ubo))
      return;

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(nir, instr, alignment, &r))
      return;

   /* See if there's an existing range for this UBO we want to merge into. */
   for (int i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *plan_r = &state->range[i];
      if (memcmp(&plan_r->ubo, &ubo, sizeof(ubo)))
         continue;

      /* Don't extend existing uploads unless they're
       * neighboring/overlapping.
       */
      if (r.start > plan_r->end || r.end < plan_r->start)
         continue;

      r.start = MIN2(r.start, plan_r->start);
      r.end = MAX2(r.end, plan_r->end);

      uint32_t added = (plan_r->start - r.start) + (r.end - plan_r->end);
      if (added >= *upload_remaining)
         return;

      plan_r->start = r.start;
      plan_r->end = r.end;
      *upload_remaining -= added;

      merge_neighbors(state, i);
      return;
   }

   if (state->num_enabled == ARRAY_SIZE(state->range))
      return;

   uint32_t added = r.end - r.start;
   if (added >= *upload_remaining)
      return;

   struct ir3_ubo_range *plan_r = &state->range[state->num_enabled++];
   plan_r->ubo = ubo;
   plan_r->start = r.start;
   plan_r->end = r.end;
   *upload_remaining -= added;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_const_ir3 (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_const_ir3 (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_const_ir3 (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_const_ir3 (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos;
   }
}

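/* Rewrites a load whose range made it into the upload plan into a
 * load_const_ir3 from the planned const-file location.  For load_ubo the
 * byte offset is converted to dwords, and any constant part of the offset is
 * folded into the intrinsic base.  Loads outside the plan are left alone
 * (and, for load_ubo, counted via track_ubo_use).
 */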
static bool
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          const struct ir3_ubo_analysis_state *state,
                          int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(b->shader, instr, alignment, &r)) {
      if (instr->intrinsic == nir_intrinsic_load_ubo)
         track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* We don't lower dynamic block index UBO loads to load_const_ir3, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state, &r);
   if (!range) {
      if (instr->intrinsic == nir_intrinsic_load_ubo)
         track_ubo_use(instr, b, num_ubos);
      return false;
   }

   nir_def *ubo_offset = instr->src[1].ssa;
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   nir_def *uniform_offset = ubo_offset;

   if (instr->intrinsic == nir_intrinsic_load_ubo) {
      /* UBO offset is in bytes, but uniform offset is in units of
       * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
       * offset is in units of 16 bytes, so we need to multiply by 4. And
       * also the same for the constant part of the offset:
       */
      const int shift = -2;
      nir_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
      if (new_offset) {
         uniform_offset = new_offset;
      } else {
         uniform_offset = shift > 0
                             ? nir_ishl_imm(b, ubo_offset, shift)
                             : nir_ushr_imm(b, ubo_offset, -shift);
      }
   }

   assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset.  If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away)
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_def *uniform =
      nir_load_const_ir3(b, instr->num_components, instr->def.bit_size,
                         uniform_offset, .base = const_offset);

   nir_def_replace(&instr->def, uniform);

   return true;
}

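/* Rematerializes the base address of each planned global range in the
 * preamble and updates the plan to reference the preamble-local def.
 * Returns whether any global ranges were present.
 */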
static bool
rematerialize_load_global_bases(nir_shader *nir,
                                struct ir3_ubo_analysis_state *state)
{
   bool has_load_global = false;
   for (unsigned i = 0; i < state->num_enabled; i++) {
      if (state->range[i].ubo.global) {
         has_load_global = true;
         break;
      }
   }

   if (!has_load_global)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *range = &state->range[i];

      if (!range->ubo.global)
         continue;

      range->ubo.global_base =
         ir3_rematerialize_def_for_preamble(b, range->ubo.global_base, NULL,
                                            NULL);
   }

   return true;
}

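/* Emits preamble code that copies each planned global range into the const
 * file: the base address is rematerialized in the preamble and the range is
 * copied 16 bytes at a time, falling back to a plain global load plus
 * store-to-const when the destination const offset is too large for the
 * copy intrinsic.
 */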
static bool
copy_global_to_uniform(nir_shader *nir, struct ir3_ubo_analysis_state *state)
{
   if (state->num_enabled == 0)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      assert(range->ubo.global);

      nir_def *base =
         ir3_rematerialize_def_for_preamble(b, range->ubo.global_base, NULL,
                                            NULL);
      unsigned start = range->start;
      if (start > (1 << 10)) {
         /* This is happening pretty late, so we need to add the offset to
          * the base ourselves.  Carry out of the low 32 bits happens iff
          * the wrapped sum ends up smaller than the addend.
          */
         nir_def *start_val = nir_imm_int(b, start);
         nir_def *base_lo = nir_channel(b, base, 0);
         nir_def *base_hi = nir_channel(b, base, 1);
         base_lo = nir_iadd(b, base_lo, start_val);
         nir_def *carry = nir_b2i32(b, nir_ult(b, base_lo, start_val));
         base_hi = nir_iadd(b, base_hi, carry);
         base = nir_vec2(b, base_lo, base_hi);
         start = 0;
      }

      unsigned size = (range->end - range->start);
      for (unsigned offset = 0; offset < size; offset += 16) {
         unsigned const_offset = range->offset / 4 + offset / 4;
         if (const_offset < 256) {
            nir_copy_global_to_uniform_ir3(b, base,
                                           .base = start + offset,
                                           .range_base = const_offset,
                                           .range = 1);
         } else {
            /* It seems that the a1.x format doesn't work, so we need to
             * decompose the ldg.k into ldg + stc.
             */
            nir_def *load =
               nir_load_global_ir3(b, 4, 32, base,
                                   nir_imm_int(b, (start + offset) / 4));
            nir_store_const_ir3(b, load, .base = const_offset);
         }
      }
   }

   return true;
}

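/* Emits copy_ubo_to_uniform_ir3 (ldc.k) instructions in the preamble that
 * push each planned UBO range into the const file.  The constant_data UBO is
 * skipped when the CP already uploads it.
 */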
static bool
copy_ubo_to_uniform(nir_shader *nir, const struct ir3_const_state *const_state,
                    bool const_data_via_cp)
{
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   if (state->num_enabled == 0)
      return false;

   if (state->num_enabled == 1 &&
       !state->range[0].ubo.bindless &&
       state->range[0].ubo.block == const_state->consts_ubo.idx &&
       const_data_via_cp)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];

      /* The constant_data UBO is pushed in a different path from normal
       * uniforms, and its state is set up earlier, so it makes more sense to
       * let the CP do it for us.
       */
      if (!range->ubo.bindless &&
          range->ubo.block == const_state->consts_ubo.idx &&
          const_data_via_cp)
         continue;

      nir_def *ubo = nir_imm_int(b, range->ubo.block);
      if (range->ubo.bindless) {
         ubo = nir_bindless_resource_ir3(b, 32, ubo,
                                         .desc_set = range->ubo.bindless_base);
      }

      /* ldc.k has a range of only 256, but there are 512 vec4 constants.
       * Therefore we may have to split a large copy in two.
       */
      unsigned size = (range->end - range->start) / 16;
      for (unsigned offset = 0; offset < size; offset += 256) {
         nir_copy_ubo_to_uniform_ir3(b, ubo, nir_imm_int(b, range->start / 16 +
                                                         offset),
                                     .base = range->offset / 4 + offset * 4,
                                     .range = MIN2(size - offset, 256));
      }
   }

   return true;
}

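/* Filter for the UBO loads that the analysis/lowering passes consider. */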
static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* nir_lower_ubo_vec4 happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_vec4);

   return op == nir_intrinsic_load_ubo;
}

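/* Filter for global loads that can be preloaded to the const file: aligned
 * to 16 bytes, read-only, and safe to speculate in the preamble.
 */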
static bool
instr_is_load_const(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
   nir_intrinsic_op op = intrin->intrinsic;

   if (op != nir_intrinsic_load_global_ir3)
      return false;

   /* TODO handle non-aligned accesses */
   if (nir_intrinsic_align_mul(intrin) < 16 ||
       nir_intrinsic_align_offset(intrin) % 16 != 0)
      return false;

   enum gl_access_qualifier access = nir_intrinsic_access(intrin);
   return (access & ACCESS_NON_WRITEABLE) && (access & ACCESS_CAN_SPECULATE);
}

/* For now, everything we upload is accessed statically and thus will be
 * used by the shader. Once we can upload dynamically indexed data, we may
 * upload sparsely accessed arrays, at which point we probably want to
 * give priority to smaller UBOs, on the assumption that big UBOs will be
 * accessed dynamically.  Alternatively, we can track statically and
 * dynamically accessed ranges separately and upload static ranges
 * first.
 */
static void
assign_offsets(struct ir3_ubo_analysis_state *state, unsigned start,
               unsigned max_upload)
{
   uint32_t offset = 0;
   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t range_size = state->range[i].end - state->range[i].start;

      assert(offset <= max_upload);
      state->range[i].offset = offset + start;
      assert(offset <= max_upload);
      offset += range_size;
   }
   state->size = offset;
}

/* Lowering ldg to ldg.k + const uses the same infrastructure as lowering UBO
 * loads, but must be done separately because the analysis and transform must be
 * done in the same pass and we cannot reuse the main variant analysis for the
 * binning variant.
 */
bool
ir3_nir_lower_const_global_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_compiler *compiler = v->compiler;

   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return false;

   unsigned max_upload;
   uint32_t global_offset = 0;
   if (v->binning_pass) {
      max_upload =
         const_state->allocs.consts[IR3_CONST_ALLOC_GLOBAL].size_vec4 * 16;
      global_offset =
         const_state->allocs.consts[IR3_CONST_ALLOC_GLOBAL].offset_vec4 * 16;
   } else {
      const struct ir3_const_state *const_state = ir3_const_state(v);
      global_offset = const_state->allocs.max_const_offset_vec4 * 16;
      max_upload =
         ir3_const_state_get_free_space(v, const_state, 1) * 16;
   }

   struct ir3_ubo_analysis_state state = {};
   uint32_t upload_remaining = max_upload;

   nir_foreach_function (function, nir) {
      if (function->impl && !function->is_preamble) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_const(instr) &&
                   ir3_def_is_rematerializable_for_preamble(nir_instr_as_intrinsic(instr)->src[0].ssa, NULL))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), &state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   assign_offsets(&state, global_offset, max_upload);

   bool progress = copy_global_to_uniform(nir, &state);

   if (progress) {
      nir_foreach_function (function, nir) {
         if (function->impl) {
            if (function->is_preamble) {
               nir_metadata_preserve(
                  function->impl, nir_metadata_all);
               continue;
            }

            nir_builder builder = nir_builder_create(function->impl);
            nir_foreach_block (block, function->impl) {
               nir_foreach_instr_safe (instr, block) {
                  if (!instr_is_load_const(instr))
                     continue;
                  progress |= lower_ubo_load_to_uniform(
                     nir_instr_as_intrinsic(instr), &builder, &state, NULL,
                     compiler->const_upload_unit);
               }
            }

            nir_metadata_preserve(
               function->impl, nir_metadata_control_flow);
         }
      }
   }

   if (!v->binning_pass) {
      ir3_const_alloc(&ir3_const_state_mut(v)->allocs, IR3_CONST_ALLOC_GLOBAL,
                      DIV_ROUND_UP(state.size, 16), 1);
   }

   return progress;
}

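/* First pass: scan the shader's UBO loads, build the upload plan in
 * const_state->ubo_state, and allocate const-file space for the planned
 * ranges (plus, on pre-a6xx, reserved space for the UBO pointer table).
 */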
void
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state_mut(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->compiler;

   if (compiler->gen < 6 && const_state->num_ubos > 0) {
      uint32_t ptrs_vec4 =
         align(const_state->num_ubos * ir3_pointer_size(compiler), 4) / 4;
      ir3_const_reserve_space(&const_state->allocs, IR3_CONST_ALLOC_UBO_PTRS,
                              ptrs_vec4, 1);
   }

   uint32_t align_vec4 = compiler->load_shader_consts_via_preamble
                            ? 1
                            : compiler->const_upload_unit;

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params.  We do this UBO-to-push-constant pass before the real
    * allocation of the UBO pointers' const space, because UBO pointers can
    * be driver params but this pass usually eliminates them.
    */
   const uint32_t max_upload =
      ir3_const_state_get_free_space(v, const_state, align_vec4) * 16;

   memset(state, 0, sizeof(*state));

   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   uint32_t upload_remaining = max_upload;
   bool push_ubos = compiler->options.push_ubo_with_preamble;

   nir_foreach_function (function, nir) {
      if (function->impl && (!push_ubos || !function->is_preamble)) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   uint32_t ubo_offset =
      align(const_state->allocs.max_const_offset_vec4, align_vec4) * 16;
   assign_offsets(state, ubo_offset, max_upload);

   uint32_t upload_vec4 = state->size / 16;
   if (upload_vec4 > 0)
      ir3_const_alloc(&ir3_const_state_mut(v)->allocs,
                      IR3_CONST_ALLOC_UBO_RANGES, upload_vec4, align_vec4);
}

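/* Second pass: rewrite the UBO loads covered by the plan into const-file
 * loads, update num_ubos for GL, redo the pre-a6xx UBO pointer allocation
 * based on the UBOs still accessed, and optionally emit the preamble copies
 * that push the planned ranges.
 */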
bool
ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->compiler;
   /* For the binning pass variant, we re-use the corresponding draw-pass
    * variant's const_state and ubo state.  To make that clear, in this
    * pass it is const (read-only).
    */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   int num_ubos = 0;
   bool progress = false;
   bool has_preamble = false;
   bool push_ubos = compiler->options.push_ubo_with_preamble;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         if (function->is_preamble && push_ubos) {
            has_preamble = true;
            nir_metadata_preserve(function->impl, nir_metadata_all);
            continue;
         }
         nir_builder builder = nir_builder_create(function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (!instr_is_load_ubo(instr))
                  continue;
               progress |= lower_ubo_load_to_uniform(
                  nir_instr_as_intrinsic(instr), &builder, state, &num_ubos,
                  compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(
            function->impl, nir_metadata_control_flow);
      }
   }
   /* Update the num_ubos field for GL (first_ubo_is_default_ubo).  With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incremented.
    */
   if (nir->info.first_ubo_is_default_ubo && !push_ubos && !has_preamble)
      nir->info.num_ubos = num_ubos;

   if (!v->binning_pass) {
      ir3_const_state_mut(v)->num_ubos = num_ubos;

      if (compiler->gen < 6)
         ir3_const_free_reserved_space(&ir3_const_state_mut(v)->allocs,
                                       IR3_CONST_ALLOC_UBO_PTRS);

      if (compiler->gen < 6 && const_state->num_ubos > 0) {
         uint32_t upload_ptrs_vec4 =
            align(const_state->num_ubos * ir3_pointer_size(compiler), 4) / 4;
         ir3_const_alloc(&ir3_const_state_mut(v)->allocs,
                         IR3_CONST_ALLOC_UBO_PTRS, upload_ptrs_vec4, 1);
      }
   }

   if (compiler->has_preamble && push_ubos)
      progress |= copy_ubo_to_uniform(
         nir, const_state, !compiler->load_shader_consts_via_preamble);

   return progress;
}

static bool
fixup_load_const_ir3_filter(const nir_instr *instr, const void *arg)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   return nir_instr_as_intrinsic(instr)->intrinsic ==
          nir_intrinsic_load_const_ir3;
}

static nir_def *
fixup_load_const_ir3_instr(struct nir_builder *b, nir_instr *instr, void *arg)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* We don't need to worry about the non-indirect case: */
   if (nir_src_is_const(intr->src[0]))
      return NULL;

   const unsigned base_offset_limit = (1 << 9); /* 9 bits */
   unsigned base_offset = nir_intrinsic_base(intr);

   /* Nor about cases where the base offset is below the hw limit: */
   if (base_offset < base_offset_limit)
      return NULL;

   b->cursor = nir_before_instr(instr);

   nir_def *offset = intr->src[0].ssa;

   /* We'd like to avoid a sequence like:
    *
    *   vec4 32 ssa_18 = intrinsic load_const_ir3 (ssa_4) (1024, 0, 0)
    *   vec4 32 ssa_19 = intrinsic load_const_ir3 (ssa_4) (1072, 0, 0)
    *   vec4 32 ssa_20 = intrinsic load_const_ir3 (ssa_4) (1120, 0, 0)
    *
    * turning into a unique offset value for each instruction (which
    * requires reloading a0.x every time).  So instead of just adding the
    * constant base_offset to the non-const offset, be a bit more clever and
    * only extract the part that cannot be encoded.  Afterwards CSE should
    * turn the result into:
    *
    *   vec1 32 ssa_5  = load_const (1024)
    *   vec1 32 ssa_6  = iadd ssa_4, ssa_5
    *   vec4 32 ssa_18 = intrinsic load_const_ir3 (ssa_6) (0, 0, 0)
    *   vec4 32 ssa_19 = intrinsic load_const_ir3 (ssa_6) (48, 0, 0)
    *   vec4 32 ssa_20 = intrinsic load_const_ir3 (ssa_6) (96, 0, 0)
    */
   unsigned new_base_offset = base_offset % base_offset_limit;

   nir_intrinsic_set_base(intr, new_base_offset);
   offset = nir_iadd_imm(b, offset, base_offset - new_base_offset);

   nir_src_rewrite(&intr->src[0], offset);

   return NIR_LOWER_INSTR_PROGRESS;
}

/**
 * For relative CONST file access, we can only encode 10b worth of fixed offset,
 * so in cases where the base offset is larger, we need to peel it out into
 * ALU instructions.
 *
 * This should run late, after constant folding has had a chance to do its
 * thing, so we can actually know if it is an indirect uniform offset or not.
 */
bool
ir3_nir_fixup_load_const_ir3(nir_shader *nir)
{
   return nir_shader_lower_instructions(nir, fixup_load_const_ir3_filter,
                                        fixup_load_const_ir3_instr, NULL);
}
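
/* Rewrites a load_constant into a load_ubo from the driver consts UBO
 * ($consts).  16-bit destinations are loaded as 32-bit and then bitcast and
 * trimmed, since the paths used here only do 32-bit loads (see the comment
 * below).
 */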
static nir_def *
ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
{
   struct ir3_shader_variant *v = data;
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in_instr);

   unsigned num_components = instr->num_components;
   unsigned bit_size = instr->def.bit_size;
   if (instr->def.bit_size == 16) {
      /* We can't do 16b loads -- either from LDC (32-bit only in any of our
       * traces, and disasm that doesn't look like it really supports it) or
       * from the constant file (where CONSTANT_DEMOTION_ENABLE means we get
       * automatic 32b-to-16b conversions when we ask for 16b from it).
       * Instead, we'll load 32b from a UBO and unpack from there.
       */
      num_components = DIV_ROUND_UP(num_components, 2);
      bit_size = 32;
   }
   unsigned base = nir_intrinsic_base(instr);
   nir_def *index = ir3_get_driver_consts_ubo(b, v);
   nir_def *offset =
      nir_iadd_imm(b, instr->src[0].ssa, base);

   nir_def *result =
      nir_load_ubo(b, num_components, bit_size, index, offset,
                   .align_mul = nir_intrinsic_align_mul(instr),
                   .align_offset = nir_intrinsic_align_offset(instr),
                   .range_base = base, .range = nir_intrinsic_range(instr));

   if (instr->def.bit_size == 16) {
      result = nir_bitcast_vector(b, result, 16);
      result = nir_trim_vector(b, result, instr->num_components);
   }

   return result;
}

static bool
ir3_lower_load_const_filter(const nir_instr *instr, const void *data)
{
   return (instr->type == nir_instr_type_intrinsic &&
           nir_instr_as_intrinsic(instr)->intrinsic ==
              nir_intrinsic_load_constant);
}

/* Lowers load_constant intrinsics to UBO accesses so we can run them through
 * the general "upload to const file or leave as UBO access" code.
 */
bool
ir3_nir_lower_load_constant(nir_shader *nir, struct ir3_shader_variant *v)
{
   bool progress = nir_shader_lower_instructions(
      nir, ir3_lower_load_const_filter, ir3_nir_lower_load_const_instr,
      v);

   if (progress) {
      struct ir3_compiler *compiler = v->compiler;

      /* Save a copy of the NIR constant data to the variant for
       * inclusion in the final assembly.
       */
      v->constant_data_size =
         align(nir->constant_data_size,
               compiler->const_upload_unit * 4 * sizeof(uint32_t));
      v->constant_data = rzalloc_size(v, v->constant_data_size);
      memcpy(v->constant_data, nir->constant_data, nir->constant_data_size);

      const struct ir3_const_state *const_state = ir3_const_state(v);
      ir3_update_driver_ubo(nir, &const_state->consts_ubo, "$consts");
   }

   return progress;
}