/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

static inline bool
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr,
                   uint32_t alignment, struct ir3_ubo_range *r)
{
   uint32_t offset = nir_intrinsic_range_base(instr);
   uint32_t size = nir_intrinsic_range(instr);

   /* If the offset is constant, the range is trivial (and NIR may not have
    * figured it out).
    */
   if (nir_src_is_const(instr->src[1])) {
      offset = nir_src_as_uint(instr->src[1]);
      size = nir_intrinsic_dest_components(instr) * 4;
   }

   /* If we haven't figured out the range accessed in the UBO, bail. */
   if (size == ~0)
      return false;

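   /* Note: "alignment" is the const upload granularity (the callers pass the
    * compiler's const_upload_unit, in vec4 units), so alignment * 16 is the
    * byte granularity the accessed range is rounded out to.
    */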
   r->start = ROUND_DOWN_TO(offset, alignment * 16);
   r->end = ALIGN(offset + size, alignment * 16);

   return true;
}

static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (nir_src_is_const(instr->src[0])) {
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         return true;
      }
   }
   return false;
}

/**
 * Finds the given instruction's UBO load in the UBO upload plan, if any.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state,
                   struct ir3_ubo_range *r)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (!memcmp(&range->ubo, &ubo, sizeof(ubo)) && r->start >= range->start &&
          r->end <= range->end) {
         return range;
      }
   }

   return NULL;
}

/**
 * Merges together neighboring/overlapping ranges in the range plan with a
 * newly updated range.
 */
static void
merge_neighbors(struct ir3_ubo_analysis_state *state, int index)
{
   struct ir3_ubo_range *a = &state->range[index];

   /* index is always the first slot that would have neighbored/overlapped with
    * the new range.
    */
   for (int i = index + 1; i < state->num_enabled; i++) {
      struct ir3_ubo_range *b = &state->range[i];
      if (memcmp(&a->ubo, &b->ubo, sizeof(a->ubo)))
         continue;

      if (a->start > b->end || a->end < b->start)
         continue;

      /* Merge B into A. */
      a->start = MIN2(a->start, b->start);
      a->end = MAX2(a->end, b->end);

      /* Swap the last enabled range into B's now unused slot */
      *b = state->range[--state->num_enabled];
   }
}

/**
 * During the first pass over the shader, builds the plan of which UBO upload
 * should include the range covering this UBO load.
 *
 * We are passed an upload_remaining budget of how much const file space is
 * left for us, and we make sure our plan doesn't exceed it.
 */
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment,
                  uint32_t *upload_remaining)
{
   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   struct ir3_ubo_info ubo = {};
   if (!get_ubo_info(instr, &ubo))
      return;

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(nir, instr, alignment, &r))
      return;

   /* See if there's an existing range for this UBO we want to merge into. */
   for (int i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *plan_r = &state->range[i];
      if (memcmp(&plan_r->ubo, &ubo, sizeof(ubo)))
         continue;

      /* Don't extend existing uploads unless they're
       * neighboring/overlapping.
       */
      if (r.start > plan_r->end || r.end < plan_r->start)
         continue;

      r.start = MIN2(r.start, plan_r->start);
      r.end = MAX2(r.end, plan_r->end);

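      /* "added" is how many extra bytes this merge would grow the planned
       * upload by; bail before touching the plan if it would blow the
       * remaining budget.
       */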
      uint32_t added = (plan_r->start - r.start) + (r.end - plan_r->end);
      if (added >= *upload_remaining)
         return;

      plan_r->start = r.start;
      plan_r->end = r.end;
      *upload_remaining -= added;

      merge_neighbors(state, i);
      return;
   }

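   /* No existing range matched: start a new one, if a slot is free and the
    * budget allows.
    */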
   if (state->num_enabled == ARRAY_SIZE(state->range))
      return;

   uint32_t added = r.end - r.start;
   if (added >= *upload_remaining)
      return;

   struct ir3_ubo_range *plan_r = &state->range[state->num_enabled++];
   plan_r->ubo = ubo;
   plan_r->start = r.start;
   plan_r->end = r.end;
   *upload_remaining -= added;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base)
 *                     (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base)
 *                     (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

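   /* Fold whichever iadd operand is constant into the intrinsic's base
    * offset, keeping the other operand as the new indirect source.
    */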
   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos;
   }
}

static bool
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          const struct ir3_ubo_analysis_state *state,
                          int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(b->shader, instr, alignment, &r)) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* We don't lower dynamic block index UBO loads to load_uniform, but with
    * some effort we could probably determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state, &r);
   if (!range) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4. And
    * also the same for the constant part of the offset:
    */
   const int shift = -2;
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0
                          ? nir_ishl(b, ubo_offset, nir_imm_int(b, shift))
                          : nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }

   assert(!(const_offset & 0x3));
   const_offset >>= 2;

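   /* range->offset is where this range lands in the const file and
    * range->start is the UBO byte offset it is copied from; their
    * difference (in dwords) rebases the load onto the const file.
    */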
   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative, since if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset.  If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away)
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_ssa_def *uniform =
      nir_load_uniform(b, instr->num_components, instr->dest.ssa.bit_size,
                       uniform_offset, .base = const_offset);

   nir_ssa_def_rewrite_uses(&instr->dest.ssa, uniform);

   nir_instr_remove(&instr->instr);

   return true;
}

static bool
copy_ubo_to_uniform(nir_shader *nir, const struct ir3_const_state *const_state)
{
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

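   /* Nothing to copy if the only planned range is the constant_data UBO;
    * that one is uploaded through a separate path (see the skip below).
    */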
   if (state->num_enabled == 0 ||
       (state->num_enabled == 1 && !state->range[0].ubo.bindless &&
        state->range[0].ubo.block == const_state->constant_data_ubo))
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b, *b = &_b;
   nir_builder_init(b, preamble);
   b->cursor = nir_after_cf_list(&preamble->body);

   for (unsigned i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];

      /* The constant_data UBO is pushed in a different path from normal
       * uniforms, and the state is set up earlier so it makes more sense to
       * let the CP do it for us.
       */
      if (!range->ubo.bindless &&
          range->ubo.block == const_state->constant_data_ubo)
         continue;

      nir_ssa_def *ubo = nir_imm_int(b, range->ubo.block);
      if (range->ubo.bindless) {
         ubo = nir_bindless_resource_ir3(b, 32, ubo,
                                         .desc_set = range->ubo.bindless_base);
      }

      /* ldc.k has a range of only 256, but there are 512 vec4 constants.
       * Therefore we may have to split a large copy in two.
       */
      unsigned size = (range->end - range->start) / 16;
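      /* size and offset here are in vec4 units (16 bytes); the copy's .base
       * is in dwords into the const file.
       */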
      for (unsigned offset = 0; offset < size; offset += 256) {
         nir_copy_ubo_to_uniform_ir3(b, ubo, nir_imm_int(b, range->start / 16 +
                                                         offset),
                                     .base = range->offset / 4 + offset * 4,
                                     .range = MIN2(size - offset, 256));
      }
   }

   return true;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* nir_lower_ubo_vec4 happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_vec4);

   return op == nir_intrinsic_load_ubo;
}

void
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->compiler;

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params.  We do this UBO-to-push-constant pass before the real
    * allocation of the driver params' const space, because UBO pointers can
    * be driver params but this pass usually eliminates them.
    */
   struct ir3_const_state worst_case_const_state = {
      .preamble_size = const_state->preamble_size,
   };
   ir3_setup_const_state(nir, v, &worst_case_const_state);
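   /* ir3_max_const() and offsets.immediate are in vec4 units, so the *16
    * below gives the remaining upload budget in bytes.
    */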
   const uint32_t max_upload =
      (ir3_max_const(v) - worst_case_const_state.offsets.immediate) * 16;

   memset(state, 0, sizeof(*state));

   uint32_t upload_remaining = max_upload;
   bool push_ubos = compiler->push_ubo_with_preamble;
   nir_foreach_function (function, nir) {
      if (function->impl && (!push_ubos || !function->is_preamble)) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader. Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically.  Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges
    * first.
    */

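   /* Assign each planned range its destination offset in the const file (in
    * bytes), after the reserved user consts.
    */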
   uint32_t offset = 0;
   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t range_size = state->range[i].end - state->range[i].start;

      assert(offset <= max_upload);
      state->range[i].offset = offset + v->num_reserved_user_consts * 16;
      offset += range_size;
   }
   state->size = offset;
}

bool
ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->compiler;
   /* For the binning pass variant, we re-use the corresponding draw-pass
    * variant's const_state and UBO state.  To make this clear, in this
    * pass they are const (read-only).
    */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   int num_ubos = 0;
   bool progress = false;
   bool has_preamble = false;
   bool push_ubos = compiler->push_ubo_with_preamble;
   nir_foreach_function (function, nir) {
      if (function->impl) {
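         /* With push_ubo_with_preamble, UBO loads in the preamble are left
          * alone here; copy_ubo_to_uniform() below appends the const
          * uploads to the preamble instead.
          */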
         if (function->is_preamble && push_ubos) {
            has_preamble = true;
            nir_metadata_preserve(function->impl, nir_metadata_all);
            continue;
         }
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (!instr_is_load_ubo(instr))
                  continue;
               progress |= lower_ubo_load_to_uniform(
                  nir_instr_as_intrinsic(instr), &builder, state, &num_ubos,
                  compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(
            function->impl, nir_metadata_block_index | nir_metadata_dominance);
      }
   }
   /* Update the num_ubos field for GL (first_ubo_is_default_ubo).  With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incremented.
    */
   if (nir->info.first_ubo_is_default_ubo && !push_ubos && !has_preamble)
      nir->info.num_ubos = num_ubos;

   if (compiler->has_preamble && push_ubos)
      progress |= copy_ubo_to_uniform(nir, const_state);

   return progress;
}

static bool
fixup_load_uniform_filter(const nir_instr *instr, const void *arg)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   return nir_instr_as_intrinsic(instr)->intrinsic ==
          nir_intrinsic_load_uniform;
}

static nir_ssa_def *
fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* We don't need to worry about the non-indirect case: */
   if (nir_src_is_const(intr->src[0]))
      return NULL;

   const unsigned base_offset_limit = (1 << 9); /* 9 bits */
   unsigned base_offset = nir_intrinsic_base(intr);

   /* Nor about cases where the base offset is below the hw limit: */
   if (base_offset < base_offset_limit)
      return NULL;

   b->cursor = nir_before_instr(instr);

   nir_ssa_def *offset = nir_ssa_for_src(b, intr->src[0], 1);

   /* We'd like to avoid a sequence like:
    *
    *   vec4 32 ssa_18 = intrinsic load_uniform (ssa_4) (1024, 0, 0)
    *   vec4 32 ssa_19 = intrinsic load_uniform (ssa_4) (1072, 0, 0)
    *   vec4 32 ssa_20 = intrinsic load_uniform (ssa_4) (1120, 0, 0)
    *
    * from turning into a unique offset value (which requires reloading
    * a0.x for each instruction).  So instead of just adding the constant
    * base_offset to the non-const offset, be a bit more clever and only
    * extract the part that cannot be encoded.  Afterwards CSE should
    * turn the result into:
    *
    *   vec1 32 ssa_5 = load_const (1024)
    *   vec1 32 ssa_6 = iadd ssa_4, ssa_5
    *   vec4 32 ssa_18 = intrinsic load_uniform (ssa_6) (0, 0, 0)
    *   vec4 32 ssa_19 = intrinsic load_uniform (ssa_6) (48, 0, 0)
    *   vec4 32 ssa_20 = intrinsic load_uniform (ssa_6) (96, 0, 0)
    */
   unsigned new_base_offset = base_offset % base_offset_limit;

   nir_intrinsic_set_base(intr, new_base_offset);
   offset = nir_iadd_imm(b, offset, base_offset - new_base_offset);

   nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(offset));

   return NIR_LOWER_INSTR_PROGRESS;
}

/**
 * For relative CONST file access, we can only encode 10b worth of fixed offset,
 * so in cases where the base offset is larger, we need to peel it out into
 * ALU instructions.
 *
 * This should run late, after constant folding has had a chance to do its
 * thing, so we can actually know if it is an indirect uniform offset or not.
 */
bool
ir3_nir_fixup_load_uniform(nir_shader *nir)
{
   return nir_shader_lower_instructions(nir, fixup_load_uniform_filter,
                                        fixup_load_uniform_instr, NULL);
}

static nir_ssa_def *
ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
{
   struct ir3_const_state *const_state = data;
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in_instr);

   /* Pick a UBO index to use as our constant data.  Skip UBO 0 since that's
    * reserved for gallium's cb0.
    */
   if (const_state->constant_data_ubo == -1) {
      if (b->shader->info.num_ubos == 0)
         b->shader->info.num_ubos++;
      const_state->constant_data_ubo = b->shader->info.num_ubos++;
   }

   unsigned num_components = instr->num_components;
   if (nir_dest_bit_size(instr->dest) == 16) {
      /* We can't do 16b loads -- either from LDC (32-bit only in any of our
       * traces, and disasm that doesn't look like it really supports it) or
       * from the constant file (where CONSTANT_DEMOTION_ENABLE means we get
       * automatic 32b-to-16b conversions when we ask for 16b from it).
       * Instead, we'll load 32b from a UBO and unpack from there.
       */
      num_components = DIV_ROUND_UP(num_components, 2);
   }
   unsigned base = nir_intrinsic_base(instr);
   nir_ssa_def *index = nir_imm_int(b, const_state->constant_data_ubo);
   nir_ssa_def *offset =
      nir_iadd_imm(b, nir_ssa_for_src(b, instr->src[0], 1), base);

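   /* load_constant offsets are byte offsets into the shader's constant-data
    * blob; re-emit the load as a UBO read at the same byte offset within the
    * constant-data UBO.
    */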
   nir_ssa_def *result =
      nir_load_ubo(b, num_components, 32, index, offset,
                   .align_mul = nir_intrinsic_align_mul(instr),
                   .align_offset = nir_intrinsic_align_offset(instr),
                   .range_base = base, .range = nir_intrinsic_range(instr));

   if (nir_dest_bit_size(instr->dest) == 16) {
      result = nir_bitcast_vector(b, result, 16);
      result = nir_trim_vector(b, result, instr->num_components);
   }

   return result;
}

static bool
ir3_lower_load_const_filter(const nir_instr *instr, const void *data)
{
   return (instr->type == nir_instr_type_intrinsic &&
           nir_instr_as_intrinsic(instr)->intrinsic ==
              nir_intrinsic_load_constant);
}

/* Lowers load_constant intrinsics to UBO accesses so we can run them through
 * the general "upload to const file or leave as UBO access" code.
 */
bool
ir3_nir_lower_load_constant(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);

   const_state->constant_data_ubo = -1;

   bool progress = nir_shader_lower_instructions(
      nir, ir3_lower_load_const_filter, ir3_nir_lower_load_const_instr,
      const_state);

   if (progress) {
      struct ir3_compiler *compiler = v->compiler;

      /* Save a copy of the NIR constant data to the variant for
       * inclusion in the final assembly.
       */
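      /* Note: the size is aligned to the const upload granularity in bytes
       * (const_upload_unit * 16) so the data can be uploaded in whole
       * upload units.
       */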
      v->constant_data_size =
         align(nir->constant_data_size,
               compiler->const_upload_unit * 4 * sizeof(uint32_t));
      v->constant_data = rzalloc_size(v, v->constant_data_size);
      memcpy(v->constant_data, nir->constant_data, nir->constant_data_size);
   }

   return progress;
}