/*
 * Copyright © 2015-2018 Rob Clark <robclark@freedesktop.org>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_context.h"
#include "ir3_compiler.h"
#include "ir3_image.h"
#include "ir3_nir.h"
#include "ir3_shader.h"
#include "nir.h"
#include "nir_intrinsics_indices.h"
#include "util/u_math.h"

struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler, struct ir3_shader *shader,
                 struct ir3_shader_variant *so)
{
   MESA_TRACE_FUNC();

   struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

   if (compiler->gen == 4) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->astc_srgb = so->key.vastc_srgb;
         memcpy(ctx->sampler_swizzles, so->key.vsampler_swizzles, sizeof(ctx->sampler_swizzles));
      } else if (so->type == MESA_SHADER_FRAGMENT ||
            so->type == MESA_SHADER_COMPUTE) {
         ctx->astc_srgb = so->key.fastc_srgb;
         memcpy(ctx->sampler_swizzles, so->key.fsampler_swizzles, sizeof(ctx->sampler_swizzles));
      }
   } else if (compiler->gen == 3) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->samples = so->key.vsamples;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->samples = so->key.fsamples;
      }
   }

   if (compiler->gen >= 6) {
      ctx->funcs = &ir3_a6xx_funcs;
   } else if (compiler->gen >= 4) {
      ctx->funcs = &ir3_a4xx_funcs;
   }

   ctx->compiler = compiler;
   ctx->so = so;
   ctx->def_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->block_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->continue_block_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->sel_cond_conversions =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->predicate_conversions = _mesa_pointer_hash_table_create(ctx);

   /* TODO: maybe generate some sort of bitmask of what the key
    * lowers vs what the shader has (i.e. no need to run texture
    * clamp lowering if there are no texture sample instrs)..
    * although this should be done further up the stack to avoid
    * creating duplicate variants..
    */

   ctx->s = nir_shader_clone(ctx, shader->nir);
   ir3_nir_lower_variant(so, &shader->options.nir_options, ctx->s);

   bool progress = false;
   bool needs_late_alg = false;

   /* We want to lower nir_op_imul as late as possible, so that we also
    * catch those generated by earlier passes (e.g.,
    * nir_lower_locals_to_regs).  However, we still want a final swing of
    * a few passes to have a chance at optimizing the result.
    */
   NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
   while (progress) {
      progress = false;
      NIR_PASS(progress, ctx->s, nir_opt_algebraic);
      NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dce);
      NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
      needs_late_alg = true;
   }

   /* nir_opt_algebraic() above would have unfused our ffmas; re-fuse them. */
   if (needs_late_alg) {
      NIR_PASS(progress, ctx->s, nir_opt_algebraic_late);
      NIR_PASS(progress, ctx->s, nir_opt_dce);
   }

   /* This must run after the last nir_opt_algebraic or it gets undone. */
   if (compiler->has_branch_and_or)
      NIR_PASS_V(ctx->s, ir3_nir_opt_branch_and_or_not);

   if (compiler->has_bitwise_triops) {
      bool triops_progress = false;
      NIR_PASS(triops_progress, ctx->s, ir3_nir_opt_triops_bitwise);

      if (triops_progress) {
         NIR_PASS_V(ctx->s, nir_opt_dce);
      }
   }

   /* Enable the texture pre-fetch feature only on a4xx onwards, and
    * only on generations that have been tested:
    */
   if ((so->type == MESA_SHADER_FRAGMENT) && compiler->has_fs_tex_prefetch)
      NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

   bool vectorized = false;
   NIR_PASS(vectorized, ctx->s, nir_opt_vectorize, ir3_nir_vectorize_filter,
            NULL);

   if (vectorized) {
      NIR_PASS_V(ctx->s, nir_opt_undef);
      NIR_PASS_V(ctx->s, nir_copy_prop);
      NIR_PASS_V(ctx->s, nir_opt_dce);
   }

   NIR_PASS(progress, ctx->s, nir_convert_to_lcssa, true, true);

   /* This has to go at the absolute end to make sure that all SSA defs are
    * correctly marked.
    */
   NIR_PASS_V(ctx->s, nir_divergence_analysis);

   /* Super crude heuristic to limit # of tex prefetch in small
    * shaders.  This completely ignores loops.. but that's really
    * not the worst of its problems.  (A frag shader that has
    * loops is probably going to be big enough to not trigger a
    * lower threshold.)
    *
    *   1) probably want to do this in terms of ir3 instructions
    *   2) probably really want to decide this after scheduling
    *      (or at least pre-RA sched) so we have a rough idea about
    *      nops, and don't count things that get cp'd away
    *   3) blob seems to use higher thresholds with a mix of more
    *      SFU instructions, which partly makes sense: more SFU
    *      instructions probably means you want to get the real
    *      shader started sooner, but that would depend on where in
    *      the shader the SFU instructions are, which blob doesn't
    *      seem to consider.
    *
    * This uses more conservative thresholds, assuming a more ALU-heavy
    * than SFU-heavy instruction mix.
    */
   if (so->type == MESA_SHADER_FRAGMENT) {
      nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);

      unsigned instruction_count = 0;
      nir_foreach_block (block, fxn) {
         nir_foreach_instr (instr, block) {
            /* Vectorized ALU instructions expand to one scalar instruction per
             * component.
             */
            if (instr->type == nir_instr_type_alu)
               instruction_count += nir_instr_as_alu(instr)->def.num_components;
            else
               instruction_count++;
         }
      }

      if (instruction_count < 50) {
         ctx->prefetch_limit = 2;
      } else if (instruction_count < 70) {
         ctx->prefetch_limit = 3;
      } else {
         ctx->prefetch_limit = IR3_MAX_SAMPLER_PREFETCH;
      }
   }

   if (shader_debug_enabled(so->type, ctx->s->info.internal)) {
      mesa_logi("NIR (final form) for %s shader %s:", ir3_shader_stage(so),
                so->name);
      nir_log_shaderi(ctx->s);
   }

   ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

   /* Implement the "dual_color_blend_by_location" workaround for Unigine Heaven
    * and Unigine Valley, by remapping FRAG_RESULT_DATA1 to be the 2nd color
    * channel of FRAG_RESULT_DATA0.
    */
   if ((so->type == MESA_SHADER_FRAGMENT) && so->key.force_dual_color_blend) {
      nir_variable *var = nir_find_variable_with_location(
         ctx->s, nir_var_shader_out, FRAG_RESULT_DATA1);
      if (var) {
         var->data.location = FRAG_RESULT_DATA0;
         var->data.index = 1;
         nir_shader_gather_info(ctx->s, nir_shader_get_entrypoint(ctx->s));
         so->dual_src_blend = true;
      }
   }

   return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
   ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n element value array (to be populated by caller) and
 * insert in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_def *dst, unsigned n)
{
   struct ir3_instruction **value =
      ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
   _mesa_hash_table_insert(ctx->def_ht, dst, value);
   return value;
}

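/* Like ir3_get_dst_ssa(), but also records the array as the in-progress
 * destination (last_dst/last_dst_n) so that ir3_put_def() can fix up
 * register types once the instruction(s) have been emitted.
 */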
struct ir3_instruction **
ir3_get_def(struct ir3_context *ctx, nir_def *def, unsigned n)
{
   struct ir3_instruction **value = ir3_get_dst_ssa(ctx, def, n);

   compile_assert(ctx, !ctx->last_dst);
   ctx->last_dst = value;
   ctx->last_dst_n = n;

   return value;
}

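/* Look up the ir3 values previously created for a nir_src, without
 * forcing them into either the shared or the non-shared register file.
 */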
struct ir3_instruction *const *
ir3_get_src_maybe_shared(struct ir3_context *ctx, nir_src *src)
{
   struct hash_entry *entry;
   entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
   compile_assert(ctx, entry);
   return entry->data;
}

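/* Insert a mov if needed so that the value lives in the requested
 * (shared vs. non-shared) register file.
 */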
static struct ir3_instruction *
get_shared(struct ir3_builder *build, struct ir3_instruction *src, bool shared)
{
   if (!!(src->dsts[0]->flags & IR3_REG_SHARED) != shared) {
      struct ir3_instruction *mov =
         ir3_MOV(build, src,
                 (src->dsts[0]->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32);
      mov->dsts[0]->flags &= ~IR3_REG_SHARED;
      mov->dsts[0]->flags |= COND(shared, IR3_REG_SHARED);
      return mov;
   }

   return src;
}

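/* Like ir3_get_src_maybe_shared(), but additionally ensures that every
 * component is in the requested register file, inserting movs for any
 * components that don't match.
 */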
struct ir3_instruction *const *
ir3_get_src_shared(struct ir3_context *ctx, nir_src *src, bool shared)
{
   unsigned num_components = nir_src_num_components(*src);
   struct ir3_instruction *const *value = ir3_get_src_maybe_shared(ctx, src);
   bool mismatch = false;
   for (unsigned i = 0; i < nir_src_num_components(*src); i++) {
      if (!!(value[i]->dsts[0]->flags & IR3_REG_SHARED) != shared) {
         mismatch = true;
         break;
      }
   }

   if (!mismatch)
      return value;

   struct ir3_instruction **new_value =
      ralloc_array(ctx, struct ir3_instruction *, num_components);
   for (unsigned i = 0; i < num_components; i++)
      new_value[i] = get_shared(&ctx->build, value[i], shared);

   return new_value;
}

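/* Finish the def started by ir3_get_def(): for sub-32-bit defs, mark the
 * destinations (and split sources) as half-precision, then clear last_dst.
 */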
void
ir3_put_def(struct ir3_context *ctx, nir_def *def)
{
   unsigned bit_size = ir3_bitsize(ctx, def->bit_size);

   if (bit_size <= 16) {
      for (unsigned i = 0; i < ctx->last_dst_n; i++) {
         struct ir3_instruction *dst = ctx->last_dst[i];
         ir3_set_dst_type(dst, true);
         ir3_fixup_src_type(dst);
         if (dst->opc == OPC_META_SPLIT) {
            ir3_set_dst_type(ssa(dst->srcs[0]), true);
            ir3_fixup_src_type(ssa(dst->srcs[0]));
            dst->srcs[0]->flags |= IR3_REG_HALF;
         }
      }
   }

   ctx->last_dst = NULL;
   ctx->last_dst_n = 0;
}

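/* The HALF/SHARED flags of an instruction's destination, i.e. the flags
 * that need to stay consistent between collect/split and their sources.
 */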
static unsigned
dest_flags(struct ir3_instruction *instr)
{
   return instr->dsts[0]->flags & (IR3_REG_HALF | IR3_REG_SHARED);
}

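/* Gather arrsz scalar values into a single vector via a collect meta
 * instruction.  NULL entries in arr are treated as undef.
 */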
struct ir3_instruction *
ir3_create_collect(struct ir3_builder *build,
                   struct ir3_instruction *const *arr, unsigned arrsz)
{
   struct ir3_instruction *collect;

   if (arrsz == 0)
      return NULL;

   if (arrsz == 1)
      return arr[0];

   int non_undef_src = -1;
   for (unsigned i = 0; i < arrsz; i++) {
      if (arr[i]) {
         non_undef_src = i;
         break;
      }
   }

   /* There should be at least one non-undef source to determine the type of the
    * destination.
    */
   assert(non_undef_src != -1);
   unsigned flags = dest_flags(arr[non_undef_src]);

   collect = ir3_build_instr(build, OPC_META_COLLECT, 1, arrsz);
   __ssa_dst(collect)->flags |= flags;
   for (unsigned i = 0; i < arrsz; i++) {
      struct ir3_instruction *elem = arr[i];

      /* Since arrays are pre-colored in RA, we can't assume that
       * things will end up in the right place.  (I.e. if a collect
       * joins elements from two different arrays.)  So insert an
       * extra mov.
       *
       * We could possibly skip this if all the collected elements
       * are contiguous elements in a single array.. not sure how
       * likely that is to happen.
       *
       * Fixes a problem with glamor shaders, which in effect do
       * something like:
       *
       *   if (foo)
       *     texcoord = ..
       *   else
       *     texcoord = ..
       *   color = texture2D(tex, texcoord);
       *
       * In this case, texcoord will end up as nir registers (which
       * translate to ir3 arrays of length 1), and we can't assume
       * the two (or more) arrays will get allocated in consecutive
       * scalar registers.
       */
      if (elem && elem->dsts[0]->flags & IR3_REG_ARRAY) {
         type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
         elem = ir3_MOV(build, elem, type);
      }

      if (elem) {
         assert(dest_flags(elem) == flags);
         __ssa_src(collect, elem, flags);
      } else {
         ir3_src_create(collect, INVALID_REG, flags | IR3_REG_SSA);
      }
   }

   collect->dsts[0]->wrmask = MASK(arrsz);

   return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split meta instruction inserted
 */
void
ir3_split_dest(struct ir3_builder *build, struct ir3_instruction **dst,
               struct ir3_instruction *src, unsigned base, unsigned n)
{
   if ((n == 1) && (src->dsts[0]->wrmask == 0x1) &&
       /* setup_input needs ir3_split_dest to generate a SPLIT instruction */
       src->opc != OPC_META_INPUT) {
      dst[0] = src;
      return;
   }

   if (src->opc == OPC_META_COLLECT) {
      assert((base + n) <= src->srcs_count);

      for (int i = 0; i < n; i++) {
         dst[i] = ssa(src->srcs[i + base]);
      }

      return;
   }

   unsigned flags = dest_flags(src);

   for (int i = 0, j = 0; i < n; i++) {
      struct ir3_instruction *split =
         ir3_build_instr(build, OPC_META_SPLIT, 1, 1);
      __ssa_dst(split)->flags |= flags;
      __ssa_src(split, src, flags);
      split->split.off = i + base;

      if (src->dsts[0]->wrmask & (1 << (i + base)))
         dst[j++] = split;
   }
}

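/* Report a fatal compile error.  If we know which instruction was being
 * translated, annotate the NIR shader dump with the message; otherwise
 * just log it.
 */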
NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
   struct hash_table *errors = NULL;
   va_list ap;
   va_start(ap, format);
   if (ctx->cur_instr) {
      errors = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
      char *msg = ralloc_vasprintf(errors, format, ap);
      _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
   } else {
      mesa_loge_v(format, ap);
   }
   va_end(ap);
   nir_log_shader_annotated(ctx->s, errors);
   ralloc_free(errors);
   ctx->error = true;
   unreachable("");
}

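/* Build the mova sequence for a0.x: convert the source to 16-bit, scale
 * it by the requested align factor, and mov the result into a0.x.
 */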
static struct ir3_instruction *
create_addr0(struct ir3_builder *build, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *instr, *immed;

   instr = ir3_COV(build, src, TYPE_U32, TYPE_S16);
   bool shared = (src->dsts[0]->flags & IR3_REG_SHARED);

   switch (align) {
   case 1:
      /* src *= 1: */
      break;
   case 2:
      /* src *= 2 => src <<= 1: */
      immed = create_immed_typed_shared(build, 1, TYPE_S16, shared);
      instr = ir3_SHL_B(build, instr, 0, immed, 0);
      break;
   case 3:
      /* src *= 3: */
      immed = create_immed_typed_shared(build, 3, TYPE_S16, shared);
      instr = ir3_MULL_U(build, instr, 0, immed, 0);
      break;
   case 4:
      /* src *= 4 => src <<= 2: */
      immed = create_immed_typed_shared(build, 2, TYPE_S16, shared);
      instr = ir3_SHL_B(build, instr, 0, immed, 0);
      break;
   default:
      unreachable("bad align");
      return NULL;
   }

   instr->dsts[0]->flags |= IR3_REG_HALF;

   instr = ir3_MOV(build, instr, TYPE_S16);
   instr->dsts[0]->num = regid(REG_A0, 0);
   instr->dsts[0]->flags &= ~IR3_REG_SHARED;

   return instr;
}

static struct ir3_instruction *
create_addr1(struct ir3_builder *build, unsigned const_val)
{
   struct ir3_instruction *immed =
      create_immed_typed(build, const_val, TYPE_U16);
   struct ir3_instruction *instr = ir3_MOV(build, immed, TYPE_U16);
   instr->dsts[0]->num = regid(REG_A0, 1);
   return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR level src as address
 */
struct ir3_instruction *
ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *addr;
   unsigned idx = align - 1;

   compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));

   if (!ctx->addr0_ht[idx]) {
      ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
   } else {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
      if (entry)
         return entry->data;
   }

   addr = create_addr0(&ctx->build, src, align);
   _mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);

   return addr;
}

/* Similar to ir3_get_addr0, but for a1.x. */
struct ir3_instruction *
ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
{
   struct ir3_instruction *addr;

   if (!ctx->addr1_ht) {
      ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
   } else {
      addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
      if (addr)
         return addr;
   }

   addr = create_addr1(&ctx->build, const_val);
   _mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);

   return addr;
}

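/* Get (or reuse) a predicate-register version of src, suitable for use as
 * a branch/select condition.  Conversions are cached per source in
 * predicate_conversions.
 */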
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
   src = ir3_get_cond_for_nonzero_compare(src);

   struct hash_entry *src_entry =
      _mesa_hash_table_search(ctx->predicate_conversions, src);
   if (src_entry)
      return src_entry->data;

   struct ir3_builder b = ir3_builder_at(ir3_after_instr_and_phis(src));
   struct ir3_instruction *cond;

   /* NOTE: we use cmps.s.ne x, 0 to move x into a predicate register */
   struct ir3_instruction *zero =
      create_immed_typed_shared(&b, 0, is_half(src) ? TYPE_U16 : TYPE_U32,
                                src->dsts[0]->flags & IR3_REG_SHARED);
   cond = ir3_CMPS_S(&b, src, 0, zero, 0);
   cond->cat2.condition = IR3_COND_NE;

   /* condition always goes in predicate register: */
   cond->dsts[0]->flags |= IR3_REG_PREDICATE;
   cond->dsts[0]->flags &= ~IR3_REG_SHARED;

   _mesa_hash_table_insert(ctx->predicate_conversions, src, cond);
   return cond;
}

/*
 * Array helpers
 */

void
ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl)
{
   struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
   arr->id = ++ctx->num_arrays;
   /* NOTE: sometimes we get non-array regs, for example for arrays of
    * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
    * treat a non-array as if it was an array of length 1.
    *
    * It would be nice if there was a nir pass to convert arrays of
    * length 1 to ssa.
    */
   arr->length = nir_intrinsic_num_components(decl) *
                 MAX2(1, nir_intrinsic_num_array_elems(decl));

   compile_assert(ctx, arr->length > 0);
   arr->r = &decl->def;
   arr->half = ir3_bitsize(ctx, nir_intrinsic_bit_size(decl)) <= 16;
   list_addtail(&arr->node, &ctx->ir->array_list);
}

struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_def *reg)
{
   foreach_array (arr, &ctx->ir->array_list) {
      if (arr->r == reg)
         return arr;
   }
   ir3_context_error(ctx, "bogus reg: r%d\n", reg->index);
   return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
                      struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *src;
   unsigned flags = 0;

   mov = ir3_build_instr(&ctx->build, OPC_MOV, 1, 1);
   if (arr->half) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }

   mov->barrier_class = IR3_BARRIER_ARRAY_R;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
   __ssa_dst(mov)->flags |= flags;
   src = ir3_src_create(mov, 0,
                        IR3_REG_ARRAY | COND(address, IR3_REG_RELATIV) | flags);
   src->def = (arr->last_write && arr->last_write->instr->block == block)
                 ? arr->last_write
                 : NULL;
   src->size = arr->length;
   src->array.id = arr->id;
   src->array.offset = n;
   src->array.base = INVALID_REG;

   if (address)
      ir3_instr_set_address(mov, address);

   return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
                       struct ir3_instruction *src,
                       struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *dst;
   unsigned flags = 0;

   mov = ir3_build_instr(&ctx->build, OPC_MOV, 1, 1);
   if (arr->half) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }
   mov->barrier_class = IR3_BARRIER_ARRAY_W;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
   dst = ir3_dst_create(
      mov, INVALID_REG,
      IR3_REG_SSA | IR3_REG_ARRAY | flags | COND(address, IR3_REG_RELATIV));
   dst->instr = mov;
   dst->size = arr->length;
   dst->array.id = arr->id;
   dst->array.offset = n;
   dst->array.base = INVALID_REG;
   ir3_src_create(mov, INVALID_REG, IR3_REG_SSA | flags |
                  (src->dsts[0]->flags & IR3_REG_SHARED))->def = src->dsts[0];

   if (arr->last_write && arr->last_write->instr->block == block)
      ir3_reg_set_last_array(mov, dst, arr->last_write);

   if (address)
      ir3_instr_set_address(mov, address);

   arr->last_write = dst;

   /* the array store may only matter to something in an earlier
    * block (i.e. loops), but since arrays are not in SSA, the depth
    * pass won't know this.. so keep all array stores:
    */
   array_insert(block, block->keeps, mov);
}

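/* Split an intrinsic's constant base plus offset source into a register
 * offset and an immediate offset that fits in imm_offset_bits bits.
 */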
void
ir3_lower_imm_offset(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                     nir_src *offset_src, unsigned imm_offset_bits,
                     struct ir3_instruction **offset, unsigned *imm_offset)
{
   nir_const_value *nir_const_offset = nir_src_as_const_value(*offset_src);
   int base = nir_intrinsic_base(intr);
   unsigned imm_offset_bound = (1 << imm_offset_bits);
   assert(base >= 0 && base < imm_offset_bound);

   if (nir_const_offset) {
      /* If both the offset and the base (immed offset) are constants, lower the
       * offset to a multiple of the bound and the immed offset to the
       * remainder. This ensures that the offset register can often be reused
       * among multiple contiguous accesses.
       */
      uint32_t full_offset = base + nir_const_offset->u32;
      *offset = create_immed(&ctx->build,
                             ROUND_DOWN_TO(full_offset, imm_offset_bound));
      *imm_offset = full_offset % imm_offset_bound;
   } else {
      *offset = ir3_get_src(ctx, offset_src)[0];
      *imm_offset = base;
   }
}