/*
 * Copyright © 2015-2018 Rob Clark <robclark@freedesktop.org>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_context.h"
#include "ir3_compiler.h"
#include "ir3_image.h"
#include "ir3_nir.h"
#include "ir3_shader.h"
#include "nir.h"
#include "nir_intrinsics_indices.h"
#include "util/u_math.h"

struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler, struct ir3_shader *shader,
                 struct ir3_shader_variant *so)
{
   MESA_TRACE_FUNC();

   struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

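   /* Stash gen-specific texture-workaround state from the shader key: a4xx
    * lowers ASTC sRGB decode and sampler swizzles in the shader, while a3xx
    * keys on the per-stage sample counts (presumably for MSAA texel-fetch
    * lowering).
    */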
   if (compiler->gen == 4) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->astc_srgb = so->key.vastc_srgb;
         memcpy(ctx->sampler_swizzles, so->key.vsampler_swizzles,
                sizeof(ctx->sampler_swizzles));
      } else if (so->type == MESA_SHADER_FRAGMENT ||
                 so->type == MESA_SHADER_COMPUTE) {
         ctx->astc_srgb = so->key.fastc_srgb;
         memcpy(ctx->sampler_swizzles, so->key.fsampler_swizzles,
                sizeof(ctx->sampler_swizzles));
      }
   } else if (compiler->gen == 3) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->samples = so->key.vsamples;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->samples = so->key.fsamples;
      }
   }

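   /* Per-generation emit hooks (used for things like image/SSBO
    * intrinsics); gens before a4xx don't have a table.
    */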
   if (compiler->gen >= 6) {
      ctx->funcs = &ir3_a6xx_funcs;
   } else if (compiler->gen >= 4) {
      ctx->funcs = &ir3_a4xx_funcs;
   }

   ctx->compiler = compiler;
   ctx->so = so;
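   /* Translation-time maps: def_ht maps a nir_def to its array of ir3
    * values (see ir3_get_dst_ssa() below), block_ht/continue_block_ht
    * presumably map nir blocks to their ir3 blocks, and the conversion
    * tables cache per-value conversions (see ir3_get_predicate() below)
    * so they aren't emitted repeatedly.
    */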
   ctx->def_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->block_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->continue_block_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->sel_cond_conversions =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->predicate_conversions = _mesa_pointer_hash_table_create(ctx);

   /* TODO: maybe generate some sort of bitmask of what the key lowers vs
    * what the shader has (ie. no need to run texture clamp lowering if
    * there are no texture sample instrs).. although that should be done
    * further up the stack to avoid creating duplicate variants..
    */

   ctx->s = nir_shader_clone(ctx, shader->nir);
   ir3_nir_lower_variant(so, &shader->options.nir_options, ctx->s);

   bool progress = false;
   bool needs_late_alg = false;

   /* We want to lower nir_op_imul as late as possible, to also catch those
    * generated by earlier passes (e.g. nir_lower_locals_to_regs).  However,
    * we want a final swing of a few passes to have a chance at optimizing
    * the result.
    */
   NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
   while (progress) {
      progress = false;
      NIR_PASS(progress, ctx->s, nir_opt_algebraic);
      NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dce);
      NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
      needs_late_alg = true;
   }

   /* nir_opt_algebraic() above would have unfused our ffmas, so re-fuse
    * them.
    */
   if (needs_late_alg) {
      NIR_PASS(progress, ctx->s, nir_opt_algebraic_late);
      NIR_PASS(progress, ctx->s, nir_opt_dce);
   }

   /* This must run after the last nir_opt_algebraic or it gets undone. */
   if (compiler->has_branch_and_or)
      NIR_PASS_V(ctx->s, ir3_nir_opt_branch_and_or_not);

   if (compiler->has_bitwise_triops) {
      bool triops_progress = false;
      NIR_PASS(triops_progress, ctx->s, ir3_nir_opt_triops_bitwise);

      if (triops_progress) {
         NIR_PASS_V(ctx->s, nir_opt_dce);
      }
   }

   /* Enable the texture pre-fetch feature only on a4xx onwards, and only
    * on generations that have been tested:
    */
   if ((so->type == MESA_SHADER_FRAGMENT) && compiler->has_fs_tex_prefetch)
      NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

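   /* Try to re-vectorize scalar ALU ops (ir3_nir_vectorize_filter decides
    * which combinations are allowed), then clean up if anything changed.
    */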
   bool vectorized = false;
   NIR_PASS(vectorized, ctx->s, nir_opt_vectorize, ir3_nir_vectorize_filter,
            NULL);

   if (vectorized) {
      NIR_PASS_V(ctx->s, nir_opt_undef);
      NIR_PASS_V(ctx->s, nir_copy_prop);
      NIR_PASS_V(ctx->s, nir_opt_dce);
   }

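   /* Convert to LCSSA so that values live across a loop boundary get
    * explicit phis, which the divergence analysis below presumably
    * depends on.
    */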
   NIR_PASS(progress, ctx->s, nir_convert_to_lcssa, true, true);

   /* This has to go at the absolute end to make sure that all SSA defs are
    * correctly marked.
    */
   NIR_PASS_V(ctx->s, nir_divergence_analysis);

   /* Super crude heuristic to limit # of tex prefetch in small
    * shaders.  This completely ignores loops.. but that's really
    * not the worst of its problems.  (A frag shader that has
    * loops is probably going to be big enough to not trigger a
    * lower threshold.)
    *
    *   1) probably want to do this in terms of ir3 instructions
    *   2) probably really want to decide this after scheduling
    *      (or at least pre-RA sched) so we have a rough idea about
    *      nops, and don't count things that get cp'd away
    *   3) blob seems to use higher thresholds with a mix of more
    *      SFU instructions.  Which partly makes sense: more SFU
    *      instructions probably means you want to get the real
    *      shader started sooner, but that depends on where in the
    *      shader the SFU instructions are, which the blob doesn't
    *      seem to consider.
    *
    * This uses more conservative thresholds assuming a more alu-
    * than sfu-heavy instruction mix.
    */
   if (so->type == MESA_SHADER_FRAGMENT) {
      nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);

      unsigned instruction_count = 0;
      nir_foreach_block (block, fxn) {
         nir_foreach_instr (instr, block) {
            /* Vectorized ALU instructions expand to one scalar instruction
             * per component.
             */
            if (instr->type == nir_instr_type_alu)
               instruction_count += nir_instr_as_alu(instr)->def.num_components;
            else
               instruction_count++;
         }
      }

      if (instruction_count < 50) {
         ctx->prefetch_limit = 2;
      } else if (instruction_count < 70) {
         ctx->prefetch_limit = 3;
      } else {
         ctx->prefetch_limit = IR3_MAX_SAMPLER_PREFETCH;
      }
   }

   if (shader_debug_enabled(so->type, ctx->s->info.internal)) {
      mesa_logi("NIR (final form) for %s shader %s:", ir3_shader_stage(so),
                so->name);
      nir_log_shaderi(ctx->s);
   }

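   /* Initialize the image-to-IBO/tex slot mapping (it takes the texture
    * count, presumably because images can be mapped into the texture
    * descriptor space on some gens).
    */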
   ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

   /* Implement the "dual_color_blend_by_location" workaround for Unigine
    * Heaven and Unigine Valley, by remapping FRAG_RESULT_DATA1 to be the
    * 2nd color channel of FRAG_RESULT_DATA0.
    */
   if ((so->type == MESA_SHADER_FRAGMENT) && so->key.force_dual_color_blend) {
      nir_variable *var = nir_find_variable_with_location(
         ctx->s, nir_var_shader_out, FRAG_RESULT_DATA1);
      if (var) {
         var->data.location = FRAG_RESULT_DATA0;
         var->data.index = 1;
         nir_shader_gather_info(ctx->s, nir_shader_get_entrypoint(ctx->s));
         so->dual_src_blend = true;
      }
   }

   return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
   ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n-element value array (to be populated by caller) and
 * insert in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_def *dst, unsigned n)
{
   struct ir3_instruction **value =
      ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
   _mesa_hash_table_insert(ctx->def_ht, dst, value);
   return value;
}

struct ir3_instruction **
ir3_get_def(struct ir3_context *ctx, nir_def *def, unsigned n)
{
   struct ir3_instruction **value = ir3_get_dst_ssa(ctx, def, n);

   compile_assert(ctx, !ctx->last_dst);
   ctx->last_dst = value;
   ctx->last_dst_n = n;

   return value;
}

struct ir3_instruction *const *
ir3_get_src_maybe_shared(struct ir3_context *ctx, nir_src *src)
{
   struct hash_entry *entry;
   entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
   compile_assert(ctx, entry);
   return entry->data;
}

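/* If src's shared-register flag doesn't match what was requested, insert a
 * mov into (or out of) the shared register file; otherwise pass it through
 * unchanged.
 */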
static struct ir3_instruction *
get_shared(struct ir3_builder *build, struct ir3_instruction *src, bool shared)
{
   if (!!(src->dsts[0]->flags & IR3_REG_SHARED) != shared) {
      struct ir3_instruction *mov =
         ir3_MOV(build, src,
                 (src->dsts[0]->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32);
      mov->dsts[0]->flags &= ~IR3_REG_SHARED;
      mov->dsts[0]->flags |= COND(shared, IR3_REG_SHARED);
      return mov;
   }

   return src;
}

struct ir3_instruction *const *
ir3_get_src_shared(struct ir3_context *ctx, nir_src *src, bool shared)
{
   unsigned num_components = nir_src_num_components(*src);
   struct ir3_instruction *const *value = ir3_get_src_maybe_shared(ctx, src);
   bool mismatch = false;
   for (unsigned i = 0; i < num_components; i++) {
      if (!!(value[i]->dsts[0]->flags & IR3_REG_SHARED) != shared) {
         mismatch = true;
         break;
      }
   }

   if (!mismatch)
      return value;

   struct ir3_instruction **new_value =
      ralloc_array(ctx, struct ir3_instruction *, num_components);
   for (unsigned i = 0; i < num_components; i++)
      new_value[i] = get_shared(&ctx->build, value[i], shared);

   return new_value;
}

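/* Finish off a destination array handed out by ir3_get_def(): for small
 * bit-size defs, retype the dsts (and split sources) as half regs, then
 * clear the last_dst bookkeeping.
 */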
void
ir3_put_def(struct ir3_context *ctx, nir_def *def)
{
   unsigned bit_size = ir3_bitsize(ctx, def->bit_size);

   if (bit_size <= 16) {
      for (unsigned i = 0; i < ctx->last_dst_n; i++) {
         struct ir3_instruction *dst = ctx->last_dst[i];
         ir3_set_dst_type(dst, true);
         ir3_fixup_src_type(dst);
         if (dst->opc == OPC_META_SPLIT) {
            ir3_set_dst_type(ssa(dst->srcs[0]), true);
            ir3_fixup_src_type(ssa(dst->srcs[0]));
            dst->srcs[0]->flags |= IR3_REG_HALF;
         }
      }
   }

   ctx->last_dst = NULL;
   ctx->last_dst_n = 0;
}

static unsigned
dest_flags(struct ir3_instruction *instr)
{
   return instr->dsts[0]->flags & (IR3_REG_HALF | IR3_REG_SHARED);
}

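/* Gather a sequence of scalar ssa values into a single vector-like value
 * via a collect meta instruction.  Returns NULL for an empty array and the
 * element itself for a single-element array.
 */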
struct ir3_instruction *
ir3_create_collect(struct ir3_builder *build,
                   struct ir3_instruction *const *arr, unsigned arrsz)
{
   struct ir3_instruction *collect;

   if (arrsz == 0)
      return NULL;

   if (arrsz == 1)
      return arr[0];

   unsigned flags = dest_flags(arr[0]);

   collect = ir3_build_instr(build, OPC_META_COLLECT, 1, arrsz);
   __ssa_dst(collect)->flags |= flags;
   for (unsigned i = 0; i < arrsz; i++) {
      struct ir3_instruction *elem = arr[i];

      /* Since arrays are pre-colored in RA, we can't assume that
       * things will end up in the right place.  (Ie. if a collect
       * joins elements from two different arrays.)  So insert an
       * extra mov.
       *
       * We could possibly skip this if all the collected elements
       * are contiguous elements in a single array.. not sure how
       * likely that is to happen.
       *
       * Fixes a problem with glamor shaders, that in effect do
       * something like:
       *
       *   if (foo)
       *     texcoord = ..
       *   else
       *     texcoord = ..
       *   color = texture2D(tex, texcoord);
       *
       * In this case, texcoord will end up as nir registers (which
       * translate to ir3 arrays of length 1), and we can't assume
       * the two (or more) arrays will get allocated in consecutive
       * scalar registers.
       */
      if (elem->dsts[0]->flags & IR3_REG_ARRAY) {
         type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
         elem = ir3_MOV(build, elem, type);
      }

      assert(dest_flags(elem) == flags);
      __ssa_src(collect, elem, flags);
   }

   collect->dsts[0]->wrmask = MASK(arrsz);

   return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split meta instruction inserted
 */
void
ir3_split_dest(struct ir3_builder *build, struct ir3_instruction **dst,
               struct ir3_instruction *src, unsigned base, unsigned n)
{
   if ((n == 1) && (src->dsts[0]->wrmask == 0x1) &&
       /* setup_input needs ir3_split_dest to generate a SPLIT instruction */
       src->opc != OPC_META_INPUT) {
      dst[0] = src;
      return;
   }

   if (src->opc == OPC_META_COLLECT) {
      assert((base + n) <= src->srcs_count);

      for (int i = 0; i < n; i++) {
         dst[i] = ssa(src->srcs[i + base]);
      }

      return;
   }

   unsigned flags = dest_flags(src);

   for (int i = 0, j = 0; i < n; i++) {
      struct ir3_instruction *split =
         ir3_build_instr(build, OPC_META_SPLIT, 1, 1);
      __ssa_dst(split)->flags |= flags;
      __ssa_src(split, src, flags);
      split->split.off = i + base;

      if (src->dsts[0]->wrmask & (1 << (i + base)))
         dst[j++] = split;
   }
}

NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
   struct hash_table *errors = NULL;
   va_list ap;
   va_start(ap, format);
   if (ctx->cur_instr) {
      errors = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
      char *msg = ralloc_vasprintf(errors, format, ap);
      _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
   } else {
      mesa_loge_v(format, ap);
   }
   va_end(ap);
   nir_log_shader_annotated(ctx->s, errors);
   ralloc_free(errors);
   ctx->error = true;
   unreachable("");
}

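/* Build the cov/shl(or mull)/mova sequence that loads src, scaled by the
 * align factor, into the a0.x address register.
 */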
static struct ir3_instruction *
create_addr0(struct ir3_builder *build, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *instr, *immed;

   instr = ir3_COV(build, src, TYPE_U32, TYPE_S16);
   bool shared = (src->dsts[0]->flags & IR3_REG_SHARED);

   switch (align) {
   case 1:
      /* src *= 1: */
      break;
   case 2:
      /* src *= 2 => src <<= 1: */
      immed = create_immed_typed_shared(build, 1, TYPE_S16, shared);
      instr = ir3_SHL_B(build, instr, 0, immed, 0);
      break;
   case 3:
      /* src *= 3: */
      immed = create_immed_typed_shared(build, 3, TYPE_S16, shared);
      instr = ir3_MULL_U(build, instr, 0, immed, 0);
      break;
   case 4:
      /* src *= 4 => src <<= 2: */
      immed = create_immed_typed_shared(build, 2, TYPE_S16, shared);
      instr = ir3_SHL_B(build, instr, 0, immed, 0);
      break;
   default:
      unreachable("bad align");
      return NULL;
   }

   instr->dsts[0]->flags |= IR3_REG_HALF;

   instr = ir3_MOV(build, instr, TYPE_S16);
   instr->dsts[0]->num = regid(REG_A0, 0);
   instr->dsts[0]->flags &= ~IR3_REG_SHARED;

   return instr;
}

static struct ir3_instruction *
create_addr1(struct ir3_builder *build, unsigned const_val)
{
   struct ir3_instruction *immed =
      create_immed_typed(build, const_val, TYPE_U16);
   struct ir3_instruction *instr = ir3_MOV(build, immed, TYPE_U16);
   instr->dsts[0]->num = regid(REG_A0, 1);
   return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR-level src as address
 */
struct ir3_instruction *
ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *addr;
   unsigned idx = align - 1;

   compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));

   if (!ctx->addr0_ht[idx]) {
      ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
   } else {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
      if (entry)
         return entry->data;
   }

   addr = create_addr0(&ctx->build, src, align);
   _mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);

   return addr;
}

/* Similar to ir3_get_addr0, but for a1.x. */
struct ir3_instruction *
ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
{
   struct ir3_instruction *addr;

   if (!ctx->addr1_ht) {
      ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
   } else {
      addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
      if (addr)
         return addr;
   }

   addr = create_addr1(&ctx->build, const_val);
   _mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);

   return addr;
}

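/* Get a predicate-register version of src, caching the conversion in
 * predicate_conversions so each value is moved into a predicate register
 * at most once.
 */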
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
   src = ir3_get_cond_for_nonzero_compare(src);

   struct hash_entry *src_entry =
      _mesa_hash_table_search(ctx->predicate_conversions, src);
   if (src_entry)
      return src_entry->data;

   struct ir3_builder b = ir3_builder_at(ir3_after_instr_and_phis(src));
   struct ir3_instruction *cond;

   /* NOTE: we use cmps.s.ne x, 0 to move x into a predicate register */
   struct ir3_instruction *zero =
      create_immed_typed_shared(&b, 0, is_half(src) ? TYPE_U16 : TYPE_U32,
                                src->dsts[0]->flags & IR3_REG_SHARED);
   cond = ir3_CMPS_S(&b, src, 0, zero, 0);
   cond->cat2.condition = IR3_COND_NE;

   /* condition always goes in predicate register: */
   cond->dsts[0]->flags |= IR3_REG_PREDICATE;
   cond->dsts[0]->flags &= ~IR3_REG_SHARED;

   _mesa_hash_table_insert(ctx->predicate_conversions, src, cond);
   return cond;
}

/*
 * Array helpers
 */

void
ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl)
{
   struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
   arr->id = ++ctx->num_arrays;
   /* NOTE: sometimes we get non-array regs, for example for arrays of
    * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
    * treat a non-array as if it was an array of length 1.
    *
    * It would be nice if there was a nir pass to convert arrays of
    * length 1 to ssa.
    */
   arr->length = nir_intrinsic_num_components(decl) *
                 MAX2(1, nir_intrinsic_num_array_elems(decl));

   compile_assert(ctx, arr->length > 0);
   arr->r = &decl->def;
   arr->half = ir3_bitsize(ctx, nir_intrinsic_bit_size(decl)) <= 16;
   list_addtail(&arr->node, &ctx->ir->array_list);
}

struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_def *reg)
{
   foreach_array (arr, &ctx->ir->array_list) {
      if (arr->r == reg)
         return arr;
   }
   ir3_context_error(ctx, "bogus reg: r%d\n", reg->index);
   return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
                      struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *src;
   unsigned flags = 0;

   mov = ir3_build_instr(&ctx->build, OPC_MOV, 1, 1);
   if (arr->half) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }

   mov->barrier_class = IR3_BARRIER_ARRAY_R;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
   __ssa_dst(mov)->flags |= flags;
   src = ir3_src_create(mov, 0,
                        IR3_REG_ARRAY | COND(address, IR3_REG_RELATIV) | flags);
   src->def = (arr->last_write && arr->last_write->instr->block == block)
                 ? arr->last_write
                 : NULL;
   src->size = arr->length;
   src->array.id = arr->id;
   src->array.offset = n;
   src->array.base = INVALID_REG;

   if (address)
      ir3_instr_set_address(mov, address);

   return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
                       struct ir3_instruction *src,
                       struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *dst;
   unsigned flags = 0;

   mov = ir3_build_instr(&ctx->build, OPC_MOV, 1, 1);
   if (arr->half) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }
   mov->barrier_class = IR3_BARRIER_ARRAY_W;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
   dst = ir3_dst_create(
      mov, INVALID_REG,
      IR3_REG_SSA | IR3_REG_ARRAY | flags | COND(address, IR3_REG_RELATIV));
   dst->instr = mov;
   dst->size = arr->length;
   dst->array.id = arr->id;
   dst->array.offset = n;
   dst->array.base = INVALID_REG;
   ir3_src_create(mov, INVALID_REG, IR3_REG_SSA | flags |
                  (src->dsts[0]->flags & IR3_REG_SHARED))->def = src->dsts[0];

   if (arr->last_write && arr->last_write->instr->block == block)
      ir3_reg_set_last_array(mov, dst, arr->last_write);

   if (address)
      ir3_instr_set_address(mov, address);

   arr->last_write = dst;

   /* the array store may only matter to something in an earlier
    * block (ie. loops), but since arrays are not in SSA, the depth
    * pass won't know this.. so keep all array stores:
    */
   array_insert(block, block->keeps, mov);
}

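/* Split an intrinsic's (base, register offset) pair into a register offset
 * plus an immediate offset small enough to fit in imm_offset_bits.
 */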
void
ir3_lower_imm_offset(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                     nir_src *offset_src, unsigned imm_offset_bits,
                     struct ir3_instruction **offset, unsigned *imm_offset)
{
   nir_const_value *nir_const_offset = nir_src_as_const_value(*offset_src);
   int base = nir_intrinsic_base(intr);
   unsigned imm_offset_bound = (1 << imm_offset_bits);
   assert(base >= 0 && base < imm_offset_bound);

   if (nir_const_offset) {
      /* If both the offset and the base (immed offset) are constants, lower
       * the offset to a multiple of the bound and the immed offset to the
       * remainder.  This ensures that the offset register can often be
       * reused among multiple contiguous accesses.
       */
      uint32_t full_offset = base + nir_const_offset->u32;
      *offset = create_immed(&ctx->build,
                             ROUND_DOWN_TO(full_offset, imm_offset_bound));
      *imm_offset = full_offset % imm_offset_bound;
   } else {
      *offset = ir3_get_src(ctx, offset_src)[0];
      *imm_offset = base;
   }
}