/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_context.h"
#include "ir3_compiler.h"
#include "ir3_image.h"
#include "ir3_nir.h"
#include "ir3_shader.h"
#include "nir.h"
#include "nir_intrinsics_indices.h"

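/* Construct the per-variant compile context: clones the variant's NIR,
 * runs variant-specific lowering plus a late optimization loop, and
 * sets up the per-generation state and lookup tables used during
 * ir3 translation:
 */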
struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler, struct ir3_shader *shader,
                 struct ir3_shader_variant *so)
{
   MESA_TRACE_FUNC();

   struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

   if (compiler->gen == 4) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->astc_srgb = so->key.vastc_srgb;
         memcpy(ctx->sampler_swizzles, so->key.vsampler_swizzles, sizeof(ctx->sampler_swizzles));
      } else if (so->type == MESA_SHADER_FRAGMENT ||
            so->type == MESA_SHADER_COMPUTE) {
         ctx->astc_srgb = so->key.fastc_srgb;
         memcpy(ctx->sampler_swizzles, so->key.fsampler_swizzles, sizeof(ctx->sampler_swizzles));
      }
   } else if (compiler->gen == 3) {
      if (so->type == MESA_SHADER_VERTEX) {
         ctx->samples = so->key.vsamples;
      } else if (so->type == MESA_SHADER_FRAGMENT) {
         ctx->samples = so->key.fsamples;
      }
   }

   if (compiler->gen >= 6) {
      ctx->funcs = &ir3_a6xx_funcs;
   } else if (compiler->gen >= 4) {
      ctx->funcs = &ir3_a4xx_funcs;
   }

   ctx->compiler = compiler;
   ctx->so = so;
   ctx->def_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->block_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->continue_block_ht =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->sel_cond_conversions =
      _mesa_hash_table_create(ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);

   /* TODO: maybe generate some sort of bitmask of what the key
    * lowers vs. what the shader has (ie. no need to run texture
    * clamp lowering if there are no texture sample instrs)..
    * although this should be done further up the stack to avoid
    * creating duplicate variants..
    */

   ctx->s = nir_shader_clone(ctx, shader->nir);
   ir3_nir_lower_variant(so, ctx->s);

   bool progress = false;
   bool needs_late_alg = false;

   /* We want to lower nir_op_imul as late as possible, to also catch
    * those generated by earlier passes (e.g.,
    * nir_lower_locals_to_regs).  However, we want a final swing of a
    * few passes to have a chance at optimizing the result.
    */
   NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
   while (progress) {
      progress = false;
      NIR_PASS(progress, ctx->s, nir_opt_algebraic);
      NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
      NIR_PASS(progress, ctx->s, nir_opt_dce);
      NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
      needs_late_alg = true;
   }

   /* nir_opt_algebraic() above would have unfused our ffmas; re-fuse them. */
   if (needs_late_alg) {
      NIR_PASS(progress, ctx->s, nir_opt_algebraic_late);
      NIR_PASS(progress, ctx->s, nir_opt_dce);
   }

   /* Enable the texture pre-fetch feature only from a4xx onwards, and
    * only on generations where it has been tested:
    */
   if ((so->type == MESA_SHADER_FRAGMENT) && compiler->has_fs_tex_prefetch)
      NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

   NIR_PASS(progress, ctx->s, nir_lower_phis_to_scalar, true);

   /* Super crude heuristic to limit # of tex prefetch in small
    * shaders.  This completely ignores loops.. but that's really
    * not the worst of its problems.  (A frag shader that has
    * loops is probably going to be big enough to not trigger a
    * lower threshold.)
    *
    *   1) probably want to do this in terms of ir3 instructions
    *   2) probably really want to decide this after scheduling
    *      (or at least pre-RA sched) so we have a rough idea about
    *      nops, and don't count things that get cp'd away
    *   3) blob seems to use higher thresholds with a mix of more
    *      SFU instructions.  Which partly makes sense: more SFU
    *      instructions probably means you want to get the real
    *      shader started sooner, but that depends on where in the
    *      shader the SFU instructions are, which blob doesn't seem
    *      to consider.
    *
    * This uses more conservative thresholds assuming a more ALU-
    * than SFU-heavy instruction mix.
    */
   if (so->type == MESA_SHADER_FRAGMENT) {
      nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);

      unsigned instruction_count = 0;
      nir_foreach_block (block, fxn) {
         instruction_count += exec_list_length(&block->instr_list);
      }

      if (instruction_count < 50) {
         ctx->prefetch_limit = 2;
      } else if (instruction_count < 70) {
         ctx->prefetch_limit = 3;
      } else {
         ctx->prefetch_limit = IR3_MAX_SAMPLER_PREFETCH;
      }
   }

   if (shader_debug_enabled(so->type, ctx->s->info.internal)) {
      mesa_logi("NIR (final form) for %s shader %s:", ir3_shader_stage(so),
                so->name);
      nir_log_shaderi(ctx->s);
   }

   ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

   return ctx;
}

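/* ctx is the ralloc parent of the cloned NIR and the lookup tables
 * created above, so freeing it releases them as well:
 */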
void
ir3_context_free(struct ir3_context *ctx)
{
   ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n element value array (to be populated by caller) and
 * insert in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_def *dst, unsigned n)
{
   struct ir3_instruction **value =
      ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
   _mesa_hash_table_insert(ctx->def_ht, dst, value);
   return value;
}

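/* Like ir3_get_dst_ssa(), but also records the array as the pending
 * destination (ctx->last_dst), to be finalized by ir3_put_def():
 */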
struct ir3_instruction **
ir3_get_def(struct ir3_context *ctx, nir_def *def, unsigned n)
{
   struct ir3_instruction **value = ir3_get_dst_ssa(ctx, def, n);

   compile_assert(ctx, !ctx->last_dst);
   ctx->last_dst = value;
   ctx->last_dst_n = n;

   return value;
}

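/* Look up the value array previously registered for an ssa src: */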
struct ir3_instruction *const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
   struct hash_entry *entry;
   entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
   compile_assert(ctx, entry);
   return entry->data;
}

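/* Finalize the pending destination from ir3_get_def(), inserting movs
 * for values in shared regs and fixing up types for 16-bit values:
 */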
void
ir3_put_def(struct ir3_context *ctx, nir_def *def)
{
   unsigned bit_size = ir3_bitsize(ctx, def->bit_size);

   /* Add an extra mov if the dst value is a shared reg.  Not all
    * instructions can read from shared regs; in cases where they can,
    * ir3_cp will clean up the extra mov:
    */
   for (unsigned i = 0; i < ctx->last_dst_n; i++) {
      if (!ctx->last_dst[i])
         continue;
      if (ctx->last_dst[i]->dsts[0]->flags & IR3_REG_SHARED) {
         ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);
      }
   }

   if (bit_size <= 16) {
      for (unsigned i = 0; i < ctx->last_dst_n; i++) {
         struct ir3_instruction *dst = ctx->last_dst[i];
         ir3_set_dst_type(dst, true);
         ir3_fixup_src_type(dst);
         if (dst->opc == OPC_META_SPLIT) {
            ir3_set_dst_type(ssa(dst->srcs[0]), true);
            ir3_fixup_src_type(ssa(dst->srcs[0]));
            dst->srcs[0]->flags |= IR3_REG_HALF;
         }
      }
   }

   ctx->last_dst = NULL;
   ctx->last_dst_n = 0;
}

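/* dst flags (half/shared) that must match across collect/split elements: */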
static unsigned
dest_flags(struct ir3_instruction *instr)
{
   return instr->dsts[0]->flags & (IR3_REG_HALF | IR3_REG_SHARED);
}

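/* Gather 'arrsz' scalar ssa values into a single vector destination: */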
struct ir3_instruction *
ir3_create_collect(struct ir3_block *block, struct ir3_instruction *const *arr,
                   unsigned arrsz)
{
   struct ir3_instruction *collect;

   if (arrsz == 0)
      return NULL;

   unsigned flags = dest_flags(arr[0]);

   collect = ir3_instr_create(block, OPC_META_COLLECT, 1, arrsz);
   __ssa_dst(collect)->flags |= flags;
   for (unsigned i = 0; i < arrsz; i++) {
      struct ir3_instruction *elem = arr[i];

      /* Since arrays are pre-colored in RA, we can't assume that
       * things will end up in the right place.  (Ie. if a collect
       * joins elements from two different arrays.)  So insert an
       * extra mov.
       *
       * We could possibly skip this if all the collected elements
       * are contiguous elements in a single array.. not sure how
       * likely that is to happen.
       *
       * Fixes a problem with glamor shaders, which in effect do
       * something like:
       *
       *   if (foo)
       *     texcoord = ..
       *   else
       *     texcoord = ..
       *   color = texture2D(tex, texcoord);
       *
       * In this case, texcoord will end up as nir registers (which
       * translate to ir3 arrays of length 1), and we can't assume
       * the two (or more) arrays will get allocated in consecutive
       * scalar registers.
       */
      if (elem->dsts[0]->flags & IR3_REG_ARRAY) {
         type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
         elem = ir3_MOV(block, elem, type);
      }

      assert(dest_flags(elem) == flags);
      __ssa_src(collect, elem, flags);
   }

   collect->dsts[0]->wrmask = MASK(arrsz);

   return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs which need to have a split meta instruction inserted
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
               struct ir3_instruction *src, unsigned base, unsigned n)
{
   if ((n == 1) && (src->dsts[0]->wrmask == 0x1) &&
       /* setup_input needs ir3_split_dest to generate a SPLIT instruction */
       src->opc != OPC_META_INPUT) {
      dst[0] = src;
      return;
   }

   if (src->opc == OPC_META_COLLECT) {
      assert((base + n) <= src->srcs_count);

      for (int i = 0; i < n; i++) {
         dst[i] = ssa(src->srcs[i + base]);
      }

      return;
   }

   unsigned flags = dest_flags(src);

   for (int i = 0, j = 0; i < n; i++) {
      struct ir3_instruction *split =
         ir3_instr_create(block, OPC_META_SPLIT, 1, 1);
      __ssa_dst(split)->flags |= flags;
      __ssa_src(split, src, flags);
      split->split.off = i + base;

      if (src->dsts[0]->wrmask & (1 << (i + base)))
         dst[j++] = split;
   }
}

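/* Log a compile error, annotated with the offending instruction when
 * one is known, and abort the compile:
 */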
NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
   struct hash_table *errors = NULL;
   va_list ap;
   va_start(ap, format);
   if (ctx->cur_instr) {
      errors = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
      char *msg = ralloc_vasprintf(errors, format, ap);
      _mesa_hash_table_insert(errors, ctx->cur_instr, msg);
   } else {
      mesa_loge_v(format, ap);
   }
   va_end(ap);
   nir_log_shader_annotated(ctx->s, errors);
   ralloc_free(errors);
   ctx->error = true;
   unreachable("");
}

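/* Write the a0.x address register from 'src', pre-scaled by 'align': */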
static struct ir3_instruction *
create_addr0(struct ir3_block *block, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *instr, *immed;

   instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);

   switch (align) {
   case 1:
      /* src *= 1: */
      break;
   case 2:
      /* src *= 2 => src <<= 1: */
      immed = create_immed_typed(block, 1, TYPE_S16);
      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      break;
   case 3:
      /* src *= 3: */
      immed = create_immed_typed(block, 3, TYPE_S16);
      instr = ir3_MULL_U(block, instr, 0, immed, 0);
      break;
   case 4:
      /* src *= 4 => src <<= 2: */
      immed = create_immed_typed(block, 2, TYPE_S16);
      instr = ir3_SHL_B(block, instr, 0, immed, 0);
      break;
   default:
      unreachable("bad align");
      return NULL;
   }

   instr->dsts[0]->flags |= IR3_REG_HALF;

   instr = ir3_MOV(block, instr, TYPE_S16);
   instr->dsts[0]->num = regid(REG_A0, 0);

   return instr;
}

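/* Write an immediate value into the second address register (a1.x): */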
static struct ir3_instruction *
create_addr1(struct ir3_block *block, unsigned const_val)
{
   struct ir3_instruction *immed =
      create_immed_typed(block, const_val, TYPE_U16);
   struct ir3_instruction *instr = ir3_MOV(block, immed, TYPE_U16);
   instr->dsts[0]->num = regid(REG_A0, 1);
   return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR-level src as address
 */
struct ir3_instruction *
ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
   struct ir3_instruction *addr;
   unsigned idx = align - 1;

   compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));

   if (!ctx->addr0_ht[idx]) {
      ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
   } else {
      struct hash_entry *entry;
      entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
      if (entry)
         return entry->data;
   }

   addr = create_addr0(ctx->block, src, align);
   _mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);

   return addr;
}

/* Similar to ir3_get_addr0, but for a1.x. */
struct ir3_instruction *
ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
{
   struct ir3_instruction *addr;

   if (!ctx->addr1_ht) {
      ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
   } else {
      addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
      if (addr)
         return addr;
   }

   addr = create_addr1(ctx->block, const_val);
   _mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);

   return addr;
}

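/* Materialize a boolean value in the predicate register p0.x, by
 * comparing src against zero:
 */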
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
   struct ir3_block *b = ctx->block;
   struct ir3_instruction *cond;

   /* NOTE: only cmps.*.* can write p0.x: */
   struct ir3_instruction *zero =
         create_immed_typed(b, 0, is_half(src) ? TYPE_U16 : TYPE_U32);
   cond = ir3_CMPS_S(b, src, 0, zero, 0);
   cond->cat2.condition = IR3_COND_NE;

   /* condition always goes in predicate register: */
   cond->dsts[0]->num = regid(REG_P0, 0);
   cond->dsts[0]->flags &= ~IR3_REG_SSA;

   return cond;
}

/*
 * Array helpers
 */

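/* Create the ir3_array backing a NIR register declaration: */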
void
ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl)
{
   struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
   arr->id = ++ctx->num_arrays;
   /* NOTE: sometimes we get non-array regs, for example for arrays of
    * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
    * treat a non-array as if it were an array of length 1.
    *
    * It would be nice if there were a nir pass to convert arrays of
    * length 1 to ssa.
    */
   arr->length = nir_intrinsic_num_components(decl) *
                 MAX2(1, nir_intrinsic_num_array_elems(decl));

   compile_assert(ctx, arr->length > 0);
   arr->r = &decl->def;
   arr->half = ir3_bitsize(ctx, nir_intrinsic_bit_size(decl)) <= 16;
   list_addtail(&arr->node, &ctx->ir->array_list);
}

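/* Find the ir3_array for a given NIR register def: */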
struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_def *reg)
{
   foreach_array (arr, &ctx->ir->array_list) {
      if (arr->r == reg)
         return arr;
   }
   ir3_context_error(ctx, "bogus reg: r%d\n", reg->index);
   return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
                      struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *src;
   unsigned flags = 0;

   mov = ir3_instr_create(block, OPC_MOV, 1, 1);
   if (arr->half) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }

   mov->barrier_class = IR3_BARRIER_ARRAY_R;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
   __ssa_dst(mov)->flags |= flags;
   src = ir3_src_create(mov, 0,
                        IR3_REG_ARRAY | COND(address, IR3_REG_RELATIV) | flags);
   src->def = (arr->last_write && arr->last_write->instr->block == block)
                 ? arr->last_write
                 : NULL;
   src->size = arr->length;
   src->array.id = arr->id;
   src->array.offset = n;
   src->array.base = INVALID_REG;

   if (address)
      ir3_instr_set_address(mov, address);

   return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
                       struct ir3_instruction *src,
                       struct ir3_instruction *address)
{
   struct ir3_block *block = ctx->block;
   struct ir3_instruction *mov;
   struct ir3_register *dst;
   unsigned flags = 0;

   mov = ir3_instr_create(block, OPC_MOV, 1, 1);
   if (arr->half) {
      mov->cat1.src_type = TYPE_U16;
      mov->cat1.dst_type = TYPE_U16;
      flags |= IR3_REG_HALF;
   } else {
      mov->cat1.src_type = TYPE_U32;
      mov->cat1.dst_type = TYPE_U32;
   }
   mov->barrier_class = IR3_BARRIER_ARRAY_W;
   mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
   dst = ir3_dst_create(
      mov, 0,
      IR3_REG_SSA | IR3_REG_ARRAY | flags | COND(address, IR3_REG_RELATIV));
   dst->instr = mov;
   dst->size = arr->length;
   dst->array.id = arr->id;
   dst->array.offset = n;
   dst->array.base = INVALID_REG;
   ir3_src_create(mov, 0, IR3_REG_SSA | flags)->def = src->dsts[0];

   if (arr->last_write && arr->last_write->instr->block == block)
      ir3_reg_set_last_array(mov, dst, arr->last_write);

   if (address)
      ir3_instr_set_address(mov, address);

   arr->last_write = dst;

   /* the array store may only matter to something in an earlier
    * block (ie. loops), but since arrays are not in SSA, the depth
    * pass won't know this.. so keep all array stores:
    */
   array_insert(block, block->keeps, mov);
}