/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3_compiler.h"
#include "ir3_context.h"
#include "ir3_image.h"
#include "ir3_shader.h"
#include "ir3_nir.h"

struct ir3_context *
ir3_context_init(struct ir3_compiler *compiler,
		struct ir3_shader_variant *so)
{
	struct ir3_context *ctx = rzalloc(NULL, struct ir3_context);

	if (compiler->gpu_id >= 400) {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->astc_srgb = so->key.vastc_srgb;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->astc_srgb = so->key.fastc_srgb;
		}

	} else {
		if (so->type == MESA_SHADER_VERTEX) {
			ctx->samples = so->key.vsamples;
		} else if (so->type == MESA_SHADER_FRAGMENT) {
			ctx->samples = so->key.fsamples;
		}
	}

	if (compiler->gpu_id >= 600) {
		ctx->funcs = &ir3_a6xx_funcs;
	} else if (compiler->gpu_id >= 400) {
		ctx->funcs = &ir3_a4xx_funcs;
	}

	ctx->compiler = compiler;
	ctx->so = so;
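	/* Per-variant hash tables used while translating NIR to ir3:
	 * def_ht maps nir_ssa_def's to their arrays of ir3 values (see
	 * ir3_get_dst_ssa()/ir3_get_src() below), block_ht maps NIR blocks
	 * to ir3 blocks, and sel_cond_conversions caches converted
	 * select/bcsel conditions:
	 */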
	ctx->def_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);
	ctx->block_ht = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);
	ctx->sel_cond_conversions = _mesa_hash_table_create(ctx,
			_mesa_hash_pointer, _mesa_key_pointer_equal);

	/* TODO: maybe generate some sort of bitmask of what the key lowers
	 * vs what the shader has (ie. no need to run texture clamp lowering
	 * if there are no texture sample instrs).. although that should be
	 * done further up the stack to avoid creating duplicate variants..
	 */

	ctx->s = nir_shader_clone(ctx, so->shader->nir);
	ir3_nir_lower_variant(so, ctx->s);

	/* this needs to be the last pass run, so do this here instead of
	 * in ir3_optimize_nir():
	 */
	bool progress = false;
	NIR_PASS(progress, ctx->s, nir_lower_locals_to_regs);

	/* we may need cleanup after nir_lower_locals_to_regs: */
	while (progress) {
		progress = false;
		NIR_PASS(progress, ctx->s, nir_opt_algebraic);
		NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
	}

	/* We want to lower nir_op_imul as late as possible, to also catch
	 * those generated by earlier passes (e.g., nir_lower_locals_to_regs).
	 * However, we want a final swing of a few passes to have a chance
	 * at optimizing the result.
	 */
	progress = false;
	NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
	while (progress) {
		progress = false;
		NIR_PASS(progress, ctx->s, nir_opt_algebraic);
		NIR_PASS(progress, ctx->s, nir_opt_copy_prop_vars);
		NIR_PASS(progress, ctx->s, nir_opt_dead_write_vars);
		NIR_PASS(progress, ctx->s, nir_opt_dce);
		NIR_PASS(progress, ctx->s, nir_opt_constant_folding);
	}

	/* Enable the texture pre-fetch feature only on a4xx onwards, and
	 * only on generations where it has been tested:
	 */
	if ((so->type == MESA_SHADER_FRAGMENT) && (compiler->gpu_id >= 600))
		NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

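	/* Only phi webs are lowered to registers here (phi_webs_only=true);
	 * other values stay in SSA form for the ir3 frontend.
	 */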
	NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

	/* Super crude heuristic to limit # of tex prefetch in small
	 * shaders.  This completely ignores loops.. but that's really
	 * not the worst of its problems.  (A frag shader that has
	 * loops is probably going to be big enough to not trigger a
	 * lower threshold.)
	 *
	 *   1) probably want to do this in terms of ir3 instructions
	 *   2) probably really want to decide this after scheduling
	 *      (or at least pre-RA sched) so we have a rough idea about
	 *      nops, and don't count things that get cp'd away
	 *   3) blob seems to use higher thresholds with a mix of more
	 *      SFU instructions.  Which partly makes sense, more SFU
	 *      instructions probably means you want to get the real
	 *      shader started sooner, but that considers where in the
	 *      shader the SFU instructions are, which blob doesn't seem
	 *      to do.
	 *
	 * This uses more conservative thresholds assuming a more ALU-
	 * than SFU-heavy instruction mix.
	 */
	if (so->type == MESA_SHADER_FRAGMENT) {
		nir_function_impl *fxn = nir_shader_get_entrypoint(ctx->s);

		unsigned instruction_count = 0;
		nir_foreach_block (block, fxn) {
			instruction_count += exec_list_length(&block->instr_list);
		}

		if (instruction_count < 50) {
			ctx->prefetch_limit = 2;
		} else if (instruction_count < 70) {
			ctx->prefetch_limit = 3;
		} else {
			ctx->prefetch_limit = IR3_MAX_SAMPLER_PREFETCH;
		}
	}

	if (shader_debug_enabled(so->type)) {
		fprintf(stdout, "NIR (final form) for %s shader %s:\n",
			ir3_shader_stage(so), so->shader->nir->info.name);
		nir_print_shader(ctx->s, stdout);
	}

	ir3_ibo_mapping_init(&so->image_mapping, ctx->s->info.num_textures);

	return ctx;
}

void
ir3_context_free(struct ir3_context *ctx)
{
	ralloc_free(ctx);
}

/*
 * Misc helpers
 */

/* allocate an n-element value array (to be populated by caller) and
 * insert it in def_ht
 */
struct ir3_instruction **
ir3_get_dst_ssa(struct ir3_context *ctx, nir_ssa_def *dst, unsigned n)
{
	struct ir3_instruction **value =
		ralloc_array(ctx->def_ht, struct ir3_instruction *, n);
	_mesa_hash_table_insert(ctx->def_ht, dst, value);
	return value;
}

struct ir3_instruction **
ir3_get_dst(struct ir3_context *ctx, nir_dest *dst, unsigned n)
{
	struct ir3_instruction **value;

	if (dst->is_ssa) {
		value = ir3_get_dst_ssa(ctx, &dst->ssa, n);
	} else {
		value = ralloc_array(ctx, struct ir3_instruction *, n);
	}

	/* NOTE: in the non-SSA case we don't really need to store last_dst,
	 * but this helps us catch cases where the put_dst() call is forgotten
	 */
	compile_assert(ctx, !ctx->last_dst);
	ctx->last_dst = value;
	ctx->last_dst_n = n;

	return value;
}

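/* Look up the ir3 values for a nir_src: SSA srcs come from def_ht,
 * while register srcs are loaded from the backing ir3 array (with an
 * a0 address for indirect access):
 */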
struct ir3_instruction * const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
	if (src->is_ssa) {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->def_ht, src->ssa);
		compile_assert(ctx, entry);
		return entry->data;
	} else {
		nir_register *reg = src->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = arr->r->num_components;
		struct ir3_instruction *addr = NULL;
		struct ir3_instruction **value =
			ralloc_array(ctx, struct ir3_instruction *, num_components);

		if (src->reg.indirect)
			addr = ir3_get_addr0(ctx, ir3_get_src(ctx, src->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = src->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			value[i] = ir3_create_array_load(ctx, arr, n, addr);
		}

		return value;
	}
}

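/* Finish off a destination started with ir3_get_dst(): apply half/HIGH
 * register fixups to the collected values and, for non-SSA dests, store
 * them back into the backing ir3 array:
 */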
void
ir3_put_dst(struct ir3_context *ctx, nir_dest *dst)
{
	unsigned bit_size = nir_dest_bit_size(*dst);

	/* add an extra mov if the dst value is a HIGH reg: some instructions
	 * cannot read from HIGH regs, and in the cases where they can,
	 * ir3_cp will clean up the extra mov:
	 */
	for (unsigned i = 0; i < ctx->last_dst_n; i++) {
		if (!ctx->last_dst[i])
			continue;
		if (ctx->last_dst[i]->regs[0]->flags & IR3_REG_HIGH) {
			ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);
		}
	}

	/* Note: 1-bit bools are stored in 32-bit regs */
	if (bit_size == 16) {
		for (unsigned i = 0; i < ctx->last_dst_n; i++) {
			struct ir3_instruction *dst = ctx->last_dst[i];
			ir3_set_dst_type(dst, true);
			ir3_fixup_src_type(dst);
			if (dst->opc == OPC_META_SPLIT) {
				ir3_set_dst_type(ssa(dst->regs[1]), true);
				ir3_fixup_src_type(ssa(dst->regs[1]));
				dst->regs[1]->flags |= IR3_REG_HALF;
			}
		}
	}

	if (!dst->is_ssa) {
		nir_register *reg = dst->reg.reg;
		struct ir3_array *arr = ir3_get_array(ctx, reg);
		unsigned num_components = ctx->last_dst_n;
		struct ir3_instruction *addr = NULL;

		if (dst->reg.indirect)
			addr = ir3_get_addr0(ctx, ir3_get_src(ctx, dst->reg.indirect)[0],
					reg->num_components);

		for (unsigned i = 0; i < num_components; i++) {
			unsigned n = dst->reg.base_offset * reg->num_components + i;
			compile_assert(ctx, n < arr->length);
			if (!ctx->last_dst[i])
				continue;
			ir3_create_array_store(ctx, arr, n, ctx->last_dst[i], addr);
		}

		ralloc_free(ctx->last_dst);
	}

	ctx->last_dst = NULL;
	ctx->last_dst_n = 0;
}

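/* returns the IR3_REG_HALF/IR3_REG_HIGH flags of an instruction's dest,
 * so they can be propagated to collect/split meta instructions:
 */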
static unsigned
dest_flags(struct ir3_instruction *instr)
{
	return instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH);
}

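/* Create a collect (vector) from 'arrsz' scalar ir3 values.  Returns
 * NULL for a zero-length collect:
 */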
struct ir3_instruction *
ir3_create_collect(struct ir3_context *ctx, struct ir3_instruction *const *arr,
		unsigned arrsz)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *collect;

	if (arrsz == 0)
		return NULL;

	unsigned flags = dest_flags(arr[0]);

	collect = ir3_instr_create2(block, OPC_META_COLLECT, 1 + arrsz);
	__ssa_dst(collect)->flags |= flags;
	for (unsigned i = 0; i < arrsz; i++) {
		struct ir3_instruction *elem = arr[i];

		/* Since arrays are pre-colored in RA, we can't assume that
		 * things will end up in the right place.  (Ie. if a collect
		 * joins elements from two different arrays.)  So insert an
		 * extra mov.
		 *
		 * We could possibly skip this if all the collected elements
		 * are contiguous elements in a single array.. not sure how
		 * likely that is to happen.
		 *
		 * Fixes a problem with glamor shaders, which in effect do
		 * something like:
		 *
		 *   if (foo)
		 *     texcoord = ..
		 *   else
		 *     texcoord = ..
		 *   color = texture2D(tex, texcoord);
		 *
		 * In this case, texcoord will end up as nir registers (which
		 * translate to ir3 arrays of length 1), and we can't assume
		 * the two (or more) arrays will get allocated in consecutive
		 * scalar registers.
		 */
		if (elem->regs[0]->flags & IR3_REG_ARRAY) {
			type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
			elem = ir3_MOV(block, elem, type);
		}

		compile_assert(ctx, dest_flags(elem) == flags);
		__ssa_src(collect, elem, flags);
	}

	collect->regs[0]->wrmask = MASK(arrsz);

	return collect;
}

/* helper for instructions that produce multiple consecutive scalar
 * outputs, which need to have a split meta instruction inserted
 */
void
ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
		struct ir3_instruction *src, unsigned base, unsigned n)
{
	struct ir3_instruction *prev = NULL;

	if ((n == 1) && (src->regs[0]->wrmask == 0x1) &&
		/* setup_input needs ir3_split_dest to generate a SPLIT instruction */
		src->opc != OPC_META_INPUT) {
		dst[0] = src;
		return;
	}

	if (src->opc == OPC_META_COLLECT) {
		debug_assert((base + n) < src->regs_count);

		for (int i = 0; i < n; i++) {
			dst[i] = ssa(src->regs[i + base + 1]);
		}

		return;
	}

	unsigned flags = dest_flags(src);

	for (int i = 0, j = 0; i < n; i++) {
		struct ir3_instruction *split =
				ir3_instr_create(block, OPC_META_SPLIT);
		__ssa_dst(split)->flags |= flags;
		__ssa_src(split, src, flags);
		split->split.off = i + base;

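		/* link consecutive splits of the same src together as
		 * left/right cp "neighbors":
		 */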
		if (prev) {
			split->cp.left = prev;
			split->cp.left_cnt++;
			prev->cp.right = split;
			prev->cp.right_cnt++;
		}
		prev = split;

		if (src->regs[0]->wrmask & (1 << (i + base)))
			dst[j++] = split;
	}
}

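/* Report a fatal compile error.  If we know the offending instruction,
 * the message is attached to it and the NIR is printed annotated;
 * otherwise the message is just printed directly:
 */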
NORETURN void
ir3_context_error(struct ir3_context *ctx, const char *format, ...)
{
	struct hash_table *errors = NULL;
	va_list ap;
	va_start(ap, format);
	if (ctx->cur_instr) {
		errors = _mesa_hash_table_create(NULL,
				_mesa_hash_pointer,
				_mesa_key_pointer_equal);
		char *msg = ralloc_vasprintf(errors, format, ap);
		_mesa_hash_table_insert(errors, ctx->cur_instr, msg);
	} else {
		_debug_vprintf(format, ap);
	}
	va_end(ap);
	nir_print_shader_annotated(ctx->s, stdout, errors);
	ralloc_free(errors);
	ctx->error = true;
	unreachable("");
}

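/* Build an a0.x address value: truncate the 32-bit src to 16 bits,
 * scale it by 'align' components, and mov the result into a0.x:
 */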
static struct ir3_instruction *
create_addr0(struct ir3_block *block, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *instr, *immed;

	instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);

	switch (align) {
	case 1:
		/* src *= 1: */
		break;
	case 2:
		/* src *= 2 => src <<= 1: */
		immed = create_immed_typed(block, 1, TYPE_S16);
		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		break;
	case 3:
		/* src *= 3: */
		immed = create_immed_typed(block, 3, TYPE_S16);
		instr = ir3_MULL_U(block, instr, 0, immed, 0);
		break;
	case 4:
		/* src *= 4 => src <<= 2: */
		immed = create_immed_typed(block, 2, TYPE_S16);
		instr = ir3_SHL_B(block, instr, 0, immed, 0);
		break;
	default:
		unreachable("bad align");
		return NULL;
	}

	instr->regs[0]->flags |= IR3_REG_HALF;

	instr = ir3_MOV(block, instr, TYPE_S16);
	instr->regs[0]->num = regid(REG_A0, 0);
	instr->regs[0]->flags &= ~IR3_REG_SSA;

	return instr;
}

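/* Build an a1.x address value by moving an immediate into a1.x: */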
static struct ir3_instruction *
create_addr1(struct ir3_block *block, unsigned const_val)
{
	struct ir3_instruction *immed = create_immed_typed(block, const_val, TYPE_S16);
	struct ir3_instruction *instr = ir3_MOV(block, immed, TYPE_S16);
	instr->regs[0]->num = regid(REG_A0, 1);
	instr->regs[0]->flags &= ~IR3_REG_SSA;
	return instr;
}

/* caches addr values to avoid generating multiple cov/shl/mova
 * sequences for each use of a given NIR-level src as an address
 */
struct ir3_instruction *
ir3_get_addr0(struct ir3_context *ctx, struct ir3_instruction *src, int align)
{
	struct ir3_instruction *addr;
	unsigned idx = align - 1;

	compile_assert(ctx, idx < ARRAY_SIZE(ctx->addr0_ht));

	if (!ctx->addr0_ht[idx]) {
		ctx->addr0_ht[idx] = _mesa_hash_table_create(ctx,
				_mesa_hash_pointer, _mesa_key_pointer_equal);
	} else {
		struct hash_entry *entry;
		entry = _mesa_hash_table_search(ctx->addr0_ht[idx], src);
		if (entry)
			return entry->data;
	}

	addr = create_addr0(ctx->block, src, align);
	_mesa_hash_table_insert(ctx->addr0_ht[idx], src, addr);

	return addr;
}

/* Similar to ir3_get_addr0, but for a1.x. */
struct ir3_instruction *
ir3_get_addr1(struct ir3_context *ctx, unsigned const_val)
{
	struct ir3_instruction *addr;

	if (!ctx->addr1_ht) {
		ctx->addr1_ht = _mesa_hash_table_u64_create(ctx);
	} else {
		addr = _mesa_hash_table_u64_search(ctx->addr1_ht, const_val);
		if (addr)
			return addr;
	}

	addr = create_addr1(ctx->block, const_val);
	_mesa_hash_table_u64_insert(ctx->addr1_ht, const_val, addr);

	return addr;
}

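/* Materialize a boolean src into the predicate register p0.x, by
 * comparing it against zero:
 */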
struct ir3_instruction *
ir3_get_predicate(struct ir3_context *ctx, struct ir3_instruction *src)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *cond;

	/* NOTE: only cmps.*.* can write p0.x: */
	cond = ir3_CMPS_S(b, src, 0, create_immed(b, 0), 0);
	cond->cat2.condition = IR3_COND_NE;

	/* condition always goes in predicate register: */
	cond->regs[0]->num = regid(REG_P0, 0);
	cond->regs[0]->flags &= ~IR3_REG_SSA;

	return cond;
}

/*
 * Array helpers
 */

void
ir3_declare_array(struct ir3_context *ctx, nir_register *reg)
{
	struct ir3_array *arr = rzalloc(ctx, struct ir3_array);
	arr->id = ++ctx->num_arrays;
	/* NOTE: sometimes we get non-array regs, for example for arrays of
	 * length 1.  See fs-const-array-of-struct-of-array.shader_test.  So
	 * treat a non-array as if it was an array of length 1.
	 *
	 * It would be nice if there were a nir pass to convert arrays of
	 * length 1 to ssa.
	 */
	arr->length = reg->num_components * MAX2(1, reg->num_array_elems);
	compile_assert(ctx, arr->length > 0);
	arr->r = reg;
	arr->half = reg->bit_size <= 16;
	// HACK: one-bit bools still end up as 32b:
	if (reg->bit_size == 1)
		arr->half = false;
	list_addtail(&arr->node, &ctx->ir->array_list);
}

struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
	foreach_array (arr, &ctx->ir->array_list) {
		if (arr->r == reg)
			return arr;
	}
	ir3_context_error(ctx, "bogus reg: %s\n", reg->name);
	return NULL;
}

/* relative (indirect) if address!=NULL */
struct ir3_instruction *
ir3_create_array_load(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *src;
	unsigned flags = 0;

	mov = ir3_instr_create(block, OPC_MOV);
	if (arr->half) {
		mov->cat1.src_type = TYPE_U16;
		mov->cat1.dst_type = TYPE_U16;
		flags |= IR3_REG_HALF;
	} else {
		mov->cat1.src_type = TYPE_U32;
		mov->cat1.dst_type = TYPE_U32;
	}

	mov->barrier_class = IR3_BARRIER_ARRAY_R;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_W;
	__ssa_dst(mov)->flags |= flags;
	src = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			COND(address, IR3_REG_RELATIV) | flags);
	src->instr = arr->last_write;
	src->size  = arr->length;
	src->array.id = arr->id;
	src->array.offset = n;

	if (address)
		ir3_instr_set_address(mov, address);

	return mov;
}

/* relative (indirect) if address!=NULL */
void
ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr, int n,
		struct ir3_instruction *src, struct ir3_instruction *address)
{
	struct ir3_block *block = ctx->block;
	struct ir3_instruction *mov;
	struct ir3_register *dst;
	unsigned flags = 0;

	/* if this is not a relative store, don't create an extra mov, since
	 * that ends up being difficult for cp to remove.
	 *
	 * Also, don't skip the mov if the src is meta (like fanout/split),
	 * since that creates a situation that RA can't really handle properly.
	 */
	if (!address && !is_meta(src)) {
		dst = src->regs[0];

		src->barrier_class |= IR3_BARRIER_ARRAY_W;
		src->barrier_conflict |= IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;

		dst->flags |= IR3_REG_ARRAY;
		dst->instr = arr->last_write;
		dst->size = arr->length;
		dst->array.id = arr->id;
		dst->array.offset = n;

		arr->last_write = src;

		array_insert(block, block->keeps, src);

		return;
	}

	mov = ir3_instr_create(block, OPC_MOV);
	if (arr->half) {
		mov->cat1.src_type = TYPE_U16;
		mov->cat1.dst_type = TYPE_U16;
		flags |= IR3_REG_HALF;
	} else {
		mov->cat1.src_type = TYPE_U32;
		mov->cat1.dst_type = TYPE_U32;
	}
	mov->barrier_class = IR3_BARRIER_ARRAY_W;
	mov->barrier_conflict = IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W;
	dst = ir3_reg_create(mov, 0, IR3_REG_ARRAY |
			flags |
			COND(address, IR3_REG_RELATIV));
	dst->instr = arr->last_write;
	dst->size  = arr->length;
	dst->array.id = arr->id;
	dst->array.offset = n;
	ir3_reg_create(mov, 0, IR3_REG_SSA | flags)->instr = src;

	if (address)
		ir3_instr_set_address(mov, address);

	arr->last_write = mov;

	/* the array store may only matter to something in an earlier
	 * block (ie. loops), but since arrays are not in SSA, the depth
	 * pass won't know this.. so keep all array stores:
	 */
	array_insert(block, block->keeps, mov);
}