1 /*
2  * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Rob Clark <robclark@freedesktop.org>
25  */
26 
27 #include "util/u_math.h"
28 #include "util/register_allocate.h"
29 #include "util/ralloc.h"
30 #include "util/bitset.h"
31 
32 #include "ir3.h"
33 #include "ir3_shader.h"
34 #include "ir3_ra.h"
35 
36 
37 #ifdef DEBUG
38 #define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
39 #else
40 #define RA_DEBUG 0
41 #endif
42 #define d(fmt, ...) do { if (RA_DEBUG) { \
43 	printf("RA: "fmt"\n", ##__VA_ARGS__); \
44 } } while (0)
45 
46 #define di(instr, fmt, ...) do { if (RA_DEBUG) { \
47 	printf("RA: "fmt": ", ##__VA_ARGS__); \
48 	ir3_print_instr(instr); \
49 } } while (0)
50 
51 /*
52  * Register Assignment:
53  *
54  * Uses the register_allocate util, which implements a graph coloring
55  * algo with interference classes.  To handle the cases where we need
56  * consecutive registers (for example, texture sample instructions),
57  * we model these as larger (double/quad/etc) registers which conflict
58  * with the corresponding registers in other classes.
59  *
60  * We also create additional classes for half-regs, which
61  * do not conflict with the full-reg classes.  We do need at least
62  * sizes 1-4 (to deal w/ texture sample instructions that output to
63  * half-reg).  At the moment we don't create the higher order half-reg
64  * classes as half-reg frequently does not have enough precision
65  * for texture coords at higher resolutions.
66  *
67  * There are some additional cases that we need to handle specially,
68  * as the graph coloring algo doesn't understand "partial writes".
69  * For example, a sequence like:
70  *
71  *   add r0.z, ...
72  *   sam (f32)(xy)r0.x, ...
73  *   ...
74  *   sam (f32)(xyzw)r0.w, r0.x, ...  ; 3d texture, so r0.xyz are coord
75  *
76  * In this scenario, we treat r0.xyz as class size 3, which is written
77  * (from a use/def perspective) at the 'add' instruction, and we ignore
78  * the subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
79  * defining instruction, as it is the first to partially write r0.xyz.
80  *
81  * To address the fragmentation that this can potentially cause, a
82  * two pass register allocation is used.  After the first pass the
83  * assignment of scalars is discarded, but the assignment of vecN (for
84  * N > 1) is used to pre-color in the second pass, which considers
85  * only scalars.
86  *
87  * Arrays of arbitrary size are handled via pre-coloring a consecutive
88  * sequence of registers.  Additional scalar (single component) reg
89  * names are allocated starting at ctx->class_base[total_class_count]
90  * (see arr->base), which are pre-colored.  In the use/def graph direct
91  * access is treated as a single element use/def, and indirect access
92  * is treated as use or def of all array elements.  (Only the first
93  * def is tracked, in case of multiple indirect writes, etc.)
94  *
95  * TODO arrays that fit in one of the pre-defined class sizes should
96  * not need to be pre-colored, but instead could be given a normal
97  * vreg name.  (Ignoring this for now since it is a good way to work
98  * out the kinks with arbitrary sized arrays.)
99  *
100  * TODO might be easier for debugging to split this into two passes,
101  * the first assigning vreg names in a way that we could ir3_print()
102  * the result.
103  */
104 
105 
106 static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);
107 
108 static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
109 static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);
110 
111 /* does it conflict? */
112 static inline bool
113 intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
114 {
115 	return !((a_start >= b_end) || (b_start >= a_end));
116 }
117 
118 static bool
119 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
120 {
121 	if (a->flags & IR3_INSTR_UNUSED)
122 		return false;
123 	return (a->ip < b->ip);
124 }
125 
126 static struct ir3_instruction *
127 get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
128 		int *sz, int *off)
129 {
130 	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
131 	struct ir3_instruction *d = NULL;
132 
133 	if (ctx->scalar_pass) {
134 		id->defn = instr;
135 		id->off = 0;
136 		id->sz = 1;     /* considering things as N scalar regs now */
137 	}
138 
139 	if (id->defn) {
140 		*sz = id->sz;
141 		*off = id->off;
142 		return id->defn;
143 	}
144 
145 	if (instr->opc == OPC_META_COLLECT) {
146 		/* What about the case where the collect is a subset of an array?
147 		 * We'd need to find the distance between where the actual array
148 		 * starts and the collect..  that probably doesn't happen currently.
149 		 */
150 		int dsz, doff;
151 
152 		/* note: don't use foreach_ssa_src as this gets called once
153 		 * while assigning regs (which clears SSA flag)
154 		 */
155 		foreach_src_n (src, n, instr) {
156 			struct ir3_instruction *dd;
157 			if (!src->instr)
158 				continue;
159 
160 			dd = get_definer(ctx, src->instr, &dsz, &doff);
161 
162 			if ((!d) || instr_before(dd, d)) {
163 				d = dd;
164 				*sz = dsz;
165 				*off = doff - n;
166 			}
167 		}
168 
169 	} else if (instr->cp.right || instr->cp.left) {
170 		/* covers also the meta:fo case, which ends up w/ single
171 		 * scalar instructions for each component:
172 		 */
173 		struct ir3_instruction *f = ir3_neighbor_first(instr);
174 
175 		/* by definition, the entire sequence forms one linked list
176 		 * of single scalar register nodes (even if some of them may
177 		 * be splits from, for example, a texture sample instr).  We
178 		 * just need to walk the list to find the first element of
179 		 * the group defined (lowest ip)
180 		 */
181 		int cnt = 0;
182 
183 		/* need to skip over unused in the group: */
184 		while (f && (f->flags & IR3_INSTR_UNUSED)) {
185 			f = f->cp.right;
186 			cnt++;
187 		}
188 
189 		while (f) {
190 			if ((!d) || instr_before(f, d))
191 				d = f;
192 			if (f == instr)
193 				*off = cnt;
194 			f = f->cp.right;
195 			cnt++;
196 		}
197 
198 		*sz = cnt;
199 
200 	} else {
201 		/* second case is looking directly at the instruction which
202 		 * produces multiple values (eg, texture sample), rather
203 		 * than the split nodes that point back to that instruction.
204 		 * This isn't quite right, because it may be part of a larger
205 		 * group, such as:
206 		 *
207 		 *     sam (f32)(xyzw)r0.x, ...
208 		 *     add r1.x, ...
209 		 *     add r1.y, ...
210 		 *     sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
211 		 *
212 		 * need to come up with a better way to handle that case.
213 		 */
214 		if (instr->address) {
215 			*sz = instr->regs[0]->size;
216 		} else {
217 			*sz = util_last_bit(instr->regs[0]->wrmask);
218 		}
219 		*off = 0;
220 		d = instr;
221 	}
222 
223 	if (d->opc == OPC_META_SPLIT) {
224 		struct ir3_instruction *dd;
225 		int dsz, doff;
226 
227 		dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
228 
229 		/* by definition, should come before: */
230 		ra_assert(ctx, instr_before(dd, d));
231 
232 		*sz = MAX2(*sz, dsz);
233 
234 		if (instr->opc == OPC_META_SPLIT)
235 			*off = MAX2(*off, instr->split.off);
236 
237 		d = dd;
238 	}
239 
240 	ra_assert(ctx, d->opc != OPC_META_SPLIT);
241 
242 	id->defn = d;
243 	id->sz = *sz;
244 	id->off = *off;
245 
246 	return d;
247 }
248 
249 static void
250 ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
251 {
252 	foreach_instr (instr, &block->instr_list) {
253 		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
254 		if (instr->regs_count == 0)
255 			continue;
256 		/* couple special cases: */
257 		if (writes_addr0(instr) || writes_addr1(instr) || writes_pred(instr)) {
258 			id->cls = -1;
259 		} else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
260 			id->cls = total_class_count;
261 		} else {
262 			/* and the normal case: */
263 			id->defn = get_definer(ctx, instr, &id->sz, &id->off);
264 			id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
265 
266 			/* this is a bit of duct-tape.. if we have a scenario like:
267 			 *
268 			 *   sam (f32)(x) out.x, ...
269 			 *   sam (f32)(x) out.y, ...
270 			 *
271 			 * Then the fanout/split meta instructions for the two different
272 			 * tex instructions end up grouped as left/right neighbors.  The
273 			 * upshot is that when you get_definer() on one of the meta:fo's
274 			 * you get the first sam as the definer with sz=2, but when you
275 			 * call get_definer() on either of the sam's you get the sam
276 			 * itself as the definer with sz=1.
277 			 *
278 			 * (We actually avoid exactly this scenario; the neighbor links
279 			 * prevent one of the output mov's from being eliminated, so this
280 			 * hack should be enough.  But probably we need to rethink how we
281 			 * find the "defining" instruction.)
282 			 *
283 			 * TODO how do we figure out offset properly...
284 			 */
285 			if (id->defn != instr) {
286 				struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
287 				if (did->sz < id->sz) {
288 					did->sz = id->sz;
289 					did->cls = id->cls;
290 				}
291 			}
292 		}
293 	}
294 }
295 
296 /* give each instruction a name (and ip), and count up the # of names
297  * of each class
298  */
299 static void
300 ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
301 {
302 	foreach_instr (instr, &block->instr_list) {
303 		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
304 
305 #ifdef DEBUG
306 		instr->name = ~0;
307 #endif
308 
309 		ctx->instr_cnt++;
310 
311 		if (!writes_gpr(instr))
312 			continue;
313 
314 		if (id->defn != instr)
315 			continue;
316 
317 		/* In scalar pass, collect/split don't get their own names,
318 		 * but instead inherit them from their src(s):
319 		 *
320 		 * Possibly we don't need this because of scalar_name(), but
321 		 * it does make the ir3_print() dumps easier to read.
322 		 */
323 		if (ctx->scalar_pass) {
324 			if (instr->opc == OPC_META_SPLIT) {
325 				instr->name = instr->regs[1]->instr->name + instr->split.off;
326 				continue;
327 			}
328 
329 			if (instr->opc == OPC_META_COLLECT) {
330 				instr->name = instr->regs[1]->instr->name;
331 				continue;
332 			}
333 		}
334 
335 		/* arrays which don't fit in one of the pre-defined class
336 		 * sizes are pre-colored:
337 		 */
338 		if ((id->cls >= 0) && (id->cls < total_class_count)) {
339 			/* in the scalar pass, we generate a name for each
340 			 * scalar component; instr->name is the name of the
341 			 * first component.
342 			 */
343 			unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
344 			instr->name = ctx->class_alloc_count[id->cls];
345 			ctx->class_alloc_count[id->cls] += n;
346 			ctx->alloc_count += n;
347 		}
348 	}
349 }
350 
351 /**
352  * Set a value for max register target.
353  *
354  * Currently this just rounds up to a multiple of full-vec4 (ie. the
355  * granularity that we configure the hw for.. there is no point to
356  * using r3.x if you aren't going to make r3.yzw available).  But
357  * in reality there seem to be multiple thresholds that affect the
358  * number of waves.. and we should round up the target to the next
359  * threshold when we round-robin registers, to give postsched more
360  * options.  When we understand that better, this is where we'd
361  * implement that.
362  */
363 static void
364 ra_set_register_target(struct ir3_ra_ctx *ctx, unsigned max_target)
365 {
366 	const unsigned hvec4 = 4;
367 	const unsigned vec4 = 2 * hvec4;
368 
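	/* max_target is in units of half-regs, so vec4 (== 8) is one full
	 * vec4's worth of registers; e.g. a max_target of 10 rounds up to 16:
	 */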
369 	ctx->max_target = align(max_target, vec4);
370 
371 	d("New max_target=%u", ctx->max_target);
372 }
373 
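/* Scan the available-regs bitset for the first free register in the
 * inclusive range [min, max], returning -1 if none is free:
 */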
374 static int
375 pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
376 {
377 	for (unsigned i = min; i <= max; i++) {
378 		if (BITSET_TEST(regs, i)) {
379 			return i;
380 		}
381 	}
382 	return -1;
383 }
384 
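/* Same, but scan from the top of the range downwards (used in the
 * vector pass to push scalar values towards higher registers):
 */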
385 static int
386 pick_in_range_rev(BITSET_WORD *regs, int min, int max)
387 {
388 	for (int i = max; i >= min; i--) {
389 		if (BITSET_TEST(regs, i)) {
390 			return i;
391 		}
392 	}
393 	return -1;
394 }
395 
396 /* register selector for the a6xx+ merged register file: */
397 static unsigned int
398 ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
399 {
400 	struct ir3_ra_ctx *ctx = data;
401 	unsigned int class = ra_get_node_class(ctx->g, n);
402 	bool half, high;
403 	int sz = ra_class_to_size(class, &half, &high);
404 
405 	assert (sz > 0);
406 
407 	/* dimensions within the register class: */
408 	unsigned max_target, start;
409 
410 	/* the regs bitset will include *all* of the virtual regs, but we lay
411 	 * out the different classes consecutively in the virtual register
412 	 * space.  So we just need to think about the base offset of a given
413 	 * class within the virtual register space, and offset the register
414 	 * space we search within by that base offset.
415 	 */
416 	unsigned base;
417 
418 	/* TODO I think eventually we want to round-robin in vector pass
419 	 * as well, but needs some more work to calculate # of live vals
420 	 * for this.  (Maybe with some work, we could just figure out
421 	 * the scalar target and use that, since that is what we care
422 	 * about in the end.. but that would mean setting up use-def/
423 	 * liveranges for scalar pass before doing vector pass.)
424 	 *
425 	 * For now, in the vector class, just move assignments for scalar
426 	 * vals higher to hopefully prevent them from limiting where vecN
427 	 * values can be placed.  Since the scalar values are re-assigned
428 	 * in the 2nd pass, we don't really care where they end up in the
429 	 * vector pass.
430 	 */
431 	if (!ctx->scalar_pass) {
432 		base = ctx->set->gpr_to_ra_reg[class][0];
433 		if (high) {
434 			max_target = HIGH_CLASS_REGS(class - HIGH_OFFSET);
435 		} else if (half) {
436 			max_target = HALF_CLASS_REGS(class - HALF_OFFSET);
437 		} else {
438 			max_target = CLASS_REGS(class);
439 		}
440 
441 		if ((sz == 1) && !high) {
442 			return pick_in_range_rev(regs, base, base + max_target);
443 		} else {
444 			return pick_in_range(regs, base, base + max_target);
445 		}
446 	} else {
447 		ra_assert(ctx, sz == 1);
448 	}
449 
450 	/* NOTE: this is only used in scalar pass, so the register
451 	 * class will be one of the scalar classes (ie. idx==0):
452 	 */
453 	base = ctx->set->gpr_to_ra_reg[class][0];
454 	if (high) {
455 		max_target = HIGH_CLASS_REGS(0);
456 		start = 0;
457 	} else if (half) {
458 		max_target = ctx->max_target;
459 		start = ctx->start_search_reg;
460 	} else {
461 		max_target = ctx->max_target / 2;
462 		start = ctx->start_search_reg;
463 	}
464 
465 	/* For cat4 instructions, if the src reg is already assigned and
466 	 * avail to pick, use it.  This doesn't introduce unnecessary
467 	 * dependencies, and it potentially avoids needing (ss) syncs
468 	 * for write after read hazards:
469 	 */
470 	struct ir3_instruction *instr = name_to_instr(ctx, n);
471 	if (is_sfu(instr)) {
472 		struct ir3_register *src = instr->regs[1];
473 		int src_n;
474 
475 		if ((src->flags & IR3_REG_ARRAY) && !(src->flags & IR3_REG_RELATIV)) {
476 			struct ir3_array *arr = ir3_lookup_array(ctx->ir, src->array.id);
477 			src_n = arr->base + src->array.offset;
478 		} else {
479 			src_n = scalar_name(ctx, src->instr, 0);
480 		}
481 
482 		unsigned reg = ra_get_node_reg(ctx->g, src_n);
483 
484 		/* Check if the src register has been assigned yet: */
485 		if (reg != NO_REG) {
486 			if (BITSET_TEST(regs, reg)) {
487 				return reg;
488 			}
489 		}
490 	}
491 
492 	int r = pick_in_range(regs, base + start, base + max_target);
493 	if (r < 0) {
494 		/* wrap-around: */
495 		r = pick_in_range(regs, base, base + start);
496 	}
497 
498 	if (r < 0) {
499 		/* overflow, we need to increase max_target: */
500 		ra_set_register_target(ctx, ctx->max_target + 1);
501 		return ra_select_reg_merged(n, regs, data);
502 	}
503 
504 	if (class == ctx->set->half_classes[0]) {
505 		int n = r - base;
506 		ctx->start_search_reg = (n + 1) % ctx->max_target;
507 	} else if (class == ctx->set->classes[0]) {
508 		int n = (r - base) * 2;
509 		ctx->start_search_reg = (n + 1) % ctx->max_target;
510 	}
511 
512 	return r;
513 }
514 
515 static void
516 ra_init(struct ir3_ra_ctx *ctx)
517 {
518 	unsigned n, base;
519 
520 	ir3_clear_mark(ctx->ir);
521 	n = ir3_count_instructions_ra(ctx->ir);
522 
523 	ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
524 
525 	foreach_block (block, &ctx->ir->block_list) {
526 		ra_block_find_definers(ctx, block);
527 	}
528 
529 	foreach_block (block, &ctx->ir->block_list) {
530 		ra_block_name_instructions(ctx, block);
531 	}
532 
533 	/* figure out the base register name for each class.  The
534 	 * actual ra name is class_base[cls] + instr->name;
535 	 */
536 	ctx->class_base[0] = 0;
537 	for (unsigned i = 1; i <= total_class_count; i++) {
538 		ctx->class_base[i] = ctx->class_base[i-1] +
539 				ctx->class_alloc_count[i-1];
540 	}
541 
542 	/* and vreg names for array elements: */
543 	base = ctx->class_base[total_class_count];
544 	foreach_array (arr, &ctx->ir->array_list) {
545 		arr->base = base;
546 		ctx->class_alloc_count[total_class_count] += arr->length;
547 		base += arr->length;
548 	}
549 	ctx->alloc_count += ctx->class_alloc_count[total_class_count];
550 
551 	/* Add vreg names for r0.xyz */
552 	ctx->r0_xyz_nodes = ctx->alloc_count;
553 	ctx->alloc_count += 3;
554 	ctx->hr0_xyz_nodes = ctx->alloc_count;
555 	ctx->alloc_count += 3;
556 
557 	/* Add vreg name for prefetch-exclusion range: */
558 	ctx->prefetch_exclude_node = ctx->alloc_count++;
559 
560 	if (RA_DEBUG) {
561 		d("INSTRUCTION VREG NAMES:");
562 		foreach_block (block, &ctx->ir->block_list) {
563 			foreach_instr (instr, &block->instr_list) {
564 				if (!ctx->instrd[instr->ip].defn)
565 					continue;
566 				if (!writes_gpr(instr))
567 					continue;
568 				di(instr, "%04u", scalar_name(ctx, instr, 0));
569 			}
570 		}
571 		d("ARRAY VREG NAMES:");
572 		foreach_array (arr, &ctx->ir->array_list) {
573 			d("%04u: arr%u", arr->base, arr->id);
574 		}
575 		d("EXTRA VREG NAMES:");
576 		d("%04u: r0_xyz_nodes", ctx->r0_xyz_nodes);
577 		d("%04u: hr0_xyz_nodes", ctx->hr0_xyz_nodes);
578 		d("%04u: prefetch_exclude_node", ctx->prefetch_exclude_node);
579 	}
580 
581 	ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
582 	ralloc_steal(ctx->g, ctx->instrd);
583 	ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
584 	ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
585 
586 	/* TODO add selector callback for split (pre-a6xx) register file: */
587 	if (ctx->v->mergedregs) {
588 		ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);
589 
590 		if (ctx->scalar_pass) {
591 			ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
592 					_mesa_hash_int, _mesa_key_int_equal);
593 		}
594 	}
595 }
596 
597 /* Map the name back to instruction: */
598 static struct ir3_instruction *
599 name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
600 {
601 	ra_assert(ctx, !name_is_array(ctx, name));
602 	struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
603 	if (entry)
604 		return entry->data;
605 	ra_unreachable(ctx, "invalid instr name");
606 	return NULL;
607 }
608 
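/* Array elements get vreg names starting at
 * ctx->class_base[total_class_count] (see ra_init()), so anything at or
 * above that base is treated as an array name:
 */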
609 static bool
610 name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
611 {
612 	return name >= ctx->class_base[total_class_count];
613 }
614 
615 static struct ir3_array *
616 name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
617 {
618 	ra_assert(ctx, name_is_array(ctx, name));
619 	foreach_array (arr, &ctx->ir->array_list) {
620 		if (name < (arr->base + arr->length))
621 			return arr;
622 	}
623 	ra_unreachable(ctx, "invalid array name");
624 	return NULL;
625 }
626 
627 static void
628 ra_destroy(struct ir3_ra_ctx *ctx)
629 {
630 	ralloc_free(ctx->g);
631 }
632 
633 static void
634 __def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
635 		struct ir3_instruction *instr)
636 {
637 	ra_assert(ctx, name < ctx->alloc_count);
638 
639 	/* split/collect do not actually define any real value */
640 	if ((instr->opc == OPC_META_SPLIT) || (instr->opc == OPC_META_COLLECT))
641 		return;
642 
643 	/* defined on first write: */
644 	if (!ctx->def[name])
645 		ctx->def[name] = instr->ip;
646 	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
647 	BITSET_SET(bd->def, name);
648 }
649 
650 static void
651 __use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
652 		struct ir3_instruction *instr)
653 {
654 	ra_assert(ctx, name < ctx->alloc_count);
655 	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
656 	if (!BITSET_TEST(bd->def, name))
657 		BITSET_SET(bd->use, name);
658 }
659 
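/* Compute the def/use bitsets and def/use ip ranges for a single block,
 * assigning each name its register class and adding the extra
 * interferences needed for tex-prefetch and wrmask handling:
 */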
660 static void
661 ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
662 {
663 	struct ir3_ra_block_data *bd;
664 	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
665 
666 #define def(name, instr) __def(ctx, bd, name, instr)
667 #define use(name, instr) __use(ctx, bd, name, instr)
668 
669 	bd = rzalloc(ctx->g, struct ir3_ra_block_data);
670 
671 	bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
672 	bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
673 	bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
674 	bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
675 
676 	block->data = bd;
677 
678 	struct ir3_instruction *first_non_input = NULL;
679 	foreach_instr (instr, &block->instr_list) {
680 		if (instr->opc != OPC_META_INPUT) {
681 			first_non_input = instr;
682 			break;
683 		}
684 	}
685 
686 	foreach_instr (instr, &block->instr_list) {
687 		foreach_def (name, ctx, instr) {
688 			if (name_is_array(ctx, name)) {
689 				struct ir3_array *arr = name_to_array(ctx, name);
690 
691 				arr->start_ip = MIN2(arr->start_ip, instr->ip);
692 				arr->end_ip = MAX2(arr->end_ip, instr->ip);
693 
694 				for (unsigned i = 0; i < arr->length; i++) {
695 					unsigned name = arr->base + i;
696 					if (arr->half)
697 						ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
698 					else
699 						ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
700 				}
701 			} else {
702 				struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
703 				if (is_high(instr)) {
704 					ra_set_node_class(ctx->g, name,
705 							ctx->set->high_classes[id->cls - HIGH_OFFSET]);
706 				} else if (is_half(instr)) {
707 					ra_set_node_class(ctx->g, name,
708 							ctx->set->half_classes[id->cls - HALF_OFFSET]);
709 				} else {
710 					ra_set_node_class(ctx->g, name,
711 							ctx->set->classes[id->cls]);
712 				}
713 			}
714 
715 			def(name, instr);
716 
717 			if ((instr->opc == OPC_META_INPUT) && first_non_input)
718 				use(name, first_non_input);
719 
720 			/* Texture instructions with writemasks can be treated as smaller
721 			 * vectors (or just scalars!) to allocate knowing that the
722 			 * masked-out regs won't be written, but we need to make sure that
723 			 * the start of the vector doesn't come before the first register
724 			 * or we'll wrap.
725 			 */
726 			if (is_tex_or_prefetch(instr)) {
727 				int writemask_skipped_regs = ffs(instr->regs[0]->wrmask) - 1;
728 				int r0_xyz = is_half(instr) ?
729 					ctx->hr0_xyz_nodes : ctx->r0_xyz_nodes;
730 				for (int i = 0; i < writemask_skipped_regs; i++)
731 					ra_add_node_interference(ctx->g, name, r0_xyz + i);
732 			}
733 
734 			/* Pre-fetched textures have fewer bits available to encode the
735 			 * dst register, so add additional interference with registers
736 			 * above that limit.
737 			 */
738 			if (instr->opc == OPC_META_TEX_PREFETCH) {
739 				ra_add_node_interference(ctx->g, name,
740 						ctx->prefetch_exclude_node);
741 			}
742 		}
743 
744 		foreach_use (name, ctx, instr) {
745 			if (name_is_array(ctx, name)) {
746 				struct ir3_array *arr = name_to_array(ctx, name);
747 
748 				arr->start_ip = MIN2(arr->start_ip, instr->ip);
749 				arr->end_ip = MAX2(arr->end_ip, instr->ip);
750 
751 				/* NOTE: arrays are not SSA so unconditionally
752 				 * set use bit:
753 				 */
754 				BITSET_SET(bd->use, name);
755 			}
756 
757 			use(name, instr);
758 		}
759 
760 		foreach_name (name, ctx, instr) {
761 			/* split/collect instructions have the same names as
762 			 * real instructions, so they skip the hashtable:
763 			 */
764 			if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
765 					(instr->opc == OPC_META_COLLECT))) {
766 				/* this is slightly annoying, we can't just use an
767 				 * integer on the stack
768 				 */
769 				unsigned *key = ralloc(ctx->name_to_instr, unsigned);
770 				*key = name;
771 				ra_assert(ctx, !_mesa_hash_table_search(ctx->name_to_instr, key));
772 				_mesa_hash_table_insert(ctx->name_to_instr, key, instr);
773 			}
774 		}
775 	}
776 }
777 
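/* One round of the livein/liveout dataflow fixpoint; returns true if
 * anything changed, and the caller simply loops until no more progress
 * is made:
 */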
778 static bool
779 ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
780 {
781 	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
782 	bool progress = false;
783 
784 	foreach_block (block, &ctx->ir->block_list) {
785 		struct ir3_ra_block_data *bd = block->data;
786 
787 		/* update livein: */
788 		for (unsigned i = 0; i < bitset_words; i++) {
789 			/* anything used but not def'd within a block is
790 			 * by definition a live value coming into the block:
791 			 */
792 			BITSET_WORD new_livein =
793 				(bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
794 
795 			if (new_livein & ~bd->livein[i]) {
796 				bd->livein[i] |= new_livein;
797 				progress = true;
798 			}
799 		}
800 
801 		/* update liveout: */
802 		for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
803 			struct ir3_block *succ = block->successors[j];
804 			struct ir3_ra_block_data *succ_bd;
805 
806 			if (!succ)
807 				continue;
808 
809 			succ_bd = succ->data;
810 
811 			for (unsigned i = 0; i < bitset_words; i++) {
812 				/* add anything that is livein in a successor block
813 				 * to our liveout:
814 				 */
815 				BITSET_WORD new_liveout =
816 					(succ_bd->livein[i] & ~bd->liveout[i]);
817 
818 				if (new_liveout) {
819 					bd->liveout[i] |= new_liveout;
820 					progress = true;
821 				}
822 			}
823 		}
824 	}
825 
826 	return progress;
827 }
828 
829 static void
830 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
831 {
832 	bool first = true;
833 	debug_printf("RA:  %s:", name);
834 	for (unsigned i = 0; i < cnt; i++) {
835 		if (BITSET_TEST(bs, i)) {
836 			if (!first)
837 				debug_printf(",");
838 			debug_printf(" %04u", i);
839 			first = false;
840 		}
841 	}
842 	debug_printf("\n");
843 }
844 
845 /* size of one component of instruction result, ie. half vs full: */
846 static unsigned
847 live_size(struct ir3_instruction *instr)
848 {
849 	if (is_half(instr)) {
850 		return 1;
851 	} else if (is_high(instr)) {
852 		/* doesn't count towards footprint */
853 		return 0;
854 	} else {
855 		return 2;
856 	}
857 }
858 
859 static unsigned
860 name_size(struct ir3_ra_ctx *ctx, unsigned name)
861 {
862 	if (name_is_array(ctx, name)) {
863 		struct ir3_array *arr = name_to_array(ctx, name);
864 		return arr->half ? 1 : 2;
865 	} else {
866 		struct ir3_instruction *instr = name_to_instr(ctx, name);
867 		/* in scalar pass, each name represents one scalar value,
868 		 * half or full precision
869 		 */
870 		return live_size(instr);
871 	}
872 }
873 
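/* Walk the block's instructions tracking how many half-reg units are
 * live at each point, and return the maximum, which is used to choose
 * the round-robin register target:
 */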
874 static unsigned
875 ra_calc_block_live_values(struct ir3_ra_ctx *ctx, struct ir3_block *block)
876 {
877 	struct ir3_ra_block_data *bd = block->data;
878 	unsigned name;
879 
880 	ra_assert(ctx, ctx->name_to_instr);
881 
882 	/* TODO this gets a bit more complicated in non-scalar pass.. but
883 	 * possibly a lowball estimate is fine to start with if we do
884 	 * round-robin in non-scalar pass?  Maybe we just want to handle
885 	 * that in a different fxn?
886 	 */
887 	ra_assert(ctx, ctx->scalar_pass);
888 
889 	BITSET_WORD *live =
890 		rzalloc_array(bd, BITSET_WORD, BITSET_WORDS(ctx->alloc_count));
891 
892 	/* Add the live input values: */
893 	unsigned livein = 0;
894 	BITSET_FOREACH_SET (name, bd->livein, ctx->alloc_count) {
895 		livein += name_size(ctx, name);
896 		BITSET_SET(live, name);
897 	}
898 
899 	d("---------------------");
900 	d("block%u: LIVEIN: %u", block_id(block), livein);
901 
902 	unsigned max = livein;
903 	int cur_live = max;
904 
905 	/* Now that we know the live inputs to the block, iterate the
906 	 * instructions adjusting the current # of live values as we
907 	 * see their last use:
908 	 */
909 	foreach_instr (instr, &block->instr_list) {
910 		if (RA_DEBUG)
911 			print_bitset("LIVE", live, ctx->alloc_count);
912 		di(instr, "CALC");
913 
914 		unsigned new_live = 0;    /* newly live values */
915 		unsigned new_dead = 0;    /* newly no-longer live values */
916 		unsigned next_dead = 0;   /* newly dead following this instr */
917 
918 		foreach_def (name, ctx, instr) {
919 			/* NOTE: checking ctx->def filters out things like split/
920 			 * collect which are just redefining existing live names
921 			 * or array writes to already live array elements:
922 			 */
923 			if (ctx->def[name] != instr->ip)
924 				continue;
925 			new_live += live_size(instr);
926 			d("NEW_LIVE: %u (new_live=%u, use=%u)", name, new_live, ctx->use[name]);
927 			BITSET_SET(live, name);
928 			/* There can be cases where this is *also* the last use
929 			 * of a value, for example instructions that write multiple
930 			 * values, only some of which are used.  These values are
931 			 * dead *after* (rather than during) this instruction.
932 			 */
933 			if (ctx->use[name] != instr->ip)
934 				continue;
935 			next_dead += live_size(instr);
936 			d("NEXT_DEAD: %u (next_dead=%u)", name, next_dead);
937 			BITSET_CLEAR(live, name);
938 		}
939 
940 		/* To be more resilient against special cases where liverange
941 		 * is extended (like first_non_input), rather than using the
942 		 * foreach_use() iterator, we iterate the current live values
943 		 * instead:
944 		 */
945 		BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
946 			/* Is this the last use? */
947 			if (ctx->use[name] != instr->ip)
948 				continue;
949 			new_dead += name_size(ctx, name);
950 			d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
951 			BITSET_CLEAR(live, name);
952 		}
953 
954 		cur_live += new_live;
955 		cur_live -= new_dead;
956 
957 		ra_assert(ctx, cur_live >= 0);
958 		d("CUR_LIVE: %u", cur_live);
959 
960 		max = MAX2(max, cur_live);
961 
962 		/* account for written values which are not used later,
963 		 * but after updating max (since they are live for one
964 		 * cycle)
965 		 */
966 		cur_live -= next_dead;
967 		ra_assert(ctx, cur_live >= 0);
968 
969 		if (RA_DEBUG) {
970 			unsigned cnt = 0;
971 			BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
972 				cnt += name_size(ctx, name);
973 			}
974 			ra_assert(ctx, cur_live == cnt);
975 		}
976 	}
977 
978 	d("block%u max=%u", block_id(block), max);
979 
980 	/* the remaining live should match liveout (for extra sanity testing): */
981 	if (RA_DEBUG) {
982 		unsigned new_dead = 0;
983 		BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
984 			/* Is this the last use? */
985 			if (ctx->use[name] != block->end_ip)
986 				continue;
987 			new_dead += name_size(ctx, name);
988 			d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
989 			BITSET_CLEAR(live, name);
990 		}
991 		unsigned liveout = 0;
992 		BITSET_FOREACH_SET (name, bd->liveout, ctx->alloc_count) {
993 			liveout += name_size(ctx, name);
994 			BITSET_CLEAR(live, name);
995 		}
996 
997 		if (cur_live != liveout) {
998 			print_bitset("LEAKED", live, ctx->alloc_count);
999 			/* TODO there are a few edge cases where live-range extension
1000 			 * tells us a value is livein, but not used by the block or
1001 			 * liveout for the block.  Possibly a bug in the liverange
1002 			 * extension.  But for now leave the assert disabled:
1003 			ra_assert(ctx, cur_live == liveout);
1004 			 */
1005 		}
1006 	}
1007 
1008 	ralloc_free(live);
1009 
1010 	return max;
1011 }
1012 
1013 static unsigned
1014 ra_calc_max_live_values(struct ir3_ra_ctx *ctx)
1015 {
1016 	unsigned max = 0;
1017 
1018 	foreach_block (block, &ctx->ir->block_list) {
1019 		unsigned block_live = ra_calc_block_live_values(ctx, block);
1020 		max = MAX2(max, block_live);
1021 	}
1022 
1023 	return max;
1024 }
1025 
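/* Build the global live ranges (per-name def..use ip, extended across
 * block boundaries via livein/liveout) and add an interference edge
 * between every pair of names whose ranges overlap:
 */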
1026 static void
1027 ra_add_interference(struct ir3_ra_ctx *ctx)
1028 {
1029 	struct ir3 *ir = ctx->ir;
1030 
1031 	/* initialize array live ranges: */
1032 	foreach_array (arr, &ir->array_list) {
1033 		arr->start_ip = ~0;
1034 		arr->end_ip = 0;
1035 	}
1036 
1037 	/* set up the r0.xyz precolor regs. */
1038 	for (int i = 0; i < 3; i++) {
1039 		ra_set_node_reg(ctx->g, ctx->r0_xyz_nodes + i, i);
1040 		ra_set_node_reg(ctx->g, ctx->hr0_xyz_nodes + i,
1041 				ctx->set->first_half_reg + i);
1042 	}
1043 
1044 	/* pre-color the node that conflicts with half/full regs higher than what
1045 	 * can be encoded for tex-prefetch:
1046 	 */
1047 	ra_set_node_reg(ctx->g, ctx->prefetch_exclude_node,
1048 			ctx->set->prefetch_exclude_reg);
1049 
1050 	/* compute live ranges (use/def) on a block level, also updating
1051 	 * block's def/use bitmasks (used below to calculate per-block
1052 	 * livein/liveout):
1053 	 */
1054 	foreach_block (block, &ir->block_list) {
1055 		ra_block_compute_live_ranges(ctx, block);
1056 	}
1057 
1058 	/* update per-block livein/liveout: */
1059 	while (ra_compute_livein_liveout(ctx)) {}
1060 
1061 	if (RA_DEBUG) {
1062 		d("AFTER LIVEIN/OUT:");
1063 		foreach_block (block, &ir->block_list) {
1064 			struct ir3_ra_block_data *bd = block->data;
1065 			d("block%u:", block_id(block));
1066 			print_bitset("  def", bd->def, ctx->alloc_count);
1067 			print_bitset("  use", bd->use, ctx->alloc_count);
1068 			print_bitset("  l/i", bd->livein, ctx->alloc_count);
1069 			print_bitset("  l/o", bd->liveout, ctx->alloc_count);
1070 		}
1071 		foreach_array (arr, &ir->array_list) {
1072 			d("array%u:", arr->id);
1073 			d("   length:   %u", arr->length);
1074 			d("   start_ip: %u", arr->start_ip);
1075 			d("   end_ip:   %u", arr->end_ip);
1076 		}
1077 	}
1078 
1079 	/* extend start/end ranges based on livein/liveout info from cfg: */
1080 	foreach_block (block, &ir->block_list) {
1081 		struct ir3_ra_block_data *bd = block->data;
1082 
1083 		for (unsigned i = 0; i < ctx->alloc_count; i++) {
1084 			if (BITSET_TEST(bd->livein, i)) {
1085 				ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
1086 				ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
1087 			}
1088 
1089 			if (BITSET_TEST(bd->liveout, i)) {
1090 				ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
1091 				ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
1092 			}
1093 		}
1094 
1095 		foreach_array (arr, &ctx->ir->array_list) {
1096 			for (unsigned i = 0; i < arr->length; i++) {
1097 				if (BITSET_TEST(bd->livein, i + arr->base)) {
1098 					arr->start_ip = MIN2(arr->start_ip, block->start_ip);
1099 				}
1100 				if (BITSET_TEST(bd->liveout, i + arr->base)) {
1101 					arr->end_ip = MAX2(arr->end_ip, block->end_ip);
1102 				}
1103 			}
1104 		}
1105 	}
1106 
1107 	if (ctx->name_to_instr) {
1108 		unsigned max = ra_calc_max_live_values(ctx);
1109 		ra_set_register_target(ctx, max);
1110 	}
1111 
1112 	for (unsigned i = 0; i < ctx->alloc_count; i++) {
1113 		for (unsigned j = 0; j < ctx->alloc_count; j++) {
1114 			if (intersects(ctx->def[i], ctx->use[i],
1115 					ctx->def[j], ctx->use[j])) {
1116 				ra_add_node_interference(ctx->g, i, j);
1117 			}
1118 		}
1119 	}
1120 }
1121 
1122 /* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
1123  * array access(es) which do not have any previous access to depend
1124  * on from a scheduling point of view
1125  */
1126 static void
1127 reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
1128 		struct ir3_instruction *instr)
1129 {
1130 	struct ir3_ra_instr_data *id;
1131 
1132 	if (reg->flags & IR3_REG_ARRAY) {
1133 		struct ir3_array *arr =
1134 			ir3_lookup_array(ctx->ir, reg->array.id);
1135 		unsigned name = arr->base + reg->array.offset;
1136 		unsigned r = ra_get_node_reg(ctx->g, name);
1137 		unsigned num = ctx->set->ra_reg_to_gpr[r];
1138 
1139 		if (reg->flags & IR3_REG_RELATIV) {
1140 			reg->array.offset = num;
1141 		} else {
1142 			reg->num = num;
1143 			reg->flags &= ~IR3_REG_SSA;
1144 		}
1145 
1146 		reg->flags &= ~IR3_REG_ARRAY;
1147 	} else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
1148 		unsigned first_component = 0;
1149 
1150 		/* Special case for tex instructions, which may use the wrmask
1151 		 * to mask off the first component(s).  In the scalar pass,
1152 		 * this means the masked off component(s) are not def'd/use'd,
1153 		 * so we get a bogus value when we ask the register_allocate
1154 		 * algo to get the assigned reg for the unused/untouched
1155 		 * component.  So we need to consider the first used component:
1156 		 */
1157 		if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
1158 			unsigned n = ffs(id->defn->regs[0]->wrmask);
1159 			ra_assert(ctx, n > 0);
1160 			first_component = n - 1;
1161 		}
1162 
1163 		unsigned name = scalar_name(ctx, id->defn, first_component);
1164 		unsigned r = ra_get_node_reg(ctx->g, name);
1165 		unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
1166 
1167 		ra_assert(ctx, !(reg->flags & IR3_REG_RELATIV));
1168 
1169 		ra_assert(ctx, num >= first_component);
1170 
1171 		if (is_high(id->defn))
1172 			num += FIRST_HIGH_REG;
1173 
1174 		reg->num = num - first_component;
1175 
1176 		reg->flags &= ~IR3_REG_SSA;
1177 
1178 		if (is_half(id->defn))
1179 			reg->flags |= IR3_REG_HALF;
1180 	}
1181 }
1182 
1183 /* helper to determine which regs to assign in which pass: */
1184 static bool
1185 should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1186 {
1187 	if ((instr->opc == OPC_META_SPLIT) &&
1188 			(util_bitcount(instr->regs[1]->wrmask) > 1))
1189 		return !ctx->scalar_pass;
1190 	if ((instr->opc == OPC_META_COLLECT) &&
1191 			(util_bitcount(instr->regs[0]->wrmask) > 1))
1192 		return !ctx->scalar_pass;
1193 	return ctx->scalar_pass;
1194 }
1195 
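/* Rewrite the dst/src registers of each instruction in the block with
 * the register chosen by the allocator, for the values handled by the
 * current pass:
 */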
1196 static void
1197 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
1198 {
1199 	foreach_instr (instr, &block->instr_list) {
1200 
1201 		if (writes_gpr(instr)) {
1202 			if (should_assign(ctx, instr)) {
1203 				reg_assign(ctx, instr->regs[0], instr);
1204 			}
1205 		}
1206 
1207 		foreach_src_n (reg, n, instr) {
1208 			struct ir3_instruction *src = reg->instr;
1209 
1210 			if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
1211 				continue;
1212 
1213 			if (src && should_assign(ctx, instr))
1214 				reg_assign(ctx, src->regs[0], src);
1215 
1216 			/* Note: reg->instr could be null for IR3_REG_ARRAY */
1217 			if (src || (reg->flags & IR3_REG_ARRAY))
1218 				reg_assign(ctx, instr->regs[n+1], src);
1219 		}
1220 	}
1221 
1222 	/* We need to pre-color outputs for the scalar pass in
1223 	 * ra_precolor_assigned(), so we need to actually assign
1224 	 * them in the first pass:
1225 	 */
1226 	if (!ctx->scalar_pass) {
1227 		foreach_input (in, ctx->ir) {
1228 			reg_assign(ctx, in->regs[0], in);
1229 		}
1230 		foreach_output (out, ctx->ir) {
1231 			reg_assign(ctx, out->regs[0], out);
1232 		}
1233 	}
1234 }
1235 
1236 static void
1237 assign_arr_base(struct ir3_ra_ctx *ctx, struct ir3_array *arr,
1238 		struct ir3_instruction **precolor, unsigned nprecolor)
1239 {
1240 	/* In the mergedregs case, we convert full precision arrays
1241 	 * to their effective half-precision base, and find conflicts
1242 	 * amongst all other arrays/inputs.
1243 	 *
1244 	 * In the splitregs case (halfreg file and fullreg file do
1245 	 * not conflict), we ignore arrays and other pre-colors that
1246 	 * are not the same precision.
1247 	 */
1248 	bool mergedregs = ctx->v->mergedregs;
1249 	unsigned base = 0;
1250 
1251 	/* figure out what else we conflict with which has already
1252 	 * been assigned:
1253 	 */
1254 retry:
1255 	foreach_array (arr2, &ctx->ir->array_list) {
1256 		if (arr2 == arr)
1257 			break;
1258 		ra_assert(ctx, arr2->start_ip <= arr2->end_ip);
1259 
1260 		unsigned base2 = arr2->reg;
1261 		unsigned len2  = arr2->length;
1262 		unsigned len   = arr->length;
1263 
1264 		if (mergedregs) {
1265 			/* convert into half-reg space: */
1266 			if (!arr2->half) {
1267 				base2 *= 2;
1268 				len2  *= 2;
1269 			}
1270 			if (!arr->half) {
1271 				len   *= 2;
1272 			}
1273 		} else if (arr2->half != arr->half) {
1274 			/* for split-register-file mode, we only conflict with
1275 			 * other arrays of same precision:
1276 			 */
1277 			continue;
1278 		}
1279 
1280 		/* if it intersects with liverange AND register range.. */
1281 		if (intersects(arr->start_ip, arr->end_ip,
1282 				arr2->start_ip, arr2->end_ip) &&
1283 			intersects(base, base + len,
1284 				base2, base2 + len2)) {
1285 			base = MAX2(base, base2 + len2);
1286 			goto retry;
1287 		}
1288 	}
1289 
1290 	/* also need to not conflict with any pre-assigned inputs: */
1291 	for (unsigned i = 0; i < nprecolor; i++) {
1292 		struct ir3_instruction *instr = precolor[i];
1293 
1294 		if (!instr || (instr->flags & IR3_INSTR_UNUSED))
1295 			continue;
1296 
1297 		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1298 
1299 		/* only consider the first component: */
1300 		if (id->off > 0)
1301 			continue;
1302 
1303 		unsigned name   = ra_name(ctx, id);
1304 		unsigned regid  = instr->regs[0]->num;
1305 		unsigned reglen = class_sizes[id->cls];
1306 		unsigned len    = arr->length;
1307 
1308 		if (mergedregs) {
1309 			/* convert into half-reg space: */
1310 			if (!is_half(instr)) {
1311 				regid  *= 2;
1312 				reglen *= 2;
1313 			}
1314 			if (!arr->half) {
1315 				len   *= 2;
1316 			}
1317 		} else if (is_half(instr) != arr->half) {
1318 			/* for split-register-file mode, we only conflict with
1319 			 * other arrays of same precision:
1320 			 */
1321 			continue;
1322 		}
1323 
1324 		/* Check if array intersects with liverange AND register
1325 		 * range of the input:
1326 		 */
1327 		if (intersects(arr->start_ip, arr->end_ip,
1328 						ctx->def[name], ctx->use[name]) &&
1329 				intersects(base, base + len,
1330 						regid, regid + reglen)) {
1331 			base = MAX2(base, regid + reglen);
1332 			goto retry;
1333 		}
1334 	}
1335 
1336 	/* convert back from half-reg space to fullreg space: */
1337 	if (mergedregs && !arr->half) {
1338 		base = DIV_ROUND_UP(base, 2);
1339 	}
1340 
1341 	arr->reg = base;
1342 }
1343 
1344 /* handle pre-colored registers.  This includes "arrays" (which could be of
1345  * length 1, used for phi webs lowered to registers in nir), as well as
1346  * special shader input values that need to be pinned to certain registers.
1347  */
1348 static void
1349 ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
1350 {
1351 	for (unsigned i = 0; i < nprecolor; i++) {
1352 		if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
1353 			struct ir3_instruction *instr = precolor[i];
1354 
1355 			if (instr->regs[0]->num == INVALID_REG)
1356 				continue;
1357 
1358 			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1359 
1360 			ra_assert(ctx, !(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
1361 
1362 			/* 'base' is in scalar (class 0) but we need to map that to
1363 			 * the conflicting register of the appropriate class (ie.
1364 			 * input could be vec2/vec3/etc)
1365 			 *
1366 			 * Note that the higher class (larger than scalar) regs
1367 			 * are setup to conflict with others in the same class,
1368 			 * so for example, R1 (scalar) is also the first component
1369 			 * of D1 (vec2/double):
1370 			 *
1371 			 *    Single (base) |  Double
1372 			 *    --------------+---------------
1373 			 *       R0         |  D0
1374 			 *       R1         |  D0 D1
1375 			 *       R2         |     D1 D2
1376 			 *       R3         |        D2
1377 			 *           .. and so on..
1378 			 */
1379 			unsigned regid = instr->regs[0]->num;
1380 			ra_assert(ctx, regid >= id->off);
1381 			regid -= id->off;
1382 
1383 			unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1384 			unsigned name = ra_name(ctx, id);
1385 			ra_set_node_reg(ctx->g, name, reg);
1386 		}
1387 	}
1388 
1389 	/*
1390 	 * Pre-assign array elements:
1391 	 */
1392 	foreach_array (arr, &ctx->ir->array_list) {
1393 
1394 		if (arr->end_ip == 0)
1395 			continue;
1396 
1397 		if (!ctx->scalar_pass)
1398 			assign_arr_base(ctx, arr, precolor, nprecolor);
1399 
1400 		for (unsigned i = 0; i < arr->length; i++) {
1401 			unsigned cls = arr->half ? HALF_OFFSET : 0;
1402 
1403 			ra_set_node_reg(ctx->g,
1404 					arr->base + i,   /* vreg name */
1405 					ctx->set->gpr_to_ra_reg[cls][arr->reg + i]);
1406 		}
1407 	}
1408 
1409 	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1410 		foreach_array (arr, &ctx->ir->array_list) {
1411 			unsigned first = arr->reg;
1412 			unsigned last  = arr->reg + arr->length - 1;
1413 			debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
1414 					(first >> 2), "xyzw"[first & 0x3],
1415 					(last >> 2), "xyzw"[last & 0x3]);
1416 		}
1417 	}
1418 }
1419 
1420 static void
1421 precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1422 {
1423 	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1424 	unsigned n = dest_regs(instr);
1425 	for (unsigned i = 0; i < n; i++) {
1426 		/* tex instructions actually have a wrmask, and
1427 		 * don't touch masked out components.  So we
1428 		 * shouldn't precolor them:
1429 		 */
1430 		if (is_tex_or_prefetch(instr) &&
1431 				!(instr->regs[0]->wrmask & (1 << i)))
1432 			continue;
1433 
1434 		unsigned name = scalar_name(ctx, instr, i);
1435 		unsigned regid = instr->regs[0]->num + i;
1436 
1437 		if (instr->regs[0]->flags & IR3_REG_HIGH)
1438 			regid -= FIRST_HIGH_REG;
1439 
1440 		unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1441 		ra_set_node_reg(ctx->g, name, vreg);
1442 	}
1443 }
1444 
1445 /* pre-color non-scalar registers based on the registers assigned in previous
1446  * pass.  Do this by looking actually at the fanout instructions.
1447  */
1448 static void
1449 ra_precolor_assigned(struct ir3_ra_ctx *ctx)
1450 {
1451 	ra_assert(ctx, ctx->scalar_pass);
1452 
1453 	foreach_block (block, &ctx->ir->block_list) {
1454 		foreach_instr (instr, &block->instr_list) {
1455 
1456 			if (!writes_gpr(instr))
1457 				continue;
1458 
1459 			if (should_assign(ctx, instr))
1460 				continue;
1461 
1462 			precolor(ctx, instr);
1463 
1464 			foreach_src (src, instr) {
1465 				if (!src->instr)
1466 					continue;
1467 				precolor(ctx, src->instr);
1468 			}
1469 		}
1470 	}
1471 }
1472 
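/* Run the graph coloring allocator and, on success, write the assigned
 * registers back into the instructions:
 */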
1473 static int
1474 ra_alloc(struct ir3_ra_ctx *ctx)
1475 {
1476 	if (!ra_allocate(ctx->g))
1477 		return -1;
1478 
1479 	foreach_block (block, &ctx->ir->block_list) {
1480 		ra_block_alloc(ctx, block);
1481 	}
1482 
1483 	return 0;
1484 }
1485 
1486 /* if we end up with split/collect instructions with non-matching src
1487  * and dest regs, that means something has gone wrong.  Which makes it
1488  * a pretty good sanity check.
1489  */
1490 static void
1491 ra_sanity_check(struct ir3 *ir)
1492 {
1493 	foreach_block (block, &ir->block_list) {
1494 		foreach_instr (instr, &block->instr_list) {
1495 			if (instr->opc == OPC_META_SPLIT) {
1496 				struct ir3_register *dst = instr->regs[0];
1497 				struct ir3_register *src = instr->regs[1];
1498 				debug_assert(dst->num == (src->num + instr->split.off));
1499 			} else if (instr->opc == OPC_META_COLLECT) {
1500 				struct ir3_register *dst = instr->regs[0];
1501 
1502 				foreach_src_n (src, n, instr) {
1503 					debug_assert(dst->num == (src->num - n));
1504 				}
1505 			}
1506 		}
1507 	}
1508 }
1509 
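/* A single allocation pass (vector or scalar).  ra_assert()/
 * ra_unreachable() failures unwind via longjmp back to the setjmp
 * below, so the pass can clean up and return an error:
 */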
1510 static int
1511 ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1512 		unsigned nprecolor, bool scalar_pass)
1513 {
1514 	struct ir3_ra_ctx ctx = {
1515 			.v = v,
1516 			.ir = v->ir,
1517 			.set = v->mergedregs ?
1518 				v->ir->compiler->mergedregs_set : v->ir->compiler->set,
1519 			.scalar_pass = scalar_pass,
1520 	};
1521 	int ret;
1522 
1523 	ret = setjmp(ctx.jmp_env);
1524 	if (ret)
1525 		goto fail;
1526 
1527 	ra_init(&ctx);
1528 	ra_add_interference(&ctx);
1529 	ra_precolor(&ctx, precolor, nprecolor);
1530 	if (scalar_pass)
1531 		ra_precolor_assigned(&ctx);
1532 	ret = ra_alloc(&ctx);
1533 
1534 fail:
1535 	ra_destroy(&ctx);
1536 
1537 	return ret;
1538 }
1539 
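/* Entry point: run two passes, first assigning the vecN (grouped)
 * values, then re-running to assign scalars around the now pre-colored
 * vecN results:
 */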
1540 int
1541 ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1542 		unsigned nprecolor)
1543 {
1544 	int ret;
1545 
1546 	/* First pass, assign the vecN (non-scalar) registers: */
1547 	ret = ir3_ra_pass(v, precolor, nprecolor, false);
1548 	if (ret)
1549 		return ret;
1550 
1551 	ir3_debug_print(v->ir, "AFTER: ir3_ra (1st pass)");
1552 
1553 	/* Second pass, assign the scalar registers: */
1554 	ret = ir3_ra_pass(v, precolor, nprecolor, true);
1555 	if (ret)
1556 		return ret;
1557 
1558 	ir3_debug_print(v->ir, "AFTER: ir3_ra (2nd pass)");
1559 
1560 #ifdef DEBUG
1561 #  define SANITY_CHECK DEBUG
1562 #else
1563 #  define SANITY_CHECK 0
1564 #endif
1565 	if (SANITY_CHECK)
1566 		ra_sanity_check(v->ir);
1567 
1568 	return ret;
1569 }
1570