/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "midgard_quirks.h"

struct phys_reg {
        /* Physical register: 0-31 */
        unsigned reg;

        /* Byte offset into the physical register: 0-15 */
        unsigned offset;

        /* log2(bytes per component) for fast mul/div */
        unsigned shift;
};
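
/* For illustration (not from the original source): a 16-bit value (shift = 1)
 * occupying the upper half of r3 would be described as
 * { .reg = 3, .offset = 8, .shift = 1 }. */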

/* Shift up by reg_offset and horizontally by dst_offset. */

static void
offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcshift, unsigned dstshift, unsigned dst_offset)
{
        unsigned out[MIR_VEC_COMPONENTS];

        signed reg_comp = reg_offset >> srcshift;
        signed dst_comp = dst_offset >> dstshift;

        unsigned max_component = (16 >> srcshift) - 1;

        assert(reg_comp << srcshift == reg_offset);
        assert(dst_comp << dstshift == dst_offset);

        for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                signed comp = MAX2(c - dst_comp, 0);
                out[c] = MIN2(swizzle[comp] + reg_comp, max_component);
        }

        memcpy(swizzle, out, sizeof(out));
}
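
/* Worked example (illustrative): for a 32-bit source (srcshift = 2) that RA
 * placed at byte offset 8 of its register, reg_comp is 2, so an identity
 * swizzle xyzw becomes zwww -- every component is shifted up by two and then
 * clamped to the register's last component. */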

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg, unsigned shift)
{
        struct phys_reg r = {
                .reg = reg,
                .offset = 0,
                .shift = shift
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, unsigned shift)
{
        /* Check for special cases */
        if (reg == ~0)
                return default_phys_reg(REGISTER_UNUSED, shift);
        else if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg), shift);
        else if (!l)
                return default_phys_reg(REGISTER_UNUSED, shift);

        struct phys_reg r = {
                .reg = l->solutions[reg] / 16,
                .offset = l->solutions[reg] & 0xF,
                .shift = shift
        };

        /* Report that we actually use this register, and return it */

        if (r.reg < 16)
                ctx->work_registers = MAX2(ctx->work_registers, r.reg);

        return r;
}
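
/* LCRA solutions are byte addresses into the register file, so dividing by 16
 * (one 128-bit register) selects the register and the low nibble is the byte
 * offset within it. For example (illustrative), a solution of 44 decodes to
 * r2 at byte offset 12, i.e. component .w of a 32-bit value. */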

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        if (node < SSA_FIXED_MINIMUM && class != classes[node]) {
                assert(classes[node] == REG_CLASS_WORK);
                classes[node] = class;
        }
}

/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool ASSERTED
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_LDST:
                return (tag == TAG_LOAD_STORE_4);
        case REG_CLASS_TEXR:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_TEXW:
                return (tag != TAG_LOAD_STORE_4);
        case REG_CLASS_WORK:
                return IS_ALU(tag);
        default:
                unreachable("Invalid class");
        }
}

static bool ASSERTED
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
        /* Non-nodes are implicitly ok */
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        switch (classes[node]) {
        case REG_CLASS_TEXR:
                return true;
        case REG_CLASS_TEXW:
                return (tag == TAG_TEXTURE_4);
        case REG_CLASS_LDST:
        case REG_CLASS_WORK:
                return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
        default:
                unreachable("Invalid class");
        }
}
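
/* Summary of the checks above, derived from the code for reference:
 *
 *    class  readable by              writable by
 *    WORK   ALU                      ALU, load/store
 *    LDST   load/store               ALU, load/store
 *    TEXR   texture                  anything
 *    TEXW   anything but load/store  texture
 */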

/* Prepass before RA to ensure special class restrictions are met. The idea is
 * to create a bit field of types of instructions that read a particular index.
 * Later, we'll add moves as appropriate and rewrite to specialize by type. */

static void
mark_node_class(unsigned *bitfield, unsigned node)
{
        if (node < SSA_FIXED_MINIMUM)
                BITSET_SET(bitfield, node);
}

void
mir_lower_special_reads(compiler_context *ctx)
{
        size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);

        /* Bitfields for the various types of registers we could have. aluw can
         * be written by either ALU or load/store */

        unsigned *alur = calloc(sz, 1);
        unsigned *aluw = calloc(sz, 1);
        unsigned *brar = calloc(sz, 1);
        unsigned *ldst = calloc(sz, 1);
        unsigned *texr = calloc(sz, 1);
        unsigned *texw = calloc(sz, 1);

        /* Pass #1 is analysis, a linear scan to fill out the bitfields */

        mir_foreach_instr_global(ctx, ins) {
                switch (ins->type) {
                case TAG_ALU_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(alur, ins->src[0]);
                        mark_node_class(alur, ins->src[1]);
                        mark_node_class(alur, ins->src[2]);

                        if (ins->compact_branch && ins->writeout)
                                mark_node_class(brar, ins->src[0]);

                        break;

                case TAG_LOAD_STORE_4:
                        mark_node_class(aluw, ins->dest);
                        mark_node_class(ldst, ins->src[0]);
                        mark_node_class(ldst, ins->src[1]);
                        mark_node_class(ldst, ins->src[2]);
                        mark_node_class(ldst, ins->src[3]);
                        break;

                case TAG_TEXTURE_4:
                        mark_node_class(texr, ins->src[0]);
                        mark_node_class(texr, ins->src[1]);
                        mark_node_class(texr, ins->src[2]);
                        mark_node_class(texw, ins->dest);
                        break;
                }
        }

        /* Pass #2 is lowering now that we've analyzed all the classes.
         * Conceptually, if an index is only marked for a single type of use,
         * there is nothing to lower. If it is marked for different uses, we
         * split up based on the number of types of uses. To do so, we divide
         * into N distinct classes of use (where N>1 by definition), emit N-1
         * moves from the index to copies of the index, and finally rewrite N-1
         * of the types of uses to use the corresponding move */
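        /* For instance (illustrative): if a temporary is read by both ALU and
         * load/store instructions, the load/store reads are the special use,
         * so a mov into a fresh temporary is inserted before each such
         * load/store and that instruction is rewritten to read the copy,
         * while the ALU reads keep using the original index. */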

        unsigned spill_idx = ctx->temp_count;

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                bool is_alur = BITSET_TEST(alur, i);
                bool is_aluw = BITSET_TEST(aluw, i);
                bool is_brar = BITSET_TEST(brar, i);
                bool is_ldst = BITSET_TEST(ldst, i);
                bool is_texr = BITSET_TEST(texr, i);
                bool is_texw = BITSET_TEST(texw, i);

                /* Analyse to check how many distinct uses there are. ALU ops
                 * (alur) can read the results of the texture pipeline (texw)
                 * but not ldst or texr. Load/store ops (ldst) cannot read
                 * anything but load/store inputs. Texture pipeline cannot read
                 * anything but texture inputs. TODO: Simplify. */

                bool collision =
                        (is_alur && (is_ldst || is_texr)) ||
                        (is_ldst && (is_alur || is_texr || is_texw)) ||
                        (is_texr && (is_alur || is_ldst || is_texw)) ||
                        (is_texw && (is_aluw || is_ldst || is_texr)) ||
                        (is_brar && is_texw);

                if (!collision)
                        continue;

                /* Use the index as-is as the work copy. Emit copies for
                 * special uses */

                unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4, TAG_ALU_4 };
                bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw, is_brar };

                for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
                        if (!collisions[j]) continue;

                        /* When the hazard is from reading, we move and rewrite
                         * sources (typical case). When it's from writing, we
                         * flip the move and rewrite destinations (obscure,
                         * only from control flow -- impossible in SSA) */

                        bool hazard_write = (j == 2);

                        unsigned idx = spill_idx++;

                        /* Insert move before each read/write, depending on the
                         * hazard we're trying to account for */

                        mir_foreach_instr_global_safe(ctx, pre_use) {
                                if (pre_use->type != classes[j])
                                        continue;

                                if (hazard_write) {
                                        if (pre_use->dest != i)
                                                continue;

                                        midgard_instruction m = v_mov(idx, i);
                                        m.dest_type = pre_use->dest_type;
                                        m.src_types[1] = m.dest_type;
                                        m.mask = pre_use->mask;

                                        midgard_instruction *use = mir_next_op(pre_use);
                                        assert(use);
                                        mir_insert_instruction_before(ctx, use, m);
                                        mir_rewrite_index_dst_single(pre_use, i, idx);
                                } else {
                                        if (!mir_has_arg(pre_use, i))
                                                continue;

                                        idx = spill_idx++;

                                        midgard_instruction m = v_mov(i, idx);
                                        m.mask = mir_from_bytemask(mir_round_bytemask_up(
                                                mir_bytemask_of_read_components(pre_use, i), 32), 32);
                                        mir_insert_instruction_before(ctx, pre_use, m);
                                        mir_rewrite_index_src_single(pre_use, i, idx);
                                }
                        }
                }
        }

        free(alur);
        free(aluw);
        free(brar);
        free(ldst);
        free(texr);
        free(texw);
}

static void
mir_compute_interference(
        compiler_context *ctx,
        struct lcra_state *l)
{
        /* First, we need liveness information to be computed per block */
        mir_compute_liveness(ctx);

        /* We need to force r1.w live throughout a blend shader */

        if (ctx->is_blend) {
                unsigned r1w = ~0;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_rev(block, ins) {
                                if (ins->writeout)
                                        r1w = ins->dest;
                        }

                        if (r1w != ~0)
                                break;
                }

                mir_foreach_instr_global(ctx, ins) {
                        if (ins->dest < ctx->temp_count)
                                lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
                }
        }

        /* Now that every block has live_in/live_out computed, we can determine
         * interference by walking each block linearly. Take live_out at the
         * end of each block and walk the block backwards. */

        mir_foreach_block(ctx, _blk) {
                midgard_block *blk = (midgard_block *) _blk;
                uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));

                mir_foreach_instr_in_block_rev(blk, ins) {
                        /* Mark all registers live after the instruction as
                         * interfering with the destination */

                        unsigned dest = ins->dest;

                        if (dest < ctx->temp_count) {
                                for (unsigned i = 0; i < ctx->temp_count; ++i)
                                        if (live[i]) {
                                                unsigned mask = mir_bytemask(ins);
                                                lcra_add_node_interference(l, dest, mask, i, live[i]);
                                        }
                        }

                        /* Update live_in */
                        mir_liveness_ins_update(live, ins, ctx->temp_count);
                }

                free(live);
        }
}

static bool
mir_is_64(midgard_instruction *ins)
{
        if (nir_alu_type_get_type_size(ins->dest_type) == 64)
                return true;

        mir_foreach_src(ins, v) {
                if (nir_alu_type_get_type_size(ins->src_types[v]) == 64)
                        return true;
        }

        return false;
}

/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

static struct lcra_state *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start and the shader stage. By ABI we limit blend shaders
         * to 8 registers, should be lower XXX */
        int work_count = ctx->is_blend ? 8 :
                16 - MAX2((ctx->uniform_cutoff - 8), 0);

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Initialize LCRA. Allocate an extra node at the end for a precoloured
         * r1 for interference */

        struct lcra_state *l = lcra_alloc_equations(ctx->temp_count + 1, 5);
        unsigned node_r1 = ctx->temp_count;

        /* Starts of classes, in bytes */
        l->class_start[REG_CLASS_WORK] = 16 * 0;
        l->class_start[REG_CLASS_LDST] = 16 * 26;
        l->class_start[REG_CLASS_TEXR] = 16 * 28;
        l->class_start[REG_CLASS_TEXW] = 16 * 28;

        l->class_size[REG_CLASS_WORK] = 16 * work_count;
        l->class_size[REG_CLASS_LDST] = 16 * 2;
        l->class_size[REG_CLASS_TEXR] = 16 * 2;
        l->class_size[REG_CLASS_TEXW] = 16 * 2;

        lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);

        /* To save space on T*20, we don't have real texture registers.
         * Instead, tex inputs reuse the load/store pipeline registers, and
         * tex outputs use work r0/r1. Note we still use TEXR/TEXW classes,
         * noting that this handles interferences and sizes correctly. */

        if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
                l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
                l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
        }
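
        /* Net effect of the layout above, for reference: work registers start
         * at r0 and span work_count vec4 registers, the load/store pipeline
         * registers are r26-r27, and texture registers are r28-r29 -- except
         * with MIDGARD_INTERPIPE_REG_ALIASING, where texture reads share the
         * load/store registers and texture writes land in work r0/r1. */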

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
        unsigned *min_bound = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                /* Swizzles of 32-bit sources on 64-bit instructions need to be
                 * aligned to either bottom (xy) or top (zw). More general
                 * swizzle lowering should happen prior to scheduling (TODO),
                 * but once we get RA we shouldn't disrupt this further. Align
                 * sources of 64-bit instructions. */

                if (ins->type == TAG_ALU_4 && mir_is_64(ins)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];

                                if (s < ctx->temp_count)
                                        min_alignment[s] = 3;
                        }
                }

                if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->op)) {
                        mir_foreach_src(ins, v) {
                                unsigned s = ins->src[v];
                                unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);

                                if (s < ctx->temp_count)
                                        min_alignment[s] = (size == 64) ? 3 : 2;
                        }
                }

                if (ins->dest >= SSA_FIXED_MINIMUM) continue;

                unsigned size = nir_alu_type_get_type_size(ins->dest_type);

                if (ins->is_pack)
                        size = 32;

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int comps1 = util_logbase2(ins->mask);

                int bytes = (comps1 + 1) * (size / 8);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->dest;
                found_class[dest] = MAX2(found_class[dest], bytes);

                min_alignment[dest] =
                        (size == 16) ? 1 : /* (1 << 1) = 2-byte */
                        (size == 32) ? 2 : /* (1 << 2) = 4-byte */
                        (size == 64) ? 3 : /* (1 << 3) = 8-byte */
                        3; /* 8-bit todo */

                /* We can't cross xy/zw boundaries. TODO: vec8 can */
                if (size == 16)
                        min_bound[dest] = 8;

                /* We don't have a swizzle for the conditional and we don't
                 * want to muck with the conditional itself, so just force
                 * alignment for now */

                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->op)) {
                        min_alignment[dest] = 4; /* 1 << 4 = 16-byte = vec4 */

                        /* LCRA assumes bound >= alignment */
                        min_bound[dest] = 16;
                }

                /* Since ld/st swizzles and masks are 32-bit only, we need them
                 * aligned to enable final packing */
                if (ins->type == TAG_LOAD_STORE_4)
                        min_alignment[dest] = MAX2(min_alignment[dest], 2);
        }
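
        /* Worked example (illustrative): a vec3 fp16 destination has mask 0x7,
         * so comps1 = 2 and bytes = 3 * 2 = 6, giving found_class = 6,
         * min_alignment = 1 (2-byte aligned) and min_bound = 8 (it must not
         * straddle the xy/zw halves of the register). */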

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2,
                                   min_bound[i] ? min_bound[i] : 16);
                lcra_restrict_range(l, i, found_class[i]);
        }

        free(found_class);
        free(min_alignment);
        free(min_bound);

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes (TODO) */

        mir_foreach_instr_global(ctx, ins) {
                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        set_class(l->class, ins->src[0], REG_CLASS_LDST);
                        set_class(l->class, ins->src[1], REG_CLASS_LDST);
                        set_class(l->class, ins->src[2], REG_CLASS_LDST);
                        set_class(l->class, ins->src[3], REG_CLASS_LDST);

                        if (OP_IS_VEC4_ONLY(ins->op)) {
                                lcra_restrict_range(l, ins->dest, 16);
                                lcra_restrict_range(l, ins->src[0], 16);
                                lcra_restrict_range(l, ins->src[1], 16);
                                lcra_restrict_range(l, ins->src[2], 16);
                                lcra_restrict_range(l, ins->src[3], 16);
                        }
                } else if (ins->type == TAG_TEXTURE_4) {
                        set_class(l->class, ins->dest, REG_CLASS_TEXW);
                        set_class(l->class, ins->src[0], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[1], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[2], REG_CLASS_TEXR);
                        set_class(l->class, ins->src[3], REG_CLASS_TEXR);
                }
        }

        /* Check that the semantics of the class are respected */
        mir_foreach_instr_global(ctx, ins) {
                assert(check_write_class(l->class, ins->type, ins->dest));
                assert(check_read_class(l->class, ins->type, ins->src[0]));
                assert(check_read_class(l->class, ins->type, ins->src[1]));
                assert(check_read_class(l->class, ins->type, ins->src[2]));
                assert(check_read_class(l->class, ins->type, ins->src[3]));
        }

        /* Mark writeout to r0, depth to r1.x, stencil to r1.y,
         * render target to r1.z, unknown to r1.w */
        mir_foreach_instr_global(ctx, ins) {
                if (!(ins->compact_branch && ins->writeout)) continue;

                if (ins->src[0] < ctx->temp_count)
                        l->solutions[ins->src[0]] = 0;

                if (ins->src[2] < ctx->temp_count)
                        l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_X * 4;

                if (ins->src[3] < ctx->temp_count)
                        l->solutions[ins->src[3]] = (16 * 1) + COMPONENT_Y * 4;

                if (ins->src[1] < ctx->temp_count)
                        l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;

                if (ins->dest < ctx->temp_count)
                        l->solutions[ins->dest] = (16 * 1) + COMPONENT_W * 4;
        }

        /* Destinations of instructions in a writeout block cannot be assigned
         * to r1 unless they are actually used as r1 from the writeout itself,
         * since the writes to r1 are special. A code sequence like:
         *
         *      sadd.fmov r1.x, [...]
         *      vadd.fadd r0, r1, r2
         *      [writeout branch]
         *
         * will misbehave since the r1.x write will be interpreted as a
         * gl_FragDepth write so it won't show up correctly when r1 is read in
         * the following segment. We model this as interference.
         */

        l->solutions[node_r1] = (16 * 1);

        mir_foreach_block(ctx, _blk) {
                midgard_block *blk = (midgard_block *) _blk;

                mir_foreach_bundle_in_block(blk, v) {
                        /* We need at least a writeout and nonwriteout instruction */
                        if (v->instruction_count < 2)
                                continue;

                        /* Branches always come at the end */
                        midgard_instruction *br = v->instructions[v->instruction_count - 1];

                        if (!br->writeout)
                                continue;

                        for (signed i = v->instruction_count - 2; i >= 0; --i) {
                                midgard_instruction *ins = v->instructions[i];

                                if (ins->dest >= ctx->temp_count)
                                        continue;

                                bool used_as_r1 = (br->dest == ins->dest);

                                mir_foreach_src(br, s)
                                        used_as_r1 |= (s > 0) && (br->src[s] == ins->dest);

                                if (!used_as_r1)
                                        lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), node_r1, 0xFFFF);
                        }
                }
        }

        /* Precolour blend input to r0. Note writeout is necessarily at the end
         * and blend shaders are single-RT only so there is only a single
         * writeout block, so this cannot conflict with the writeout r0 (there
         * is no need to have an intermediate move) */

        if (ctx->blend_input != ~0) {
                assert(ctx->blend_input < ctx->temp_count);
                l->solutions[ctx->blend_input] = 0;
        }

        /* Same for the dual-source blend input/output, except here we use r2,
         * which is also set in the fragment shader. */

        if (ctx->blend_src1 != ~0) {
                assert(ctx->blend_src1 < ctx->temp_count);
                l->solutions[ctx->blend_src1] = (16 * 2);
                ctx->work_registers = MAX2(ctx->work_registers, 2);
        }

        mir_compute_interference(ctx, l);

        *spilled = !lcra_solve(l);
        return l;
}

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
        compiler_context *ctx,
        struct lcra_state *l,
        midgard_instruction *ins)
{
        unsigned src_shift[MIR_SRC_COUNT];

        for (unsigned i = 0; i < MIR_SRC_COUNT; ++i) {
                src_shift[i] =
                        util_logbase2(nir_alu_type_get_type_size(ins->src_types[i]) / 8);
        }

        unsigned dest_shift =
                util_logbase2(nir_alu_type_get_type_size(ins->dest_type) / 8);

        switch (ins->type) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16: {
                if (ins->compact_branch)
                        return;

                struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);

                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                unsigned dest_offset =
                        GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props) ? 0 :
                        dest.offset;

                offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);
                if (!ins->has_inline_constant)
                        offset_swizzle(ins->swizzle[1], src2.offset, src2.shift, dest.shift, dest_offset);
                if (ins->src[0] != ~0)
                        ins->src[0] = SSA_FIXED_REGISTER(src1.reg);
                if (ins->src[1] != ~0)
                        ins->src[1] = SSA_FIXED_REGISTER(src2.reg);
                if (ins->dest != ~0)
                        ins->dest = SSA_FIXED_REGISTER(dest.reg);
                break;
        }

        case TAG_LOAD_STORE_4: {
                /* Which physical register we read off depends on
                 * whether we are loading or storing -- think about the
                 * logical dataflow */

                bool encodes_src = OP_IS_STORE(ins->op);

                if (encodes_src) {
                        struct phys_reg src = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                        assert(src.reg == 26 || src.reg == 27);

                        ins->src[0] = SSA_FIXED_REGISTER(src.reg);
                        offset_swizzle(ins->swizzle[0], src.offset, src.shift, 0, 0);
                } else {
                        struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_shift);

                        ins->dest = SSA_FIXED_REGISTER(dst.reg);
                        offset_swizzle(ins->swizzle[0], 0, 2, 2, dst.offset);
                        mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
                }

                /* We also follow up by actual arguments */

                for (int i = 1; i <= 3; i++) {
                        unsigned src_index = ins->src[i];
                        if (src_index != ~0) {
                                struct phys_reg src = index_to_reg(ctx, l, src_index, src_shift[i]);
                                unsigned component = src.offset >> src.shift;
                                assert(component << src.shift == src.offset);
                                ins->src[i] = SSA_FIXED_REGISTER(src.reg);
                                ins->swizzle[i][0] += component;
                        }
                }

                break;
        }

        case TAG_TEXTURE_4: {
                if (ins->op == TEXTURE_OP_BARRIER)
                        break;

                /* Grab RA results */
                struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);
                struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
                struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], src_shift[2]);
                struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_shift[3]);

                /* First, install the texture coordinate */
                if (ins->src[1] != ~0)
                        ins->src[1] = SSA_FIXED_REGISTER(coord.reg);
                offset_swizzle(ins->swizzle[1], coord.offset, coord.shift, dest.shift, 0);

                /* Next, install the destination */
                if (ins->dest != ~0)
                        ins->dest = SSA_FIXED_REGISTER(dest.reg);
                offset_swizzle(ins->swizzle[0], 0, 2, dest.shift,
                               dest_shift == 1 ? dest.offset % 8 :
                               dest.offset);
                mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);

                /* If there is a register LOD/bias, use it */
                if (ins->src[2] != ~0) {
                        assert(!(lod.offset & 3));
                        ins->src[2] = SSA_FIXED_REGISTER(lod.reg);
                        ins->swizzle[2][0] = lod.offset / 4;
                }

                /* If there is an offset register, install it */
                if (ins->src[3] != ~0) {
                        ins->src[3] = SSA_FIXED_REGISTER(offset.reg);
                        ins->swizzle[3][0] = offset.offset / 4;
                }

                break;
        }

        default:
                break;
        }
}

static void
install_registers(compiler_context *ctx, struct lcra_state *l)
{
        mir_foreach_instr_global(ctx, ins)
                install_registers_instr(ctx, l, ins);
}

/* If register allocation fails, find the best spill node */

static signed
mir_choose_spill_node(
        compiler_context *ctx,
        struct lcra_state *l)
{
        /* We can't spill a previously spilled value or an unspill */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->no_spill & (1 << l->spill_class)) {
                        lcra_set_node_spill_cost(l, ins->dest, -1);

                        if (l->spill_class != REG_CLASS_WORK) {
                                mir_foreach_src(ins, s)
                                        lcra_set_node_spill_cost(l, ins->src[s], -1);
                        }
                }
        }

        return lcra_get_best_spill_node(l);
}

/* Once we've chosen a spill node, spill it */

static void
mir_spill_register(
        compiler_context *ctx,
        unsigned spill_node,
        unsigned spill_class,
        unsigned *spill_count)
{
        if (spill_class == REG_CLASS_WORK && ctx->is_blend)
                unreachable("Blend shader spilling is currently unimplemented");

        unsigned spill_index = ctx->temp_count;

        /* We have a spill node, so check the class. Work registers
         * legitimately spill to TLS, but special registers just spill to work
         * registers */

        bool is_special = spill_class != REG_CLASS_WORK;
        bool is_special_w = spill_class == REG_CLASS_TEXW;

        /* Allocate TLS slot (maybe) */
        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;

        /* For TLS, replace all stores to the spilled node. For
         * special reads, just keep as-is; the class will be demoted
         * implicitly. For special writes, spill to a work register */
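        /* Illustration of the TLS path (hypothetical temporaries): a write
         * like "fadd t5, ..." where t5 is the spill node is rewritten to
         * define a fresh index, say "fadd t40, ...", and a scratch store of
         * t40 to the spill slot is inserted right after it, so each
         * definition gets its own short live range. */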

        if (!is_special || is_special_w) {
                if (is_special_w)
                        spill_slot = spill_index++;

                mir_foreach_block(ctx, _block) {
                        midgard_block *block = (midgard_block *) _block;
                        mir_foreach_instr_in_block_safe(block, ins) {
                                if (ins->dest != spill_node) continue;

                                midgard_instruction st;

                                /* Note: it's important to match the mask of the spill
                                 * with the mask of the instruction whose destination
                                 * we're spilling, or otherwise we'll read invalid
                                 * components and can fail RA in a subsequent iteration
                                 */

                                if (is_special_w) {
                                        st = v_mov(spill_node, spill_slot);
                                        st.no_spill |= (1 << spill_class);
                                        st.mask = ins->mask;
                                        st.dest_type = st.src_types[1] = ins->dest_type;
                                } else {
                                        ins->dest = spill_index++;
                                        ins->no_spill |= (1 << spill_class);
                                        st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
                                }

                                /* Hint: don't rewrite this node */
                                st.hint = true;

                                mir_insert_instruction_after_scheduled(ctx, block, ins, st);

                                if (!is_special)
                                        ctx->spills++;
                        }
                }
        }

        /* For special reads, figure out how many bytes we need */
        unsigned read_bytemask = 0;

        mir_foreach_instr_global_safe(ctx, ins) {
                read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
        }

        /* Insert a load from TLS before the first consecutive
         * use of the node, rewriting to use spilled indices to
         * break up the live range. Or, for special, insert a
         * move. Ironically the latter *increases* register
         * pressure, but the two uses of the spilling mechanism
         * are somewhat orthogonal. (special spilling is to use
         * work registers to back special registers; TLS
         * spilling is to use memory to back work registers) */
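        /* Illustration (hypothetical temporaries): a use like
         * "fmul t9, t5, t6" of the spilled t5 gets a scratch load into a
         * fresh temporary, say t41, inserted before it and is rewritten to
         * "fmul t9, t41, t6"; for special spills, a plain mov stands in for
         * the load. */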

        mir_foreach_block(ctx, _block) {
                midgard_block *block = (midgard_block *) _block;
                mir_foreach_instr_in_block(block, ins) {
                        /* We can't rewrite the moves used to spill in the
                         * first place. These moves are hinted. */
                        if (ins->hint) continue;

                        /* If we don't use the spilled value, nothing to do */
                        if (!mir_has_arg(ins, spill_node)) continue;

                        unsigned index = 0;

                        if (!is_special_w) {
                                index = ++spill_index;

                                midgard_instruction *before = ins;
                                midgard_instruction st;

                                if (is_special) {
                                        /* Move */
                                        st = v_mov(spill_node, index);
                                        st.no_spill |= (1 << spill_class);
                                } else {
                                        /* TLS load */
                                        st = v_load_store_scratch(index, spill_slot, false, 0xF);
                                }

                                /* Mask the load based on the component count
                                 * actually needed to prevent RA loops */

                                st.mask = mir_from_bytemask(mir_round_bytemask_up(
                                        read_bytemask, 32), 32);

                                mir_insert_instruction_before_scheduled(ctx, block, before, st);
                        } else {
                                /* Special writes already have their move spilled in */
                                index = spill_slot;
                        }

                        /* Rewrite to use */
                        mir_rewrite_index_src_single(ins, spill_node, index);

                        if (!is_special)
                                ctx->fills++;
                }
        }

        /* Reset hints */

        mir_foreach_instr_global(ctx, ins) {
                ins->hint = false;
        }
}

/* Run register allocation in a loop, spilling until we succeed */

void
mir_ra(compiler_context *ctx)
{
        struct lcra_state *l = NULL;
        bool spilled = false;
        int iter_count = 1000; /* max iterations */

        /* Number of 128-bit slots in memory we've spilled into */
        unsigned spill_count = 0;

        mir_create_pipeline_registers(ctx);

        do {
                if (spilled) {
                        signed spill_node = mir_choose_spill_node(ctx, l);

                        if (spill_node == -1) {
                                fprintf(stderr, "ERROR: Failed to choose spill node\n");
                                lcra_free(l);
                                return;
                        }

                        mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
                }

                mir_squeeze_index(ctx);
                mir_invalidate_liveness(ctx);

                if (l) {
                        lcra_free(l);
                        l = NULL;
                }

                l = allocate_registers(ctx, &spilled);
        } while (spilled && ((iter_count--) > 0));

        if (iter_count <= 0) {
                fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                assert(0);
        }

        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
         * fp32), but tls_size is in bytes, so multiply by 16 */

        ctx->tls_size = spill_count * 16;

        install_registers(ctx, l);

        lcra_free(l);
}