1 /*
2  * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3  * Copyright (C) 2019 Collabora, Ltd.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include "compiler.h"
26 #include "midgard_ops.h"
27 #include "util/u_math.h"
28 #include "util/u_memory.h"
29 #include "midgard_quirks.h"
30 
31 struct phys_reg {
32         /* Physical register: 0-31 */
33         unsigned reg;
34 
35         /* Byte offset into the physical register: 0-15 */
36         unsigned offset;
37 
38         /* log2(bytes per component) for fast mul/div */
39         unsigned shift;
40 };
41 
42 /* Shift up by reg_offset and horizontally by dst_offset. */
43 
44 static void
45 offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcshift, unsigned dstshift, unsigned dst_offset)
46 {
47         unsigned out[MIR_VEC_COMPONENTS];
48 
49         signed reg_comp = reg_offset >> srcshift;
50         signed dst_comp = dst_offset >> dstshift;
51 
52         unsigned max_component = (16 >> srcshift) - 1;
53 
54         assert(reg_comp << srcshift == reg_offset);
55         assert(dst_comp << dstshift == dst_offset);
56 
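        /* Each output channel c takes the original swizzle entry at (c - dst_comp),
         * clamped at zero, then shifts it up by reg_comp, clamping to the last
         * component addressable at the source size. */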
57         for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
58                 signed comp = MAX2(c - dst_comp, 0);
59                 out[c] = MIN2(swizzle[comp] + reg_comp, max_component);
60         }
61 
62         memcpy(swizzle, out, sizeof(out));
63 }
64 
65 /* Helper to return the default phys_reg for a given register */
66 
67 static struct phys_reg
68 default_phys_reg(int reg, unsigned shift)
69 {
70         struct phys_reg r = {
71                 .reg = reg,
72                 .offset = 0,
73                 .shift = shift
74         };
75 
76         return r;
77 }
78 
79 /* Determine which physical register, swizzle, and mask a virtual
80  * register corresponds to */
81 
82 static struct phys_reg
83 index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, unsigned shift)
84 {
85         /* Check for special cases */
86         if (reg == ~0)
87                 return default_phys_reg(REGISTER_UNUSED, shift);
88         else if (reg >= SSA_FIXED_MINIMUM)
89                 return default_phys_reg(SSA_REG_FROM_FIXED(reg), shift);
90         else if (!l)
91                 return default_phys_reg(REGISTER_UNUSED, shift);
92 
93         struct phys_reg r = {
94                 .reg = l->solutions[reg] / 16,
95                 .offset = l->solutions[reg] & 0xF,
96                 .shift = shift
97         };
98 
99         /* Report that we actually use this register, and return it */
100 
101         if (r.reg < 16)
102                 ctx->info->work_reg_count = MAX2(ctx->info->work_reg_count, r.reg + 1);
103 
104         return r;
105 }
106 
107 static void
108 set_class(unsigned *classes, unsigned node, unsigned class)
109 {
110         if (node < SSA_FIXED_MINIMUM && class != classes[node]) {
111                 assert(classes[node] == REG_CLASS_WORK);
112                 classes[node] = class;
113         }
114 }
115 
116 /* Special register classes impose special constraints on who can read their
117  * values, so check that */
118 
119 static bool ASSERTED
120 check_read_class(unsigned *classes, unsigned tag, unsigned node)
121 {
122         /* Non-nodes are implicitly ok */
123         if (node >= SSA_FIXED_MINIMUM)
124                 return true;
125 
126         switch (classes[node]) {
127         case REG_CLASS_LDST:
128                 return (tag == TAG_LOAD_STORE_4);
129         case REG_CLASS_TEXR:
130                 return (tag == TAG_TEXTURE_4);
131         case REG_CLASS_TEXW:
132                 return (tag != TAG_LOAD_STORE_4);
133         case REG_CLASS_WORK:
134                 return IS_ALU(tag);
135         default:
136                 unreachable("Invalid class");
137         }
138 }
139 
140 static bool ASSERTED
141 check_write_class(unsigned *classes, unsigned tag, unsigned node)
142 {
143         /* Non-nodes are implicitly ok */
144         if (node >= SSA_FIXED_MINIMUM)
145                 return true;
146 
147         switch (classes[node]) {
148         case REG_CLASS_TEXR:
149                 return true;
150         case REG_CLASS_TEXW:
151                 return (tag == TAG_TEXTURE_4);
152         case REG_CLASS_LDST:
153         case REG_CLASS_WORK:
154                 return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
155         default:
156                 unreachable("Invalid class");
157         }
158 }
159 
160 /* Prepass before RA to ensure special class restrictions are met. The idea is
161  * to create a bit field of types of instructions that read a particular index.
162  * Later, we'll add moves as appropriate and rewrite to specialize by type. */
163 
164 static void
165 mark_node_class (unsigned *bitfield, unsigned node)
166 {
167         if (node < SSA_FIXED_MINIMUM)
168                 BITSET_SET(bitfield, node);
169 }
170 
171 void
172 mir_lower_special_reads(compiler_context *ctx)
173 {
174         size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);
175 
176         /* Bitfields for the various types of registers we could have. aluw can
177          * be written by either ALU or load/store */
178 
179         unsigned *alur = calloc(sz, 1);
180         unsigned *aluw = calloc(sz, 1);
181         unsigned *brar = calloc(sz, 1);
182         unsigned *ldst = calloc(sz, 1);
183         unsigned *texr = calloc(sz, 1);
184         unsigned *texw = calloc(sz, 1);
185 
186         /* Pass #1 is analysis, a linear scan to fill out the bitfields */
187 
188         mir_foreach_instr_global(ctx, ins) {
189                 switch (ins->type) {
190                 case TAG_ALU_4:
191                         mark_node_class(aluw, ins->dest);
192                         mark_node_class(alur, ins->src[0]);
193                         mark_node_class(alur, ins->src[1]);
194                         mark_node_class(alur, ins->src[2]);
195 
196                         if (ins->compact_branch && ins->writeout)
197                                 mark_node_class(brar, ins->src[0]);
198 
199                         break;
200 
201                 case TAG_LOAD_STORE_4:
202                         mark_node_class(aluw, ins->dest);
203                         mark_node_class(ldst, ins->src[0]);
204                         mark_node_class(ldst, ins->src[1]);
205                         mark_node_class(ldst, ins->src[2]);
206                         mark_node_class(ldst, ins->src[3]);
207                         break;
208 
209                 case TAG_TEXTURE_4:
210                         mark_node_class(texr, ins->src[0]);
211                         mark_node_class(texr, ins->src[1]);
212                         mark_node_class(texr, ins->src[2]);
213                         mark_node_class(texw, ins->dest);
214                         break;
215 
216                 default:
217                         break;
218                 }
219         }
220 
221         /* Pass #2 is lowering now that we've analyzed all the classes.
222          * Conceptually, if an index is only marked for a single type of use,
223          * there is nothing to lower. If it is marked for different uses, we
224          * split up based on the number of types of uses. To do so, we divide
225          * into N distinct classes of use (where N>1 by definition), emit N-1
226          * moves from the index to copies of the index, and finally rewrite N-1
227          * of the types of uses to use the corresponding move */
228 
229         unsigned spill_idx = ctx->temp_count;
230 
231         for (unsigned i = 0; i < ctx->temp_count; ++i) {
232                 bool is_alur = BITSET_TEST(alur, i);
233                 bool is_aluw = BITSET_TEST(aluw, i);
234                 bool is_brar = BITSET_TEST(brar, i);
235                 bool is_ldst = BITSET_TEST(ldst, i);
236                 bool is_texr = BITSET_TEST(texr, i);
237                 bool is_texw = BITSET_TEST(texw, i);
238 
239                 /* Analyse to check how many distinct uses there are. ALU ops
240                  * (alur) can read the results of the texture pipeline (texw)
241                  * but not ldst or texr. Load/store ops (ldst) cannot read
242                  * anything but load/store inputs. Texture pipeline cannot read
243                  * anything but texture inputs. TODO: Simplify.  */
244 
245                 bool collision =
246                         (is_alur && (is_ldst || is_texr)) ||
247                         (is_ldst && (is_alur || is_texr || is_texw)) ||
248                         (is_texr && (is_alur || is_ldst || is_texw)) ||
249                         (is_texw && (is_aluw || is_ldst || is_texr)) ||
250                         (is_brar && is_texw);
251 
252                 if (!collision)
253                         continue;
254 
255                 /* Use the index as-is as the work copy. Emit copies for
256                  * special uses */
257 
258                 unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4, TAG_ALU_4};
259                 bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw, is_brar };
260 
261                 for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
262                         if (!collisions[j]) continue;
263 
264                         /* When the hazard is from reading, we move and rewrite
265                          * sources (typical case). When it's from writing, we
266                          * flip the move and rewrite destinations (obscure,
267                          * only from control flow -- impossible in SSA) */
268 
269                         bool hazard_write = (j == 2);
270 
271                         unsigned idx = spill_idx++;
272 
273                         /* Insert move before each read/write, depending on the
274                          * hazard we're trying to account for */
275 
276                         mir_foreach_instr_global_safe(ctx, pre_use) {
277                                 if (pre_use->type != classes[j])
278                                         continue;
279 
280                                 if (hazard_write) {
281                                         if (pre_use->dest != i)
282                                                 continue;
283 
284                                         midgard_instruction m = v_mov(idx, i);
285                                         m.dest_type = pre_use->dest_type;
286                                         m.src_types[1] = m.dest_type;
287                                         m.mask = pre_use->mask;
288 
289                                         midgard_instruction *use = mir_next_op(pre_use);
290                                         assert(use);
291                                         mir_insert_instruction_before(ctx, use, m);
292                                         mir_rewrite_index_dst_single(pre_use, i, idx);
293                                 } else {
294                                         if (!mir_has_arg(pre_use, i))
295                                                 continue;
296 
297                                         idx = spill_idx++;
298 
299                                         midgard_instruction m = v_mov(i, idx);
300                                         m.mask = mir_from_bytemask(mir_round_bytemask_up(
301                                                                 mir_bytemask_of_read_components(pre_use, i), 32), 32);
302                                         mir_insert_instruction_before(ctx, pre_use, m);
303                                         mir_rewrite_index_src_single(pre_use, i, idx);
304                                 }
305                         }
306                 }
307         }
308 
309         free(alur);
310         free(aluw);
311         free(brar);
312         free(ldst);
313         free(texr);
314         free(texw);
315 }
316 
317 static void
318 mir_compute_interference(
319                 compiler_context *ctx,
320                 struct lcra_state *l)
321 {
322         /* First, we need liveness information to be computed per block */
323         mir_compute_liveness(ctx);
324 
325         /* We need to force r1.w live throughout a blend shader */
326 
327         if (ctx->inputs->is_blend) {
328                 unsigned r1w = ~0;
329 
330                 mir_foreach_block(ctx, _block) {
331                         midgard_block *block = (midgard_block *) _block;
332                         mir_foreach_instr_in_block_rev(block, ins) {
333                                 if (ins->writeout)
334                                         r1w = ins->dest;
335                         }
336 
337                         if (r1w != ~0)
338                                 break;
339                 }
340 
341                 mir_foreach_instr_global(ctx, ins) {
342                         if (ins->dest < ctx->temp_count)
343                                 lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
344                 }
345         }
346 
347         /* Now that every block has live_in/live_out computed, we can determine
348          * interference by walking each block linearly. Take live_out at the
349          * end of each block and walk the block backwards. */
350 
351         mir_foreach_block(ctx, _blk) {
352                 midgard_block *blk = (midgard_block *) _blk;
353 
354                 /* The scalar and vector units run in parallel. We need to make
355                  * sure they don't write to the same portion of the register file,
356                  * otherwise the result is undefined. Add interferences to
357                  * avoid this situation.
358                  */
359                 util_dynarray_foreach(&blk->bundles, midgard_bundle, bundle) {
360                         midgard_instruction *instrs[2][4];
361                         unsigned instr_count[2] = { 0, 0 };
362 
363                         for (unsigned i = 0; i < bundle->instruction_count; i++) {
364                                 if (bundle->instructions[i]->unit == UNIT_VMUL ||
365                                     bundle->instructions[i]->unit == UNIT_SADD)
366                                         instrs[0][instr_count[0]++] = bundle->instructions[i];
367                                 else
368                                         instrs[1][instr_count[1]++] = bundle->instructions[i];
369                         }
370 
371                         for (unsigned i = 0; i < ARRAY_SIZE(instr_count); i++) {
372                                 for (unsigned j = 0; j < instr_count[i]; j++) {
373                                         midgard_instruction *ins_a = instrs[i][j];
374 
375                                         if (ins_a->dest >= ctx->temp_count) continue;
376 
377                                         for (unsigned k = j + 1; k < instr_count[i]; k++) {
378                                                 midgard_instruction *ins_b = instrs[i][k];
379 
380                                                 if (ins_b->dest >= ctx->temp_count) continue;
381 
382                                                 lcra_add_node_interference(l, ins_b->dest,
383                                                                            mir_bytemask(ins_b),
384                                                                            ins_a->dest,
385                                                                            mir_bytemask(ins_a));
386                                         }
387                                 }
388                         }
389                 }
390 
391                 uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));
392 
393                 mir_foreach_instr_in_block_rev(blk, ins) {
394                         /* Mark all registers live after the instruction as
395                          * interfering with the destination */
396 
397                         unsigned dest = ins->dest;
398 
399                         if (dest < ctx->temp_count) {
400                                 for (unsigned i = 0; i < ctx->temp_count; ++i) {
401                                         if (live[i]) {
402                                                 unsigned mask = mir_bytemask(ins);
403                                                 lcra_add_node_interference(l, dest, mask, i, live[i]);
404                                         }
405                                 }
406                         }
407 
408                         /* Add blend shader interference: blend shaders might
409                          * clobber r0-r3. */
410                         if (ins->compact_branch && ins->writeout) {
411                                 for (unsigned i = 0; i < ctx->temp_count; ++i) {
412                                         if (!live[i])
413                                                 continue;
414 
415                                         for (unsigned j = 0; j < 4; j++) {
416                                                 lcra_add_node_interference(l, ctx->temp_count + j,
417                                                                 0xFFFF,
418                                                                 i, live[i]);
419                                         }
420                                 }
421                         }
422 
423                         /* Update live_in */
424                         mir_liveness_ins_update(live, ins, ctx->temp_count);
425                 }
426 
427                 free(live);
428         }
429 }
430 
431 static bool
432 mir_is_64(midgard_instruction *ins)
433 {
434         if (nir_alu_type_get_type_size(ins->dest_type) == 64)
435                 return true;
436 
437         mir_foreach_src(ins, v) {
438                 if (nir_alu_type_get_type_size(ins->src_types[v]) == 64)
439                         return true;
440         }
441 
442         return false;
443 }
444 
445 /* This routine performs the actual register allocation. It should be succeeded
446  * by install_registers */
447 
448 static struct lcra_state *
449 allocate_registers(compiler_context *ctx, bool *spilled)
450 {
451         /* The number of vec4 work registers available depends on the number of
452          * register-mapped uniforms and the shader stage. By ABI we limit blend
453          * shaders to 8 registers, should be lower XXX */
454         int rmu = ctx->info->push.count / 4;
455         int work_count = ctx->inputs->is_blend ? 8 : 16 - MAX2(rmu - 8, 0);
456 
457         /* No register allocation to do with no SSA */
458 
459         if (!ctx->temp_count)
460                 return NULL;
461 
462         /* Initialize LCRA. Allocate four extra nodes at the end, modelling the
463          * physical registers r0-r3 for interference */
464 
465         struct lcra_state *l = lcra_alloc_equations(ctx->temp_count + 4, 5);
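        /* node_r1 stands in for physical r1; destinations in writeout blocks gain
         * interference against it further down. */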
466         unsigned node_r1 = ctx->temp_count + 1;
467 
468         /* Starts of classes, in bytes */
469         l->class_start[REG_CLASS_WORK]  = 16 * 0;
470         l->class_start[REG_CLASS_LDST]  = 16 * 26;
471         l->class_start[REG_CLASS_TEXR]  = 16 * 28;
472         l->class_start[REG_CLASS_TEXW]  = 16 * 28;
473 
474         l->class_size[REG_CLASS_WORK] = 16 * work_count;
475         l->class_size[REG_CLASS_LDST]  = 16 * 2;
476         l->class_size[REG_CLASS_TEXR]  = 16 * 2;
477         l->class_size[REG_CLASS_TEXW]  = 16 * 2;
478 
479         lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);
480 
481         /* To save space on T*20, we don't have real texture registers.
482          * Instead, tex inputs reuse the load/store pipeline registers, and
483          * tex outputs use work r0/r1. Note we still use TEXR/TEXW classes, so
484          * interferences and sizes are still handled correctly. */
485 
486         if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
487                 l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
488                 l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
489         }
490 
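        /* Per-node constraints for LCRA: found_class is the largest write size in
         * bytes, min_alignment is log2 of the required byte alignment, and min_bound
         * limits how far a value may extend (8 keeps 16-bit values inside an xy/zw
         * half). */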
491         unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
492         unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
493         unsigned *min_bound = calloc(sizeof(unsigned), ctx->temp_count);
494 
495         mir_foreach_instr_global(ctx, ins) {
496                 /* Swizzles of 32-bit sources on 64-bit instructions need to be
497                  * aligned to either bottom (xy) or top (zw). More general
498                  * swizzle lowering should happen prior to scheduling (TODO),
499                  * but once we get RA we shouldn't disrupt this further. Align
500                  * sources of 64-bit instructions. */
501 
502                 if (ins->type == TAG_ALU_4 && mir_is_64(ins)) {
503                         mir_foreach_src(ins, v) {
504                                 unsigned s = ins->src[v];
505 
506                                 if (s < ctx->temp_count)
507                                         min_alignment[s] = MAX2(3, min_alignment[s]);
508                         }
509                 }
510 
511                 if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->op)) {
512                         mir_foreach_src(ins, v) {
513                                 unsigned s = ins->src[v];
514                                 unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);
515 
516                                 if (s < ctx->temp_count)
517                                         min_alignment[s] = MAX2((size == 64) ? 3 : 2, min_alignment[s]);
518                         }
519                 }
520 
521                 /* Anything read as 16-bit needs proper alignment to ensure the
522                  * resulting code can be packed.
523                  */
524                 mir_foreach_src(ins, s) {
525                         unsigned src_size = nir_alu_type_get_type_size(ins->src_types[s]);
526                         if (src_size == 16 && ins->src[s] < SSA_FIXED_MINIMUM)
527                                 min_bound[ins->src[s]] = MAX2(min_bound[ins->src[s]], 8);
528                 }
529 
530                 /* Everything after this concerns only the destination, not the
531                  * sources.
532                  */
533                 if (ins->dest >= SSA_FIXED_MINIMUM) continue;
534 
535                 unsigned size = nir_alu_type_get_type_size(ins->dest_type);
536 
537                 if (ins->is_pack)
538                         size = 32;
539 
540                 /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
541                 int comps1 = util_logbase2(ins->mask);
542 
543                 int bytes = (comps1 + 1) * (size / 8);
544 
545                 /* Use the largest class if there's ambiguity, this
546                  * handles partial writes */
547 
548                 int dest = ins->dest;
549                 found_class[dest] = MAX2(found_class[dest], bytes);
550 
551                 min_alignment[dest] =
552                         MAX2(min_alignment[dest],
553                              (size == 16) ? 1 : /* (1 << 1) = 2-byte */
554                              (size == 32) ? 2 : /* (1 << 2) = 4-byte */
555                              (size == 64) ? 3 : /* (1 << 3) = 8-byte */
556                              3); /* 8-bit todo */
557 
558                 /* We can't cross xy/zw boundaries. TODO: vec8 can */
559                 if (size == 16 && min_alignment[dest] != 4)
560                         min_bound[dest] = 8;
561 
562                 /* We don't have a swizzle for the conditional and we don't
563                  * want to muck with the conditional itself, so just force
564                  * alignment for now */
565 
566                 if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->op)) {
567                         min_alignment[dest] = 4; /* 1 << 4 = 16-byte = vec4 */
568 
569                         /* LCRA assumes bound >= alignment */
570                         min_bound[dest] = 16;
571                 }
572 
573                 /* Since ld/st swizzles and masks are 32-bit only, we need them
574                  * aligned to enable final packing */
575                 if (ins->type == TAG_LOAD_STORE_4)
576                         min_alignment[dest] = MAX2(min_alignment[dest], 2);
577         }
578 
579         for (unsigned i = 0; i < ctx->temp_count; ++i) {
580                 lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2,
581                                 min_bound[i] ? min_bound[i] : 16);
582                 lcra_restrict_range(l, i, found_class[i]);
583         }
584 
585         free(found_class);
586         free(min_alignment);
587         free(min_bound);
588 
589         /* Next, we'll determine semantic class. We default to zero (work).
590          * But, if we're used with a special operation, that will force us to a
591          * particular class. Each node must be assigned to exactly one class; a
592          * prepass before RA should have lowered what-would-have-been
593          * multiclass nodes into a series of moves to break it up into multiple
594          * nodes (TODO) */
595 
596         mir_foreach_instr_global(ctx, ins) {
597                 /* Check if this operation imposes any classes */
598 
599                 if (ins->type == TAG_LOAD_STORE_4) {
600                         set_class(l->class, ins->src[0], REG_CLASS_LDST);
601                         set_class(l->class, ins->src[1], REG_CLASS_LDST);
602                         set_class(l->class, ins->src[2], REG_CLASS_LDST);
603                         set_class(l->class, ins->src[3], REG_CLASS_LDST);
604 
605                         if (OP_IS_VEC4_ONLY(ins->op)) {
606                                 lcra_restrict_range(l, ins->dest, 16);
607                                 lcra_restrict_range(l, ins->src[0], 16);
608                                 lcra_restrict_range(l, ins->src[1], 16);
609                                 lcra_restrict_range(l, ins->src[2], 16);
610                                 lcra_restrict_range(l, ins->src[3], 16);
611                         }
612                 } else if (ins->type == TAG_TEXTURE_4) {
613                         set_class(l->class, ins->dest, REG_CLASS_TEXW);
614                         set_class(l->class, ins->src[0], REG_CLASS_TEXR);
615                         set_class(l->class, ins->src[1], REG_CLASS_TEXR);
616                         set_class(l->class, ins->src[2], REG_CLASS_TEXR);
617                         set_class(l->class, ins->src[3], REG_CLASS_TEXR);
618                 }
619         }
620 
621         /* Check that the semantics of the class are respected */
622         mir_foreach_instr_global(ctx, ins) {
623                 assert(check_write_class(l->class, ins->type, ins->dest));
624                 assert(check_read_class(l->class, ins->type, ins->src[0]));
625                 assert(check_read_class(l->class, ins->type, ins->src[1]));
626                 assert(check_read_class(l->class, ins->type, ins->src[2]));
627                 assert(check_read_class(l->class, ins->type, ins->src[3]));
628         }
629 
630         /* Mark writeout to r0, depth to r1.x, stencil to r1.y,
631          * render target to r1.z, unknown to r1.w */
632         mir_foreach_instr_global(ctx, ins) {
633                 if (!(ins->compact_branch && ins->writeout)) continue;
634 
635                 if (ins->src[0] < ctx->temp_count)
636                         l->solutions[ins->src[0]] = 0;
637 
638                 if (ins->src[2] < ctx->temp_count)
639                         l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_X * 4;
640 
641                 if (ins->src[3] < ctx->temp_count)
642                         l->solutions[ins->src[3]] = (16 * 1) + COMPONENT_Y * 4;
643 
644                 if (ins->src[1] < ctx->temp_count)
645                         l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;
646 
647                 if (ins->dest < ctx->temp_count)
648                         l->solutions[ins->dest] = (16 * 1) + COMPONENT_W * 4;
649         }
650 
651         /* Destinations of instructions in a writeout block cannot be assigned
652          * to r1 unless they are actually used as r1 from the writeout itself,
653          * since the writes to r1 are special. A code sequence like:
654          *
655          *      sadd.fmov r1.x, [...]
656          *      vadd.fadd r0, r1, r2
657          *      [writeout branch]
658          *
659          * will misbehave since the r1.x write will be interpreted as a
660          * gl_FragDepth write so it won't show up correctly when r1 is read in
661          * the following segment. We model this as interference.
662          */
663 
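        /* Pin the four extra LCRA nodes to r0-r3 so they stand in for the physical
         * registers. */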
664         for (unsigned i = 0; i < 4; ++i)
665                 l->solutions[ctx->temp_count + i] = (16 * i);
666 
667         mir_foreach_block(ctx, _blk) {
668                 midgard_block *blk = (midgard_block *) _blk;
669 
670                 mir_foreach_bundle_in_block(blk, v) {
671                         /* We need at least a writeout and nonwriteout instruction */
672                         if (v->instruction_count < 2)
673                                 continue;
674 
675                         /* Branches always come at the end */
676                         midgard_instruction *br = v->instructions[v->instruction_count - 1];
677 
678                         if (!br->writeout)
679                                 continue;
680 
681                         for (signed i = v->instruction_count - 2; i >= 0; --i) {
682                                 midgard_instruction *ins = v->instructions[i];
683 
684                                 if (ins->dest >= ctx->temp_count)
685                                         continue;
686 
687                                 bool used_as_r1 = (br->dest == ins->dest);
688 
689                                 mir_foreach_src(br, s)
690                                         used_as_r1 |= (s > 0) && (br->src[s] == ins->dest);
691 
692                                 if (!used_as_r1)
693                                         lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), node_r1, 0xFFFF);
694                         }
695                 }
696         }
697 
698         /* Precolour blend input to r0. Note writeout is necessarily at the end
699          * and blend shaders are single-RT only so there is only a single
700          * writeout block, so this cannot conflict with the writeout r0 (there
701          * is no need to have an intermediate move) */
702 
703         if (ctx->blend_input != ~0) {
704                 assert(ctx->blend_input < ctx->temp_count);
705                 l->solutions[ctx->blend_input] = 0;
706         }
707 
708         /* Same for the dual-source blend input/output, except here we use r2,
709          * which is also set in the fragment shader. */
710 
711         if (ctx->blend_src1 != ~0) {
712                 assert(ctx->blend_src1 < ctx->temp_count);
713                 l->solutions[ctx->blend_src1] = (16 * 2);
714                 ctx->info->work_reg_count = MAX2(ctx->info->work_reg_count, 3);
715         }
716 
717         mir_compute_interference(ctx, l);
718 
719         *spilled = !lcra_solve(l);
720         return l;
721 }
722 
723 
724 /* Once registers have been decided via register allocation
725  * (allocate_registers), we need to rewrite the MIR to use registers instead of
726  * indices */
727 
728 static void
729 install_registers_instr(
730         compiler_context *ctx,
731         struct lcra_state *l,
732         midgard_instruction *ins)
733 {
734         unsigned src_shift[MIR_SRC_COUNT];
735 
736         for (unsigned i = 0; i < MIR_SRC_COUNT; ++i) {
737                 src_shift[i] =
738                         util_logbase2(nir_alu_type_get_type_size(ins->src_types[i]) / 8);
739         }
740 
741         unsigned dest_shift =
742                 util_logbase2(nir_alu_type_get_type_size(ins->dest_type) / 8);
743 
744         switch (ins->type) {
745         case TAG_ALU_4:
746         case TAG_ALU_8:
747         case TAG_ALU_12:
748         case TAG_ALU_16: {
749                 if (ins->compact_branch)
750                         return;
751 
752                 struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
753                 struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
754                 struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);
755 
756                 mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
757 
758                 unsigned dest_offset =
759                         GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props) ? 0 :
760                         dest.offset;
761 
762                 offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);
763                 if (!ins->has_inline_constant)
764                         offset_swizzle(ins->swizzle[1], src2.offset, src2.shift, dest.shift, dest_offset);
765                 if (ins->src[0] != ~0)
766                         ins->src[0] = SSA_FIXED_REGISTER(src1.reg);
767                 if (ins->src[1] != ~0)
768                         ins->src[1] = SSA_FIXED_REGISTER(src2.reg);
769                 if (ins->dest != ~0)
770                         ins->dest = SSA_FIXED_REGISTER(dest.reg);
771                 break;
772         }
773 
774         case TAG_LOAD_STORE_4: {
775                 /* Which physical register we read off depends on
776                  * whether we are loading or storing -- think about the
777                  * logical dataflow */
778 
779                 bool encodes_src = OP_IS_STORE(ins->op);
780 
781                 if (encodes_src) {
782                         struct phys_reg src = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
783                         assert(src.reg == 26 || src.reg == 27);
784 
785                         ins->src[0] = SSA_FIXED_REGISTER(src.reg);
786                         offset_swizzle(ins->swizzle[0], src.offset, src.shift, 0, 0);
787                 } else {
788                         struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_shift);
789 
790                         ins->dest = SSA_FIXED_REGISTER(dst.reg);
791                         offset_swizzle(ins->swizzle[0], 0, 2, dest_shift, dst.offset);
792                         mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
793                 }
794 
795                 /* We also follow up with the actual arguments */
796 
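                /* The remaining sources are scalar address arguments: fold the
                 * register byte offset into the first swizzle component. */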
797                 for (int i = 1; i <= 3; i++) {
798                         unsigned src_index = ins->src[i];
799                         if (src_index != ~0) {
800                                 struct phys_reg src = index_to_reg(ctx, l, src_index, src_shift[i]);
801                                 unsigned component = src.offset >> src.shift;
802                                 assert(component << src.shift == src.offset);
803                                 ins->src[i] = SSA_FIXED_REGISTER(src.reg);
804                                 ins->swizzle[i][0] += component;
805                         }
806                 }
807 
808                 break;
809         }
810 
811         case TAG_TEXTURE_4: {
812                 if (ins->op == midgard_tex_op_barrier)
813                         break;
814 
815                 /* Grab RA results */
816                 struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);
817                 struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
818                 struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], src_shift[2]);
819                 struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_shift[3]);
820 
821                 /* First, install the texture coordinate */
822                 if (ins->src[1] != ~0)
823                         ins->src[1] = SSA_FIXED_REGISTER(coord.reg);
824                 offset_swizzle(ins->swizzle[1], coord.offset, coord.shift, dest.shift, 0);
825 
826                 /* Next, install the destination */
827                 if (ins->dest != ~0)
828                         ins->dest = SSA_FIXED_REGISTER(dest.reg);
829                 offset_swizzle(ins->swizzle[0], 0, 2, dest.shift,
830                                 dest_shift == 1 ? dest.offset % 8 :
831                                 dest.offset);
832                 mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
833 
834                 /* If there is a register LOD/bias, use it */
835                 if (ins->src[2] != ~0) {
836                         assert(!(lod.offset & 3));
837                         ins->src[2] = SSA_FIXED_REGISTER(lod.reg);
838                         ins->swizzle[2][0] = lod.offset / 4;
839                 }
840 
841                 /* If there is an offset register, install it */
842                 if (ins->src[3] != ~0) {
843                         ins->src[3] = SSA_FIXED_REGISTER(offset.reg);
844                         ins->swizzle[3][0] = offset.offset / 4;
845                 }
846 
847                 break;
848         }
849 
850         default:
851                 break;
852         }
853 }
854 
855 static void
856 install_registers(compiler_context *ctx, struct lcra_state *l)
857 {
858         mir_foreach_instr_global(ctx, ins)
859                 install_registers_instr(ctx, l, ins);
860 }
861 
862 
863 /* If register allocation fails, find the best spill node */
864 
865 static signed
866 mir_choose_spill_node(
867                 compiler_context *ctx,
868                 struct lcra_state *l)
869 {
870         /* We can't spill a previously spilled value or an unspill */
871 
872         mir_foreach_instr_global(ctx, ins) {
873                 if (ins->no_spill & (1 << l->spill_class)) {
874                         lcra_set_node_spill_cost(l, ins->dest, -1);
875 
876                         if (l->spill_class != REG_CLASS_WORK) {
877                                 mir_foreach_src(ins, s)
878                                         lcra_set_node_spill_cost(l, ins->src[s], -1);
879                         }
880                 }
881         }
882 
883         return lcra_get_best_spill_node(l);
884 }
885 
886 /* Once we've chosen a spill node, spill it */
887 
888 static void
889 mir_spill_register(
890                 compiler_context *ctx,
891                 unsigned spill_node,
892                 unsigned spill_class,
893                 unsigned *spill_count)
894 {
895         if (spill_class == REG_CLASS_WORK && ctx->inputs->is_blend)
896                 unreachable("Blend shader spilling is currently unimplemented");
897 
898         unsigned spill_index = ctx->temp_count;
899 
900         /* We have a spill node, so check the class. Work registers
901          * legitimately spill to TLS, but special registers just spill to work
902          * registers */
903 
904         bool is_special = spill_class != REG_CLASS_WORK;
905         bool is_special_w = spill_class == REG_CLASS_TEXW;
906 
907         /* Allocate TLS slot (maybe) */
908         unsigned spill_slot = !is_special ? (*spill_count)++ : 0;
909 
910         /* For special reads, figure out how many bytes we need */
911         unsigned read_bytemask = 0;
912 
913         /* If multiple instructions write to this destination, we'll have to
914          * fill from TLS before writing */
915         unsigned write_count = 0;
916 
917         mir_foreach_instr_global_safe(ctx, ins) {
918                 read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
919                 if (ins->dest == spill_node)
920                         ++write_count;
921         }
922 
923         /* For TLS, replace all stores to the spilled node. For
924          * special reads, just keep as-is; the class will be demoted
925          * implicitly. For special writes, spill to a work register */
926 
927         if (!is_special || is_special_w) {
928                 if (is_special_w)
929                         spill_slot = spill_index++;
930 
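                /* Track the bundle we last emitted a spill store or fill in, so that
                 * multiple writes within one bundle share a single store. */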
931                 unsigned last_id = ~0;
932                 unsigned last_fill = ~0;
933                 unsigned last_spill_index = ~0;
934                 midgard_instruction *last_spill = NULL;
935 
936                 mir_foreach_block(ctx, _block) {
937                 midgard_block *block = (midgard_block *) _block;
938                 mir_foreach_instr_in_block_safe(block, ins) {
939                         if (ins->dest != spill_node) continue;
940 
941                         /* Note: it's important to match the mask of the spill
942                          * with the mask of the instruction whose destination
943                          * we're spilling, or otherwise we'll read invalid
944                          * components and can fail RA in a subsequent iteration
945                          */
946 
947                         if (is_special_w) {
948                                 midgard_instruction st = v_mov(spill_node, spill_slot);
949                                 st.no_spill |= (1 << spill_class);
950                                 st.mask = ins->mask;
951                                 st.dest_type = st.src_types[1] = ins->dest_type;
952 
953                                 /* Hint: don't rewrite this node */
954                                 st.hint = true;
955 
956                                 mir_insert_instruction_after_scheduled(ctx, block, ins, st);
957                         } else {
958                                 unsigned bundle = ins->bundle_id;
959                                 unsigned dest = (bundle == last_id) ? last_spill_index : spill_index++;
960 
961                                 unsigned bytemask = mir_bytemask(ins);
962                                 unsigned write_mask = mir_from_bytemask(mir_round_bytemask_up(
963                                                                            bytemask, 32), 32);
964 
965                                 if (write_count > 1 && bytemask != 0xFFFF && bundle != last_fill) {
966                                         midgard_instruction read =
967                                                 v_load_store_scratch(dest, spill_slot, false, 0xF);
968                                         mir_insert_instruction_before_scheduled(ctx, block, ins, read);
969                                         write_mask = 0xF;
970                                         last_fill = bundle;
971                                 }
972 
973                                 ins->dest = dest;
974                                 ins->no_spill |= (1 << spill_class);
975 
976                                 bool move = false;
977 
978                                 /* In the same bundle, reads of the destination
979                                  * of the spilt instruction need to be direct */
980                                 midgard_instruction *it = ins;
981                                 while ((it = list_first_entry(&it->link, midgard_instruction, link))
982                                        && (it->bundle_id == bundle)) {
983 
984                                         if (!mir_has_arg(it, spill_node)) continue;
985 
986                                         mir_rewrite_index_src_single(it, spill_node, dest);
987 
988                                         /* The spilt instruction will write to
989                                          * a work register for `it` to read but
990                                          * the spill needs an LD/ST register */
991                                         move = true;
992                                 }
993 
994                                 if (move)
995                                         dest = spill_index++;
996 
997                                 if (last_id == bundle) {
998                                         last_spill->mask |= write_mask;
999                                         u_foreach_bit(c, write_mask)
1000                                                 last_spill->swizzle[0][c] = c;
1001                                 } else {
1002                                         midgard_instruction st =
1003                                                 v_load_store_scratch(dest, spill_slot, true, write_mask);
1004                                         last_spill = mir_insert_instruction_after_scheduled(ctx, block, ins, st);
1005                                 }
1006 
1007                                 if (move) {
1008                                         midgard_instruction mv = v_mov(ins->dest, dest);
1009                                         mv.no_spill |= (1 << spill_class);
1010 
1011                                         mir_insert_instruction_after_scheduled(ctx, block, ins, mv);
1012                                 }
1013 
1014                                 last_id = bundle;
1015                                 last_spill_index = ins->dest;
1016                         }
1017 
1018                         if (!is_special)
1019                                 ctx->spills++;
1020                 }
1021                 }
1022         }
1023 
1024         /* Insert a load from TLS before the first consecutive
1025          * use of the node, rewriting to use spilled indices to
1026          * break up the live range. Or, for special, insert a
1027          * move. Ironically the latter *increases* register
1028          * pressure, but the two uses of the spilling mechanism
1029          * are somewhat orthogonal. (special spilling is to use
1030          * work registers to back special registers; TLS
1031          * spilling is to use memory to back work registers) */
1032 
1033         mir_foreach_block(ctx, _block) {
1034                 midgard_block *block = (midgard_block *) _block;
1035                 mir_foreach_instr_in_block(block, ins) {
1036                         /* We can't rewrite the moves used to spill in the
1037                          * first place. These moves are hinted. */
1038                         if (ins->hint) continue;
1039 
1040                         /* If we don't use the spilled value, nothing to do */
1041                         if (!mir_has_arg(ins, spill_node)) continue;
1042 
1043                         unsigned index = 0;
1044 
1045                         if (!is_special_w) {
1046                                 index = ++spill_index;
1047 
1048                                 midgard_instruction *before = ins;
1049                                 midgard_instruction st;
1050 
1051                                 if (is_special) {
1052                                         /* Move */
1053                                         st = v_mov(spill_node, index);
1054                                         st.no_spill |= (1 << spill_class);
1055                                 } else {
1056                                         /* TLS load */
1057                                         st = v_load_store_scratch(index, spill_slot, false, 0xF);
1058                                 }
1059 
1060                                 /* Mask the load based on the component count
1061                                  * actually needed to prevent RA loops */
1062 
1063                                 st.mask = mir_from_bytemask(mir_round_bytemask_up(
1064                                                         read_bytemask, 32), 32);
1065 
1066                                 mir_insert_instruction_before_scheduled(ctx, block, before, st);
1067                         } else {
1068                                 /* Special writes already have their move spilled in */
1069                                 index = spill_slot;
1070                         }
1071 
1072 
1073                         /* Rewrite to use */
1074                         mir_rewrite_index_src_single(ins, spill_node, index);
1075 
1076                         if (!is_special)
1077                                 ctx->fills++;
1078                 }
1079         }
1080 
1081         /* Reset hints */
1082 
1083         mir_foreach_instr_global(ctx, ins) {
1084                 ins->hint = false;
1085         }
1086 }
1087 
1088 static void
1089 mir_demote_uniforms(compiler_context *ctx, unsigned new_cutoff)
1090 {
1091         unsigned uniforms = ctx->info->push.count / 4;
1092         unsigned old_work_count = 16 - MAX2(uniforms - 8, 0);
1093         unsigned work_count = 16 - MAX2((new_cutoff - 8), 0);
1094 
1095         unsigned min_demote = SSA_FIXED_REGISTER(old_work_count);
1096         unsigned max_demote = SSA_FIXED_REGISTER(work_count);
1097 
1098         mir_foreach_block(ctx, _block) {
1099                 midgard_block *block = (midgard_block *) _block;
1100                 mir_foreach_instr_in_block(block, ins) {
1101                         mir_foreach_src(ins, i) {
1102                                 if (ins->src[i] < min_demote || ins->src[i] >= max_demote)
1103                                         continue;
1104 
1105                                 midgard_instruction *before = ins;
1106 
1107                                 unsigned temp = make_compiler_temp(ctx);
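                                /* Register-mapped push constants count down from r23,
                                 * four 32-bit words per register, so recover the push
                                 * word index from the register number. */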
1108                                 unsigned idx = (23 - SSA_REG_FROM_FIXED(ins->src[i])) * 4;
1109                                 assert(idx < ctx->info->push.count);
1110 
1111                                 ctx->ubo_mask |= BITSET_BIT(ctx->info->push.words[idx].ubo);
1112 
1113                                 midgard_instruction ld = {
1114                                         .type = TAG_LOAD_STORE_4,
1115                                         .mask = 0xF,
1116                                         .dest = temp,
1117                                         .dest_type = ins->src_types[i],
1118                                         .src = { ~0, ~0, ~0, ~0 },
1119                                         .swizzle = SWIZZLE_IDENTITY_4,
1120                                         .op = midgard_op_ld_ubo_128,
1121                                         .load_store = {
1122                                                 .index_reg = REGISTER_LDST_ZERO,
1123                                         },
1124                                         .constants.u32[0] = ctx->info->push.words[idx].offset
1125                                 };
1126 
1127                                 midgard_pack_ubo_index_imm(&ld.load_store,
1128                                                            ctx->info->push.words[idx].ubo);
1129 
1130                                 mir_insert_instruction_before_scheduled(ctx, block, before, ld);
1131 
1132                                 mir_rewrite_index_src_single(ins, ins->src[i], temp);
1133                         }
1134                 }
1135         }
1136 
1137         ctx->info->push.count = MIN2(ctx->info->push.count, new_cutoff * 4);
1138 }
1139 
1140 /* Run register allocation in a loop, spilling until we succeed */
1141 
1142 void
1143 mir_ra(compiler_context *ctx)
1144 {
1145         struct lcra_state *l = NULL;
1146         bool spilled = false;
1147         int iter_count = 1000; /* max iterations */
1148 
1149         /* Number of 128-bit slots in memory we've spilled into */
1150         unsigned spill_count = DIV_ROUND_UP(ctx->info->tls_size, 16);
1151 
1152 
1153         mir_create_pipeline_registers(ctx);
1154 
1155         do {
1156                 if (spilled) {
1157                         signed spill_node = mir_choose_spill_node(ctx, l);
1158                         unsigned uniforms = ctx->info->push.count / 4;
1159 
1160                         /* It's a lot cheaper to demote uniforms to get more
1161                          * work registers than to spill to TLS. */
1162                         if (l->spill_class == REG_CLASS_WORK && uniforms > 8) {
1163                                 mir_demote_uniforms(ctx, MAX2(uniforms - 4, 8));
1164                         } else if (spill_node == -1) {
1165                                 fprintf(stderr, "ERROR: Failed to choose spill node\n");
1166                                 lcra_free(l);
1167                                 return;
1168                         } else {
1169                                 mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
1170                         }
1171                 }
1172 
1173                 mir_squeeze_index(ctx);
1174                 mir_invalidate_liveness(ctx);
1175 
1176                 if (l) {
1177                         lcra_free(l);
1178                         l = NULL;
1179                 }
1180 
1181                 l = allocate_registers(ctx, &spilled);
1182         } while(spilled && ((iter_count--) > 0));
1183 
1184         if (iter_count <= 0) {
1185                 fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
1186                 assert(0);
1187         }
1188 
1189         /* Report spilling information. spill_count is in 128-bit slots (vec4 x
1190          * fp32), but tls_size is in bytes, so multiply by 16 */
1191 
1192         ctx->info->tls_size = spill_count * 16;
1193 
1194         install_registers(ctx, l);
1195 
1196         lcra_free(l);
1197 }
1198