1 /*
2  * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3  * Copyright (C) 2019-2020 Collabora, Ltd.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include "compiler.h"
26 #include "midgard_ops.h"
27 #include "midgard_quirks.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "util/half_float.h"
31 
32 /* Scheduling for Midgard is complicated, to say the least. ALU instructions
33  * must be grouped into VLIW bundles according to the following model:
34  *
35  * [VMUL] [SADD]
36  * [VADD] [SMUL] [VLUT]
37  *
38  * A given instruction can execute on some subset of the units (or a few can
39  * execute on all). Instructions can be either vector or scalar; only scalar
40  * instructions can execute on SADD/SMUL units. Units on a given line execute
41  * in parallel. Subsequent lines execute separately and can pass results
42  * directly via pipeline registers r24/r25, bypassing the register file.
43  *
44  * A bundle can optionally have 128-bits of embedded constants, shared across
45  * all of the instructions within a bundle.
46  *
47  * Instructions consuming conditionals (branches and conditional selects)
48  * require their condition to be written into the conditional register (r31)
49  * within the same bundle in which it is consumed.
50  *
51  * Fragment writeout requires its argument to be written in full within the
52  * same bundle as the branch, with no hanging dependencies.
53  *
54  * Load/store instructions are also bundled, in pairs of at most two
55  * instructions, while texture instructions are not bundled at all.
56  *
57  * -------------------------------------------------------------------------
58  *
59  */
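
/* For illustration only (a restatement of the model above, not extra ISA
 * documentation): a bundle computing d = (a * b) + c could place the multiply
 * on the first line and the add on the second, forwarding the product through
 * a pipeline register rather than the register file:
 *
 *    [VMUL r24, a, b] [SADD]
 *    [VADD d, r24, c] [SMUL] [VLUT]
 */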
60 
61 /* We create the dependency graph with per-byte granularity */
62 
63 #define BYTE_COUNT 16
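
/* Concretely (see mir_create_dependency_graph below): for each of the
 * node_count nodes we keep BYTE_COUNT dynarrays of instruction indices, one
 * per byte of a 128-bit vector, tracking which instructions last read or
 * wrote that byte. Walking the block bottom-up and consulting these per-byte
 * sets yields read-after-write, write-after-read and write-after-write edges
 * at byte granularity, so writes to disjoint bytes of the same node do not
 * serialize. */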
64 
65 static void
66 add_dependency(struct util_dynarray *table, unsigned index, uint16_t mask, midgard_instruction **instructions, unsigned child)
67 {
68         for (unsigned i = 0; i < BYTE_COUNT; ++i) {
69                 if (!(mask & (1 << i)))
70                         continue;
71 
72                 struct util_dynarray *parents = &table[(BYTE_COUNT * index) + i];
73 
74                 util_dynarray_foreach(parents, unsigned, parent) {
75                         BITSET_WORD *dependents = instructions[*parent]->dependents;
76 
77                         /* Already have the dependency */
78                         if (BITSET_TEST(dependents, child))
79                                 continue;
80 
81                         BITSET_SET(dependents, child);
82                         instructions[child]->nr_dependencies++;
83                 }
84         }
85 }
86 
87 static void
88 mark_access(struct util_dynarray *table, unsigned index, uint16_t mask, unsigned parent)
89 {
90         for (unsigned i = 0; i < BYTE_COUNT; ++i) {
91                 if (!(mask & (1 << i)))
92                         continue;
93 
94                 util_dynarray_append(&table[(BYTE_COUNT * index) + i], unsigned, parent);
95         }
96 }
97 
98 static void
99 mir_create_dependency_graph(midgard_instruction **instructions, unsigned count, unsigned node_count)
100 {
101         size_t sz = node_count * BYTE_COUNT;
102 
103         struct util_dynarray *last_read = calloc(sizeof(struct util_dynarray), sz);
104         struct util_dynarray *last_write = calloc(sizeof(struct util_dynarray), sz);
105 
106         for (unsigned i = 0; i < sz; ++i) {
107                 util_dynarray_init(&last_read[i], NULL);
108                 util_dynarray_init(&last_write[i], NULL);
109         }
110 
111         /* Initialize dependency graph */
112         for (unsigned i = 0; i < count; ++i) {
113                 instructions[i]->dependents =
114                         calloc(BITSET_WORDS(count), sizeof(BITSET_WORD));
115 
116                 instructions[i]->nr_dependencies = 0;
117         }
118 
119         /* Populate dependency graph */
120         for (signed i = count - 1; i >= 0; --i) {
121                 if (instructions[i]->compact_branch)
122                         continue;
123 
124                 unsigned dest = instructions[i]->dest;
125                 unsigned mask = mir_bytemask(instructions[i]);
126 
127                 mir_foreach_src((*instructions), s) {
128                         unsigned src = instructions[i]->src[s];
129 
130                         if (src < node_count) {
131                                 unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
132                                 add_dependency(last_write, src, readmask, instructions, i);
133                         }
134                 }
135 
136                 if (dest < node_count) {
137                         add_dependency(last_read, dest, mask, instructions, i);
138                         add_dependency(last_write, dest, mask, instructions, i);
139                         mark_access(last_write, dest, mask, i);
140                 }
141 
142                 mir_foreach_src((*instructions), s) {
143                         unsigned src = instructions[i]->src[s];
144 
145                         if (src < node_count) {
146                                 unsigned readmask = mir_bytemask_of_read_components(instructions[i], src);
147                                 mark_access(last_read, src, readmask, i);
148                         }
149                 }
150         }
151 
152         /* If there is a branch, all instructions depend on it, as interblock
153          * execution must be purely in-order */
154 
155         if (instructions[count - 1]->compact_branch) {
156                 BITSET_WORD *dependents = instructions[count - 1]->dependents;
157 
158                 for (signed i = count - 2; i >= 0; --i) {
159                         if (BITSET_TEST(dependents, i))
160                                 continue;
161 
162                         BITSET_SET(dependents, i);
163                         instructions[i]->nr_dependencies++;
164                 }
165         }
166 
167         /* Free the intermediate structures */
168         for (unsigned i = 0; i < sz; ++i) {
169                 util_dynarray_fini(&last_read[i]);
170                 util_dynarray_fini(&last_write[i]);
171         }
172 
173         free(last_read);
174         free(last_write);
175 }
176 
178 /* Is the mask limited to a single component (i.e. a scalar)? */
178 
179 static bool
180 is_single_component_mask(unsigned mask)
181 {
182         int components = 0;
183 
184         for (int c = 0; c < 8; ++c) {
185                 if (mask & (1 << c))
186                         components++;
187         }
188 
189         return components == 1;
190 }
191 
192 /* Helpers for scheduling */
193 
194 static bool
195 mir_is_scalar(midgard_instruction *ains)
196 {
197         /* Do we try to use it as a vector op? */
198         if (!is_single_component_mask(ains->mask))
199                 return false;
200 
201         /* Otherwise, check mode hazards */
202         bool could_scalar = true;
203         unsigned szd = nir_alu_type_get_type_size(ains->dest_type);
204         unsigned sz0 = nir_alu_type_get_type_size(ains->src_types[0]);
205         unsigned sz1 = nir_alu_type_get_type_size(ains->src_types[1]);
206 
207         /* Only 16/32-bit can run on a scalar unit */
208         could_scalar &= (szd == 16) || (szd == 32);
209 
210         if (ains->src[0] != ~0)
211                 could_scalar &= (sz0 == 16) || (sz0 == 32);
212 
213         if (ains->src[1] != ~0)
214                 could_scalar &= (sz1 == 16) || (sz1 == 32);
215 
216         if (midgard_is_integer_out_op(ains->op) && ains->outmod != midgard_outmod_int_wrap)
217                 return false;
218 
219         return could_scalar;
220 }
221 
222 /* How many bytes does this ALU instruction add to the bundle? */
223 
224 static unsigned
225 bytes_for_instruction(midgard_instruction *ains)
226 {
227         if (ains->unit & UNITS_ANY_VECTOR)
228                 return sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
229         else if (ains->unit == ALU_ENAB_BRANCH)
230                 return sizeof(midgard_branch_extended);
231         else if (ains->compact_branch)
232                 return sizeof(uint16_t);
233         else
234                 return sizeof(midgard_reg_info) + sizeof(midgard_scalar_alu);
235 }
236 
237 /* We would like to flatten the linked list of midgard_instructions in a block
238  * to an array of pointers on the heap for easy indexing */
239 
240 static midgard_instruction **
241 flatten_mir(midgard_block *block, unsigned *len)
242 {
243         *len = list_length(&block->base.instructions);
244 
245         if (!(*len))
246                 return NULL;
247 
248         midgard_instruction **instructions =
249                 calloc(sizeof(midgard_instruction *), *len);
250 
251         unsigned i = 0;
252 
253         mir_foreach_instr_in_block(block, ins)
254                 instructions[i++] = ins;
255 
256         return instructions;
257 }
258 
259 /* The worklist is the set of instructions that can be scheduled now; that is,
260  * the set of instructions with no remaining dependencies */
261 
262 static void
263 mir_initialize_worklist(BITSET_WORD *worklist, midgard_instruction **instructions, unsigned count)
264 {
265         for (unsigned i = 0; i < count; ++i) {
266                 if (instructions[i]->nr_dependencies == 0)
267                         BITSET_SET(worklist, i);
268         }
269 }
270 
271 /* Update the worklist after an instruction terminates. Remove its edges from
272  * the graph and if that causes any node to have no dependencies, add it to the
273  * worklist */
274 
275 static void
276 mir_update_worklist(
277                 BITSET_WORD *worklist, unsigned count,
278                 midgard_instruction **instructions, midgard_instruction *done)
279 {
280         /* Sanity check: if no instruction terminated, there is nothing to do.
281          * If the instruction that terminated had dependencies, that makes no
282          * sense and means we messed up the worklist. Finally, as the purpose
283          * of this routine is to update dependents, we abort early if there are
284          * no dependents defined. */
285 
286         if (!done)
287                 return;
288 
289         assert(done->nr_dependencies == 0);
290 
291         if (!done->dependents)
292                 return;
293 
294         /* We have an instruction with dependents. Iterate each dependent to
295          * remove one dependency (`done`), adding dependents to the worklist
296          * where possible. */
297 
298         unsigned i;
299         BITSET_FOREACH_SET(i, done->dependents, count) {
300                 assert(instructions[i]->nr_dependencies);
301 
302                 if (!(--instructions[i]->nr_dependencies))
303                         BITSET_SET(worklist, i);
304         }
305 
306         free(done->dependents);
307 }
308 
309 /* While scheduling, we need to choose instructions satisfying certain
310  * criteria. As we schedule backwards, we choose the *last* instruction in the
311  * worklist to simulate in-order scheduling. Chosen instructions must satisfy a
312  * given predicate. */
313 
314 struct midgard_predicate {
315         /* TAG or ~0 for dont-care */
316         unsigned tag;
317 
318         /* True if we want to pop off the chosen instruction */
319         bool destructive;
320 
321         /* For ALU, choose only this unit */
322         unsigned unit;
323 
324         /* State for bundle constants. constants is the actual constants
325          * for the bundle. constant_mask is a bitmask of the bytes (up to
326          * 16) currently in use for constants. When picking in destructive
327          * mode, the constants array will be updated, and the instruction
328          * will be adjusted to index into the constants array */
329 
330         midgard_constants *constants;
331         unsigned constant_mask;
332 
333         /* Exclude this destination (if not ~0) */
334         unsigned exclude;
335 
336         /* Don't schedule instructions consuming conditionals (since we already
337          * scheduled one). Excludes conditional branches and csel */
338         bool no_cond;
339 
340         /* Require (or reject) a minimal mask and (if nonzero) given
341          * destination. Used for writeout optimizations */
342 
343         unsigned mask;
344         unsigned no_mask;
345         unsigned dest;
346 
347         /* Whether to not-care/only/never schedule imov/fmov instructions. This
348          * allows non-move instructions to get priority on each unit */
349         unsigned move_mode;
350 
351         /* For load/store: how many pipeline registers are in use? The two
352          * scheduled instructions cannot use more than the 256-bits of pipeline
353          * space available or RA will fail (as it would run out of pipeline
354          * registers and fail to spill without breaking the schedule) */
355 
356         unsigned pipeline_count;
357 };
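
/* Example (mirroring the uses later in this file): a nondestructive probe
 * that accepts any instruction, used only to decide which bundle type to
 * emit next, looks like
 *
 *    struct midgard_predicate p = {
 *            .tag = ~0, .destructive = false, .exclude = ~0
 *    };
 *
 * whereas the per-bundle schedulers set .destructive = true (and, for ALU,
 * point .constants at the bundle) so that chosen instructions are popped and
 * their constants committed. */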
358 
359 static bool
360 mir_adjust_constant(midgard_instruction *ins, unsigned src,
361                 unsigned *bundle_constant_mask,
362                 unsigned *comp_mapping,
363                 uint8_t *bundle_constants,
364                 bool upper)
365 {
366         unsigned type_size = nir_alu_type_get_type_size(ins->src_types[src]) / 8;
367         unsigned type_shift = util_logbase2(type_size);
368         unsigned max_comp = mir_components_for_type(ins->src_types[src]);
369         unsigned comp_mask = mir_from_bytemask(mir_round_bytemask_up(
370                                 mir_bytemask_of_read_components_index(ins, src),
371                                 type_size * 8),
372                                                type_size * 8);
373         unsigned type_mask = (1 << type_size) - 1;
374 
375         /* Upper only makes sense for 16-bit */
376         if (type_size != 2 && upper)
377                 return false;
378 
379         /* For 16-bit, we need to stay on either upper or lower halves to avoid
380          * disrupting the swizzle */
381         unsigned start = upper ? 8 : 0;
382         unsigned length = (type_size == 2) ? 8 : 16;
383 
384         for (unsigned comp = 0; comp < max_comp; comp++) {
385                 if (!(comp_mask & (1 << comp)))
386                         continue;
387 
388                 uint8_t *constantp = ins->constants.u8 + (type_size * comp);
389                 unsigned best_reuse_bytes = 0;
390                 signed best_place = -1;
391                 unsigned i, j;
392 
393                 for (i = start; i < (start + length); i += type_size) {
394                         unsigned reuse_bytes = 0;
395 
396                         for (j = 0; j < type_size; j++) {
397                                 if (!(*bundle_constant_mask & (1 << (i + j))))
398                                         continue;
399                                 if (constantp[j] != bundle_constants[i + j])
400                                         break;
401                                 if ((i + j) > (start + length))
402                                         break;
403 
404                                 reuse_bytes++;
405                         }
406 
407                         /* Select the place where existing bytes can be
408                          * reused, so that we leave empty slots for others
409                          */
410                         if (j == type_size &&
411                             (reuse_bytes > best_reuse_bytes || best_place < 0)) {
412                                 best_reuse_bytes = reuse_bytes;
413                                 best_place = i;
414                                 break;
415                         }
416                 }
417 
418                 /* This component couldn't fit in the remaining constant slots;
419                  * no need to check the remaining components, bail out now
420                  */
421                 if (best_place < 0)
422                         return false;
423 
424                 memcpy(&bundle_constants[best_place], constantp, type_size);
425                 *bundle_constant_mask |= type_mask << best_place;
426                 comp_mapping[comp] = best_place >> type_shift;
427         }
428 
429         return true;
430 }
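
/* A rough worked example of the packing above (hypothetical values): a
 * 32-bit constant read through component .y occupies 4 bytes of the shared
 * 16-byte constant block. If an earlier instruction in the bundle already
 * placed those same byte values, they are reused (j == type_size with
 * reuse_bytes > 0) and comp_mapping records the slot the component now
 * occupies, which mir_adjust_constants() below turns into a swizzle. */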
431 
432 /* For an instruction that can fit, adjust it to fit and update the constants
433  * array, in destructive mode. Returns whether the fitting was successful. */
434 
435 static bool
436 mir_adjust_constants(midgard_instruction *ins,
437                 struct midgard_predicate *pred,
438                 bool destructive)
439 {
440         /* No constant, nothing to adjust */
441         if (!ins->has_constants)
442                 return true;
443 
444         unsigned r_constant = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
445         unsigned bundle_constant_mask = pred->constant_mask;
446         unsigned comp_mapping[2][16] = { };
447         uint8_t bundle_constants[16];
448 
449         memcpy(bundle_constants, pred->constants, 16);
450 
451         /* Let's try to find a place for each active component of the constant
452          * register.
453          */
454         for (unsigned src = 0; src < 2; ++src) {
455                 if (ins->src[src] != SSA_FIXED_REGISTER(REGISTER_CONSTANT))
456                         continue;
457 
458                 /* First, try the lower half (or the whole constant for non-16-bit) */
459                 if (mir_adjust_constant(ins, src, &bundle_constant_mask,
460                                 comp_mapping[src], bundle_constants, false))
461                         continue;
462 
463                 /* Next, try upper half */
464                 if (mir_adjust_constant(ins, src, &bundle_constant_mask,
465                                 comp_mapping[src], bundle_constants, true))
466                         continue;
467 
468                 /* Otherwise bail */
469                 return false;
470         }
471 
472         /* If non-destructive, we're done */
473         if (!destructive)
474                 return true;
475 
476         /* Otherwise update the constant_mask and constant values */
477         pred->constant_mask = bundle_constant_mask;
478         memcpy(pred->constants, bundle_constants, 16);
479 
480         /* Use comp_mapping as a swizzle */
481         mir_foreach_src(ins, s) {
482                 if (ins->src[s] == r_constant)
483                         mir_compose_swizzle(ins->swizzle[s], comp_mapping[s], ins->swizzle[s]);
484         }
485 
486         return true;
487 }
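
/* Usage note: mir_choose_instruction() first calls this with destructive =
 * false while filtering candidates and only re-runs it with destructive =
 * true once an instruction is actually picked, so rejected candidates never
 * disturb the bundle's shared constant state. */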
488 
489 /* Conservative estimate of the pipeline registers required for load/store */
490 
491 static unsigned
492 mir_pipeline_count(midgard_instruction *ins)
493 {
494         unsigned bytecount = 0;
495 
496         mir_foreach_src(ins, i) {
497                 /* Skip empty source  */
498                 if (ins->src[i] == ~0) continue;
499 
500                 unsigned bytemask = mir_bytemask_of_read_components_index(ins, i);
501 
502                 unsigned max = util_logbase2(bytemask) + 1;
503                 bytecount += max;
504         }
505 
506         return DIV_ROUND_UP(bytecount, 16);
507 }
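
/* For example, a source reading a full 4x32-bit vector has bytemask 0xffff,
 * so util_logbase2(0xffff) + 1 = 16 bytes are counted and the instruction
 * needs one whole 128-bit pipeline register; two scalar 32-bit sources
 * (4 bytes each) also round up to a single register. */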
508 
509 /* Matches FADD x, x with compatible modifiers. Since x + x = x * 2 for
510  * any x, including x of the form f(y) for some swizzle/abs/neg function f */
511 
512 static bool
513 mir_is_add_2(midgard_instruction *ins)
514 {
515         if (ins->op != midgard_alu_op_fadd)
516                 return false;
517 
518         if (ins->src[0] != ins->src[1])
519                 return false;
520 
521         if (ins->src_types[0] != ins->src_types[1])
522                 return false;
523 
524         for (unsigned i = 0; i < MIR_VEC_COMPONENTS; ++i) {
525                 if (ins->swizzle[0][i] != ins->swizzle[1][i])
526                         return false;
527         }
528 
529         if (ins->src_abs[0] != ins->src_abs[1])
530                 return false;
531 
532         if (ins->src_neg[0] != ins->src_neg[1])
533                 return false;
534 
535         return true;
536 }
537 
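/* Rewrite FADD x, x into FMUL x, #2.0 when filling a multiplier (or VLUT)
 * unit. The 2.0 is stored as a 16-bit half-float inline constant (0x4000);
 * since x + x == x * 2 the result is unchanged, but the instruction can now
 * occupy units it otherwise could not. */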
538 static void
539 mir_adjust_unit(midgard_instruction *ins, unsigned unit)
540 {
541         /* FADD x, x = FMUL x, #2 */
542         if (mir_is_add_2(ins) && (unit & (UNITS_MUL | UNIT_VLUT))) {
543                 ins->op = midgard_alu_op_fmul;
544 
545                 ins->src[1] = ~0;
546                 ins->src_abs[1] = false;
547                 ins->src_neg[1] = false;
548 
549                 ins->has_inline_constant = true;
550                 ins->inline_constant = _mesa_float_to_half(2.0);
551         }
552 }
553 
554 static bool
555 mir_has_unit(midgard_instruction *ins, unsigned unit)
556 {
557         if (alu_opcode_props[ins->op].props & unit)
558                 return true;
559 
560         /* FADD x, x can run on any adder or any multiplier */
561         if (mir_is_add_2(ins))
562                 return true;
563 
564         return false;
565 }
566 
567 /* Net change in liveness if an instruction were scheduled. Loosely based on
568  * ir3's scheduler. */
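
/* Roughly: destination bytes that are currently live are freed by scheduling
 * the instruction (we schedule bottom-up, so the definition ends the live
 * range), while source bytes that are not yet live become newly live. A
 * negative return value therefore means scheduling the instruction should
 * reduce register pressure. */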
569 
570 static int
571 mir_live_effect(uint16_t *liveness, midgard_instruction *ins, bool destructive)
572 {
573         /* TODO: what if dest is used multiple times? */
574         int free_live = 0;
575 
576         if (ins->dest < SSA_FIXED_MINIMUM) {
577                 unsigned bytemask = mir_bytemask(ins);
578                 bytemask = util_next_power_of_two(bytemask + 1) - 1;
579                 free_live += util_bitcount(liveness[ins->dest] & bytemask);
580 
581                 if (destructive)
582                         liveness[ins->dest] &= ~bytemask;
583         }
584 
585         int new_live = 0;
586 
587         mir_foreach_src(ins, s) {
588                 unsigned S = ins->src[s];
589 
590                 bool dupe = false;
591 
592                 for (unsigned q = 0; q < s; ++q)
593                         dupe |= (ins->src[q] == S);
594 
595                 if (dupe)
596                         continue;
597 
598                 if (S < SSA_FIXED_MINIMUM) {
599                         unsigned bytemask = mir_bytemask_of_read_components(ins, S);
600                         bytemask = util_next_power_of_two(bytemask + 1) - 1;
601 
602                         /* Count only the new components */
603                         new_live += util_bitcount(bytemask & ~(liveness[S]));
604 
605                         if (destructive)
606                                 liveness[S] |= bytemask;
607                 }
608         }
609 
610         return new_live - free_live;
611 }
612 
613 static midgard_instruction *
614 mir_choose_instruction(
615                 midgard_instruction **instructions,
616                 uint16_t *liveness,
617                 BITSET_WORD *worklist, unsigned count,
618                 struct midgard_predicate *predicate)
619 {
620         /* Parse the predicate */
621         unsigned tag = predicate->tag;
622         bool alu = tag == TAG_ALU_4;
623         bool ldst = tag == TAG_LOAD_STORE_4;
624         unsigned unit = predicate->unit;
625         bool branch = alu && (unit == ALU_ENAB_BR_COMPACT);
626         bool scalar = (unit != ~0) && (unit & UNITS_SCALAR);
627         bool no_cond = predicate->no_cond;
628 
629         unsigned mask = predicate->mask;
630         unsigned dest = predicate->dest;
631         bool needs_dest = mask & 0xF;
632 
633         /* Iterate to find the best instruction satisfying the predicate */
634         unsigned i;
635 
636         signed best_index = -1;
637         signed best_effect = INT_MAX;
638         bool best_conditional = false;
639 
640         /* Enforce a simple metric limiting distance to keep down register
641          * pressure. TODO: replace with liveness tracking for much better
642          * results */
643 
644         unsigned max_active = 0;
645         unsigned max_distance = 36;
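        /* In other words, candidates 36 or more positions (in the flattened
         * instruction order) behind the newest ready instruction are skipped;
         * a crude stand-in for the liveness tracking mentioned in the TODO
         * above. */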
646 
647         BITSET_FOREACH_SET(i, worklist, count) {
648                 max_active = MAX2(max_active, i);
649         }
650 
651         BITSET_FOREACH_SET(i, worklist, count) {
652                 bool is_move = alu &&
653                         (instructions[i]->op == midgard_alu_op_imov ||
654                          instructions[i]->op == midgard_alu_op_fmov);
655 
656                 if ((max_active - i) >= max_distance)
657                         continue;
658 
659                 if (tag != ~0 && instructions[i]->type != tag)
660                         continue;
661 
662                 if (predicate->exclude != ~0 && instructions[i]->dest == predicate->exclude)
663                         continue;
664 
665                 if (alu && !branch && !(mir_has_unit(instructions[i], unit)))
666                         continue;
667 
668                 /* 0: don't care, 1: no moves, 2: only moves */
669                 if (predicate->move_mode && ((predicate->move_mode - 1) != is_move))
670                         continue;
671 
672                 if (branch && !instructions[i]->compact_branch)
673                         continue;
674 
675                 if (alu && scalar && !mir_is_scalar(instructions[i]))
676                         continue;
677 
678                 if (alu && !mir_adjust_constants(instructions[i], predicate, false))
679                         continue;
680 
681                 if (needs_dest && instructions[i]->dest != dest)
682                         continue;
683 
684                 if (mask && ((~instructions[i]->mask) & mask))
685                         continue;
686 
687                 if (instructions[i]->mask & predicate->no_mask)
688                         continue;
689 
690                 if (ldst && mir_pipeline_count(instructions[i]) + predicate->pipeline_count > 2)
691                         continue;
692 
693                 bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->op);
694                 conditional |= (branch && instructions[i]->branch.conditional);
695 
696                 if (conditional && no_cond)
697                         continue;
698 
699                 int effect = mir_live_effect(liveness, instructions[i], false);
700 
701                 if (effect > best_effect)
702                         continue;
703 
704                 if (effect == best_effect && (signed) i < best_index)
705                         continue;
706 
707                 best_effect = effect;
708                 best_index = i;
709                 best_conditional = conditional;
710         }
711 
712         /* Did we find anything?  */
713 
714         if (best_index < 0)
715                 return NULL;
716 
717         /* If we found something, remove it from the worklist */
718         assert(best_index < count);
719 
720         if (predicate->destructive) {
721                 BITSET_CLEAR(worklist, best_index);
722 
723                 if (alu)
724                         mir_adjust_constants(instructions[best_index], predicate, true);
725 
726                 if (ldst)
727                         predicate->pipeline_count += mir_pipeline_count(instructions[best_index]);
728 
729                 if (alu)
730                         mir_adjust_unit(instructions[best_index], unit);
731 
732                 /* Once we schedule a conditional, we can't again */
733                 predicate->no_cond |= best_conditional;
734                 mir_live_effect(liveness, instructions[best_index], true);
735         }
736 
737         return instructions[best_index];
738 }
739 
740 /* Still, we don't choose instructions in a vacuum. We need a way to choose the
741  * best bundle type (ALU, load/store, texture). Nondestructive. */
742 
743 static unsigned
744 mir_choose_bundle(
745                 midgard_instruction **instructions,
746                 uint16_t *liveness,
747                 BITSET_WORD *worklist, unsigned count)
748 {
749         /* At the moment, our algorithm is very simple - use the bundle of the
750          * best instruction, regardless of what else could be scheduled
751          * alongside it. This is not optimal but it works okay for in-order */
752 
753         struct midgard_predicate predicate = {
754                 .tag = ~0,
755                 .destructive = false,
756                 .exclude = ~0
757         };
758 
759         midgard_instruction *chosen = mir_choose_instruction(instructions, liveness, worklist, count, &predicate);
760 
761         if (chosen)
762                 return chosen->type;
763         else
764                 return ~0;
765 }
766 
767 /* We want to choose an ALU instruction filling a given unit */
768 static void
769 mir_choose_alu(midgard_instruction **slot,
770                 midgard_instruction **instructions,
771                 uint16_t *liveness,
772                 BITSET_WORD *worklist, unsigned len,
773                 struct midgard_predicate *predicate,
774                 unsigned unit)
775 {
776         /* Did we already schedule to this slot? */
777         if ((*slot) != NULL)
778                 return;
779 
780         /* If not, try to schedule something */
781         predicate->unit = unit;
782         *slot = mir_choose_instruction(instructions, liveness, worklist, len, predicate);
783 
784         /* Store unit upon scheduling */
785         if (*slot && !((*slot)->compact_branch))
786                 (*slot)->unit = unit;
787 }
788 
789 /* When we are scheduling a branch/csel, we need the consumed condition in the
790  * same block as a pipeline register. There are two options to enable this:
791  *
792  *  - Move the conditional into the bundle. Preferred, but only works if the
793  *    conditional is used only once and is from this block.
794  *  - Copy the conditional.
795  *
796  * We search for the conditional. If it's in this block, single-use, and
797  * without embedded constants, we schedule it immediately. Otherwise, we
798  * schedule a move for it.
799  *
800  * mir_comparison_mobile is a helper to find the moveable condition.
801  */
802 
803 static unsigned
804 mir_comparison_mobile(
805                 compiler_context *ctx,
806                 midgard_instruction **instructions,
807                 struct midgard_predicate *predicate,
808                 unsigned count,
809                 unsigned cond)
810 {
811         if (!mir_single_use(ctx, cond))
812                 return ~0;
813 
814         unsigned ret = ~0;
815 
816         for (unsigned i = 0; i < count; ++i) {
817                 if (instructions[i]->dest != cond)
818                         continue;
819 
820                 /* Must fit in an ALU bundle */
821                 if (instructions[i]->type != TAG_ALU_4)
822                         return ~0;
823 
824                 /* If it would itself require a condition, that's recursive */
825                 if (OP_IS_CSEL(instructions[i]->op))
826                         return ~0;
827 
828                 /* We'll need to rewrite to .w but that doesn't work for vector
829                  * ops that don't replicate (ball/bany), so bail there */
830 
831                 if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->op].props))
832                         return ~0;
833 
834                 /* Ensure it will fit with constants */
835 
836                 if (!mir_adjust_constants(instructions[i], predicate, false))
837                         return ~0;
838 
839                 /* Ensure it is written only once */
840 
841                 if (ret != ~0)
842                         return ~0;
843                 else
844                         ret = i;
845         }
846 
847         /* Inject constants now that we are sure we want to */
848         if (ret != ~0)
849                 mir_adjust_constants(instructions[ret], predicate, true);
850 
851         return ret;
852 }
853 
854 /* Using the information about the moveable conditional itself, we either pop
855  * that condition off the worklist for use now, or create a move to
856  * artificially schedule instead as a fallback */
857 
858 static midgard_instruction *
859 mir_schedule_comparison(
860                 compiler_context *ctx,
861                 midgard_instruction **instructions,
862                 struct midgard_predicate *predicate,
863                 BITSET_WORD *worklist, unsigned count,
864                 unsigned cond, bool vector, unsigned *swizzle,
865                 midgard_instruction *user)
866 {
867         /* TODO: swizzle when scheduling */
868         unsigned comp_i =
869                 (!vector && (swizzle[0] == 0)) ?
870                 mir_comparison_mobile(ctx, instructions, predicate, count, cond) : ~0;
871 
872         /* If we can, schedule the condition immediately */
873         if ((comp_i != ~0) && BITSET_TEST(worklist, comp_i)) {
874                 assert(comp_i < count);
875                 BITSET_CLEAR(worklist, comp_i);
876                 return instructions[comp_i];
877         }
878 
879         /* Otherwise, we insert a move */
880 
881         midgard_instruction mov = v_mov(cond, cond);
882         mov.mask = vector ? 0xF : 0x1;
883         memcpy(mov.swizzle[1], swizzle, sizeof(mov.swizzle[1]));
884 
885         return mir_insert_instruction_before(ctx, user, mov);
886 }
887 
888 /* Most generally, we need instructions writing to r31 in the appropriate
889  * components */
890 
891 static midgard_instruction *
892 mir_schedule_condition(compiler_context *ctx,
893                 struct midgard_predicate *predicate,
894                 BITSET_WORD *worklist, unsigned count,
895                 midgard_instruction **instructions,
896                 midgard_instruction *last)
897 {
898         /* For a branch, the condition is the only argument; for csel, third */
899         bool branch = last->compact_branch;
900         unsigned condition_index = branch ? 0 : 2;
901 
902         /* csel_v is vector; otherwise, conditions are scalar */
903         bool vector = !branch && OP_IS_CSEL_V(last->op);
904 
905         /* Grab the conditional instruction */
906 
907         midgard_instruction *cond = mir_schedule_comparison(
908                         ctx, instructions, predicate, worklist, count, last->src[condition_index],
909                         vector, last->swizzle[2], last);
910 
911         /* We have exclusive control over this (possibly a move) conditional
912          * instruction, so we can rewrite it to write the conditional register (r31) */
913 
914         predicate->exclude = cond->dest;
915         cond->dest = SSA_FIXED_REGISTER(31);
916 
917         if (!vector) {
918                 cond->mask = (1 << COMPONENT_W);
919 
920                 mir_foreach_src(cond, s) {
921                         if (cond->src[s] == ~0)
922                                 continue;
923 
924                         for (unsigned q = 0; q < 4; ++q)
925                                 cond->swizzle[s][q + COMPONENT_W] = cond->swizzle[s][q];
926                 }
927         }
928 
929         /* Schedule the unit: csel is always in the latter pipeline, so a csel
930          * condition must be in the former pipeline stage (vmul/sadd),
931          * depending on scalar/vector of the instruction itself. A branch must
932          * be written from the latter pipeline stage and a branch condition is
933          * always scalar, so it is always in smul (exception: ball/bany, which
934          * will be vadd) */
935 
936         if (branch)
937                 cond->unit = UNIT_SMUL;
938         else
939                 cond->unit = vector ? UNIT_VMUL : UNIT_SADD;
940 
941         return cond;
942 }
943 
944 /* Schedules a single bundle of the given type */
945 
946 static midgard_bundle
947 mir_schedule_texture(
948                 midgard_instruction **instructions,
949                 uint16_t *liveness,
950                 BITSET_WORD *worklist, unsigned len,
951                 bool is_vertex)
952 {
953         struct midgard_predicate predicate = {
954                 .tag = TAG_TEXTURE_4,
955                 .destructive = true,
956                 .exclude = ~0
957         };
958 
959         midgard_instruction *ins =
960                 mir_choose_instruction(instructions, liveness, worklist, len, &predicate);
961 
962         mir_update_worklist(worklist, len, instructions, ins);
963 
964         struct midgard_bundle out = {
965                 .tag = ins->op == TEXTURE_OP_BARRIER ?
966                         TAG_TEXTURE_4_BARRIER :
967                         (ins->op == TEXTURE_OP_TEXEL_FETCH) || is_vertex ?
968                         TAG_TEXTURE_4_VTX : TAG_TEXTURE_4,
969                 .instruction_count = 1,
970                 .instructions = { ins }
971         };
972 
973         return out;
974 }
975 
976 static midgard_bundle
977 mir_schedule_ldst(
978                 midgard_instruction **instructions,
979                 uint16_t *liveness,
980                 BITSET_WORD *worklist, unsigned len)
981 {
982         struct midgard_predicate predicate = {
983                 .tag = TAG_LOAD_STORE_4,
984                 .destructive = true,
985                 .exclude = ~0
986         };
987 
988         /* Try to pick two load/store ops. Second not guaranteed to exist */
989 
990         midgard_instruction *ins =
991                 mir_choose_instruction(instructions, liveness, worklist, len, &predicate);
992 
993         midgard_instruction *pair =
994                 mir_choose_instruction(instructions, liveness, worklist, len, &predicate);
995 
996         struct midgard_bundle out = {
997                 .tag = TAG_LOAD_STORE_4,
998                 .instruction_count = pair ? 2 : 1,
999                 .instructions = { ins, pair }
1000         };
1001 
1002         /* We have to update the worklist atomically, since the two
1003          * instructions run concurrently (TODO: verify it's not pipelined) */
1004 
1005         mir_update_worklist(worklist, len, instructions, ins);
1006         mir_update_worklist(worklist, len, instructions, pair);
1007 
1008         return out;
1009 }
1010 
1011 static void
1012 mir_schedule_zs_write(
1013                 compiler_context *ctx,
1014                 struct midgard_predicate *predicate,
1015                 midgard_instruction **instructions,
1016                 uint16_t *liveness,
1017                 BITSET_WORD *worklist, unsigned len,
1018                 midgard_instruction *branch,
1019                 midgard_instruction **smul,
1020                 midgard_instruction **vadd,
1021                 midgard_instruction **vlut,
1022                 bool stencil)
1023 {
1024         bool success = false;
1025         unsigned idx = stencil ? 3 : 2;
1026         unsigned src = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(1) : branch->src[idx];
1027 
1028         predicate->dest = src;
1029         predicate->mask = 0x1;
1030 
1031         midgard_instruction **units[] = { smul, vadd, vlut };
1032         unsigned unit_names[] = { UNIT_SMUL, UNIT_VADD, UNIT_VLUT };
1033 
1034         for (unsigned i = 0; i < 3; ++i) {
1035                 if (*(units[i]))
1036                         continue;
1037 
1038                 predicate->unit = unit_names[i];
1039                 midgard_instruction *ins =
1040                         mir_choose_instruction(instructions, liveness, worklist, len, predicate);
1041 
1042                 if (ins) {
1043                         ins->unit = unit_names[i];
1044                         *(units[i]) = ins;
1045                         success |= true;
1046                         break;
1047                 }
1048         }
1049 
1050         predicate->dest = predicate->mask = 0;
1051 
1052         if (success)
1053                 return;
1054 
1055         midgard_instruction *mov = ralloc(ctx, midgard_instruction);
1056         *mov = v_mov(src, make_compiler_temp(ctx));
1057         mov->mask = 0x1;
1058 
1059         branch->src[idx] = mov->dest;
1060 
1061         if (stencil) {
1062                 unsigned swizzle = (branch->src[0] == ~0) ? COMPONENT_Y : COMPONENT_X;
1063 
1064                 for (unsigned c = 0; c < 16; ++c)
1065                         mov->swizzle[1][c] = swizzle;
1066         }
1067 
1068         for (unsigned i = 0; i < 3; ++i) {
1069                 if (!(*(units[i]))) {
1070                         *(units[i]) = mov;
1071                         mov->unit = unit_names[i];
1072                         return;
1073                 }
1074         }
1075 
1076         unreachable("Could not schedule Z/S move to any unit");
1077 }
1078 
1079 static midgard_bundle
1080 mir_schedule_alu(
1081                 compiler_context *ctx,
1082                 midgard_instruction **instructions,
1083                 uint16_t *liveness,
1084                 BITSET_WORD *worklist, unsigned len)
1085 {
1086         struct midgard_bundle bundle = {};
1087 
1088         unsigned bytes_emitted = sizeof(bundle.control);
1089 
1090         struct midgard_predicate predicate = {
1091                 .tag = TAG_ALU_4,
1092                 .destructive = true,
1093                 .exclude = ~0,
1094                 .constants = &bundle.constants
1095         };
1096 
1097         midgard_instruction *vmul = NULL;
1098         midgard_instruction *vadd = NULL;
1099         midgard_instruction *vlut = NULL;
1100         midgard_instruction *smul = NULL;
1101         midgard_instruction *sadd = NULL;
1102         midgard_instruction *branch = NULL;
1103 
1104         mir_choose_alu(&branch, instructions, liveness, worklist, len, &predicate, ALU_ENAB_BR_COMPACT);
1105         mir_update_worklist(worklist, len, instructions, branch);
1106         unsigned writeout = branch ? branch->writeout : 0;
1107 
1108         if (branch && branch->branch.conditional) {
1109                 midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, branch);
1110 
1111                 if (cond->unit == UNIT_VADD)
1112                         vadd = cond;
1113                 else if (cond->unit == UNIT_SMUL)
1114                         smul = cond;
1115                 else
1116                         unreachable("Bad condition");
1117         }
1118 
1119         /* If we have a render target reference, schedule a move for it. Since
1120          * this will be in sadd, we boost this to prevent scheduling csel into
1121          * smul */
1122 
1123         if (writeout && (branch->constants.u32[0] || ctx->is_blend)) {
1124                 sadd = ralloc(ctx, midgard_instruction);
1125                 *sadd = v_mov(~0, make_compiler_temp(ctx));
1126                 sadd->unit = UNIT_SADD;
1127                 sadd->mask = 0x1;
1128                 sadd->has_inline_constant = true;
1129                 sadd->inline_constant = branch->constants.u32[0];
1130                 branch->src[1] = sadd->dest;
1131                 branch->src_types[1] = sadd->dest_type;
1132 
1133                 /* Mask off any conditionals. Could be optimized to just scalar
1134                  * conditionals TODO */
1135                 predicate.no_cond = true;
1136         }
1137 
1138         if (writeout) {
1139                 /* Propagate up */
1140                 bundle.last_writeout = branch->last_writeout;
1141         }
1142 
1143         /* When MRT is in use, writeout loops require r1.w to be filled (with a
1144          * return address? by symmetry with Bifrost, etc), at least for blend
1145          * shaders to work properly. When MRT is not in use (including on SFBD
1146          * GPUs), this is not needed. Blend shaders themselves don't know if
1147          * they are paired with MRT or not so they always need this, at least
1148          * on MFBD GPUs. */
1149 
1150         if (writeout && (ctx->is_blend || ctx->writeout_branch[1])) {
1151                 vadd = ralloc(ctx, midgard_instruction);
1152                 *vadd = v_mov(~0, make_compiler_temp(ctx));
1153 
1154                 if (!ctx->is_blend) {
1155                         vadd->op = midgard_alu_op_iadd;
1156                         vadd->src[0] = SSA_FIXED_REGISTER(31);
1157                         vadd->src_types[0] = nir_type_uint32;
1158 
1159                         for (unsigned c = 0; c < 16; ++c)
1160                                 vadd->swizzle[0][c] = COMPONENT_X;
1161 
1162                         vadd->has_inline_constant = true;
1163                         vadd->inline_constant = 0;
1164                 } else {
1165                         vadd->src[1] = SSA_FIXED_REGISTER(1);
1166                         vadd->src_types[0] = nir_type_uint32;
1167 
1168                         for (unsigned c = 0; c < 16; ++c)
1169                                 vadd->swizzle[1][c] = COMPONENT_W;
1170                 }
1171 
1172                 vadd->unit = UNIT_VADD;
1173                 vadd->mask = 0x1;
1174                 branch->dest = vadd->dest;
1175                 branch->dest_type = vadd->dest_type;
1176         }
1177 
1178         if (writeout & PAN_WRITEOUT_Z)
1179                 mir_schedule_zs_write(ctx, &predicate, instructions, liveness, worklist, len, branch, &smul, &vadd, &vlut, false);
1180 
1181         if (writeout & PAN_WRITEOUT_S)
1182                 mir_schedule_zs_write(ctx, &predicate, instructions, liveness, worklist, len, branch, &smul, &vadd, &vlut, true);
1183 
1184         mir_choose_alu(&smul, instructions, liveness, worklist, len, &predicate, UNIT_SMUL);
1185 
1186         for (unsigned mode = 1; mode < 3; ++mode) {
1187                 predicate.move_mode = mode;
1188                 predicate.no_mask = writeout ? (1 << 3) : 0;
1189                 mir_choose_alu(&vlut, instructions, liveness, worklist, len, &predicate, UNIT_VLUT);
1190                 predicate.no_mask = 0;
1191                 mir_choose_alu(&vadd, instructions, liveness, worklist, len, &predicate, UNIT_VADD);
1192         }
1193 
1194         /* Reset */
1195         predicate.move_mode = 0;
1196 
1197         mir_update_worklist(worklist, len, instructions, vlut);
1198         mir_update_worklist(worklist, len, instructions, vadd);
1199         mir_update_worklist(worklist, len, instructions, smul);
1200 
1201         bool vadd_csel = vadd && OP_IS_CSEL(vadd->op);
1202         bool smul_csel = smul && OP_IS_CSEL(smul->op);
1203 
1204         if (vadd_csel || smul_csel) {
1205                 midgard_instruction *ins = vadd_csel ? vadd : smul;
1206                 midgard_instruction *cond = mir_schedule_condition(ctx, &predicate, worklist, len, instructions, ins);
1207 
1208                 if (cond->unit == UNIT_VMUL)
1209                         vmul = cond;
1210                 else if (cond->unit == UNIT_SADD)
1211                         sadd = cond;
1212                 else
1213                         unreachable("Bad condition");
1214         }
1215 
1216         /* Stage 2, let's schedule sadd before vmul for writeout */
1217         mir_choose_alu(&sadd, instructions, liveness, worklist, len, &predicate, UNIT_SADD);
1218 
1219         /* Check if writeout reads its own register */
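        /* (In more detail: the branch's writeout source must be written in full
         * within this bundle, per the header comment. We sum the write masks of
         * the already-scheduled stages that produce it; if components are still
         * missing, or one of those stages also reads the writeout source, we
         * either try to co-schedule a suitable instruction into vmul or fall
         * back to inserting a move to a fresh temporary and rewriting uses.) */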
1220 
1221         if (writeout) {
1222                 midgard_instruction *stages[] = { sadd, vadd, smul, vlut };
1223                 unsigned src = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : branch->src[0];
1224                 unsigned writeout_mask = 0x0;
1225                 bool bad_writeout = false;
1226 
1227                 for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
1228                         if (!stages[i])
1229                                 continue;
1230 
1231                         if (stages[i]->dest != src)
1232                                 continue;
1233 
1234                         writeout_mask |= stages[i]->mask;
1235                         bad_writeout |= mir_has_arg(stages[i], branch->src[0]);
1236                 }
1237 
1238                 /* It's possible we'll be able to schedule something into vmul
1239                  * to fill r0. Let's peek into the future, trying to schedule
1240                  * vmul specially that way. */
1241 
1242                 unsigned full_mask = 0xF;
1243 
1244                 if (!bad_writeout && writeout_mask != full_mask) {
1245                         predicate.unit = UNIT_VMUL;
1246                         predicate.dest = src;
1247                         predicate.mask = writeout_mask ^ full_mask;
1248 
1249                         struct midgard_instruction *peaked =
1250                                 mir_choose_instruction(instructions, liveness, worklist, len, &predicate);
1251 
1252                         if (peaked) {
1253                                 vmul = peaked;
1254                                 vmul->unit = UNIT_VMUL;
1255                                 writeout_mask |= predicate.mask;
1256                                 assert(writeout_mask == full_mask);
1257                         }
1258 
1259                         /* Cleanup */
1260                         predicate.dest = predicate.mask = 0;
1261                 }
1262 
1263                 /* Finally, add a move if necessary */
1264                 if (bad_writeout || writeout_mask != full_mask) {
1265                         unsigned temp = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : make_compiler_temp(ctx);
1266 
1267                         vmul = ralloc(ctx, midgard_instruction);
1268                         *vmul = v_mov(src, temp);
1269                         vmul->unit = UNIT_VMUL;
1270                         vmul->mask = full_mask ^ writeout_mask;
1271 
1272                         /* Rewrite to use our temp */
1273 
1274                         for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
1275                                 if (stages[i])
1276                                         mir_rewrite_index_dst_single(stages[i], src, temp);
1277                         }
1278 
1279                         mir_rewrite_index_src_single(branch, src, temp);
1280                 }
1281         }
1282 
1283         mir_choose_alu(&vmul, instructions, liveness, worklist, len, &predicate, UNIT_VMUL);
1284 
1285         mir_update_worklist(worklist, len, instructions, vmul);
1286         mir_update_worklist(worklist, len, instructions, sadd);
1287 
1288         bundle.has_embedded_constants = predicate.constant_mask != 0;
1289 
1290         unsigned padding = 0;
1291 
1292         /* Now that we have finished scheduling, build up the bundle */
1293         midgard_instruction *stages[] = { vmul, sadd, vadd, smul, vlut, branch };
1294 
1295         for (unsigned i = 0; i < ARRAY_SIZE(stages); ++i) {
1296                 if (stages[i]) {
1297                         bundle.control |= stages[i]->unit;
1298                         bytes_emitted += bytes_for_instruction(stages[i]);
1299                         bundle.instructions[bundle.instruction_count++] = stages[i];
1300 
1301                         /* If we branch, we can't spill to TLS since the store
1302                          * instruction will never get executed. We could try to
1303                          * break the bundle but this is probably easier for
1304                          * now. */
1305 
1306                         if (branch)
1307                                 stages[i]->no_spill |= (1 << REG_CLASS_WORK);
1308                 }
1309         }
1310 
1311         /* Pad the ALU bundle to the nearest quadword (16 bytes) */
1312 
1313         if (bytes_emitted & 15) {
1314                 padding = 16 - (bytes_emitted & 15);
1315                 bytes_emitted += padding;
1316         }
1317 
1318         /* Constants must always be quadwords */
1319         if (bundle.has_embedded_constants)
1320                 bytes_emitted += 16;
1321 
1322         /* Size ALU instruction for tag */
1323         bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1;
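        /* That is, the tag encodes the bundle size in 16-byte quadwords:
         * TAG_ALU_4 is one quadword and each additional quadword bumps the
         * tag by one, with the writeout variants sitting 4 tags higher
         * (hence the += 4 below). */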
1324 
1325         bool tilebuf_wait = branch && branch->compact_branch &&
1326            branch->branch.target_type == TARGET_TILEBUF_WAIT;
1327 
1328         /* MRT capable GPUs use a special writeout procedure */
1329         if ((writeout || tilebuf_wait) && !(ctx->quirks & MIDGARD_NO_UPPER_ALU))
1330                 bundle.tag += 4;
1331 
1332         bundle.padding = padding;
1333         bundle.control |= bundle.tag;
1334 
1335         return bundle;
1336 }
1337 
1338 /* Schedule a single block by iterating over its instructions to create bundles.
1339  * As we go, tally up the bundle sizes to compute the block size. */
1340 
1341 
1342 static void
1343 schedule_block(compiler_context *ctx, midgard_block *block)
1344 {
1345         /* Copy list to dynamic array */
1346         unsigned len = 0;
1347         midgard_instruction **instructions = flatten_mir(block, &len);
1348 
1349         if (!len)
1350                 return;
1351 
1352         /* Calculate dependencies and initial worklist */
1353         unsigned node_count = ctx->temp_count + 1;
1354         mir_create_dependency_graph(instructions, len, node_count);
1355 
1356         /* Allocate the worklist */
1357         size_t sz = BITSET_WORDS(len) * sizeof(BITSET_WORD);
1358         BITSET_WORD *worklist = calloc(sz, 1);
1359         uint16_t *liveness = calloc(node_count, 2);
1360         mir_initialize_worklist(worklist, instructions, len);
1361 
1362         struct util_dynarray bundles;
1363         util_dynarray_init(&bundles, NULL);
1364 
1365         block->quadword_count = 0;
1366 
1367         for (;;) {
1368                 unsigned tag = mir_choose_bundle(instructions, liveness, worklist, len);
1369                 midgard_bundle bundle;
1370 
1371                 if (tag == TAG_TEXTURE_4)
1372                         bundle = mir_schedule_texture(instructions, liveness, worklist, len, ctx->stage != MESA_SHADER_FRAGMENT);
1373                 else if (tag == TAG_LOAD_STORE_4)
1374                         bundle = mir_schedule_ldst(instructions, liveness, worklist, len);
1375                 else if (tag == TAG_ALU_4)
1376                         bundle = mir_schedule_alu(ctx, instructions, liveness, worklist, len);
1377                 else
1378                         break;
1379 
1380                 util_dynarray_append(&bundles, midgard_bundle, bundle);
1381                 block->quadword_count += midgard_tag_props[bundle.tag].size;
1382         }
1383 
1384         /* We emitted bundles backwards; copy into the block in reverse order */
1385 
1386         util_dynarray_init(&block->bundles, block);
1387         util_dynarray_foreach_reverse(&bundles, midgard_bundle, bundle) {
1388                 util_dynarray_append(&block->bundles, midgard_bundle, *bundle);
1389         }
1390         util_dynarray_fini(&bundles);
1391 
1392         block->scheduled = true;
1393         ctx->quadword_count += block->quadword_count;
1394 
1395         /* Reorder instructions to match the bundled order. First remove existing
1396          * instructions and then recreate the list */
1397 
1398         mir_foreach_instr_in_block_safe(block, ins) {
1399                 list_del(&ins->link);
1400         }
1401 
1402         mir_foreach_instr_in_block_scheduled_rev(block, ins) {
1403                 list_add(&ins->link, &block->base.instructions);
1404         }
1405 
1406         free(instructions); /* Allocated by flatten_mir() */
1407         free(worklist);
1408         free(liveness);
1409 }
1410 
1411 void
1412 midgard_schedule_program(compiler_context *ctx)
1413 {
1414         midgard_promote_uniforms(ctx);
1415 
1416         /* Must be lowered right before scheduling */
1417         mir_squeeze_index(ctx);
1418         mir_lower_special_reads(ctx);
1419         mir_squeeze_index(ctx);
1420 
1421         /* Lowering can introduce some dead moves */
1422 
1423         mir_foreach_block(ctx, _block) {
1424                 midgard_block *block = (midgard_block *) _block;
1425                 midgard_opt_dead_move_eliminate(ctx, block);
1426                 schedule_block(ctx, block);
1427         }
1428 
1429 }
1430