1 /*
2  * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Rob Clark <robclark@freedesktop.org>
25  */
26 
27 #include "util/dag.h"
28 #include "util/u_math.h"
29 
30 #include "ir3.h"
31 #include "ir3_compiler.h"
32 
33 #ifdef DEBUG
34 #define SCHED_DEBUG (ir3_shader_debug & IR3_DBG_SCHEDMSGS)
35 #else
36 #define SCHED_DEBUG 0
37 #endif
38 #define d(fmt, ...)                                                            \
39    do {                                                                        \
40       if (SCHED_DEBUG) {                                                       \
41          mesa_logi("SCHED: " fmt, ##__VA_ARGS__);                              \
42       }                                                                        \
43    } while (0)
44 
45 #define di(instr, fmt, ...)                                                    \
46    do {                                                                        \
47       if (SCHED_DEBUG) {                                                       \
48          struct log_stream *stream = mesa_log_streami();                       \
49          mesa_log_stream_printf(stream, "SCHED: " fmt ": ", ##__VA_ARGS__);    \
50          ir3_print_instr_stream(stream, instr);                                \
51          mesa_log_stream_destroy(stream);                                      \
52       }                                                                        \
53    } while (0)
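/* Both helpers compile away unless SCHED_DEBUG is enabled; in debug builds
 * that is driven by the IR3_DBG_SCHEDMSGS flag in ir3_shader_debug (typically
 * set via the IR3_SHADER_DEBUG environment variable).  Hedged usage sketch,
 * mirroring calls made later in this pass:
 *
 *    d("delay=%u", delay);
 *    di(instr, "schedule");
 */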
54 
55 /*
56  * Instruction Scheduling:
57  *
58  * A block-level pre-RA scheduler, which works by creating a DAG of
59  * instruction dependencies, and heuristically picking a DAG head
60  * (instruction with no unscheduled dependencies).
61  *
62  * Where possible, it tries to pick instructions that avoid nop delay
63  * slots, but it will prefer to pick instructions that reduce (or do
64  * not increase) the number of live values.
65  *
66  * If the only possible choices are instructions that increase the
67  * number of live values, it will try to pick the one with the earliest
68  * consumer (based on pre-sched program order).
69  *
70  * There are a few special cases that need to be handled, since sched
71  * is currently independent of register allocation.  Usages of address
72  * register (a0.x) or predicate register (p0.x) must be serialized.  Ie.
73  * if you have two pairs of instructions that write the same special
74  * register and then read it, then those pairs cannot be interleaved.
75  * To solve this, when we are in such a scheduling "critical section",
76  * and we encounter a conflicting write to a special register, we try
77  * to schedule any remaining instructions that use that value first.
78  *
79  * TODO we can detect too-large live_values here.. would be a good place
80  * to "spill" cheap things, like move from uniform/immed.  (Constructing
81  * list of ssa def consumers before sched pass would make this easier.
82  * Also, in general it is general it might be best not to re-use load_immed
83  * across blocks.
84  *
85  * TODO we can use (abs)/(neg) src modifiers in a lot of cases to reduce
86  * the # of immediates in play (or at least that would help with
87  * dEQP-GLES31.functional.ubo.random.all_per_block_buffers.*).. probably
88  * do this in a nir pass that inserts fneg/etc?  The cp pass should fold
89  * these into src modifiers..
90  */
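/* To make the special-register "critical section" above concrete, a
 * schematic (hedged, not real compiler output) example with two independent
 * indirect accesses:
 *
 *    (write a0.x) #1        (write a0.x) #2
 *    (read  a0.x) #1        (read  a0.x) #2
 *
 * Interleaving them as write #1, write #2, read #1, read #2 would make
 * read #1 observe the wrong a0.x value, so once write #1 is scheduled, its
 * remaining a0.x readers must be scheduled before write #2 is allowed in.
 */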
91 
92 struct ir3_sched_ctx {
93    struct ir3_block *block; /* the current block */
94    struct dag *dag;
95 
96    struct list_head unscheduled_list; /* unscheduled instructions */
97    struct ir3_instruction *scheduled; /* last scheduled instr */
98    struct ir3_instruction *addr0;     /* current a0.x user, if any */
99    struct ir3_instruction *addr1;     /* current a1.x user, if any */
100    struct ir3_instruction *pred;      /* current p0.x user, if any */
101 
102    struct ir3_instruction *split; /* most-recently-split a0/a1/p0 producer */
103 
104    int remaining_kills;
105    int remaining_tex;
106 
107    bool error;
108 
109    unsigned ip;
110 
111    int sy_delay;
112    int ss_delay;
113 
114    /* We order the scheduled (sy)/(ss) producers, and keep track of the
115     * index of the last waited on instruction, so we can know which
116     * instructions are still outstanding (and therefore would require us to
117     * wait for all outstanding instructions before scheduling a use).
118     */
119    int sy_index, first_outstanding_sy_index;
120    int ss_index, first_outstanding_ss_index;
121 };
122 
123 struct ir3_sched_node {
124    struct dag_node dag; /* must be first for util_dynarray_foreach */
125    struct ir3_instruction *instr;
126 
127    unsigned delay;
128    unsigned max_delay;
129 
130    unsigned sy_index;
131    unsigned ss_index;
132 
133    /* For ready instructions, the earliest possible ip at which they could
134     * be scheduled.
135     */
136    unsigned earliest_ip;
137 
138    /* For instructions that are a meta:collect src, once we schedule
139     * the first src of the collect, the entire vecN is live (at least
140     * from the PoV of the first RA pass.. the 2nd scalar pass can fill
141     * in some of the gaps, but often not all).  So we want to help out
142     * RA, and realize that as soon as we schedule the first collect
143     * src, there is no penalty to schedule the remainder (ie. they
144     * don't make additional values live).  In fact we'd prefer to
145     * schedule the rest ASAP to minimize the live range of the vecN.
146     *
147     * For instructions that are the src of a collect, we track the
148     * corresponding collect, and mark them as partially live as soon
149     * as any one of the src's is scheduled.
150     */
151    struct ir3_instruction *collect;
152    bool partially_live;
153 
154    /* Is this instruction a direct or indirect dependency for a kill?
155     * If so, we should prioritize it when possible
156     */
157    bool kill_path;
158 
159    /* This node represents a shader output.  A semi-common pattern in
160     * shaders is something along the lines of:
161     *
162     *    fragcolor.w = 1.0
163     *
164     * Which we'd prefer to schedule as late as possible, since it
165     * produces a live value that is never killed/consumed.  So detect
166     * outputs up-front, and avoid scheduling them unless they reduce
167     * register pressure (or are at least neutral).
168     */
169    bool output;
170 };
171 
172 #define foreach_sched_node(__n, __list)                                        \
173    list_for_each_entry (struct ir3_sched_node, __n, __list, dag.link)
174 
175 static void sched_node_init(struct ir3_sched_ctx *ctx,
176                             struct ir3_instruction *instr);
177 static void sched_node_add_dep(struct ir3_instruction *instr,
178                                struct ir3_instruction *src, int i);
179 
180 static bool
181 is_scheduled(struct ir3_instruction *instr)
182 {
183    return !!(instr->flags & IR3_INSTR_MARK);
184 }
185 
186 /* check_src_cond() passing an ir3_sched_ctx. */
187 static bool
188 sched_check_src_cond(struct ir3_instruction *instr,
189                      bool (*cond)(struct ir3_instruction *,
190                                   struct ir3_sched_ctx *),
191                      struct ir3_sched_ctx *ctx)
192 {
193    foreach_ssa_src (src, instr) {
194       /* meta:split/collect aren't real instructions, the thing that
195        * we actually care about is *their* srcs
196        */
197       if ((src->opc == OPC_META_SPLIT) || (src->opc == OPC_META_COLLECT)) {
198          if (sched_check_src_cond(src, cond, ctx))
199             return true;
200       } else {
201          if (cond(src, ctx))
202             return true;
203       }
204    }
205 
206    return false;
207 }
208 
209 /* Is this a sy producer that hasn't been waited on yet? */
210 
211 static bool
212 is_outstanding_sy(struct ir3_instruction *instr, struct ir3_sched_ctx *ctx)
213 {
214    if (!is_sy_producer(instr))
215       return false;
216 
217    /* The sched node is only valid within the same block; we cannot
218     * really say anything about src's from other blocks.
219     */
220    if (instr->block != ctx->block)
221       return true;
222 
223    struct ir3_sched_node *n = instr->data;
224    return n->sy_index >= ctx->first_outstanding_sy_index;
225 }
226 
227 static bool
228 is_outstanding_ss(struct ir3_instruction *instr, struct ir3_sched_ctx *ctx)
229 {
230    if (!is_ss_producer(instr))
231       return false;
232 
233    /* The sched node is only valid within the same block; we cannot
234     * really say anything about src's from other blocks.
235     */
236    if (instr->block != ctx->block)
237       return true;
238 
239    struct ir3_sched_node *n = instr->data;
240    return n->ss_index >= ctx->first_outstanding_ss_index;
241 }
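/* Hedged worked example for the two helpers above: if ctx->sy_index is 5 and
 * ctx->first_outstanding_sy_index is 3, the producers that were assigned
 * sy_index 3 and 4 have not been waited on yet, so scheduling one of their
 * consumers now would force a (sy) sync against everything still outstanding.
 */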
242 
243 static unsigned
244 cycle_count(struct ir3_instruction *instr)
245 {
246    if (instr->opc == OPC_META_COLLECT) {
247       /* Assume that only immed/const sources produce moves */
248       unsigned n = 0;
249       foreach_src (src, instr) {
250          if (src->flags & (IR3_REG_IMMED | IR3_REG_CONST))
251             n++;
252       }
253       return n;
254    } else if (is_meta(instr)) {
255       return 0;
256    } else {
257       return 1;
258    }
259 }
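/* Hedged example for the collect case above: a meta:collect with srcs
 * { immed, const, ssa, ssa } counts as 2 cycles, since only the immed/const
 * srcs are expected to turn into moves.
 */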
260 
261 static void
262 schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
263 {
264    assert(ctx->block == instr->block);
265 
266    /* remove from depth list:
267     */
268    list_delinit(&instr->node);
269 
270    if (writes_addr0(instr)) {
271       assert(ctx->addr0 == NULL);
272       ctx->addr0 = instr;
273    }
274 
275    if (writes_addr1(instr)) {
276       assert(ctx->addr1 == NULL);
277       ctx->addr1 = instr;
278    }
279 
280    if (writes_pred(instr)) {
281       assert(ctx->pred == NULL);
282       ctx->pred = instr;
283    }
284 
285    instr->flags |= IR3_INSTR_MARK;
286 
287    di(instr, "schedule");
288 
289    list_addtail(&instr->node, &instr->block->instr_list);
290    ctx->scheduled = instr;
291 
292    if (is_kill_or_demote(instr)) {
293       assert(ctx->remaining_kills > 0);
294       ctx->remaining_kills--;
295    }
296 
297    struct ir3_sched_node *n = instr->data;
298 
299    /* If this instruction is a meta:collect src, mark the remaining
300     * collect srcs as partially live.
301     */
302    if (n->collect) {
303       foreach_ssa_src (src, n->collect) {
304          if (src->block != instr->block)
305             continue;
306          struct ir3_sched_node *sn = src->data;
307          sn->partially_live = true;
308       }
309    }
310 
311    bool counts_for_delay = is_alu(instr) || is_flow(instr);
312 
313    /* TODO: switch to "cycles". For now try to match ir3_delay. */
314    unsigned delay_cycles = counts_for_delay ? 1 + instr->repeat : 0;
315 
316    /* We insert any nop's needed to get to earliest_ip, then advance
317     * delay_cycles by scheduling the instruction.
318     */
319    ctx->ip = MAX2(ctx->ip, n->earliest_ip) + delay_cycles;
320 
321    util_dynarray_foreach (&n->dag.edges, struct dag_edge, edge) {
322       unsigned delay = (unsigned)(uintptr_t)edge->data;
323       struct ir3_sched_node *child =
324          container_of(edge->child, struct ir3_sched_node, dag);
325       child->earliest_ip = MAX2(child->earliest_ip, ctx->ip + delay);
326    }
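   /* Hedged numeric example of the two updates above: with ctx->ip = 10,
    * n->earliest_ip = 12 and delay_cycles = 2, the new ctx->ip is
    * MAX2(10, 12) + 2 = 14; a consumer reached through an edge carrying
    * delay 3 then has its earliest_ip raised to at least 14 + 3 = 17.
    */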
327 
328    dag_prune_head(ctx->dag, &n->dag);
329 
330    unsigned cycles = cycle_count(instr);
331 
332    if (is_ss_producer(instr)) {
333       ctx->ss_delay = soft_ss_delay(instr);
334       n->ss_index = ctx->ss_index++;
335    } else if (!is_meta(instr) &&
336               sched_check_src_cond(instr, is_outstanding_ss, ctx)) {
337       ctx->ss_delay = 0;
338       ctx->first_outstanding_ss_index = ctx->ss_index;
339    } else if (ctx->ss_delay > 0) {
340       ctx->ss_delay -= MIN2(cycles, ctx->ss_delay);
341    }
342 
343    if (is_sy_producer(instr)) {
344       /* NOTE that this isn't an attempt to hide texture fetch latency,
345        * but an attempt to hide the cost of switching to another warp.
346        * If we can, we'd like to try to schedule another texture fetch
347        * before scheduling something that would sync.
348        */
349       ctx->sy_delay = soft_sy_delay(instr, ctx->block->shader);
350       assert(ctx->remaining_tex > 0);
351       ctx->remaining_tex--;
352       n->sy_index = ctx->sy_index++;
353    } else if (!is_meta(instr) &&
354               sched_check_src_cond(instr, is_outstanding_sy, ctx)) {
355       ctx->sy_delay = 0;
356       ctx->first_outstanding_sy_index = ctx->sy_index;
357    } else if (ctx->sy_delay > 0) {
358       ctx->sy_delay -= MIN2(cycles, ctx->sy_delay);
359    }
360 
361 }
362 
363 struct ir3_sched_notes {
364    /* there is at least one kill which could be scheduled, except
365     * for unscheduled bary.f's:
366     */
367    bool blocked_kill;
368    /* there is at least one instruction that could be scheduled,
369     * except for conflicting address/predicate register usage:
370     */
371    bool addr0_conflict, addr1_conflict, pred_conflict;
372 };
373 
374 static bool
375 should_skip(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
376 {
377    if (ctx->remaining_kills && (is_tex(instr) || is_mem(instr))) {
378       /* avoid texture/memory access if we have unscheduled kills
379        * that could make the expensive operation unnecessary.  By
380        * definition, if there are remaining kills, and this instr
381        * is not a dependency of a kill, there are other instructions
382        * that we can choose from.
383        */
384       struct ir3_sched_node *n = instr->data;
385       if (!n->kill_path)
386          return true;
387    }
388 
389    return false;
390 }
391 
392 /* could an instruction be scheduled if the specified ssa src were scheduled? */
393 static bool
394 could_sched(struct ir3_sched_ctx *ctx,
395             struct ir3_instruction *instr, struct ir3_instruction *src)
396 {
397    foreach_ssa_src (other_src, instr) {
398       /* if dependency not scheduled, we aren't ready yet: */
399       if ((src != other_src) && !is_scheduled(other_src)) {
400          return false;
401       }
402    }
403 
404    /* Instructions not in the current block can never be scheduled.
405     */
406    if (instr->block != src->block)
407       return false;
408 
409    return !should_skip(ctx, instr);
410 }
411 
412 /* Check if instruction is ok to schedule.  Make sure it is not blocked
413  * by use of addr/predicate register, etc.
414  */
415 static bool
416 check_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
417             struct ir3_instruction *instr)
418 {
419    assert(!is_scheduled(instr));
420 
421    if (instr == ctx->split) {
422       /* Don't schedule instructions created by splitting an a0.x/a1.x/p0.x
423        * write until another "normal" instruction has been scheduled.
424        */
425       return false;
426    }
427 
428    if (should_skip(ctx, instr))
429       return false;
430 
431    /* For instructions that write the address register, we need to
432     * make sure there is at least one otherwise-ready instruction
433     * that uses the addr value.
434     *
435     * NOTE if any instructions use the pred register and have other
436     * src args, we would need to do the same for writes_pred()..
437     */
438    if (writes_addr0(instr)) {
439       struct ir3 *ir = instr->block->shader;
440       bool ready = false;
441       for (unsigned i = 0; (i < ir->a0_users_count) && !ready; i++) {
442          struct ir3_instruction *indirect = ir->a0_users[i];
443          if (!indirect)
444             continue;
445          if (indirect->address->def != instr->dsts[0])
446             continue;
447          ready = could_sched(ctx, indirect, instr);
448       }
449 
450       /* nothing could be scheduled, so keep looking: */
451       if (!ready)
452          return false;
453    }
454 
455    if (writes_addr1(instr)) {
456       struct ir3 *ir = instr->block->shader;
457       bool ready = false;
458       for (unsigned i = 0; (i < ir->a1_users_count) && !ready; i++) {
459          struct ir3_instruction *indirect = ir->a1_users[i];
460          if (!indirect)
461             continue;
462          if (indirect->address->def != instr->dsts[0])
463             continue;
464          ready = could_sched(ctx, indirect, instr);
465       }
466 
467       /* nothing could be scheduled, so keep looking: */
468       if (!ready)
469          return false;
470    }
471 
472    /* if this is a write to address/predicate register, and that
473     * register is currently in use, we need to defer until it is
474     * free:
475     */
476    if (writes_addr0(instr) && ctx->addr0) {
477       assert(ctx->addr0 != instr);
478       notes->addr0_conflict = true;
479       return false;
480    }
481 
482    if (writes_addr1(instr) && ctx->addr1) {
483       assert(ctx->addr1 != instr);
484       notes->addr1_conflict = true;
485       return false;
486    }
487 
488    if (writes_pred(instr) && ctx->pred) {
489       assert(ctx->pred != instr);
490       notes->pred_conflict = true;
491       return false;
492    }
493 
494    /* if the instruction is a kill, we need to ensure *every*
495     * bary.f is scheduled.  The hw seems unhappy if the thread
496     * gets killed before the end-input (ei) flag is hit.
497     *
498     * We could do this by adding each bary.f instruction as
499     * virtual ssa src for the kill instruction.  But we have
500     * fixed length instr->srcs[].
501     *
502     * TODO we could handle this by false-deps now, probably.
503     */
504    if (is_kill_or_demote(instr)) {
505       struct ir3 *ir = instr->block->shader;
506 
507       for (unsigned i = 0; i < ir->baryfs_count; i++) {
508          struct ir3_instruction *baryf = ir->baryfs[i];
509          if (baryf->flags & IR3_INSTR_UNUSED)
510             continue;
511          if (!is_scheduled(baryf)) {
512             notes->blocked_kill = true;
513             return false;
514          }
515       }
516    }
517 
518    return true;
519 }
520 
521 /* Find the instr->ip of the closest use of an instruction, in
522  * pre-sched order.  This isn't going to be the same as post-sched
523  * order, but it is a reasonable approximation to limit scheduling
524  * instructions *too* early.  This is mostly to prevent bad behavior
525  * in cases where we have a large number of possible instructions
526  * to choose from, to avoid creating too much parallelism (ie. blowing
527  * up register pressure).
528  *
529  * See
530  * dEQP-GLES31.functional.atomic_counter.layout.reverse_offset.inc_dec.8_counters_5_calls_1_thread
531  */
532 static int
533 nearest_use(struct ir3_instruction *instr)
534 {
535    unsigned nearest = ~0;
536    foreach_ssa_use (use, instr)
537       if (!is_scheduled(use))
538          nearest = MIN2(nearest, use->ip);
539 
540    /* slight hack.. this heuristic tends to push bary.f's to later
541     * in the shader, closer to their uses.  But we actually would
542     * prefer to get these scheduled earlier, to unlock varying
543     * storage for more VS jobs:
544     */
545    if (is_input(instr))
546       nearest /= 2;
547 
548    return nearest;
549 }
550 
551 static bool
552 is_only_nonscheduled_use(struct ir3_instruction *instr,
553                          struct ir3_instruction *use)
554 {
555    foreach_ssa_use (other_use, instr) {
556       if (other_use != use && !is_scheduled(other_use))
557          return false;
558    }
559 
560    return true;
561 }
562 
563 static unsigned
564 new_regs(struct ir3_instruction *instr)
565 {
566    unsigned regs = 0;
567 
568    foreach_dst (dst, instr) {
569       if (!is_dest_gpr(dst))
570          continue;
571       regs += reg_elems(dst);
572    }
573 
574    return regs;
575 }
576 
577 /* find net change to live values if instruction were scheduled: */
578 static int
579 live_effect(struct ir3_instruction *instr)
580 {
581    struct ir3_sched_node *n = instr->data;
582    int new_live =
583       (n->partially_live || !instr->uses || instr->uses->entries == 0)
584          ? 0
585          : new_regs(instr);
586    int freed_live = 0;
587 
588    /* if we schedule something that causes a vecN to be live,
589     * then count all its other components too:
590     */
591    if (n->collect)
592       new_live *= n->collect->srcs_count;
593 
594    foreach_ssa_src_n (src, n, instr) {
595       if (__is_false_dep(instr, n))
596          continue;
597 
598       if (instr->block != src->block)
599          continue;
600 
601       if (is_only_nonscheduled_use(src, instr))
602          freed_live += new_regs(src);
603    }
604 
605    return new_live - freed_live;
606 }
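/* Hedged worked examples for live_effect(): an add that writes one reg and is
 * the last unscheduled use of two scalar srcs yields 1 - 2 = -1 (a net win);
 * the first scheduled src of a 4-component meta:collect counts as 4 new live
 * regs (the whole vecN becomes live), while the remaining collect srcs add no
 * new live regs once the node is marked partially_live.
 */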
607 
608 /* Determine if this is an instruction that we'd prefer not to schedule
609  * yet, in order to avoid an (ss)/(sy) sync.  This is limited by the
610  * ss_delay/sy_delay counters (ie. the more cycles it has been since
611  * the last SFU/tex, the less costly a sync would be) and by the number of
612  * outstanding SFU/tex instructions, to prevent a blowup in register pressure.
613  */
614 static bool
615 should_defer(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
616 {
617    if (ctx->ss_delay) {
618       if (sched_check_src_cond(instr, is_outstanding_ss, ctx))
619          return true;
620    }
621 
622    /* We mostly just want to try to schedule another texture fetch
623     * before scheduling something that would (sy) sync, so we can
624     * limit this rule to cases where there are remaining texture
625     * fetches
626     */
627    if (ctx->sy_delay && ctx->remaining_tex) {
628       if (sched_check_src_cond(instr, is_outstanding_sy, ctx))
629          return true;
630    }
631 
632    /* Avoid scheduling too many outstanding texture or sfu instructions at
633     * once by deferring further tex/SFU instructions. This both prevents
634     * stalls when the queue of texture/sfu instructions becomes too large,
635     * and prevents unacceptably large increases in register pressure from too
636     * many outstanding texture instructions.
637     */
638    if (ctx->sy_index - ctx->first_outstanding_sy_index >= 8 && is_sy_producer(instr))
639       return true;
640 
641    if (ctx->ss_index - ctx->first_outstanding_ss_index >= 8 && is_ss_producer(instr))
642       return true;
643 
644    return false;
645 }
646 
647 static struct ir3_sched_node *choose_instr_inc(struct ir3_sched_ctx *ctx,
648                                                struct ir3_sched_notes *notes,
649                                                bool defer, bool avoid_output);
650 
651 enum choose_instr_dec_rank {
652    DEC_NEUTRAL,
653    DEC_NEUTRAL_READY,
654    DEC_FREED,
655    DEC_FREED_READY,
656 };
657 
658 static const char *
659 dec_rank_name(enum choose_instr_dec_rank rank)
660 {
661    switch (rank) {
662    case DEC_NEUTRAL:
663       return "neutral";
664    case DEC_NEUTRAL_READY:
665       return "neutral+ready";
666    case DEC_FREED:
667       return "freed";
668    case DEC_FREED_READY:
669       return "freed+ready";
670    default:
671       return NULL;
672    }
673 }
674 
675 static unsigned
676 node_delay(struct ir3_sched_ctx *ctx, struct ir3_sched_node *n)
677 {
678    return MAX2(n->earliest_ip, ctx->ip) - ctx->ip;
679 }
680 
681 /**
682  * Chooses an instruction to schedule using the Goodman/Hsu (1988) CSR (Code
683  * Scheduling for Register pressure) heuristic.
684  *
685  * Only handles the case of choosing instructions that reduce register
686  * pressure or leave it unchanged.
687  */
688 static struct ir3_sched_node *
689 choose_instr_dec(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
690                  bool defer)
691 {
692    const char *mode = defer ? "-d" : "";
693    struct ir3_sched_node *chosen = NULL;
694    enum choose_instr_dec_rank chosen_rank = DEC_NEUTRAL;
695 
696    foreach_sched_node (n, &ctx->dag->heads) {
697       if (defer && should_defer(ctx, n->instr))
698          continue;
699 
700       unsigned d = node_delay(ctx, n);
701 
702       int live = live_effect(n->instr);
703       if (live > 0)
704          continue;
705 
706       if (!check_instr(ctx, notes, n->instr))
707          continue;
708 
709       enum choose_instr_dec_rank rank;
710       if (live < 0) {
711          /* Prioritize instrs which free up regs and can be scheduled with no
712           * delay.
713           */
714          if (d == 0)
715             rank = DEC_FREED_READY;
716          else
717             rank = DEC_FREED;
718       } else {
719          /* Contra the paper, pick a leader with no effect on used regs.  This
720           * may open up new opportunities, as otherwise a single-operand instr
721           * consuming a value will tend to block freeing that value.
722           * This had a massive effect on reducing spilling on V3D.
723           *
724           * XXX: Should this prioritize ready?
725           */
726          if (d == 0)
727             rank = DEC_NEUTRAL_READY;
728          else
729             rank = DEC_NEUTRAL;
730       }
731 
732       /* Prefer higher-ranked instructions, or in the case of a rank tie, the
733        * highest latency-to-end-of-program instruction.
734        */
735       if (!chosen || rank > chosen_rank ||
736           (rank == chosen_rank && chosen->max_delay < n->max_delay)) {
737          chosen = n;
738          chosen_rank = rank;
739       }
740    }
741 
742    if (chosen) {
743       di(chosen->instr, "dec%s: chose (%s)", mode, dec_rank_name(chosen_rank));
744       return chosen;
745    }
746 
747    return choose_instr_inc(ctx, notes, defer, true);
748 }
749 
750 enum choose_instr_inc_rank {
751    INC_DISTANCE,
752    INC_DISTANCE_READY,
753 };
754 
755 static const char *
756 inc_rank_name(enum choose_instr_inc_rank rank)
757 {
758    switch (rank) {
759    case INC_DISTANCE:
760       return "distance";
761    case INC_DISTANCE_READY:
762       return "distance+ready";
763    default:
764       return NULL;
765    }
766 }
767 
768 /**
769  * When we can't choose an instruction that reduces register pressure or
770  * is neutral, we end up here to try and pick the least bad option.
771  */
772 static struct ir3_sched_node *
773 choose_instr_inc(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
774                  bool defer, bool avoid_output)
775 {
776    const char *mode = defer ? "-d" : "";
777    struct ir3_sched_node *chosen = NULL;
778    enum choose_instr_inc_rank chosen_rank = INC_DISTANCE;
779 
780    /*
781     * From here on out, we are picking something that increases
782     * register pressure.  So try to pick something which will
783     * be consumed soon:
784     */
785    unsigned chosen_distance = 0;
786 
787    /* Prefer nodes that are ready now; among equals, pick the one consumed soonest: */
788    foreach_sched_node (n, &ctx->dag->heads) {
789       if (avoid_output && n->output)
790          continue;
791 
792       if (defer && should_defer(ctx, n->instr))
793          continue;
794 
795       if (!check_instr(ctx, notes, n->instr))
796          continue;
797 
798       unsigned d = node_delay(ctx, n);
799 
800       enum choose_instr_inc_rank rank;
801       if (d == 0)
802          rank = INC_DISTANCE_READY;
803       else
804          rank = INC_DISTANCE;
805 
806       unsigned distance = nearest_use(n->instr);
807 
808       if (!chosen || rank > chosen_rank ||
809           (rank == chosen_rank && distance < chosen_distance)) {
810          chosen = n;
811          chosen_distance = distance;
812          chosen_rank = rank;
813       }
814    }
815 
816    if (chosen) {
817       di(chosen->instr, "inc%s: chose (%s)", mode, inc_rank_name(chosen_rank));
818       return chosen;
819    }
820 
821    return NULL;
822 }
823 
824 /* Handles instruction selection for instructions we want to prioritize
825  * even if csp/csr would not pick them.
826  */
827 static struct ir3_sched_node *
828 choose_instr_prio(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes)
829 {
830    struct ir3_sched_node *chosen = NULL;
831 
832    foreach_sched_node (n, &ctx->dag->heads) {
833       /*
834        * - phi nodes and inputs must be scheduled first
835        * - split should be scheduled first, so that the vector value is
836        *   killed as soon as possible. RA cannot split up the vector and
837        *   reuse components that have been killed until the whole vector is dead.
838        * - collect, on the other hand, should be treated as a "normal"
839        *   instruction, and may add to register pressure if its sources are
840        *   part of another vector or immediates.
841        */
842       if (!is_meta(n->instr) || n->instr->opc == OPC_META_COLLECT)
843          continue;
844 
845       if (!chosen || (chosen->max_delay < n->max_delay))
846          chosen = n;
847    }
848 
849    if (chosen) {
850       di(chosen->instr, "prio: chose (meta)");
851       return chosen;
852    }
853 
854    return NULL;
855 }
856 
857 static void
858 dump_state(struct ir3_sched_ctx *ctx)
859 {
860    if (!SCHED_DEBUG)
861       return;
862 
863    foreach_sched_node (n, &ctx->dag->heads) {
864       di(n->instr, "maxdel=%3d le=%d del=%u ", n->max_delay,
865          live_effect(n->instr), node_delay(ctx, n));
866 
867       util_dynarray_foreach (&n->dag.edges, struct dag_edge, edge) {
868          struct ir3_sched_node *child = (struct ir3_sched_node *)edge->child;
869 
870          di(child->instr, " -> (%d parents) ", child->dag.parent_count);
871       }
872    }
873 }
874 
875 /* find instruction to schedule: */
876 static struct ir3_instruction *
877 choose_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes)
878 {
879    struct ir3_sched_node *chosen;
880 
881    dump_state(ctx);
882 
883    chosen = choose_instr_prio(ctx, notes);
884    if (chosen)
885       return chosen->instr;
886 
887    chosen = choose_instr_dec(ctx, notes, true);
888    if (chosen)
889       return chosen->instr;
890 
891    chosen = choose_instr_dec(ctx, notes, false);
892    if (chosen)
893       return chosen->instr;
894 
895    chosen = choose_instr_inc(ctx, notes, false, false);
896    if (chosen)
897       return chosen->instr;
898 
899    return NULL;
900 }
901 
902 static struct ir3_instruction *
903 split_instr(struct ir3_sched_ctx *ctx, struct ir3_instruction *orig_instr)
904 {
905    struct ir3_instruction *new_instr = ir3_instr_clone(orig_instr);
906    di(new_instr, "split instruction");
907    sched_node_init(ctx, new_instr);
908    return new_instr;
909 }
910 
911 /* "spill" the address registers by remapping any unscheduled
912  * instructions which depend on the current address register
913  * to a clone of the instruction which wrote the address reg.
914  */
915 static struct ir3_instruction *
916 split_addr(struct ir3_sched_ctx *ctx, struct ir3_instruction **addr,
917            struct ir3_instruction **users, unsigned users_count)
918 {
919    struct ir3_instruction *new_addr = NULL;
920    unsigned i;
921 
922    assert(*addr);
923 
924    for (i = 0; i < users_count; i++) {
925       struct ir3_instruction *indirect = users[i];
926 
927       if (!indirect)
928          continue;
929 
930       /* skip instructions already scheduled: */
931       if (is_scheduled(indirect))
932          continue;
933 
934       /* remap remaining instructions using current addr
935        * to new addr:
936        */
937       if (indirect->address->def == (*addr)->dsts[0]) {
938          if (!new_addr) {
939             new_addr = split_instr(ctx, *addr);
940             /* original addr is scheduled, but new one isn't: */
941             new_addr->flags &= ~IR3_INSTR_MARK;
942          }
943          indirect->address->def = new_addr->dsts[0];
944          /* don't need to remove old dag edge since old addr is
945           * already scheduled:
946           */
947          sched_node_add_dep(indirect, new_addr, 0);
948          di(indirect, "new address");
949       }
950    }
951 
952    /* all remaining indirects remapped to new addr: */
953    *addr = NULL;
954 
955    return new_addr;
956 }
957 
958 /* "spill" the predicate register by remapping any unscheduled
959  * instructions which depend on the current predicate register
960  * to a clone of the instruction which wrote the predicate reg.
961  */
962 static struct ir3_instruction *
963 split_pred(struct ir3_sched_ctx *ctx)
964 {
965    struct ir3 *ir;
966    struct ir3_instruction *new_pred = NULL;
967    unsigned i;
968 
969    assert(ctx->pred);
970 
971    ir = ctx->pred->block->shader;
972 
973    for (i = 0; i < ir->predicates_count; i++) {
974       struct ir3_instruction *predicated = ir->predicates[i];
975 
976       if (!predicated)
977          continue;
978 
979       /* skip instructions already scheduled: */
980       if (is_scheduled(predicated))
981          continue;
982 
983       /* remap remaining instructions using current pred
984        * to new pred:
985        *
986        * TODO is there ever a case when pred isn't the first
987        * (and only) src?
988        */
989       if (ssa(predicated->srcs[0]) == ctx->pred) {
990          if (!new_pred) {
991             new_pred = split_instr(ctx, ctx->pred);
992             /* original pred is scheduled, but new one isn't: */
993             new_pred->flags &= ~IR3_INSTR_MARK;
994          }
995          predicated->srcs[0]->def->instr = new_pred;
996          /* don't need to remove old dag edge since old pred is
997           * already scheduled:
998           */
999          sched_node_add_dep(predicated, new_pred, 0);
1000          di(predicated, "new predicate");
1001       }
1002    }
1003 
1004    if (ctx->block->condition == ctx->pred) {
1005       if (!new_pred) {
1006          new_pred = split_instr(ctx, ctx->pred);
1007          /* original pred is scheduled, but new one isn't: */
1008          new_pred->flags &= ~IR3_INSTR_MARK;
1009       }
1010       ctx->block->condition = new_pred;
1011       d("new branch condition");
1012    }
1013 
1014    /* all remaining predicated remapped to new pred: */
1015    ctx->pred = NULL;
1016 
1017    return new_pred;
1018 }
1019 
1020 static void
1021 sched_node_init(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
1022 {
1023    struct ir3_sched_node *n = rzalloc(ctx->dag, struct ir3_sched_node);
1024 
1025    dag_init_node(ctx->dag, &n->dag);
1026 
1027    n->instr = instr;
1028    instr->data = n;
1029 }
1030 
1031 static void
1032 sched_node_add_dep(struct ir3_instruction *instr, struct ir3_instruction *src,
1033                    int i)
1034 {
1035    /* don't consider dependencies in other blocks: */
1036    if (src->block != instr->block)
1037       return;
1038 
1039    /* we could have false-dep's that end up unused: */
1040    if (src->flags & IR3_INSTR_UNUSED) {
1041       assert(__is_false_dep(instr, i));
1042       return;
1043    }
1044 
1045    struct ir3_sched_node *n = instr->data;
1046    struct ir3_sched_node *sn = src->data;
1047 
1048    /* If src is consumed by a collect, track that to realize that once
1049     * any of the collect srcs are live, we should hurry up and schedule
1050     * the rest.
1051     */
1052    if (instr->opc == OPC_META_COLLECT)
1053       sn->collect = instr;
1054 
1055    unsigned d_soft = ir3_delayslots(src, instr, i, true);
1056    unsigned d = ir3_delayslots(src, instr, i, false);
1057 
1058    /* delays from (ss) and (sy) are considered separately and more accurately in
1059     * the scheduling heuristic, so ignore it when calculating the ip of
1060     * instructions, but do consider it when prioritizing which instructions to
1061     * schedule.
1062     */
1063    dag_add_edge_max_data(&sn->dag, &n->dag, (uintptr_t)d);
1064 
1065    n->delay = MAX2(n->delay, d_soft);
1066 }
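/* Hedged summary of sched_node_add_dep() above: the hard delay 'd' rides on
 * the DAG edge and feeds earliest_ip/node_delay(), ie. the ip/nop accounting,
 * while the soft delay 'd_soft' only inflates n->delay and hence max_delay,
 * which is used purely as a priority hint by the choosers.
 */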
1067 
1068 static void
1069 mark_kill_path(struct ir3_instruction *instr)
1070 {
1071    struct ir3_sched_node *n = instr->data;
1072 
1073    if (n->kill_path) {
1074       return;
1075    }
1076 
1077    n->kill_path = true;
1078 
1079    foreach_ssa_src (src, instr) {
1080       if (src->block != instr->block)
1081          continue;
1082       mark_kill_path(src);
1083    }
1084 }
1085 
1086 /* Is it an output? */
1087 static bool
1088 is_output_collect(struct ir3_instruction *instr)
1089 {
1090    if (instr->opc != OPC_META_COLLECT)
1091       return false;
1092 
1093    foreach_ssa_use (use, instr) {
1094       if (use->opc != OPC_END && use->opc != OPC_CHMASK)
1095          return false;
1096    }
1097 
1098    return true;
1099 }
1100 
1101 /* Is its only use as an output? */
1102 static bool
1103 is_output_only(struct ir3_instruction *instr)
1104 {
1105    foreach_ssa_use (use, instr)
1106       if (!is_output_collect(use))
1107          return false;
1108 
1109    return true;
1110 }
1111 
1112 static void
1113 sched_node_add_deps(struct ir3_instruction *instr)
1114 {
1115    /* There's nothing to do for phi nodes, since they always go first. And
1116     * phi nodes can reference sources later in the same block, so handling
1117     * sources is not only unnecessary but could cause problems.
1118     */
1119    if (instr->opc == OPC_META_PHI)
1120       return;
1121 
1122    /* Since foreach_ssa_src() already handles false-dep's we can construct
1123     * the DAG easily in a single pass.
1124     */
1125    foreach_ssa_src_n (src, i, instr) {
1126       sched_node_add_dep(instr, src, i);
1127    }
1128 
1129    /* NOTE that all inputs must be scheduled before a kill, so
1130     * mark these to be prioritized as well:
1131     */
1132    if (is_kill_or_demote(instr) || is_input(instr)) {
1133       mark_kill_path(instr);
1134    }
1135 
1136    if (is_output_only(instr)) {
1137       struct ir3_sched_node *n = instr->data;
1138       n->output = true;
1139    }
1140 }
1141 
1142 static void
1143 sched_dag_max_delay_cb(struct dag_node *node, void *state)
1144 {
1145    struct ir3_sched_node *n = (struct ir3_sched_node *)node;
1146    uint32_t max_delay = 0;
1147 
1148    util_dynarray_foreach (&n->dag.edges, struct dag_edge, edge) {
1149       struct ir3_sched_node *child = (struct ir3_sched_node *)edge->child;
1150       max_delay = MAX2(child->max_delay, max_delay);
1151    }
1152 
1153    n->max_delay = MAX2(n->max_delay, max_delay + n->delay);
1154 }
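/* Hedged example: after the bottom-up traversal, a node with delay 3 whose
 * children ended up with max_delay 5 and 2 gets max_delay = 5 + 3 = 8, ie.
 * the soft-delay length of its longest path to the end of the block, which
 * the choose_instr_* helpers use as a critical-path tie-breaker.
 */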
1155 
1156 static void
1157 sched_dag_init(struct ir3_sched_ctx *ctx)
1158 {
1159    ctx->dag = dag_create(ctx);
1160 
1161    foreach_instr (instr, &ctx->unscheduled_list)
1162       sched_node_init(ctx, instr);
1163 
1164    foreach_instr (instr, &ctx->unscheduled_list)
1165       sched_node_add_deps(instr);
1166 
1167    dag_traverse_bottom_up(ctx->dag, sched_dag_max_delay_cb, NULL);
1168 }
1169 
1170 static void
1171 sched_dag_destroy(struct ir3_sched_ctx *ctx)
1172 {
1173    ralloc_free(ctx->dag);
1174    ctx->dag = NULL;
1175 }
1176 
1177 static void
1178 sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
1179 {
1180    ctx->block = block;
1181 
1182    /* addr/pred writes are per-block: */
1183    ctx->addr0 = NULL;
1184    ctx->addr1 = NULL;
1185    ctx->pred = NULL;
1186    ctx->sy_delay = 0;
1187    ctx->ss_delay = 0;
1188    ctx->sy_index = ctx->first_outstanding_sy_index = 0;
1189    ctx->ss_index = ctx->first_outstanding_ss_index = 0;
1190 
1191    /* move all instructions to the unscheduled list, and
1192     * empty the block's instruction list (to which we will
1193     * be inserting).
1194     */
1195    list_replace(&block->instr_list, &ctx->unscheduled_list);
1196    list_inithead(&block->instr_list);
1197 
1198    sched_dag_init(ctx);
1199 
1200    ctx->remaining_kills = 0;
1201    ctx->remaining_tex = 0;
1202    foreach_instr_safe (instr, &ctx->unscheduled_list) {
1203       if (is_kill_or_demote(instr))
1204          ctx->remaining_kills++;
1205       if (is_sy_producer(instr))
1206          ctx->remaining_tex++;
1207    }
1208 
1209    /* First schedule all meta:input and meta:phi instructions, followed by
1210     * tex-prefetch.  We want all of the instructions that load values into
1211     * registers before the shader starts to go before any other instructions.
1212     * But in particular we want inputs to come before prefetches.  This is
1213     * because an FS's bary_ij input may not actually be live in the shader,
1214     * but it should not be scheduled on top of any other input (but can be
1215     * overwritten by a tex prefetch)
1216     *
1217     * Note: Because the first block cannot have predecessors, meta:input and
1218     * meta:phi cannot exist in the same block.
1219     */
1220    foreach_instr_safe (instr, &ctx->unscheduled_list)
1221       if (instr->opc == OPC_META_INPUT || instr->opc == OPC_META_PHI)
1222          schedule(ctx, instr);
1223 
1224    foreach_instr_safe (instr, &ctx->unscheduled_list)
1225       if (instr->opc == OPC_META_TEX_PREFETCH)
1226          schedule(ctx, instr);
1227 
1228    while (!list_is_empty(&ctx->unscheduled_list)) {
1229       struct ir3_sched_notes notes = {0};
1230       struct ir3_instruction *instr;
1231 
1232       instr = choose_instr(ctx, &notes);
1233       if (instr) {
1234          unsigned delay = node_delay(ctx, instr->data);
1235          d("delay=%u", delay);
1236 
1237          assert(delay <= 6);
1238 
1239          schedule(ctx, instr);
1240 
1241          /* Since we've scheduled a "real" instruction, we can now
1242           * schedule any split instruction created by the scheduler again.
1243           */
1244          ctx->split = NULL;
1245       } else {
1246          struct ir3_instruction *new_instr = NULL;
1247          struct ir3 *ir = block->shader;
1248 
1249          /* nothing available to schedule.. if we are blocked on
1250           * address/predicate register conflict, then break the
1251           * deadlock by cloning the instruction that wrote that
1252           * reg:
1253           */
1254          if (notes.addr0_conflict) {
1255             new_instr =
1256                split_addr(ctx, &ctx->addr0, ir->a0_users, ir->a0_users_count);
1257          } else if (notes.addr1_conflict) {
1258             new_instr =
1259                split_addr(ctx, &ctx->addr1, ir->a1_users, ir->a1_users_count);
1260          } else if (notes.pred_conflict) {
1261             new_instr = split_pred(ctx);
1262          } else {
1263             d("unscheduled_list:");
1264             foreach_instr (instr, &ctx->unscheduled_list)
1265                di(instr, "unscheduled: ");
1266             assert(0);
1267             ctx->error = true;
1268             return;
1269          }
1270 
1271          if (new_instr) {
1272             list_delinit(&new_instr->node);
1273             list_addtail(&new_instr->node, &ctx->unscheduled_list);
1274          }
1275 
1276          /* If we produced a new instruction, do not schedule it next, to
1277           * guarantee progress.
1278           */
1279          ctx->split = new_instr;
1280       }
1281    }
1282 
1283    sched_dag_destroy(ctx);
1284 }
1285 
1286 int
1287 ir3_sched(struct ir3 *ir)
1288 {
1289    struct ir3_sched_ctx *ctx = rzalloc(NULL, struct ir3_sched_ctx);
1290 
1291    foreach_block (block, &ir->block_list) {
1292       foreach_instr (instr, &block->instr_list) {
1293          instr->data = NULL;
1294       }
1295    }
1296 
1297    ir3_count_instructions(ir);
1298    ir3_clear_mark(ir);
1299    ir3_find_ssa_uses(ir, ctx, false);
1300 
1301    foreach_block (block, &ir->block_list) {
1302       sched_block(ctx, block);
1303    }
1304 
1305    int ret = ctx->error ? -1 : 0;
1306 
1307    ralloc_free(ctx);
1308 
1309    return ret;
1310 }
1311 
1312 static unsigned
1313 get_array_id(struct ir3_instruction *instr)
1314 {
1315    /* The expectation is that there is only a single array
1316     * src or dst; ir3_cp should enforce this.
1317     */
1318 
1319    foreach_dst (dst, instr)
1320       if (dst->flags & IR3_REG_ARRAY)
1321          return dst->array.id;
1322    foreach_src (src, instr)
1323       if (src->flags & IR3_REG_ARRAY)
1324          return src->array.id;
1325 
1326    unreachable("this was unexpected");
1327 }
1328 
1329 /* does instruction 'prior' need to be scheduled before 'instr'? */
1330 static bool
1331 depends_on(struct ir3_instruction *instr, struct ir3_instruction *prior)
1332 {
1333    /* TODO for dependencies that are related to a specific object, ie
1334     * a specific SSBO/image/array, we could relax this constraint to
1335     * make accesses to unrelated objects not depend on each other (at
1336     * least as long as not declared coherent)
1337     */
1338    if (((instr->barrier_class & IR3_BARRIER_EVERYTHING) &&
1339         prior->barrier_class) ||
1340        ((prior->barrier_class & IR3_BARRIER_EVERYTHING) &&
1341         instr->barrier_class))
1342       return true;
1343 
1344    if (instr->barrier_class & prior->barrier_conflict) {
1345       if (!(instr->barrier_class &
1346             ~(IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W))) {
1347          /* if only array barrier, then we can further limit false-deps
1348           * by considering the array-id, ie reads/writes to different
1349           * arrays do not depend on each other (no aliasing)
1350           */
1351          if (get_array_id(instr) != get_array_id(prior)) {
1352             return false;
1353          }
1354       }
1355 
1356       return true;
1357    }
1358 
1359    return false;
1360 }
1361 
1362 static void
1363 add_barrier_deps(struct ir3_block *block, struct ir3_instruction *instr)
1364 {
1365    struct list_head *prev = instr->node.prev;
1366    struct list_head *next = instr->node.next;
1367 
1368    /* add dependencies on previous instructions that must be scheduled
1369     * prior to the current instruction
1370     */
1371    while (prev != &block->instr_list) {
1372       struct ir3_instruction *pi =
1373          list_entry(prev, struct ir3_instruction, node);
1374 
1375       prev = prev->prev;
1376 
1377       if (is_meta(pi))
1378          continue;
1379 
1380       if (instr->barrier_class == pi->barrier_class) {
1381          ir3_instr_add_dep(instr, pi);
1382          break;
1383       }
1384 
1385       if (depends_on(instr, pi))
1386          ir3_instr_add_dep(instr, pi);
1387    }
1388 
1389    /* add dependencies on this instruction to following instructions
1390     * that must be scheduled after the current instruction:
1391     */
1392    while (next != &block->instr_list) {
1393       struct ir3_instruction *ni =
1394          list_entry(next, struct ir3_instruction, node);
1395 
1396       next = next->next;
1397 
1398       if (is_meta(ni))
1399          continue;
1400 
1401       if (instr->barrier_class == ni->barrier_class) {
1402          ir3_instr_add_dep(ni, instr);
1403          break;
1404       }
1405 
1406       if (depends_on(ni, instr))
1407          ir3_instr_add_dep(ni, instr);
1408    }
1409 }
1410 
1411 /* before scheduling a block, we need to add any necessary false-dependencies
1412  * to ensure that:
1413  *
1414  *  (1) barriers are scheduled in the right order wrt instructions related
1415  *      to the barrier
1416  *
1417  *  (2) reads that come before a write actually get scheduled before the
1418  *      write
1419  */
1420 bool
1421 ir3_sched_add_deps(struct ir3 *ir)
1422 {
1423    bool progress = false;
1424 
1425    foreach_block (block, &ir->block_list) {
1426       foreach_instr (instr, &block->instr_list) {
1427          if (instr->barrier_class) {
1428             add_barrier_deps(block, instr);
1429             progress = true;
1430          }
1431       }
1432    }
1433 
1434    return progress;
1435 }
1436
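/* Hedged usage sketch (guarded out so it does not affect the build; exact
 * driver wiring may differ): the barrier false-deps added by
 * ir3_sched_add_deps() are expected to be in place before ir3_sched() runs.
 */
#if 0
static int
example_schedule_ir(struct ir3 *ir)
{
   /* add false-deps for barrier/array ordering first: */
   ir3_sched_add_deps(ir);

   /* then run the pre-RA scheduler; returns 0 on success, -1 on error: */
   return ir3_sched(ir);
}
#endif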