/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/ralloc.h"
#include "util/u_math.h"

#include "ir3.h"
#include "ir3_shader.h"

/*
 * Legalize:
 *
 * The legalize pass handles ensuring sufficient nop's and sync flags for
 * correct execution.
 *
 * 1) Iteratively determine where sync ((sy)/(ss)) flags are needed,
 *    based on state flowing out of predecessor blocks until there is
 *    no further change.  In some cases this requires inserting nops.
 * 2) Mark (ei) on last varying input
 * 3) Final nop scheduling for instruction latency
 * 4) Resolve jumps and schedule blocks, marking potential convergence
 *    points with (jp)
 */
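
/* As a rough illustration only (not literal output of this pass, register
 * numbers are made up): an instruction consuming an sfu result needs (ss),
 * and one consuming a texture fetch result needs (sy), e.g.
 *
 *    sam (f32)(xyzw)r0.x, r2.x, s#0, t#0
 *    rcp r1.x, r3.x
 *    (ss)mul.f r1.y, r1.x, c0.x    <- waits on the rcp (sfu) result
 *    (sy)add.f r1.z, r0.x, r0.y    <- waits on the sam (tex) result
 */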

struct ir3_legalize_ctx {
   struct ir3_compiler *compiler;
   struct ir3_shader_variant *so;
   gl_shader_stage type;
   int max_bary;
   bool early_input_release;
   bool has_inputs;
};

struct ir3_legalize_state {
   regmask_t needs_ss;
   regmask_t needs_ss_war; /* write after read */
   regmask_t needs_sy;
};

struct ir3_legalize_block_data {
   bool valid;
   struct ir3_legalize_state state;
};

/* We want to evaluate each block from the position of any other
 * predecessor block, in order that the flags set are the union of
 * all possible program paths.
 *
 * To do this, we need to know the output state (needs_ss/ss_war/sy)
 * of all predecessor blocks.  The tricky thing is loops, which mean
 * that we can't simply recursively process each predecessor block
 * before legalizing the current block.
 *
 * How we handle that is by looping over all the blocks until the
 * results converge.  If the output state of a given block changes
 * in a given pass, this means that all successor blocks are not
 * yet fully legalized.
 */
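
/* For example (illustrative only): with a simple loop A -> B -> A, the
 * first pass over A only sees the preheader's output state.  Once B has
 * been processed and its output state changes, A is marked invalid again
 * and gets re-legalized with the union of both predecessors' states,
 * repeating until nothing changes.
 */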

static bool
legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
   struct ir3_legalize_block_data *bd = block->data;

   if (bd->valid)
      return false;

   struct ir3_instruction *last_n = NULL;
   struct list_head instr_list;
   struct ir3_legalize_state prev_state = bd->state;
   struct ir3_legalize_state *state = &bd->state;
   bool last_input_needs_ss = false;
   bool has_tex_prefetch = false;
   bool mergedregs = ctx->so->mergedregs;

   /* our input state is the OR of all predecessor blocks' state: */
   for (unsigned i = 0; i < block->predecessors_count; i++) {
      struct ir3_block *predecessor = block->predecessors[i];
      struct ir3_legalize_block_data *pbd = predecessor->data;
      struct ir3_legalize_state *pstate = &pbd->state;

      /* Our input (ss)/(sy) state is based on OR'ing the output
       * state of all our predecessor blocks
       */
      regmask_or(&state->needs_ss, &state->needs_ss, &pstate->needs_ss);
      regmask_or(&state->needs_ss_war, &state->needs_ss_war,
                 &pstate->needs_ss_war);
      regmask_or(&state->needs_sy, &state->needs_sy, &pstate->needs_sy);
   }

   /* We need to take physical-only edges into account when tracking shared
    * registers.
    */
   for (unsigned i = 0; i < block->physical_predecessors_count; i++) {
      struct ir3_block *predecessor = block->physical_predecessors[i];
      struct ir3_legalize_block_data *pbd = predecessor->data;
      struct ir3_legalize_state *pstate = &pbd->state;

      regmask_or_shared(&state->needs_ss, &state->needs_ss, &pstate->needs_ss);
   }

   unsigned input_count = 0;

   foreach_instr (n, &block->instr_list) {
      if (is_input(n)) {
         input_count++;
      }
   }

   unsigned inputs_remaining = input_count;

   /* Either inputs are in the first block or we expect inputs to be released
    * with the end of the program.
    */
   assert(input_count == 0 || !ctx->early_input_release ||
          block == ir3_after_preamble(block->shader));

   /* remove all the instructions from the list, we'll be adding
    * them back in as we go
    */
   list_replace(&block->instr_list, &instr_list);
   list_inithead(&block->instr_list);

   foreach_instr_safe (n, &instr_list) {
      unsigned i;

      n->flags &= ~(IR3_INSTR_SS | IR3_INSTR_SY);

      /* _meta::tex_prefetch instructions removed later in
       * collect_tex_prefetches()
       */
      if (is_meta(n) && (n->opc != OPC_META_TEX_PREFETCH))
         continue;

      if (is_input(n)) {
         struct ir3_register *inloc = n->srcs[0];
         assert(inloc->flags & IR3_REG_IMMED);
         ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);
      }

      if ((last_n && is_barrier(last_n)) || n->opc == OPC_SHPE) {
         n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
         last_input_needs_ss = false;
         regmask_init(&state->needs_ss_war, mergedregs);
         regmask_init(&state->needs_ss, mergedregs);
         regmask_init(&state->needs_sy, mergedregs);
      }

      if (last_n && (last_n->opc == OPC_PREDT)) {
         n->flags |= IR3_INSTR_SS;
         regmask_init(&state->needs_ss_war, mergedregs);
         regmask_init(&state->needs_ss, mergedregs);
      }

      /* NOTE: consider dst register too.. it could happen that
       * texture sample instruction (for example) writes some
       * components which are unused.  A subsequent instruction
       * that writes the same register can race w/ the sam instr
       * resulting in undefined results:
       */
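      /* For example (illustrative, made-up registers): if a "sam" writing
       * r0.xy is still in flight and only r0.x is ever read, a later
       * instruction writing r0.y still needs (sy), since the in-flight sam
       * could land after it and leave r0.y undefined.
       */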
      for (i = 0; i < n->dsts_count + n->srcs_count; i++) {
         struct ir3_register *reg;
         if (i < n->dsts_count)
            reg = n->dsts[i];
         else
            reg = n->srcs[i - n->dsts_count];

         if (reg_gpr(reg)) {

            /* TODO: we probably only need (ss) for alu
             * instr consuming sfu result.. need to make
             * some tests for both this and (sy)..
             */
            if (regmask_get(&state->needs_ss, reg)) {
               n->flags |= IR3_INSTR_SS;
               last_input_needs_ss = false;
               regmask_init(&state->needs_ss_war, mergedregs);
               regmask_init(&state->needs_ss, mergedregs);
            }

            if (regmask_get(&state->needs_sy, reg)) {
               n->flags |= IR3_INSTR_SY;
               regmask_init(&state->needs_sy, mergedregs);
            }
         }
      }

      foreach_dst (reg, n) {
         if (regmask_get(&state->needs_ss_war, reg)) {
            n->flags |= IR3_INSTR_SS;
            last_input_needs_ss = false;
            regmask_init(&state->needs_ss_war, mergedregs);
            regmask_init(&state->needs_ss, mergedregs);
         }
      }

      /* cat5+ does not have an (ss) bit, if needed we need to
       * insert a nop to carry the sync flag.  Would be kinda
       * clever if we were aware of this during scheduling, but
       * this should be a pretty rare case:
       */
      if ((n->flags & IR3_INSTR_SS) && (opc_cat(n->opc) >= 5)) {
         struct ir3_instruction *nop;
         nop = ir3_NOP(block);
         nop->flags |= IR3_INSTR_SS;
         n->flags &= ~IR3_INSTR_SS;
      }

      /* need to be able to set (ss) on first instruction: */
      if (list_is_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
         ir3_NOP(block);

      if (ctx->compiler->samgq_workaround &&
          ctx->type != MESA_SHADER_FRAGMENT &&
          ctx->type != MESA_SHADER_COMPUTE && n->opc == OPC_SAMGQ) {
         struct ir3_instruction *samgp;

         list_delinit(&n->node);

         for (i = 0; i < 4; i++) {
            samgp = ir3_instr_clone(n);
            samgp->opc = OPC_SAMGP0 + i;
            if (i > 1)
               samgp->flags |= IR3_INSTR_SY;
         }
      } else {
         list_delinit(&n->node);
         list_addtail(&n->node, &block->instr_list);
      }

      if (is_sfu(n))
         regmask_set(&state->needs_ss, n->dsts[0]);

      foreach_dst (dst, n) {
         if (dst->flags & IR3_REG_SHARED)
            regmask_set(&state->needs_ss, dst);
      }

      if (is_tex_or_prefetch(n)) {
         regmask_set(&state->needs_sy, n->dsts[0]);
         if (n->opc == OPC_META_TEX_PREFETCH)
            has_tex_prefetch = true;
      } else if (n->opc == OPC_RESINFO) {
         regmask_set(&state->needs_ss, n->dsts[0]);
         ir3_NOP(block)->flags |= IR3_INSTR_SS;
         last_input_needs_ss = false;
      } else if (is_load(n)) {
         if (is_local_mem_load(n))
            regmask_set(&state->needs_ss, n->dsts[0]);
         else
            regmask_set(&state->needs_sy, n->dsts[0]);
      } else if (is_atomic(n->opc)) {
         if (is_bindless_atomic(n->opc)) {
            regmask_set(&state->needs_sy, n->srcs[2]);
         } else if (is_global_a3xx_atomic(n->opc) ||
                    is_global_a6xx_atomic(n->opc)) {
            regmask_set(&state->needs_sy, n->dsts[0]);
         } else {
            regmask_set(&state->needs_ss, n->dsts[0]);
         }
      }

      if (is_ssbo(n->opc) || is_global_a3xx_atomic(n->opc) ||
          is_bindless_atomic(n->opc))
         ctx->so->has_ssbo = true;

      /* both tex/sfu appear to not always immediately consume
       * their src register(s):
       */
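      /* For example (illustrative, made-up registers): after
       * "rsq r1.x, r0.x", a following instruction that overwrites r0.x
       * gets (ss) so the sfu has consumed its source before the write
       * happens (a write-after-read hazard).
       */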
      if (is_tex(n) || is_sfu(n) || is_mem(n)) {
         foreach_src (reg, n) {
            regmask_set(&state->needs_ss_war, reg);
         }
      }

      if (ctx->early_input_release && is_input(n)) {
         last_input_needs_ss |= (n->opc == OPC_LDLV);

         assert(inputs_remaining > 0);
         inputs_remaining--;
         if (inputs_remaining == 0) {
            /* This is the last input. We add the (ei) flag to release
             * varying memory after this executes. If it's an ldlv,
             * however, we need to insert a dummy bary.f on which we can
             * set the (ei) flag. We may also need to insert an (ss) to
             * guarantee that all ldlv's have finished fetching their
             * results before releasing the varying memory.
             */
            struct ir3_instruction *last_input = n;
            if (n->opc == OPC_LDLV) {
               struct ir3_instruction *baryf;

               /* (ss)bary.f (ei)r63.x, 0, r0.x */
               baryf = ir3_instr_create(block, OPC_BARY_F, 1, 2);
               ir3_dst_create(baryf, regid(63, 0), 0);
               ir3_src_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
               ir3_src_create(baryf, regid(0, 0), 0);

               last_input = baryf;
            }

            last_input->dsts[0]->flags |= IR3_REG_EI;
            if (last_input_needs_ss) {
               last_input->flags |= IR3_INSTR_SS;
               regmask_init(&state->needs_ss_war, mergedregs);
               regmask_init(&state->needs_ss, mergedregs);
            }
         }
      }

      last_n = n;
   }

   assert(inputs_remaining == 0 || !ctx->early_input_release);

   if (has_tex_prefetch && !ctx->has_inputs) {
      /* texture prefetch, but *no* inputs.. we need to insert a
       * dummy bary.f at the top of the shader to unblock varying
       * storage:
       */
      struct ir3_instruction *baryf;

      /* (ss)bary.f (ei)r63.x, 0, r0.x */
      baryf = ir3_instr_create(block, OPC_BARY_F, 1, 2);
      ir3_dst_create(baryf, regid(63, 0), 0)->flags |= IR3_REG_EI;
      ir3_src_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
      ir3_src_create(baryf, regid(0, 0), 0);

      /* insert the dummy bary.f at head: */
      list_delinit(&baryf->node);
      list_add(&baryf->node, &block->instr_list);
   }

   bd->valid = true;

   if (memcmp(&prev_state, state, sizeof(*state))) {
      /* our output state changed, this invalidates all of our
       * successors:
       */
      for (unsigned i = 0; i < ARRAY_SIZE(block->successors); i++) {
         if (!block->successors[i])
            break;
         struct ir3_legalize_block_data *pbd = block->successors[i]->data;
         pbd->valid = false;
      }
   }

   return true;
}

/* Expands dsxpp and dsypp macros to:
 *
 * dsxpp.1 dst, src
 * dsxpp.1.p dst, src
 *
 * We apply this after flags syncing, as we don't want to sync in between the
 * two (which might happen if dst == src).  We do it before nop scheduling
 * because that needs to count actual instructions.
 */
static bool
apply_fine_deriv_macro(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
   struct list_head instr_list;

   /* remove all the instructions from the list, we'll be adding
    * them back in as we go
    */
   list_replace(&block->instr_list, &instr_list);
   list_inithead(&block->instr_list);

   foreach_instr_safe (n, &instr_list) {
      list_addtail(&n->node, &block->instr_list);

      if (n->opc == OPC_DSXPP_MACRO || n->opc == OPC_DSYPP_MACRO) {
         n->opc = (n->opc == OPC_DSXPP_MACRO) ? OPC_DSXPP_1 : OPC_DSYPP_1;

         struct ir3_instruction *op_p = ir3_instr_clone(n);
         op_p->flags = IR3_INSTR_P;

         ctx->so->need_fine_derivatives = true;
      }
   }

   return true;
}

/* NOTE: branch instructions are always the last instruction(s)
 * in the block.  We take advantage of this as we resolve the
 * branches, since "if (foo) break;" constructs turn into
 * something like:
 *
 *   block3 {
 *      ...
 *      0029:021: mov.s32s32 r62.x, r1.y
 *      0082:022: br !p0.x, target=block5
 *      0083:023: br p0.x, target=block4
 *      // succs: if _[0029:021: mov.s32s32] block4; else block5;
 *   }
 *   block4 {
 *      0084:024: jump, target=block6
 *      // succs: block6;
 *   }
 *   block5 {
 *      0085:025: jump, target=block7
 *      // succs: block7;
 *   }
 *
 * ie. only instruction in block4/block5 is a jump, so when
 * resolving branches we can easily detect this by checking
 * that the first instruction in the target block is itself
 * a jump, and setup the br directly to the jump's target
 * (and strip back out the now unreached jump)
 *
 * TODO sometimes we end up with things like:
 *
 *    br !p0.x, #2
 *    br p0.x, #12
 *    add.u r0.y, r0.y, 1
 *
 * If we swapped the order of the branches, we could drop one.
 */
static struct ir3_block *
resolve_dest_block(struct ir3_block *block)
{
   /* special case for last block: */
   if (!block->successors[0])
      return block;

   /* NOTE that we may or may not have inserted the jump
    * in the target block yet, so conditions to resolve
    * the dest to the dest block's successor are:
    *
    *   (1) successor[1] == NULL &&
    *   (2) (block-is-empty || only-instr-is-jump)
    */
   if (block->successors[1] == NULL) {
      if (list_is_empty(&block->instr_list)) {
         return block->successors[0];
      } else if (list_length(&block->instr_list) == 1) {
         struct ir3_instruction *instr =
            list_first_entry(&block->instr_list, struct ir3_instruction, node);
         if (instr->opc == OPC_JUMP) {
            /* If this jump is backwards, then we will probably convert
             * the jump being resolved to a backwards jump, which will
             * change a loop-with-continue or loop-with-if into a
             * doubly-nested loop and change the convergence behavior.
             * Disallow this here.
             */
            if (block->successors[0]->index <= block->index)
               return block;
            return block->successors[0];
         }
      }
   }
   return block;
}

static void
remove_unused_block(struct ir3_block *old_target)
{
   list_delinit(&old_target->node);

   /* If there are any physical predecessors due to fallthroughs, then they may
    * fall through to any of the physical successors of this block. But we can
    * only fit two, so just pick the "earliest" one, i.e. the fallthrough if
    * possible.
    *
    * TODO: we really ought to have unlimited numbers of physical successors,
    * both because of this and because we currently don't model some scenarios
    * with nested break/continue correctly.
    */
   struct ir3_block *new_target;
   if (old_target->physical_successors[1] &&
       old_target->physical_successors[1]->start_ip <
       old_target->physical_successors[0]->start_ip) {
      new_target = old_target->physical_successors[1];
   } else {
      new_target = old_target->physical_successors[0];
   }

   for (unsigned i = 0; i < old_target->physical_predecessors_count; i++) {
      struct ir3_block *pred = old_target->physical_predecessors[i];
      if (pred->physical_successors[0] == old_target) {
         if (!new_target) {
            /* If we remove a physical successor, make sure the only physical
             * successor is the first one.
             */
            pred->physical_successors[0] = pred->physical_successors[1];
            pred->physical_successors[1] = NULL;
         } else {
            pred->physical_successors[0] = new_target;
         }
      } else {
         assert(pred->physical_successors[1] == old_target);
         pred->physical_successors[1] = new_target;
      }
      if (new_target)
         ir3_block_add_physical_predecessor(new_target, pred);
   }

   /* cleanup dangling predecessors: */
   for (unsigned i = 0; i < ARRAY_SIZE(old_target->successors); i++) {
      if (old_target->successors[i]) {
         struct ir3_block *succ = old_target->successors[i];
         ir3_block_remove_predecessor(succ, old_target);
      }
   }

   for (unsigned i = 0; i < ARRAY_SIZE(old_target->physical_successors); i++) {
      if (old_target->physical_successors[i]) {
         struct ir3_block *succ = old_target->physical_successors[i];
         ir3_block_remove_physical_predecessor(succ, old_target);
      }
   }
}

static bool
retarget_jump(struct ir3_instruction *instr, struct ir3_block *new_target)
{
   struct ir3_block *old_target = instr->cat0.target;
   struct ir3_block *cur_block = instr->block;

   /* update the current block's successors to reflect the retargeting: */
   if (cur_block->successors[0] == old_target) {
      cur_block->successors[0] = new_target;
   } else {
      assert(cur_block->successors[1] == old_target);
      cur_block->successors[1] = new_target;
   }

   /* also update physical_successors: */
   if (cur_block->physical_successors[0] == old_target) {
      cur_block->physical_successors[0] = new_target;
   } else {
      assert(cur_block->physical_successors[1] == old_target);
      cur_block->physical_successors[1] = new_target;
   }

   /* update new target's predecessors: */
   ir3_block_add_predecessor(new_target, cur_block);
   ir3_block_add_physical_predecessor(new_target, cur_block);

   /* and remove old_target's predecessor: */
   ir3_block_remove_predecessor(old_target, cur_block);
   ir3_block_remove_physical_predecessor(old_target, cur_block);

   instr->cat0.target = new_target;

   if (old_target->predecessors_count == 0) {
      remove_unused_block(old_target);
      return true;
   }

   return false;
}

static bool
opt_jump(struct ir3 *ir)
{
   bool progress = false;

   unsigned index = 0;
   foreach_block (block, &ir->block_list)
      block->index = index++;

   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {
         if (!is_flow(instr) || !instr->cat0.target)
            continue;

         struct ir3_block *tblock = resolve_dest_block(instr->cat0.target);
         if (tblock != instr->cat0.target) {
            progress = true;

            /* Exit early if we deleted a block to avoid iterator
             * weirdness/assert fails
             */
            if (retarget_jump(instr, tblock))
               return true;
         }
      }

      /* Detect the case where the block ends either with:
       * - A single unconditional jump to the next block.
       * - Two jump instructions with opposite conditions, and one of
       *   them jumps to the next block.
       * We can remove the one that jumps to the next block in either case.
       */
      if (list_is_empty(&block->instr_list))
         continue;

      struct ir3_instruction *jumps[2] = {NULL, NULL};
      jumps[0] =
         list_last_entry(&block->instr_list, struct ir3_instruction, node);
      if (!list_is_singular(&block->instr_list))
         jumps[1] =
            list_last_entry(&jumps[0]->node, struct ir3_instruction, node);

      if (jumps[0]->opc == OPC_JUMP)
         jumps[1] = NULL;
      else if (jumps[0]->opc != OPC_B || !jumps[1] || jumps[1]->opc != OPC_B)
         continue;

      for (unsigned i = 0; i < 2; i++) {
         if (!jumps[i])
            continue;

         struct ir3_block *tblock = jumps[i]->cat0.target;
         if (&tblock->node == block->node.next) {
            list_delinit(&jumps[i]->node);
            progress = true;
            break;
         }
      }
   }

   return progress;
}

static void
resolve_jumps(struct ir3 *ir)
{
   foreach_block (block, &ir->block_list)
      foreach_instr (instr, &block->instr_list)
         if (is_flow(instr) && instr->cat0.target) {
            struct ir3_instruction *target = list_first_entry(
               &instr->cat0.target->instr_list, struct ir3_instruction, node);

            instr->cat0.immed = (int)target->ip - (int)instr->ip;
         }
}

static void
mark_jp(struct ir3_block *block)
{
   /* We only call this on the end block (in kill_sched) or after retargeting
    * all jumps to empty blocks (in mark_xvergence_points) so there's no need to
    * worry about empty blocks.
    */
   assert(!list_is_empty(&block->instr_list));

   struct ir3_instruction *target =
      list_first_entry(&block->instr_list, struct ir3_instruction, node);
   target->flags |= IR3_INSTR_JP;
}

/* Mark points where control flow converges or diverges.
 *
 * Divergence points could actually be re-convergence points where
673  * "parked" threads are recoverged with threads that took the opposite
 * path last time around.  Possibly it is easier to think of (jp) as
 * "the execution mask might have changed".
 */
static void
mark_xvergence_points(struct ir3 *ir)
{
   foreach_block (block, &ir->block_list) {
      /* We need to insert (jp) if an entry in the "branch stack" is created for
       * our block. This happens if there is a predecessor to our block that may
       * fall through to an earlier block in the physical CFG, either because it
       * ends in a non-uniform conditional branch or because there's a
       * fallthrough for a block in-between that also starts with (jp) and was
       * pushed on the branch stack already.
       */
      for (unsigned i = 0; i < block->predecessors_count; i++) {
         struct ir3_block *pred = block->predecessors[i];

         for (unsigned j = 0; j < ARRAY_SIZE(pred->physical_successors); j++) {
            if (pred->physical_successors[j] != NULL &&
                pred->physical_successors[j]->start_ip < block->start_ip)
               mark_jp(block);

            /* If the predecessor just falls through to this block, we still
             * need to check if it "falls through" by jumping to the block. This
             * can happen if opt_jump fails and the block ends in two branches,
             * or if there's an empty if-statement (which currently can happen
             * with binning shaders after dead-code elimination) and the block
             * before ends with a conditional branch directly to this block.
             */
            if (pred->physical_successors[j] == block) {
               foreach_instr_rev (instr, &pred->instr_list) {
                  if (!is_flow(instr))
                     break;
                  if (instr->cat0.target == block) {
                     mark_jp(block);
                     break;
                  }
               }
            }
         }
      }
   }
}

/* Insert the branch/jump instructions for flow control between blocks.
 * Initially this is done naively, without considering if the successor
 * block immediately follows the current block (ie. so no jump required),
 * but that is cleaned up in opt_jump().
 *
 * TODO what ensures that the last write to p0.x in a block is the
 * branch condition?  Have we been getting lucky all this time?
 */
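
/* For an if/else with its condition in p0.x, the result is two branches at
 * the end of the block (illustrative only, block names made up; opt_jump()
 * later removes whichever branch targets the fall-through block):
 *
 *    br !p0.x, #else_block
 *    br p0.x, #then_block
 */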
static void
block_sched(struct ir3 *ir)
{
   foreach_block (block, &ir->block_list) {
      if (block->successors[1]) {
         /* if/else, conditional branches to "then" or "else": */
         struct ir3_instruction *br1, *br2;

         if (block->brtype == IR3_BRANCH_GETONE ||
             block->brtype == IR3_BRANCH_SHPS) {
            /* getone/shps can't be inverted, and it wouldn't even make sense
             * to follow it with an inverted branch, so follow it by an
             * unconditional branch.
             */
            assert(!block->condition);
            if (block->brtype == IR3_BRANCH_GETONE)
               br1 = ir3_GETONE(block);
            else
               br1 = ir3_SHPS(block);
            br1->cat0.target = block->successors[1];

            br2 = ir3_JUMP(block);
            br2->cat0.target = block->successors[0];
         } else {
            assert(block->condition);

            /* create "else" branch first (since "then" block should
             * frequently/always end up being a fall-thru):
             */
            br1 = ir3_instr_create(block, OPC_B, 0, 1);
            ir3_src_create(br1, regid(REG_P0, 0), 0)->def =
               block->condition->dsts[0];
            br1->cat0.inv1 = true;
            br1->cat0.target = block->successors[1];

            /* "then" branch: */
            br2 = ir3_instr_create(block, OPC_B, 0, 1);
            ir3_src_create(br2, regid(REG_P0, 0), 0)->def =
               block->condition->dsts[0];
            br2->cat0.target = block->successors[0];

            switch (block->brtype) {
            case IR3_BRANCH_COND:
               br1->cat0.brtype = br2->cat0.brtype = BRANCH_PLAIN;
               break;
            case IR3_BRANCH_ALL:
               br1->cat0.brtype = BRANCH_ANY;
               br2->cat0.brtype = BRANCH_ALL;
               break;
            case IR3_BRANCH_ANY:
               br1->cat0.brtype = BRANCH_ALL;
               br2->cat0.brtype = BRANCH_ANY;
               break;
            case IR3_BRANCH_GETONE:
            case IR3_BRANCH_SHPS:
               unreachable("can't get here");
            }
         }
      } else if (block->successors[0]) {
         /* otherwise unconditional jump to next block: */
         struct ir3_instruction *jmp;

         jmp = ir3_JUMP(block);
         jmp->cat0.target = block->successors[0];
      }
   }
}

/* Here we work around the fact that kill doesn't actually kill the thread as
 * GL expects. The last instruction always needs to be an end instruction,
 * which means that if we're stuck in a loop where kill is the only way out,
 * then we may have to jump out to the end. kill may also have the d3d
 * semantics of converting the thread to a helper thread, rather than setting
 * the exec mask to 0, in which case the helper thread could get stuck in an
 * infinite loop.
 *
 * We do this late, both to give the scheduler the opportunity to reschedule
 * kill instructions earlier and to avoid having to create a separate basic
 * block.
 *
 * TODO: Assuming that the wavefront doesn't stop as soon as all threads are
 * killed, we might benefit by doing this more aggressively when the remaining
 * part of the program after the kill is large, since that would let us
 * skip over the instructions when there are no non-killed threads left.
 */
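
/* The transform amounts to something like (illustrative only, block name
 * made up): inside a loop body,
 *
 *    kill p0.x
 *
 * becomes
 *
 *    kill p0.x
 *    br p0.x, #end_block
 *
 * so there is a way out of the loop once the kill has executed.
 */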
static void
kill_sched(struct ir3 *ir, struct ir3_shader_variant *so)
{
   /* True if we know that this block will always eventually lead to the end
    * block:
    */
   bool always_ends = true;
   bool added = false;
   struct ir3_block *last_block =
      list_last_entry(&ir->block_list, struct ir3_block, node);

   foreach_block_rev (block, &ir->block_list) {
      for (unsigned i = 0; i < 2 && block->successors[i]; i++) {
         if (block->successors[i]->start_ip <= block->end_ip)
            always_ends = false;
      }

      if (always_ends)
         continue;

      foreach_instr_safe (instr, &block->instr_list) {
         if (instr->opc != OPC_KILL)
            continue;

         struct ir3_instruction *br = ir3_instr_create(block, OPC_B, 0, 1);
         ir3_src_create(br, instr->srcs[0]->num, instr->srcs[0]->flags)->wrmask =
            1;
         br->cat0.target =
            list_last_entry(&ir->block_list, struct ir3_block, node);

         list_del(&br->node);
         list_add(&br->node, &instr->node);

         added = true;
      }
   }

   if (added) {
      /* I'm not entirely sure how the branchstack works, but we probably
       * need to add at least one entry for the divergence which is resolved
       * at the end:
       */
      so->branchstack++;

      /* We don't update predecessors/successors, so we have to do this
       * manually:
       */
      mark_jp(last_block);
   }
}

/* Insert nop's required to make this a legal/valid shader program: */
static void
nop_sched(struct ir3 *ir, struct ir3_shader_variant *so)
{
   foreach_block (block, &ir->block_list) {
      struct ir3_instruction *last = NULL;
      struct list_head instr_list;

      /* remove all the instructions from the list, we'll be adding
       * them back in as we go
       */
      list_replace(&block->instr_list, &instr_list);
      list_inithead(&block->instr_list);

      foreach_instr_safe (instr, &instr_list) {
         unsigned delay = ir3_delay_calc(block, instr, so->mergedregs);

         /* NOTE: I think the nopN encoding works for a5xx and
          * probably a4xx, but not a3xx.  So far only tested on
          * a6xx.
          */
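         /* For example (illustrative): a required delay of 5 cycles after a
          * cat2 "add.f" could be folded into "(nop3)add.f ..." followed by a
          * "(rpt1)nop", rather than emitting five standalone nops.
          */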

         if ((delay > 0) && (ir->compiler->gen >= 6) && last &&
             ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3)) &&
             (last->repeat == 0)) {
            /* the previous cat2/cat3 instruction can encode at most 3 nop's: */
            unsigned transfer = MIN2(delay, 3 - last->nop);
            last->nop += transfer;
            delay -= transfer;
         }

         if ((delay > 0) && last && (last->opc == OPC_NOP)) {
            /* the previous nop can encode at most 5 repeats: */
            unsigned transfer = MIN2(delay, 5 - last->repeat);
            last->repeat += transfer;
            delay -= transfer;
         }

         if (delay > 0) {
            assert(delay <= 6);
            ir3_NOP(block)->repeat = delay - 1;
         }

         list_addtail(&instr->node, &block->instr_list);
         last = instr;
      }
   }
}

bool
ir3_legalize(struct ir3 *ir, struct ir3_shader_variant *so, int *max_bary)
{
   struct ir3_legalize_ctx *ctx = rzalloc(ir, struct ir3_legalize_ctx);
   bool mergedregs = so->mergedregs;
   bool progress;

   ctx->so = so;
   ctx->max_bary = -1;
   ctx->compiler = ir->compiler;
   ctx->type = ir->type;

   /* allocate per-block data: */
   foreach_block (block, &ir->block_list) {
      struct ir3_legalize_block_data *bd =
         rzalloc(ctx, struct ir3_legalize_block_data);

      regmask_init(&bd->state.needs_ss_war, mergedregs);
      regmask_init(&bd->state.needs_ss, mergedregs);
      regmask_init(&bd->state.needs_sy, mergedregs);

      block->data = bd;
   }

   /* We may have failed to pull all input loads into the first block.
    * In such a case, at the moment we aren't able to find a better place
    * for (ei) than the end of the program.
    * a5xx and a6xx do automatically release varying storage at the end.
    */
   ctx->early_input_release = true;
   struct ir3_block *start_block = ir3_after_preamble(ir);
   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {
         if (is_input(instr)) {
            ctx->has_inputs = true;
            if (block != start_block) {
               ctx->early_input_release = false;
               break;
            }
         }
      }
   }

   assert(ctx->early_input_release || ctx->compiler->gen >= 5);

   /* process each block: */
   do {
      progress = false;
      foreach_block (block, &ir->block_list) {
         progress |= legalize_block(ctx, block);
      }
   } while (progress);

   *max_bary = ctx->max_bary;

   block_sched(ir);
   if (so->type == MESA_SHADER_FRAGMENT)
      kill_sched(ir, so);

   foreach_block (block, &ir->block_list) {
      progress |= apply_fine_deriv_macro(ctx, block);
   }

   nop_sched(ir, so);

   while (opt_jump(ir))
      ;

   ir3_count_instructions(ir);
   resolve_jumps(ir);

   mark_xvergence_points(ir);

   ralloc_free(ctx);

   return true;
}