/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/ralloc.h"
#include "util/u_math.h"

#include "ir3.h"
#include "ir3_shader.h"

/*
 * Legalize:
 *
 * The legalize pass handles ensuring sufficient nops and sync flags for
 * correct execution.
 *
 * 1) Iteratively determine where sync ((sy)/(ss)) flags are needed,
 *    based on state flowing out of predecessor blocks, until there is
 *    no further change. In some cases this requires inserting nops.
 * 2) Mark (ei) on last varying input, and (ul) on last use of a0.x
 * 3) Final nop scheduling for instruction latency
 * 4) Resolve jumps and schedule blocks, marking potential convergence
 *    points with (jp)
 */
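
/* Roughly speaking (illustrative sketch, see the code below for the precise
 * rules): an instruction consuming the result of a texture fetch or other
 * (sy)-producer needs the (sy) flag, and one consuming an SFU or local-load
 * result, or overwriting a register still pending a read by a tex/sfu/mem
 * instruction, needs the (ss) flag, e.g.:
 *
 *    sam rD, rS, ...          ; texture fetch writes rD
 *    ...
 *    (sy)add.f rX, rD, rE     ; first consumer of rD must sync
 *
 * (rD/rE/rS/rX are placeholder register names.)
 */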

struct ir3_legalize_ctx {
   struct ir3_compiler *compiler;
   struct ir3_shader_variant *so;
   gl_shader_stage type;
   int max_bary;
   bool early_input_release;
};

struct ir3_legalize_state {
   regmask_t needs_ss;
   regmask_t needs_ss_war; /* write after read */
   regmask_t needs_sy;
};

struct ir3_legalize_block_data {
   bool valid;
   struct ir3_legalize_state state;
};

/* We want to evaluate each block from the perspective of each of its
 * predecessor blocks, so that the flags we set are the union of all
 * possible program paths.
 *
 * To do this, we need to know the output state (needs_ss/ss_war/sy)
 * of all predecessor blocks. The tricky thing is loops, which mean
 * that we can't simply recursively process each predecessor block
 * before legalizing the current block.
 *
 * How we handle that is by looping over all the blocks until the
 * results converge. If the output state of a given block changes
 * in a given pass, this means that its successor blocks are not
 * yet fully legalized.
 */
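/* For example (sketch, not part of the original comment): with a loop,
 *
 *    block0 -> block1 -> block2 -> block1 (back edge), then -> block3
 *
 * block1's input state depends on block2, which hasn't been processed the
 * first time block1 is visited. When block2's output state later changes,
 * block1 is marked invalid and re-legalized on the next pass of the outer
 * loop, until no block's output state changes.
 */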

static bool
legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
   struct ir3_legalize_block_data *bd = block->data;

   if (bd->valid)
      return false;

   struct ir3_instruction *last_rel = NULL;
   struct ir3_instruction *last_n = NULL;
   struct list_head instr_list;
   struct ir3_legalize_state prev_state = bd->state;
   struct ir3_legalize_state *state = &bd->state;
   bool last_input_needs_ss = false;
   bool has_tex_prefetch = false;
   bool mergedregs = ctx->so->mergedregs;

   /* our input state is the OR of all predecessor blocks' state: */
   for (unsigned i = 0; i < block->predecessors_count; i++) {
      struct ir3_block *predecessor = block->predecessors[i];
      struct ir3_legalize_block_data *pbd = predecessor->data;
      struct ir3_legalize_state *pstate = &pbd->state;

      /* Our input (ss)/(sy) state is based on OR'ing the output
       * state of all our predecessor blocks
       */
      regmask_or(&state->needs_ss, &state->needs_ss, &pstate->needs_ss);
      regmask_or(&state->needs_ss_war, &state->needs_ss_war,
                 &pstate->needs_ss_war);
      regmask_or(&state->needs_sy, &state->needs_sy, &pstate->needs_sy);
   }

   unsigned input_count = 0;

   foreach_instr (n, &block->instr_list) {
      if (is_input(n)) {
         input_count++;
      }
   }

   unsigned inputs_remaining = input_count;

   /* Either inputs are in the first block or we expect inputs to be released
    * with the end of the program.
    */
   assert(input_count == 0 || !ctx->early_input_release ||
          block == ir3_start_block(block->shader));

   /* remove all the instructions from the list, we'll be adding
    * them back in as we go
    */
   list_replace(&block->instr_list, &instr_list);
   list_inithead(&block->instr_list);

   foreach_instr_safe (n, &instr_list) {
      unsigned i;

      n->flags &= ~(IR3_INSTR_SS | IR3_INSTR_SY);

      /* _meta::tex_prefetch instructions removed later in
       * collect_tex_prefetches()
       */
      if (is_meta(n) && (n->opc != OPC_META_TEX_PREFETCH))
         continue;

      if (is_input(n)) {
         struct ir3_register *inloc = n->srcs[0];
         assert(inloc->flags & IR3_REG_IMMED);
         ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);
      }

      if (last_n && is_barrier(last_n)) {
         n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
         last_input_needs_ss = false;
         regmask_init(&state->needs_ss_war, mergedregs);
         regmask_init(&state->needs_ss, mergedregs);
         regmask_init(&state->needs_sy, mergedregs);
      }

      if (last_n && (last_n->opc == OPC_PREDT)) {
         n->flags |= IR3_INSTR_SS;
         regmask_init(&state->needs_ss_war, mergedregs);
         regmask_init(&state->needs_ss, mergedregs);
      }

      /* NOTE: consider dst register too.. it could happen that
       * texture sample instruction (for example) writes some
       * components which are unused. A subsequent instruction
       * that writes the same register can race w/ the sam instr
       * resulting in undefined results:
       */
      for (i = 0; i < n->dsts_count + n->srcs_count; i++) {
         struct ir3_register *reg;
         if (i < n->dsts_count)
            reg = n->dsts[i];
         else
            reg = n->srcs[i - n->dsts_count];

         if (reg_gpr(reg)) {

            /* TODO: we probably only need (ss) for alu
             * instr consuming sfu result.. need to make
             * some tests for both this and (sy)..
             */
            if (regmask_get(&state->needs_ss, reg)) {
               n->flags |= IR3_INSTR_SS;
               last_input_needs_ss = false;
               regmask_init(&state->needs_ss_war, mergedregs);
               regmask_init(&state->needs_ss, mergedregs);
            }

            if (regmask_get(&state->needs_sy, reg)) {
               n->flags |= IR3_INSTR_SY;
               regmask_init(&state->needs_sy, mergedregs);
            }
         }

         /* TODO: is it valid to have address reg loaded from a
          * relative src (ie. mova a0, c<a0.x+4>)? If so, the
          * last_rel check below should be moved ahead of this:
          */
         if (reg->flags & IR3_REG_RELATIV)
            last_rel = n;
      }

      foreach_dst (reg, n) {
         if (regmask_get(&state->needs_ss_war, reg)) {
            n->flags |= IR3_INSTR_SS;
            last_input_needs_ss = false;
            regmask_init(&state->needs_ss_war, mergedregs);
            regmask_init(&state->needs_ss, mergedregs);
         }

         if (last_rel && (reg->num == regid(REG_A0, 0))) {
            last_rel->flags |= IR3_INSTR_UL;
            last_rel = NULL;
         }
      }

      /* cat5+ does not have an (ss) bit; if one is needed we insert a
       * nop to carry the sync flag. Would be kinda clever if we were
       * aware of this during scheduling, but this should be a pretty
       * rare case:
       */
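      /* For example (illustrative, placeholder registers), instead of
       *
       *    (ss)sam rD, rS, ...
       *
       * we end up emitting
       *
       *    (ss)nop
       *    sam rD, rS, ...
       */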
      if ((n->flags & IR3_INSTR_SS) && (opc_cat(n->opc) >= 5)) {
         struct ir3_instruction *nop;
         nop = ir3_NOP(block);
         nop->flags |= IR3_INSTR_SS;
         n->flags &= ~IR3_INSTR_SS;
      }

      /* need to be able to set (ss) on first instruction: */
      if (list_is_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
         ir3_NOP(block);

      if (ctx->compiler->samgq_workaround &&
          ctx->type != MESA_SHADER_FRAGMENT &&
          ctx->type != MESA_SHADER_COMPUTE && n->opc == OPC_SAMGQ) {
         struct ir3_instruction *samgp;

         list_delinit(&n->node);

         for (i = 0; i < 4; i++) {
            samgp = ir3_instr_clone(n);
            samgp->opc = OPC_SAMGP0 + i;
            if (i > 1)
               samgp->flags |= IR3_INSTR_SY;
         }
      } else {
         list_delinit(&n->node);
         list_addtail(&n->node, &block->instr_list);
      }

      if (is_sfu(n))
         regmask_set(&state->needs_ss, n->dsts[0]);

      if (is_tex_or_prefetch(n)) {
         regmask_set(&state->needs_sy, n->dsts[0]);
         if (n->opc == OPC_META_TEX_PREFETCH)
            has_tex_prefetch = true;
      } else if (n->opc == OPC_RESINFO) {
         regmask_set(&state->needs_ss, n->dsts[0]);
         ir3_NOP(block)->flags |= IR3_INSTR_SS;
         last_input_needs_ss = false;
      } else if (is_load(n)) {
         /* seems like ldlv needs (ss) bit instead?? which is odd but
          * makes a bunch of flat-varying tests start working on a4xx.
          */
         if ((n->opc == OPC_LDLV) || (n->opc == OPC_LDL) ||
             (n->opc == OPC_LDLW))
            regmask_set(&state->needs_ss, n->dsts[0]);
         else
            regmask_set(&state->needs_sy, n->dsts[0]);
      } else if (is_atomic(n->opc)) {
         if (n->flags & IR3_INSTR_G) {
            if (ctx->compiler->gen >= 6) {
               /* New encoding, returns result via second src: */
               regmask_set(&state->needs_sy, n->srcs[2]);
            } else {
               regmask_set(&state->needs_sy, n->dsts[0]);
            }
         } else {
            regmask_set(&state->needs_ss, n->dsts[0]);
         }
      }

      if (is_ssbo(n->opc) || (is_atomic(n->opc) && (n->flags & IR3_INSTR_G)))
         ctx->so->has_ssbo = true;

      /* both tex/sfu appear to not always immediately consume
       * their src register(s):
       */
      if (is_tex(n) || is_sfu(n) || is_mem(n)) {
         foreach_src (reg, n) {
            regmask_set(&state->needs_ss_war, reg);
         }
      }
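
      /* (Note, not in the original: this is the write-after-read case --
       * a later instruction that overwrites one of these source registers
       * before the tex/sfu/mem op has consumed it picks up an (ss) in the
       * foreach_dst loop above.)
       */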

      if (ctx->early_input_release && is_input(n)) {
         last_input_needs_ss |= (n->opc == OPC_LDLV);

         assert(inputs_remaining > 0);
         inputs_remaining--;
         if (inputs_remaining == 0) {
            /* This is the last input. We add the (ei) flag to release
             * varying memory after this executes. If it's an ldlv,
             * however, we need to insert a dummy bary.f on which we can
             * set the (ei) flag. We may also need to insert an (ss) to
             * guarantee that all ldlv's have finished fetching their
             * results before releasing the varying memory.
             */
            struct ir3_instruction *last_input = n;
            if (n->opc == OPC_LDLV) {
               struct ir3_instruction *baryf;

               /* (ss)bary.f (ei)r63.x, 0, r0.x */
               baryf = ir3_instr_create(block, OPC_BARY_F, 1, 2);
               ir3_dst_create(baryf, regid(63, 0), 0);
               ir3_src_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
               ir3_src_create(baryf, regid(0, 0), 0);

               last_input = baryf;
            }

            last_input->dsts[0]->flags |= IR3_REG_EI;
            if (last_input_needs_ss) {
               last_input->flags |= IR3_INSTR_SS;
               regmask_init(&state->needs_ss_war, mergedregs);
               regmask_init(&state->needs_ss, mergedregs);
            }
         }
      }

      last_n = n;
   }

   assert(inputs_remaining == 0 || !ctx->early_input_release);

   if (has_tex_prefetch && input_count == 0) {
      /* texture prefetch, but *no* inputs.. we need to insert a
       * dummy bary.f at the top of the shader to unblock varying
       * storage:
       */
      struct ir3_instruction *baryf;

      /* (ss)bary.f (ei)r63.x, 0, r0.x */
      baryf = ir3_instr_create(block, OPC_BARY_F, 1, 2);
      ir3_dst_create(baryf, regid(63, 0), 0)->flags |= IR3_REG_EI;
      ir3_src_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
      ir3_src_create(baryf, regid(0, 0), 0);

      /* insert the dummy bary.f at head: */
      list_delinit(&baryf->node);
      list_add(&baryf->node, &block->instr_list);
   }

   if (last_rel)
      last_rel->flags |= IR3_INSTR_UL;

   bd->valid = true;

   if (memcmp(&prev_state, state, sizeof(*state))) {
      /* our output state changed, this invalidates all of our
       * successors:
       */
      for (unsigned i = 0; i < ARRAY_SIZE(block->successors); i++) {
         if (!block->successors[i])
            break;
         struct ir3_legalize_block_data *pbd = block->successors[i]->data;
         pbd->valid = false;
      }
   }

   return true;
}

/* Expands dsxpp and dsypp macros to:
 *
 *    dsxpp.1 dst, src
 *    dsxpp.1.p dst, src
 *
 * We apply this after flags syncing, as we don't want to sync in between the
 * two (which might happen if dst == src). We do it before nop scheduling
 * because that needs to count actual instructions.
 */
static bool
apply_fine_deriv_macro(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
   struct list_head instr_list;

   /* remove all the instructions from the list, we'll be adding
    * them back in as we go
    */
   list_replace(&block->instr_list, &instr_list);
   list_inithead(&block->instr_list);

   foreach_instr_safe (n, &instr_list) {
      list_addtail(&n->node, &block->instr_list);

      if (n->opc == OPC_DSXPP_MACRO || n->opc == OPC_DSYPP_MACRO) {
         n->opc = (n->opc == OPC_DSXPP_MACRO) ? OPC_DSXPP_1 : OPC_DSYPP_1;

         struct ir3_instruction *op_p = ir3_instr_clone(n);
         op_p->flags = IR3_INSTR_P;

         ctx->so->need_fine_derivatives = true;
      }
   }

   return true;
}

/* NOTE: branch instructions are always the last instruction(s)
 * in the block. We take advantage of this as we resolve the
 * branches, since "if (foo) break;" constructs turn into
 * something like:
 *
 *    block3 {
 *       ...
 *       0029:021: mov.s32s32 r62.x, r1.y
 *       0082:022: br !p0.x, target=block5
 *       0083:023: br p0.x, target=block4
 *       // succs: if _[0029:021: mov.s32s32] block4; else block5;
 *    }
 *    block4 {
 *       0084:024: jump, target=block6
 *       // succs: block6;
 *    }
 *    block5 {
 *       0085:025: jump, target=block7
 *       // succs: block7;
 *    }
 *
 * ie. the only instruction in block4/block5 is a jump, so when
 * resolving branches we can easily detect this by checking that
 * the first instruction in the target block is itself a jump, and
 * set up the br directly to the jump's target (and strip back out
 * the now unreached jump)
 *
 * TODO sometimes we end up with things like:
 *
 *    br !p0.x, #2
 *    br p0.x, #12
 *    add.u r0.y, r0.y, 1
 *
 * If we swapped the order of the branches, we could drop one.
 */
static struct ir3_block *
resolve_dest_block(struct ir3_block *block)
{
   /* special case for last block: */
   if (!block->successors[0])
      return block;

   /* NOTE that we may or may not have inserted the jump
    * in the target block yet, so conditions to resolve
    * the dest to the dest block's successor are:
    *
    * (1) successor[1] == NULL &&
    * (2) (block-is-empty || only-instr-is-jump)
    */
   if (block->successors[1] == NULL) {
      if (list_is_empty(&block->instr_list)) {
         return block->successors[0];
      } else if (list_length(&block->instr_list) == 1) {
         struct ir3_instruction *instr =
            list_first_entry(&block->instr_list, struct ir3_instruction, node);
         if (instr->opc == OPC_JUMP) {
            /* If this jump is backwards, then we will probably convert
             * the jump being resolved to a backwards jump, which will
             * change a loop-with-continue or loop-with-if into a
             * doubly-nested loop and change the convergence behavior.
             * Disallow this here.
             */
            if (block->successors[0]->index <= block->index)
               return block;
            return block->successors[0];
         }
      }
   }
   return block;
}

static void
remove_unused_block(struct ir3_block *old_target)
{
   list_delinit(&old_target->node);

   /* cleanup dangling predecessors: */
   for (unsigned i = 0; i < ARRAY_SIZE(old_target->successors); i++) {
      if (old_target->successors[i]) {
         struct ir3_block *succ = old_target->successors[i];
         ir3_block_remove_predecessor(succ, old_target);
      }
   }
}

static bool
retarget_jump(struct ir3_instruction *instr, struct ir3_block *new_target)
{
   struct ir3_block *old_target = instr->cat0.target;
   struct ir3_block *cur_block = instr->block;

   /* update current block's successors to reflect the retargeting: */
   if (cur_block->successors[0] == old_target) {
      cur_block->successors[0] = new_target;
   } else {
      debug_assert(cur_block->successors[1] == old_target);
      cur_block->successors[1] = new_target;
   }

   /* also update physical_successors.. we don't really need them at
    * this stage, but it keeps ir3_validate happy:
    */
   if (cur_block->physical_successors[0] == old_target) {
      cur_block->physical_successors[0] = new_target;
   } else {
      debug_assert(cur_block->physical_successors[1] == old_target);
      cur_block->physical_successors[1] = new_target;
   }

   /* update new target's predecessors: */
   ir3_block_add_predecessor(new_target, cur_block);

   /* and remove old_target's predecessor: */
   ir3_block_remove_predecessor(old_target, cur_block);

   instr->cat0.target = new_target;

   if (old_target->predecessors_count == 0) {
      remove_unused_block(old_target);
      return true;
   }

   return false;
}

static bool
opt_jump(struct ir3 *ir)
{
   bool progress = false;

   unsigned index = 0;
   foreach_block (block, &ir->block_list)
      block->index = index++;

   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {
         if (!is_flow(instr) || !instr->cat0.target)
            continue;

         struct ir3_block *tblock = resolve_dest_block(instr->cat0.target);
         if (tblock != instr->cat0.target) {
            progress = true;

            /* Exit early if we deleted a block to avoid iterator
             * weirdness/assert fails
             */
            if (retarget_jump(instr, tblock))
               return true;
         }
      }

      /* Detect the case where the block ends either with:
       * - A single unconditional jump to the next block.
       * - Two jump instructions with opposite conditions, one of which
       *   jumps to the next block.
       * We can remove the one that jumps to the next block in either case.
       */
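      /* e.g. (illustrative):
       *
       *    br !p0.x, target=block5
       *    br p0.x, target=block4   <-- block4 is the fall-thru, drop it
       */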
      if (list_is_empty(&block->instr_list))
         continue;

      struct ir3_instruction *jumps[2] = {NULL, NULL};
      jumps[0] =
         list_last_entry(&block->instr_list, struct ir3_instruction, node);
      if (!list_is_singular(&block->instr_list))
         jumps[1] =
            list_last_entry(&jumps[0]->node, struct ir3_instruction, node);

      if (jumps[0]->opc == OPC_JUMP)
         jumps[1] = NULL;
      else if (jumps[0]->opc != OPC_B || !jumps[1] || jumps[1]->opc != OPC_B)
         continue;

      for (unsigned i = 0; i < 2; i++) {
         if (!jumps[i])
            continue;

         struct ir3_block *tblock = jumps[i]->cat0.target;
         if (&tblock->node == block->node.next) {
            list_delinit(&jumps[i]->node);
            progress = true;
            break;
         }
      }
   }

   return progress;
}

static void
resolve_jumps(struct ir3 *ir)
{
   foreach_block (block, &ir->block_list)
      foreach_instr (instr, &block->instr_list)
         if (is_flow(instr) && instr->cat0.target) {
            struct ir3_instruction *target = list_first_entry(
               &instr->cat0.target->instr_list, struct ir3_instruction, node);

            instr->cat0.immed = (int)target->ip - (int)instr->ip;
         }
}

static void
mark_jp(struct ir3_block *block)
{
   struct ir3_instruction *target =
      list_first_entry(&block->instr_list, struct ir3_instruction, node);
   target->flags |= IR3_INSTR_JP;
}

/* Mark points where control flow converges or diverges.
 *
 * Divergence points could actually be re-convergence points where
 * "parked" threads are reconverged with threads that took the opposite
 * path last time around. Possibly it is easier to think of (jp) as
 * "the execution mask might have changed".
 */
static void
mark_xvergence_points(struct ir3 *ir)
{
   foreach_block (block, &ir->block_list) {
      if (block->predecessors_count > 1) {
         /* if a block has more than one possible predecessor, then
          * the first instruction is a convergence point.
          */
         mark_jp(block);
      } else if (block->predecessors_count == 1) {
         /* If a block has one predecessor, which has multiple possible
          * successors, it is a divergence point.
          */
         for (unsigned i = 0; i < block->predecessors_count; i++) {
            struct ir3_block *predecessor = block->predecessors[i];
            if (predecessor->successors[1]) {
               mark_jp(block);
            }
         }
      }
   }
}

/* Insert the branch/jump instructions for flow control between blocks.
 * Initially this is done naively, without considering if the successor
 * block immediately follows the current block (ie. so no jump required),
 * but that is cleaned up in opt_jump().
 *
 * TODO what ensures that the last write to p0.x in a block is the
 * branch condition? Have we been getting lucky all this time?
 */
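/* For an if/else block with two successors this ends up emitting
 * something like (illustrative):
 *
 *    br !p0.x, target=<else-block>
 *    br p0.x, target=<then-block>
 *
 * and a plain jump for blocks with a single successor.
 */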
static void
block_sched(struct ir3 *ir)
{
   foreach_block (block, &ir->block_list) {
      if (block->successors[1]) {
         /* if/else, conditional branches to "then" or "else": */
         struct ir3_instruction *br1, *br2;

         if (block->brtype == IR3_BRANCH_GETONE) {
            /* getone can't be inverted, and it wouldn't even make sense
             * to follow it with an inverted branch, so follow it by an
             * unconditional branch.
             */
            debug_assert(!block->condition);
            br1 = ir3_GETONE(block);
            br1->cat0.target = block->successors[1];

            br2 = ir3_JUMP(block);
            br2->cat0.target = block->successors[0];
         } else {
            debug_assert(block->condition);

            /* create "else" branch first (since "then" block should
             * frequently/always end up being a fall-thru):
             */
            br1 = ir3_instr_create(block, OPC_B, 0, 1);
            ir3_src_create(br1, regid(REG_P0, 0), 0)->def =
               block->condition->dsts[0];
            br1->cat0.inv1 = true;
            br1->cat0.target = block->successors[1];

            /* "then" branch: */
            br2 = ir3_instr_create(block, OPC_B, 0, 1);
            ir3_src_create(br2, regid(REG_P0, 0), 0)->def =
               block->condition->dsts[0];
            br2->cat0.target = block->successors[0];

            switch (block->brtype) {
            case IR3_BRANCH_COND:
               br1->cat0.brtype = br2->cat0.brtype = BRANCH_PLAIN;
               break;
            case IR3_BRANCH_ALL:
               br1->cat0.brtype = BRANCH_ANY;
               br2->cat0.brtype = BRANCH_ALL;
               break;
            case IR3_BRANCH_ANY:
               br1->cat0.brtype = BRANCH_ALL;
               br2->cat0.brtype = BRANCH_ANY;
               break;
            case IR3_BRANCH_GETONE:
               unreachable("can't get here");
            }
         }
      } else if (block->successors[0]) {
         /* otherwise unconditional jump to next block: */
         struct ir3_instruction *jmp;

         jmp = ir3_JUMP(block);
         jmp->cat0.target = block->successors[0];
      }
   }
}

/* Here we work around the fact that kill doesn't actually kill the thread as
 * GL expects. The last instruction always needs to be an end instruction,
 * which means that if we're stuck in a loop where kill is the only way out,
 * then we may have to jump out to the end. kill may also have the d3d
 * semantics of converting the thread to a helper thread, rather than setting
 * the exec mask to 0, in which case the helper thread could get stuck in an
 * infinite loop.
 *
 * We do this late, both to give the scheduler the opportunity to reschedule
 * kill instructions earlier and to avoid having to create a separate basic
 * block.
 *
 * TODO: Assuming that the wavefront doesn't stop as soon as all threads are
 * killed, we might benefit by doing this more aggressively when the remaining
 * part of the program after the kill is large, since that would let us
 * skip over the instructions when there are no non-killed threads left.
 */
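/* Concretely (illustrative sketch): after each kill we append a branch using
 * the kill's own condition, pointing at the final block, roughly:
 *
 *    kill p0.x
 *    br p0.x, target=<last block>
 */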
static void
kill_sched(struct ir3 *ir, struct ir3_shader_variant *so)
{
   /* True if we know that this block will always eventually lead to the end
    * block:
    */
   bool always_ends = true;
   bool added = false;
   struct ir3_block *last_block =
      list_last_entry(&ir->block_list, struct ir3_block, node);

   foreach_block_rev (block, &ir->block_list) {
      for (unsigned i = 0; i < 2 && block->successors[i]; i++) {
         if (block->successors[i]->start_ip <= block->end_ip)
            always_ends = false;
      }

      if (always_ends)
         continue;

      foreach_instr_safe (instr, &block->instr_list) {
         if (instr->opc != OPC_KILL)
            continue;

         struct ir3_instruction *br = ir3_instr_create(block, OPC_B, 0, 1);
         ir3_src_create(br, instr->srcs[0]->num, instr->srcs[0]->flags)->wrmask =
            1;
         br->cat0.target =
            list_last_entry(&ir->block_list, struct ir3_block, node);

         list_del(&br->node);
         list_add(&br->node, &instr->node);

         added = true;
      }
   }

   if (added) {
      /* I'm not entirely sure how the branchstack works, but we probably
       * need to add at least one entry for the divergence which is resolved
       * at the end:
       */
      so->branchstack++;

      /* We don't update predecessors/successors, so we have to do this
       * manually:
       */
      mark_jp(last_block);
   }
}

/* Insert nop's required to make this a legal/valid shader program: */
static void
nop_sched(struct ir3 *ir, struct ir3_shader_variant *so)
{
   foreach_block (block, &ir->block_list) {
      struct ir3_instruction *last = NULL;
      struct list_head instr_list;

      /* remove all the instructions from the list, we'll be adding
       * them back in as we go
       */
      list_replace(&block->instr_list, &instr_list);
      list_inithead(&block->instr_list);

      foreach_instr_safe (instr, &instr_list) {
         unsigned delay = ir3_delay_calc_exact(block, instr, so->mergedregs);

         /* NOTE: I think the nopN encoding works for a5xx and
          * probably a4xx, but not a3xx. So far only tested on
          * a6xx.
          */
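         /* (Illustrative, not from the original comment: with e.g.
          * delay == 4 and a preceding un-repeated cat2/cat3 instruction,
          * 3 cycles get folded into that instruction's nop field below
          * and the remaining 1 cycle becomes an explicit nop.)
          */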

         if ((delay > 0) && (ir->compiler->gen >= 6) && last &&
             ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3)) &&
             (last->repeat == 0)) {
            /* the previous cat2/cat3 instruction can encode at most 3 nop's: */
            unsigned transfer = MIN2(delay, 3 - last->nop);
            last->nop += transfer;
            delay -= transfer;
         }

         if ((delay > 0) && last && (last->opc == OPC_NOP)) {
            /* the previous nop can encode at most 5 repeats: */
            unsigned transfer = MIN2(delay, 5 - last->repeat);
            last->repeat += transfer;
            delay -= transfer;
         }

         if (delay > 0) {
            debug_assert(delay <= 6);
            ir3_NOP(block)->repeat = delay - 1;
         }

         list_addtail(&instr->node, &block->instr_list);
         last = instr;
      }
   }
}

bool
ir3_legalize(struct ir3 *ir, struct ir3_shader_variant *so, int *max_bary)
{
   struct ir3_legalize_ctx *ctx = rzalloc(ir, struct ir3_legalize_ctx);
   bool mergedregs = so->mergedregs;
   bool progress;

   ctx->so = so;
   ctx->max_bary = -1;
   ctx->compiler = ir->compiler;
   ctx->type = ir->type;

   /* allocate per-block data: */
   foreach_block (block, &ir->block_list) {
      struct ir3_legalize_block_data *bd =
         rzalloc(ctx, struct ir3_legalize_block_data);

      regmask_init(&bd->state.needs_ss_war, mergedregs);
      regmask_init(&bd->state.needs_ss, mergedregs);
      regmask_init(&bd->state.needs_sy, mergedregs);

      block->data = bd;
   }

   ir3_remove_nops(ir);

   /* We may have failed to pull all input loads into the first block.
    * In that case we currently can't find a better place for (ei) than
    * the end of the program. a5xx and a6xx do automatically release
    * varying storage at the end, which is why the assert below allows
    * this on gen >= 5.
    */
   ctx->early_input_release = true;
   struct ir3_block *start_block = ir3_start_block(ir);
   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {
         if (is_input(instr) && block != start_block) {
            ctx->early_input_release = false;
            break;
         }
      }
   }

   assert(ctx->early_input_release || ctx->compiler->gen >= 5);

   /* process each block: */
   do {
      progress = false;
      foreach_block (block, &ir->block_list) {
         progress |= legalize_block(ctx, block);
      }
   } while (progress);

   *max_bary = ctx->max_bary;

   block_sched(ir);
   if (so->type == MESA_SHADER_FRAGMENT)
      kill_sched(ir, so);

   foreach_block (block, &ir->block_list) {
      progress |= apply_fine_deriv_macro(ctx, block);
   }

   nop_sched(ir, so);

   while (opt_jump(ir))
      ;

   ir3_count_instructions(ir);
   resolve_jumps(ir);

   mark_xvergence_points(ir);

   ralloc_free(ctx);

   return true;
}