/*
 * Copyright © 2010 Luca Barbieri
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_jumps.cpp
 *
 * This pass lowers jumps (break, continue, and return) to if/else structures.
 *
 * It can be asked to:
 * 1. Pull jumps out of ifs where possible
 * 2. Remove all "continue"s, replacing them with an "execute flag"
 * 3. Replace all "break"s with a single conditional one at the end of the loop
 * 4. Replace all "return"s with a single return at the end of the function,
 *    for the main function and/or other functions
 *
 * (Items 3 and 4 are kept here for reference, but break and return lowering
 * has since been removed from this pass; only 1 and 2 are still performed,
 * as the do_lower_jumps() parameters reflect.)
 *
 * Applying this pass gives several benefits:
 * 1. All functions can be inlined.
 * 2. nv40 and other pre-DX10 chips without "continue" can be supported.
 * 3. nv30 and other pre-DX10 chips with no control flow at all are better
 *    supported.
 *
 * Continues are lowered by adding a per-loop "execute flag", initialized to
 * true, which, when cleared, inhibits all execution until the end of the
 * loop.
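 *
 * For example, roughly at the GLSL level (an illustrative sketch, not
 * literal IR; foo() stands for arbitrary code after the continue):
 *
 *    // before
 *    loop {
 *       if (c)
 *          continue;
 *       foo();
 *    }
 *
 *    // after
 *    loop {
 *       bool execute_flag = true;
 *       if (c)
 *          execute_flag = false;
 *       if (execute_flag)
 *          foo();
 *    }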
 *
 * Breaks are lowered to continues, plus setting a "break flag" that is
 * checked at the end of the loop and triggers the single lowered break.
 *
 * Returns are lowered to breaks/continues, plus adding a "return flag" that
 * causes loops to break again out of their enclosing loops until all the
 * loops are exited: then the "execute flag" logic will ignore everything
 * until the end of the function.
 *
 * Note that "continue" and "return" can also be implemented by adding
 * a dummy loop and using break.
 * However, this is bad for hardware with limited nesting depth, and
 * prevents further optimization, and thus is not currently performed.
 */

#include "compiler/glsl_types.h"
#include <string.h>
#include "ir.h"

/**
 * Enum recording the result of analyzing how control flow might exit
 * an IR node.
 *
 * Each possible value of jump_strength indicates a strictly stronger
 * guarantee on control flow than the previous value.
 *
 * The ordering of strengths roughly reflects the way jumps are
 * lowered: jumps with higher strength tend to be lowered to jumps of
 * lower strength.  Accordingly, strength is used as a heuristic to
 * determine which lowering to perform first.
 *
 * This enum is also used by get_jump_strength() to categorize
 * instructions as either break, continue, return, or other.  When
 * used in this fashion, strength_always_clears_execute_flag is not
 * used.
 *
 * The control flow analysis made by this optimization pass makes two
 * simplifying assumptions:
 *
 * - It ignores discard instructions, since they are lowered by a
 *   separate pass (lower_discard.cpp).
 *
 * - It assumes it is always possible for control to flow from a loop
 *   to the instruction immediately following it.  Technically, this
 *   is not true (since all execution paths through the loop might
 *   jump back to the top, or return from the function).
 *
 * Both of these simplifying assumptions are safe, since they can never
 * cause reachable code to be incorrectly classified as unreachable;
 * they can only do the opposite.
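 *
 * For example (an illustrative sketch of the classification): a block
 * ending in "if (c) return; else break;" cannot fall out the bottom,
 * and may either break or return, so the strongest guarantee that
 * holds for it is strength_break.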
 */
enum jump_strength
{
   /**
    * Analysis has produced no guarantee on how control flow might
    * exit this IR node.  It might fall out the bottom (with or
    * without clearing the execute flag, if present), or it might
    * continue to the top of the innermost enclosing loop, break out
    * of it, or return from the function.
    */
   strength_none,

   /**
    * The only way control can fall out the bottom of this node is
    * through a code path that clears the execute flag.  It might also
    * continue to the top of the innermost enclosing loop, break out
    * of it, or return from the function.
    */
   strength_always_clears_execute_flag,

   /**
    * Control cannot fall out the bottom of this node.  It might
    * continue to the top of the innermost enclosing loop, break out
    * of it, or return from the function.
    */
   strength_continue,

   /**
    * Control cannot fall out the bottom of this node, or continue to
    * the top of the innermost enclosing loop.  It can only break out
    * of it or return from the function.
    */
   strength_break,

   /**
    * Control cannot fall out the bottom of this node, continue to the
    * top of the innermost enclosing loop, or break out of it.  It can
    * only return from the function.
    */
   strength_return
};

namespace {

struct block_record
{
   /* Minimum jump strength of the lowered IR (not the pre-lowering IR).
    *
    * If the block ends with a jump, this must be the strength of that
    * jump (otherwise the jump would be dead and would already have been
    * deleted).
    *
    * If the block doesn't end with a jump, this can still differ from
    * strength_none if all paths through the block lead to some jump
    * (e.g. an if with a return in one branch and a break in the other,
    * while neither is being lowered).  Note that identical jumps are
    * usually unified, though.
    */
   jump_strength min_strength;

   /* Can anything in the block clear the execute flag? */
   bool may_clear_execute_flag;

   block_record()
   {
      this->min_strength = strength_none;
      this->may_clear_execute_flag = false;
   }
};

struct loop_record
{
   ir_function_signature* signature;
   ir_loop* loop;

   /* used to avoid lowering the break used to represent lowered breaks */
   unsigned nesting_depth;
   bool in_if_at_the_end_of_the_loop;

   bool may_set_return_flag;

   ir_variable* execute_flag; /* cleared to emulate continue */

   loop_record(ir_function_signature* p_signature = 0, ir_loop* p_loop = 0)
   {
      this->signature = p_signature;
      this->loop = p_loop;
      this->nesting_depth = 0;
      this->in_if_at_the_end_of_the_loop = false;
      this->may_set_return_flag = false;
      this->execute_flag = 0;
   }

   ir_variable* get_execute_flag()
   {
      /* also supported for the "function loop" */
      if(!this->execute_flag) {
         exec_list& list = this->loop ? this->loop->body_instructions : signature->body;
         this->execute_flag = new(this->signature) ir_variable(&glsl_type_builtin_bool, "execute_flag", ir_var_temporary);
         list.push_head(new(this->signature) ir_assignment(new(this->signature) ir_dereference_variable(execute_flag), new(this->signature) ir_constant(true)));
         list.push_head(this->execute_flag);
      }
      return this->execute_flag;
   }
};

struct function_record
{
   ir_function_signature* signature;
   ir_variable* return_flag; /* used to break out of all loops and then jump to the return instruction */
   ir_variable* return_value;
   unsigned nesting_depth;

   function_record(ir_function_signature* p_signature = 0)
   {
      this->signature = p_signature;
      this->return_flag = 0;
      this->return_value = 0;
      this->nesting_depth = 0;
   }
};

struct ir_lower_jumps_visitor : public ir_control_flow_visitor {
   /* Postconditions: on exit of any visit() function:
    *
    * ANALYSIS: this->block.min_strength,
    * this->block.may_clear_execute_flag, and
    * this->loop.may_set_return_flag are updated to reflect the
    * characteristics of the visited statement.
    *
    * DEAD_CODE_ELIMINATION: If this->block.min_strength is not
    * strength_none, the visited node is at the end of its exec_list.
    * In other words, any unreachable statements that follow the
    * visited statement in its exec_list have been removed.
    *
    * CONTAINED_JUMPS_LOWERED: If the visited statement contains other
    * statements, then should_lower_jump() is false for all of the
    * return, break, or continue statements it contains.
    *
    * Note that visiting a jump does not lower it.  That is the
    * responsibility of the statement (or function signature) that
    * contains the jump.
    */

   using ir_control_flow_visitor::visit;

   bool progress;

   struct function_record function;
   struct loop_record loop;
   struct block_record block;

   bool pull_out_jumps;
   bool lower_continue;

   ir_lower_jumps_visitor()
      : progress(false),
        pull_out_jumps(false),
        lower_continue(false)
   {
   }

   void truncate_after_instruction(exec_node *ir)
   {
      if (!ir)
         return;

      while (!ir->get_next()->is_tail_sentinel()) {
         ((ir_instruction *)ir->get_next())->remove();
         this->progress = true;
      }
   }

   void move_outer_block_inside(ir_instruction *ir, exec_list *inner_block)
   {
      while (!ir->get_next()->is_tail_sentinel()) {
         ir_instruction *move_ir = (ir_instruction *)ir->get_next();

         move_ir->remove();
         inner_block->push_tail(move_ir);
      }
   }

   virtual void visit(class ir_loop_jump * ir)
   {
      /* Eliminate all instructions after each one, since they are
       * unreachable.  This satisfies the DEAD_CODE_ELIMINATION
       * postcondition.
       */
      truncate_after_instruction(ir);

      /* Set this->block.min_strength based on this instruction.  This
       * satisfies the ANALYSIS postcondition.  It is not necessary to
       * update this->block.may_clear_execute_flag or
       * this->loop.may_set_return_flag, because an unlowered jump
       * instruction can't change any flags.
       */
      this->block.min_strength = ir->is_break() ? strength_break : strength_continue;

      /* The CONTAINED_JUMPS_LOWERED postcondition is already
       * satisfied, because jump statements can't contain other
       * statements.
       */
   }

   virtual void visit(class ir_return * ir)
   {
      /* Eliminate all instructions after each one, since they are
       * unreachable.  This satisfies the DEAD_CODE_ELIMINATION
       * postcondition.
       */
      truncate_after_instruction(ir);

      /* Set this->block.min_strength based on this instruction.  This
       * satisfies the ANALYSIS postcondition.  It is not necessary to
       * update this->block.may_clear_execute_flag or
       * this->loop.may_set_return_flag, because an unlowered return
       * instruction can't change any flags.
       */
      this->block.min_strength = strength_return;

      /* The CONTAINED_JUMPS_LOWERED postcondition is already
       * satisfied, because jump statements can't contain other
       * statements.
       */
   }

   virtual void visit(class ir_discard * ir)
   {
      /* Nothing needs to be done.  The ANALYSIS and
       * DEAD_CODE_ELIMINATION postconditions are already satisfied,
       * because discard statements are ignored by this optimization
       * pass.  The CONTAINED_JUMPS_LOWERED postcondition is already
       * satisfied, because discard statements can't contain other
       * statements.
       */
      (void) ir;
   }

   enum jump_strength get_jump_strength(ir_instruction* ir)
   {
      if(!ir)
         return strength_none;
      else if(ir->ir_type == ir_type_loop_jump) {
         if(((ir_loop_jump*)ir)->is_break())
            return strength_break;
         else
            return strength_continue;
      } else if(ir->ir_type == ir_type_return)
         return strength_return;
      else
         return strength_none;
   }

   bool should_lower_jump(ir_jump* ir)
   {
      unsigned strength = get_jump_strength(ir);
      bool lower;
      switch(strength)
      {
      case strength_none:
         lower = false; /* don't change this, code relies on it */
         break;
      case strength_continue:
         lower = lower_continue;
         break;
      case strength_break:
         lower = false;
         break;
      case strength_return:
         /* returns are no longer lowered by this pass */
         lower = false;
         break;
      }
      return lower;
   }

   block_record visit_block(exec_list* list)
   {
      /* Note: since visiting a node may change that node's next
       * pointer, we can't use visit_exec_list(), because
       * visit_exec_list() caches the node's next pointer before
       * visiting it.  So we use foreach_in_list() instead.
       *
       * foreach_in_list() isn't safe if the node being visited gets
       * removed, but fortunately this visitor doesn't do that.
       */

      block_record saved_block = this->block;
      this->block = block_record();
      foreach_in_list(ir_instruction, node, list) {
         node->accept(this);
      }
      block_record ret = this->block;
      this->block = saved_block;
      return ret;
   }

   virtual void visit(ir_if *ir)
   {
      if(this->loop.nesting_depth == 0 && ir->get_next()->is_tail_sentinel())
         this->loop.in_if_at_the_end_of_the_loop = true;

      ++this->function.nesting_depth;
      ++this->loop.nesting_depth;

      block_record block_records[2];
      ir_jump* jumps[2];

      /* Recursively lower nested jumps.  This satisfies the
       * CONTAINED_JUMPS_LOWERED postcondition, except in the case of
       * unconditional jumps at the end of ir->then_instructions and
       * ir->else_instructions, which are handled below.
       */
      block_records[0] = visit_block(&ir->then_instructions);
      block_records[1] = visit_block(&ir->else_instructions);

   retry: /* we get here if we put code after the if inside a branch */

      /* Determine which of ir->then_instructions and
       * ir->else_instructions end with an unconditional jump.
       */
      for(unsigned i = 0; i < 2; ++i) {
         exec_list& list = i ? ir->else_instructions : ir->then_instructions;
         jumps[i] = 0;
         if(!list.is_empty() && get_jump_strength((ir_instruction*)list.get_tail()))
            jumps[i] = (ir_jump*)list.get_tail();
      }

      /* Loop until we have satisfied the CONTAINED_JUMPS_LOWERED
       * postcondition by lowering jumps in both then_instructions and
       * else_instructions.
       */
      for(;;) {
         /* Determine the types of the jumps that terminate
          * ir->then_instructions and ir->else_instructions.
          */
         jump_strength jump_strengths[2];

         for(unsigned i = 0; i < 2; ++i) {
            if(jumps[i]) {
               jump_strengths[i] = block_records[i].min_strength;
               assert(jump_strengths[i] == get_jump_strength(jumps[i]));
            } else
               jump_strengths[i] = strength_none;
         }

         /* If both code paths end in a jump, and the jumps are the
          * same, and we are pulling out jumps, replace them with a
          * single jump that comes after the if instruction.  The new
          * jump will be visited next, and it will be lowered if
          * necessary by the loop or conditional that encloses it.
          */
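         /* Illustrative GLSL-level sketch: "if (c) break; else break;"
          * becomes an if with two empty branches followed by a single
          * "break".
          */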
         if(pull_out_jumps && jump_strengths[0] == jump_strengths[1]) {
            bool unify = true;
            if(jump_strengths[0] == strength_continue)
               ir->insert_after(new(ir) ir_loop_jump(ir_loop_jump::jump_continue));
            else if(jump_strengths[0] == strength_break)
               ir->insert_after(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
            /* FINISHME: unify returns with identical expressions */
            else if(jump_strengths[0] == strength_return && glsl_type_is_void(this->function.signature->return_type))
               ir->insert_after(new(ir) ir_return(NULL));
            else
               unify = false;

            if(unify) {
               jumps[0]->remove();
               jumps[1]->remove();
               this->progress = true;

               /* Update jumps[] to reflect the fact that the jumps
                * are gone, and update block_records[] to reflect the
                * fact that control can now flow to the next
                * instruction.
                */
               jumps[0] = 0;
               jumps[1] = 0;
               block_records[0].min_strength = strength_none;
               block_records[1].min_strength = strength_none;

               /* The CONTAINED_JUMPS_LOWERED postcondition is now
                * satisfied, so we can break out of the loop.
                */
               break;
            }
         }

         /* Lower a jump: if both need to be lowered, start with the
          * strongest one, so that we might later unify the lowered
          * version with the other one.
          */
         bool should_lower[2];
         for(unsigned i = 0; i < 2; ++i)
            should_lower[i] = should_lower_jump(jumps[i]);

         int lower;
         if(should_lower[1] && should_lower[0])
            lower = jump_strengths[1] > jump_strengths[0];
         else if(should_lower[0])
            lower = 0;
         else if(should_lower[1])
            lower = 1;
         else
            /* Neither code path ends in a jump that needs to be
             * lowered, so the CONTAINED_JUMPS_LOWERED postcondition
             * is satisfied and we can break out of the loop.
             */
            break;

         if(jump_strengths[lower] == strength_break) {
            unreachable("no lowering of breaks any more");
         } else if(jump_strengths[lower] == strength_continue) {
            /* To lower a continue, we create an execute flag (if the
             * loop doesn't have one already) and replace the continue
             * with an instruction that clears it.
             *
             * Note that this code path gets exercised when lowering
             * return statements that are not inside a loop, so
             * this->loop must be initialized even outside of loops.
             */
            ir_variable* execute_flag = this->loop.get_execute_flag();
            jumps[lower]->replace_with(new(ir) ir_assignment(new (ir) ir_dereference_variable(execute_flag), new (ir) ir_constant(false)));
            /* Note: we must update block_records and jumps to reflect
             * the fact that the control path has been altered to an
             * instruction that clears the execute flag.
             */
            jumps[lower] = 0;
            block_records[lower].min_strength = strength_always_clears_execute_flag;
            block_records[lower].may_clear_execute_flag = true;
            this->progress = true;

            /* Let the loop run again, in case the other branch of the
             * if needs to be lowered too.
             */
         }
      }

      /* Move a jump out if possible. */
      if(pull_out_jumps) {
         /* If one of the branches ends in a jump, and control cannot
          * fall out the bottom of the other branch, then we can move
          * the jump after the if.
          *
          * Set move_out to the branch we are moving a jump out of.
          */
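         /* Illustrative GLSL-level sketch: in
          * "if (c) { foo(); break; } else { continue; }" the "break"
          * can move out, giving
          * "if (c) { foo(); } else { continue; } break;".
          */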
         int move_out = -1;
         if(jumps[0] && block_records[1].min_strength >= strength_continue)
            move_out = 0;
         else if(jumps[1] && block_records[0].min_strength >= strength_continue)
            move_out = 1;

         if(move_out >= 0)
         {
            jumps[move_out]->remove();
            ir->insert_after(jumps[move_out]);
            /* Note: we must update block_records and jumps to reflect
             * the fact that the jump has been moved out of the if.
             */
            jumps[move_out] = 0;
            block_records[move_out].min_strength = strength_none;
            this->progress = true;
         }
      }

      /* Now satisfy the ANALYSIS postcondition by setting
       * this->block.min_strength and
       * this->block.may_clear_execute_flag based on the
       * characteristics of the two branches.
       */
      if(block_records[0].min_strength < block_records[1].min_strength)
         this->block.min_strength = block_records[0].min_strength;
      else
         this->block.min_strength = block_records[1].min_strength;
      this->block.may_clear_execute_flag = this->block.may_clear_execute_flag || block_records[0].may_clear_execute_flag || block_records[1].may_clear_execute_flag;

      /* Now we need to clean up the instructions that follow the
       * if.
       *
       * If those instructions are unreachable, then satisfy the
       * DEAD_CODE_ELIMINATION postcondition by eliminating them.
       * Otherwise that postcondition is already satisfied.
       */
      if(this->block.min_strength)
         truncate_after_instruction(ir);
      else if(this->block.may_clear_execute_flag)
      {
         /* If the "if" instruction might clear the execute flag, then
          * we need to guard any instructions that follow so that they
          * are only executed if the execute flag is set.
          *
          * If one of the branches of the "if" always clears the
          * execute flag, and the other branch never clears it, then
          * this is easy: just move all the instructions following the
          * "if" into the branch that never clears it.
          */
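         /* Illustrative GLSL-level sketch:
          * "if (c) { execute_flag = false; } foo();" becomes
          * "if (c) { execute_flag = false; } else { foo(); }".
          */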
         int move_into = -1;
         if(block_records[0].min_strength && !block_records[1].may_clear_execute_flag)
            move_into = 1;
         else if(block_records[1].min_strength && !block_records[0].may_clear_execute_flag)
            move_into = 0;

         if(move_into >= 0) {
            assert(!block_records[move_into].min_strength && !block_records[move_into].may_clear_execute_flag); /* otherwise, we just truncated */

            exec_list* list = move_into ? &ir->else_instructions : &ir->then_instructions;
            exec_node* next = ir->get_next();
            if(!next->is_tail_sentinel()) {
               move_outer_block_inside(ir, list);

               /* If any instructions moved, then we need to visit
                * them (since they are now inside the "if").  Since
                * block_records[move_into] is in its default state
                * (see assertion above), we can safely replace
                * block_records[move_into] with the result of this
                * analysis.
                */
               exec_list list;
               list.head_sentinel.next = next;
               block_records[move_into] = visit_block(&list);

               /*
                * Then we need to re-start our jump lowering, since one
                * of the instructions we moved might be a jump that
                * needs to be lowered.
                */
               this->progress = true;
               goto retry;
            }
         } else {
            /* If we get here, then the simple case didn't apply; we
             * need to actually guard the instructions that follow.
             *
             * To avoid creating unnecessarily-deep nesting, first
             * look through the instructions that follow and unwrap
             * any instructions that are already wrapped in the
             * appropriate guard.
             */
            exec_node *node;
            for(node = ir->get_next(); !node->is_tail_sentinel();)
            {
               ir_instruction* ir_after = (ir_instruction*)node;
               ir_if* ir_if = ir_after->as_if();
               if(ir_if && ir_if->else_instructions.is_empty()) {
                  ir_dereference_variable* ir_if_cond_deref = ir_if->condition->as_dereference_variable();
                  if(ir_if_cond_deref && ir_if_cond_deref->var == this->loop.execute_flag) {
                     ir_instruction* ir_next = (ir_instruction*)ir_after->get_next();
                     ir_after->insert_before(&ir_if->then_instructions);
                     ir_after->remove();
                     ir_after = ir_next;
                     continue;
                  }
               }
               node = ir_after->get_next();

               /* only set this if we find any unprotected instruction */
               this->progress = true;
            }

            /* Then, wrap all the instructions that follow in a single
             * guard.
             */
            if(!ir->get_next()->is_tail_sentinel()) {
               assert(this->loop.execute_flag);
               ir_if* if_execute = new(ir) ir_if(new(ir) ir_dereference_variable(this->loop.execute_flag));
               move_outer_block_inside(ir, &if_execute->then_instructions);
               ir->insert_after(if_execute);
            }
         }
      }
      --this->loop.nesting_depth;
      --this->function.nesting_depth;
   }

   virtual void visit(ir_loop *ir)
   {
      /* Visit the body of the loop, with a fresh data structure in
       * this->loop so that the analysis we do here won't bleed into
       * enclosing loops.
       *
       * We assume that all code after a loop is reachable from the
       * loop (see comments on enum jump_strength), so the
       * DEAD_CODE_ELIMINATION postcondition is automatically
       * satisfied, as is the block.min_strength portion of the
       * ANALYSIS postcondition.
       *
       * The block.may_clear_execute_flag portion of the ANALYSIS
       * postcondition is automatically satisfied because execute
       * flags do not propagate outside of loops.
       *
       * The loop.may_set_return_flag portion of the ANALYSIS
       * postcondition is handled below.
       */
      ++this->function.nesting_depth;
      loop_record saved_loop = this->loop;
      this->loop = loop_record(this->function.signature, ir);

      /* Recursively lower nested jumps.  This satisfies the
       * CONTAINED_JUMPS_LOWERED postcondition, except in the case of
       * an unconditional continue or return at the bottom of the
       * loop, which are handled below.
       */
      block_record body = visit_block(&ir->body_instructions);

      /* If the loop ends in an unconditional continue, eliminate it
       * because it is redundant.
       */
      ir_instruction *ir_last
         = (ir_instruction *) ir->body_instructions.get_tail();
      if (get_jump_strength(ir_last) == strength_continue) {
         ir_last->remove();
      }

      if(body.min_strength >= strength_break) {
         /* FINISHME: If the min_strength of the loop body is
          * strength_break or strength_return, that means that it
          * isn't a loop at all, since control flow always leaves the
          * body of the loop via break or return.  In principle the
          * loop could be eliminated in this case.  This optimization
          * is not implemented yet.
          */
      }

      /* If the body of the loop may set the return flag, then at
       * least one return was lowered to a break, so we need to ensure
       * that the return flag is checked after the body of the loop is
       * executed.
       */
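      /* Illustrative sketch of the structure generated below for a loop
       * nested inside another loop:
       *
       *    loop {
       *       ...
       *    }
       *    if (return_flag)
       *       break;
       *
       * For an outermost loop, the instructions that follow are moved
       * into the else branch of that if instead.
       */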
      if(this->loop.may_set_return_flag) {
         assert(this->function.return_flag);
         /* Generate the if statement to check the return flag */
         ir_if* return_if = new(ir) ir_if(new(ir) ir_dereference_variable(this->function.return_flag));
         /* Note: we also need to propagate the knowledge that the
          * return flag may get set to the outer context.  This
          * satisfies the loop.may_set_return_flag part of the
          * ANALYSIS postcondition.
          */
         saved_loop.may_set_return_flag = true;
         if(saved_loop.loop)
            /* If this loop is nested inside another one, then the if
             * statement that we generated should break out of that
             * loop if the return flag is set.  Caller will lower that
             * break statement if necessary.
             */
            return_if->then_instructions.push_tail(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
         else {
            /* Otherwise, ensure that the instructions that follow are only
             * executed if the return flag is clear.  We can do that by moving
             * those instructions into the else clause of the generated if
             * statement.
             */
            move_outer_block_inside(ir, &return_if->else_instructions);

            /* In case the loop is embedded inside an if, add a new return
             * (guarded by the return flag) to the then branch, and let a
             * future pass tidy it up.
             */
            if (glsl_type_is_void(this->function.signature->return_type))
               return_if->then_instructions.push_tail(new(ir) ir_return(NULL));
            else {
               assert(this->function.return_value);
               ir_variable* return_value = this->function.return_value;
               return_if->then_instructions.push_tail(
                  new(ir) ir_return(new(ir) ir_dereference_variable(return_value)));
            }
         }

         ir->insert_after(return_if);
      }

      this->loop = saved_loop;
      --this->function.nesting_depth;
   }

   virtual void visit(ir_function_signature *ir)
   {
      /* these are not strictly necessary */
      assert(!this->function.signature);
      assert(!this->loop.loop);

      function_record saved_function = this->function;
      loop_record saved_loop = this->loop;
      this->function = function_record(ir);
      this->loop = loop_record(ir);

      assert(!this->loop.loop);

      /* Visit the body of the function to lower any jumps that occur
       * in it, except possibly an unconditional return statement at
       * the end of it.
       */
      visit_block(&ir->body);

      /* If the body ended in an unconditional return of non-void,
       * then we don't need to lower it because it's the one canonical
       * return.
       *
       * If the body ended in a return of void, eliminate it because
       * it is redundant.
       */
      if (glsl_type_is_void(ir->return_type) &&
          get_jump_strength((ir_instruction *) ir->body.get_tail())) {
         ir_jump *jump = (ir_jump *) ir->body.get_tail();
         assert(jump->ir_type == ir_type_return);
         jump->remove();
      }

      if(this->function.return_value)
         ir->body.push_tail(new(ir) ir_return(new (ir) ir_dereference_variable(this->function.return_value)));

      this->loop = saved_loop;
      this->function = saved_function;
   }

   virtual void visit(class ir_function * ir)
   {
      visit_block(&ir->signatures);
   }
};

} /* anonymous namespace */

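/**
 * Entry point for the pass.  Runs the lowering visitor over
 * \p instructions repeatedly until it stops making progress.
 *
 * \param pull_out_jumps  move/unify jumps that terminate both branches
 *                        of an if out of the if, where possible
 * \param lower_continue  replace "continue" with execute-flag logic
 *
 * \return true if any change was made to \p instructions
 */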
bool
do_lower_jumps(exec_list *instructions, bool pull_out_jumps, bool lower_continue)
{
   ir_lower_jumps_visitor v;
   v.pull_out_jumps = pull_out_jumps;
   v.lower_continue = lower_continue;

   bool progress_ever = false;
   do {
      v.progress = false;
      visit_exec_list_safe(instructions, &v);
      progress_ever = v.progress || progress_ever;
   } while (v.progress);

   return progress_ever;
}