• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2014 Connor Abbott
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Connor Abbott (cwabbott0@gmail.com)
25  *
26  */
27 
28 #include "nir.h"
29 #include "nir_control_flow.h"
30 
31 /*
32  * This file implements an optimization that deletes statically
33  * unreachable/dead code. In NIR, one way this can happen is when an if
34  * statement has a constant condition:
35  *
36  * if (true) {
37  *    ...
38  * }
39  *
40  * We delete the if statement and paste the contents of the always-executed
41  * branch into the surrounding control flow, possibly removing more code if
42  * the branch had a jump at the end.
43  *
44  * Another way is that control flow can end in a jump so that code after it
45  * never gets executed. In particular, this can happen after optimizing
46  * something like:
47  *
48  * if (true) {
49  *    ...
50  *    break;
51  * }
52  * ...
53  *
54  * We also consider the case where both branches of an if end in a jump, e.g.:
55  *
56  * if (...) {
57  *    break;
58  * } else {
59  *    continue;
60  * }
61  * ...
62  *
63  * Finally, we also handle removing useless loops and ifs, i.e. loops and ifs
64  * with no side effects and without any definitions that are used
65  * elsewhere. This case is a little different from the first two in that the
66  * code is actually run (it just never does anything), but there are similar
67  * issues with needing to be careful with restarting after deleting the
68  * cf_node (see dead_cf_list()) so this is a convenient place to remove them.
69  */
70 
71 static void
remove_after_cf_node(nir_cf_node * node)72 remove_after_cf_node(nir_cf_node *node)
73 {
74    nir_cf_node *end = node;
75    while (!nir_cf_node_is_last(end))
76       end = nir_cf_node_next(end);
77 
78    nir_cf_list list;
79    nir_cf_extract(&list, nir_after_cf_node(node), nir_after_cf_node(end));
80    nir_cf_delete(&list);
81 }
82 
83 static void
opt_constant_if(nir_if * if_stmt,bool condition)84 opt_constant_if(nir_if *if_stmt, bool condition)
85 {
86    nir_block *last_block = condition ? nir_if_last_then_block(if_stmt)
87                                      : nir_if_last_else_block(if_stmt);
88 
89    /* The control flow list we're about to paste in may include a jump at the
90     * end, and in that case we have to delete the rest of the control flow
91     * list after the if since it's unreachable and the validator will balk if
92     * we don't.
93     */
94 
95    if (nir_block_ends_in_jump(last_block)) {
96       remove_after_cf_node(&if_stmt->cf_node);
97    } else {
98       /* Remove any phi nodes after the if by rewriting uses to point to the
99        * correct source.
100        */
101       nir_block *after = nir_cf_node_as_block(nir_cf_node_next(&if_stmt->cf_node));
102       nir_foreach_phi_safe(phi, after) {
103          nir_def *def = NULL;
104          nir_foreach_phi_src(phi_src, phi) {
105             if (phi_src->pred != last_block)
106                continue;
107 
108             def = phi_src->src.ssa;
109          }
110 
111          assert(def);
112          nir_def_rewrite_uses(&phi->def, def);
113          nir_instr_remove(&phi->instr);
114       }
115    }
116 
117    /* Finally, actually paste in the then or else branch and delete the if. */
118    struct exec_list *cf_list = condition ? &if_stmt->then_list
119                                          : &if_stmt->else_list;
120 
121    nir_cf_list list;
122    nir_cf_list_extract(&list, cf_list);
123    nir_cf_reinsert(&list, nir_after_cf_node(&if_stmt->cf_node));
124    nir_cf_node_remove(&if_stmt->cf_node);
125 }
126 
127 static bool
def_only_used_in_cf_node(nir_def * def,void * _node)128 def_only_used_in_cf_node(nir_def *def, void *_node)
129 {
130    nir_cf_node *node = _node;
131    assert(node->type == nir_cf_node_loop || node->type == nir_cf_node_if);
132 
133    nir_block *before = nir_cf_node_as_block(nir_cf_node_prev(node));
134    nir_block *after = nir_cf_node_as_block(nir_cf_node_next(node));
135 
136    nir_foreach_use_including_if(use, def) {
137       nir_block *block;
138 
139       if (nir_src_is_if(use))
140          block = nir_cf_node_as_block(nir_cf_node_prev(&nir_src_parent_if(use)->cf_node));
141       else
142          block = nir_src_parent_instr(use)->block;
143 
144       /* Because NIR is structured, we can easily determine whether or not a
145        * value escapes a CF node by looking at the block indices of its uses
146        * to see if they lie outside the bounds of the CF node.
147        *
148        * Note: Normally, the uses of a phi instruction are considered to be
149        * used in the block that is the predecessor of the phi corresponding to
150        * that use.  If we were computing liveness or something similar, that
151        * would mean a special case here for phis.  However, we're trying here
152        * to determine if the SSA def ever escapes the loop.  If it's used by a
153        * phi that lives outside the loop then it doesn't matter if the
154        * corresponding predecessor is inside the loop or not because the value
155        * can go through the phi into the outside world and escape the loop.
156        */
157       if (block->index <= before->index || block->index >= after->index)
158          return false;
159    }
160 
161    return true;
162 }
163 
164 /*
165  * Test if a loop or if node is dead. Such nodes are dead if:
166  *
167  * 1) It has no side effects (i.e. intrinsics which could possibly affect the
168  * state of the program aside from producing an SSA value, indicated by a lack
169  * of NIR_INTRINSIC_CAN_ELIMINATE).
170  *
171  * 2) It has no phi instructions after it, since those indicate values inside
172  * the node being used after the node.
173  *
174  * 3) None of the values defined inside the node is used outside the node,
175  * i.e. none of the definitions that dominate the node exit are used outside.
176  *
177  * If those conditions hold, then the node is dead and can be deleted.
178  */
179 
static bool
node_is_dead(nir_cf_node *node)
{
   assert(node->type == nir_cf_node_loop || node->type == nir_cf_node_if);

   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(node));

   /* Quick check if there are any phis that follow this CF node.  If there
    * are, then we automatically know it isn't dead.
    */
   if (!exec_list_is_empty(&after->instr_list) &&
       nir_block_first_instr(after)->type == nir_instr_type_phi)
      return false;

   /* def_only_used_in_cf_node() below compares block indices, so they must
    * be up to date.
    */
   nir_function_impl *impl = nir_cf_node_get_function(node);
   nir_metadata_require(impl, nir_metadata_block_index);

   nir_foreach_block_in_cf_node(block, node) {
      /* Determine whether this block sits inside some loop nested within
       * @node (or @node itself is a loop).  A jump inside a loop only moves
       * around within the node, but a break/continue NOT inside a loop would
       * escape the node entirely — see the jump check below.
       */
      bool inside_loop = node->type == nir_cf_node_loop;
      for (nir_cf_node *n = &block->cf_node;
           !inside_loop && n != node; n = n->parent) {
         if (n->type == nir_cf_node_loop)
            inside_loop = true;
      }

      nir_foreach_instr(instr, block) {
         /* Calls may have arbitrary side effects. */
         if (instr->type == nir_instr_type_call)
            return false;

         /* Return and halt instructions can cause us to skip over other
          * side-effecting instructions after the loop, so consider them to
          * have side effects here.
          *
          * When the block is not inside a loop, break and continue might also
          * cause a skip.
          */
         if (instr->type == nir_instr_type_jump &&
             (!inside_loop ||
              nir_instr_as_jump(instr)->type == nir_jump_return ||
              nir_instr_as_jump(instr)->type == nir_jump_halt))
            return false;

         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            /* Anything that can't be eliminated has side effects. */
            if (!(nir_intrinsic_infos[intrin->intrinsic].flags &
                  NIR_INTRINSIC_CAN_ELIMINATE))
               return false;

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_load_ssbo:
            case nir_intrinsic_load_global:
               /* If there's a memory barrier after the loop, a load might be
                * required to happen before some other instruction after the
                * barrier, so it is not valid to eliminate it -- unless we
                * know we can reorder it.
                *
                * Consider only loads that the result can be affected by other
                * invocations.
                */
               if (intrin->intrinsic == nir_intrinsic_load_deref) {
                  /* Derefs of purely invocation-private storage can never be
                   * affected by other invocations, so they are safe.
                   */
                  nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
                  if (!nir_deref_mode_may_be(deref, nir_var_mem_ssbo |
                                                       nir_var_mem_shared |
                                                       nir_var_mem_global |
                                                       nir_var_shader_out))
                     break;
               }
               if (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER)
                  break;
               return false;

            case nir_intrinsic_load_shared:
            case nir_intrinsic_load_shared2_amd:
            case nir_intrinsic_load_output:
            case nir_intrinsic_load_per_vertex_output:
               /* Same as above loads. */
               return false;

            default:
               /* Do nothing. */
               break;
            }
         }

         /* Any value escaping the node means deleting it would be observed. */
         if (!nir_foreach_def(instr, def_only_used_in_cf_node, node))
            return false;
      }
   }

   return true;
}
272 
273 static bool
dead_cf_block(nir_block * block)274 dead_cf_block(nir_block *block)
275 {
276    /* opt_constant_if() doesn't handle this case. */
277    if (nir_block_ends_in_jump(block) &&
278        !exec_node_is_tail_sentinel(block->cf_node.node.next)) {
279       remove_after_cf_node(&block->cf_node);
280       return true;
281    }
282 
283    nir_if *following_if = nir_block_get_following_if(block);
284    if (following_if) {
285       if (nir_src_is_const(following_if->condition)) {
286          opt_constant_if(following_if, nir_src_as_bool(following_if->condition));
287          return true;
288       } else if (nir_src_is_undef(following_if->condition)) {
289          opt_constant_if(following_if, false);
290          return true;
291       }
292 
293       if (node_is_dead(&following_if->cf_node)) {
294          nir_cf_node_remove(&following_if->cf_node);
295          return true;
296       }
297    }
298 
299    nir_loop *following_loop = nir_block_get_following_loop(block);
300    if (!following_loop)
301       return false;
302 
303    if (!node_is_dead(&following_loop->cf_node))
304       return false;
305 
306    nir_cf_node_remove(&following_loop->cf_node);
307    return true;
308 }
309 
/* Recursively removes dead control flow from @list.  Sets *list_ends_in_jump
 * when the list is terminated by a jump (directly, or via an if whose two
 * branches both jump), so the caller can prune what follows.  Returns true on
 * progress.
 */
static bool
dead_cf_list(struct exec_list *list, bool *list_ends_in_jump)
{
   bool progress = false;
   *list_ends_in_jump = false;

   /* Tracks the node before `cur` so iteration can resume after a deletion
    * invalidates `cur` — see the comment inside the block case.
    */
   nir_cf_node *prev = NULL;

   foreach_list_typed(nir_cf_node, cur, node, list) {
      switch (cur->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(cur);
         /* Repeat until no more simplifications apply at this position. */
         while (dead_cf_block(block)) {
            /* We just deleted the if or loop after this block.
             * nir_cf_node_remove may have deleted the block before
             * or after it -- which one is an implementation detail.
             * Therefore, to recover the place we were at, we have
             * to use the previous cf_node.
             */

            if (prev) {
               cur = nir_cf_node_next(prev);
            } else {
               cur = exec_node_data(nir_cf_node, exec_list_get_head(list),
                                    node);
            }

            block = nir_cf_node_as_block(cur);

            progress = true;
         }

         /* dead_cf_block() already trimmed anything after a jump, so a
          * jump-terminated block must now be the last node in the list.
          */
         if (nir_block_ends_in_jump(block)) {
            assert(exec_node_is_tail_sentinel(cur->node.next));
            *list_ends_in_jump = true;
         }

         break;
      }

      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(cur);
         bool then_ends_in_jump, else_ends_in_jump;
         progress |= dead_cf_list(&if_stmt->then_list, &then_ends_in_jump);
         progress |= dead_cf_list(&if_stmt->else_list, &else_ends_in_jump);

         /* If both branches jump, nothing after the if can run; delete the
          * tail unless it is already just a single empty trailing block.
          * Return immediately because `cur` may no longer be valid.
          */
         if (then_ends_in_jump && else_ends_in_jump) {
            *list_ends_in_jump = true;
            nir_block *next = nir_cf_node_as_block(nir_cf_node_next(cur));
            if (!exec_list_is_empty(&next->instr_list) ||
                !exec_node_is_tail_sentinel(next->cf_node.node.next)) {
               remove_after_cf_node(cur);
               return true;
            }
         }

         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cur);
         assert(!nir_loop_has_continue_construct(loop));
         bool dummy;
         progress |= dead_cf_list(&loop->body, &dummy);

         /* An infinite loop (no predecessors on the block after it) makes
          * everything downstream unreachable — prune it, unless the tail is
          * already just one empty block.
          */
         nir_block *next = nir_cf_node_as_block(nir_cf_node_next(cur));
         if (next->predecessors->entries == 0 &&
             (!exec_list_is_empty(&next->instr_list) ||
              !exec_node_is_tail_sentinel(next->cf_node.node.next))) {
            remove_after_cf_node(cur);
            return true;
         }
         break;
      }

      default:
         unreachable("unknown cf node type");
      }

      prev = cur;
   }

   return progress;
}
394 
395 static bool
opt_dead_cf_impl(nir_function_impl * impl)396 opt_dead_cf_impl(nir_function_impl *impl)
397 {
398    bool dummy;
399    bool progress = dead_cf_list(&impl->body, &dummy);
400 
401    if (progress) {
402       nir_metadata_preserve(impl, nir_metadata_none);
403       nir_rematerialize_derefs_in_use_blocks_impl(impl);
404 
405       /* The CF manipulation code called by this pass is smart enough to keep
406        * from breaking any SSA use/def chains by replacing any uses of removed
407        * instructions with SSA undefs.  However, it's not quite smart enough
408        * to always preserve the dominance properties.  In particular, if you
409        * remove the one break from a loop, stuff in the loop may still be used
410        * outside the loop even though there's no path between the two.  We can
411        * easily fix these issues by calling nir_repair_ssa which will ensure
412        * that the dominance properties hold.
413        */
414       nir_repair_ssa_impl(impl);
415    } else {
416       nir_metadata_preserve(impl, nir_metadata_all);
417    }
418 
419    return progress;
420 }
421 
422 bool
nir_opt_dead_cf(nir_shader * shader)423 nir_opt_dead_cf(nir_shader *shader)
424 {
425    bool progress = false;
426 
427    nir_foreach_function_impl(impl, shader)
428       progress |= opt_dead_cf_impl(impl);
429 
430    return progress;
431 }
432