/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */
27
28 #include "nir.h"
29 #include "nir_control_flow.h"
30
/*
 * This file implements an optimization that deletes statically
 * unreachable/dead code. In NIR, one way this can happen is when an if
 * statement has a constant condition:
 *
 * if (true) {
 *    ...
 * }
 *
 * We delete the if statement and paste the contents of the always-executed
 * branch into the surrounding control flow, possibly removing more code if
 * the branch had a jump at the end.
 *
 * Another way is that control flow can end in a jump so that code after it
 * never gets executed. In particular, this can happen after optimizing
 * something like:
 *
 * if (true) {
 *    ...
 *    break;
 * }
 * ...
 *
 * We also consider the case where both branches of an if end in a jump, e.g.:
 *
 * if (...) {
 *    break;
 * } else {
 *    continue;
 * }
 * ...
 *
 * Finally, we also handle removing useless loops and ifs, i.e. loops and ifs
 * with no side effects and without any definitions that are used
 * elsewhere. This case is a little different from the first two in that the
 * code is actually run (it just never does anything), but there are similar
 * issues with needing to be careful with restarting after deleting the
 * cf_node (see dead_cf_list()) so this is a convenient place to remove them.
 */
70
71 static void
remove_after_cf_node(nir_cf_node * node)72 remove_after_cf_node(nir_cf_node *node)
73 {
74 nir_cf_node *end = node;
75 while (!nir_cf_node_is_last(end))
76 end = nir_cf_node_next(end);
77
78 nir_cf_list list;
79 nir_cf_extract(&list, nir_after_cf_node(node), nir_after_cf_node(end));
80 nir_cf_delete(&list);
81 }
82
/* Replaces an if whose condition is known to be constant with the contents
 * of the branch that is actually taken.
 *
 * \param if_stmt    the if with a constant condition
 * \param condition  the constant value of the condition; true selects the
 *                   then-branch, false the else-branch
 *
 * Order matters here: phis after the if must be lowered before the branch is
 * spliced out, and unreachable trailing code must be removed before the
 * reinsert so the validator sees a well-formed CFG.
 */
static void
opt_constant_if(nir_if *if_stmt, bool condition)
{
   /* First, we need to remove any phi nodes after the if by rewriting uses to
    * point to the correct source.
    */
   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(&if_stmt->cf_node));
   /* The last block of the surviving branch is the only predecessor of
    * "after" that will remain, so its phi source is the value that flows out.
    */
   nir_block *last_block = condition ? nir_if_last_then_block(if_stmt)
                                     : nir_if_last_else_block(if_stmt);

   nir_foreach_instr_safe(instr, after) {
      /* Phis are always at the start of a block, so stop at the first
       * non-phi instruction.
       */
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      nir_ssa_def *def = NULL;
      /* Find the phi source coming from the taken branch. */
      nir_foreach_phi_src(phi_src, phi) {
         if (phi_src->pred != last_block)
            continue;

         assert(phi_src->src.is_ssa);
         def = phi_src->src.ssa;
      }

      assert(def);
      assert(phi->dest.is_ssa);
      nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
      nir_instr_remove(instr);
   }

   /* The control flow list we're about to paste in may include a jump at the
    * end, and in that case we have to delete the rest of the control flow
    * list after the if since it's unreachable and the validator will balk if
    * we don't.
    */

   if (!exec_list_is_empty(&last_block->instr_list)) {
      nir_instr *last_instr = nir_block_last_instr(last_block);
      if (last_instr->type == nir_instr_type_jump)
         remove_after_cf_node(&if_stmt->cf_node);
   }

   /* Finally, actually paste in the then or else branch and delete the if. */
   struct exec_list *cf_list = condition ? &if_stmt->then_list
                                         : &if_stmt->else_list;

   nir_cf_list list;
   nir_cf_list_extract(&list, cf_list);
   nir_cf_reinsert(&list, nir_after_cf_node(&if_stmt->cf_node));
   nir_cf_node_remove(&if_stmt->cf_node);
}
134
135 static bool
def_only_used_in_cf_node(nir_ssa_def * def,void * _node)136 def_only_used_in_cf_node(nir_ssa_def *def, void *_node)
137 {
138 nir_cf_node *node = _node;
139 assert(node->type == nir_cf_node_loop || node->type == nir_cf_node_if);
140
141 nir_block *before = nir_cf_node_as_block(nir_cf_node_prev(node));
142 nir_block *after = nir_cf_node_as_block(nir_cf_node_next(node));
143
144 nir_foreach_use(use, def) {
145 /* Because NIR is structured, we can easily determine whether or not a
146 * value escapes a CF node by looking at the block indices of its uses
147 * to see if they lie outside the bounds of the CF node.
148 *
149 * Note: Normally, the uses of a phi instruction are considered to be
150 * used in the block that is the predecessor of the phi corresponding to
151 * that use. If we were computing liveness or something similar, that
152 * would mean a special case here for phis. However, we're trying here
153 * to determine if the SSA def ever escapes the loop. If it's used by a
154 * phi that lives outside the loop then it doesn't matter if the
155 * corresponding predecessor is inside the loop or not because the value
156 * can go through the phi into the outside world and escape the loop.
157 */
158 if (use->parent_instr->block->index <= before->index ||
159 use->parent_instr->block->index >= after->index)
160 return false;
161 }
162
163 /* Same check for if-condition uses */
164 nir_foreach_if_use(use, def) {
165 nir_block *use_block =
166 nir_cf_node_as_block(nir_cf_node_prev(&use->parent_if->cf_node));
167
168 if (use_block->index <= before->index ||
169 use_block->index >= after->index)
170 return false;
171 }
172
173 return true;
174 }
175
/*
 * Test if a loop node is dead. Such nodes are dead if:
 *
 * 1) It has no side effects (i.e. intrinsics which could possibly affect the
 * state of the program aside from producing an SSA value, indicated by a lack
 * of NIR_INTRINSIC_CAN_ELIMINATE).
 *
 * 2) It has no phi instructions after it, since those indicate values inside
 * the node being used after the node.
 *
 * 3) None of the values defined inside the node is used outside the node,
 * i.e. none of the definitions that dominate the node exit are used outside.
 *
 * If those conditions hold, then the node is dead and can be deleted.
 */

static bool
node_is_dead(nir_cf_node *node)
{
   /* Only loops are handled; dead_cf_block() never passes anything else. */
   assert(node->type == nir_cf_node_loop);

   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(node));

   /* Quick check if there are any phis that follow this CF node. If there
    * are, then we automatically know it isn't dead.
    */
   if (!exec_list_is_empty(&after->instr_list) &&
       nir_block_first_instr(after)->type == nir_instr_type_phi)
      return false;

   /* def_only_used_in_cf_node() relies on up-to-date block indices. */
   nir_function_impl *impl = nir_cf_node_get_function(node);
   nir_metadata_require(impl, nir_metadata_block_index);

   nir_foreach_block_in_cf_node(block, node) {
      /* NOTE(review): given the assert above, inside_loop is always true and
       * the parent walk below never runs; this looks like a leftover from
       * when this function also handled if nodes — confirm before relying
       * on the !inside_loop case.
       */
      bool inside_loop = node->type == nir_cf_node_loop;
      for (nir_cf_node *n = &block->cf_node;
           !inside_loop && n != node; n = n->parent) {
         if (n->type == nir_cf_node_loop)
            inside_loop = true;
      }

      nir_foreach_instr(instr, block) {
         /* Calls may have arbitrary side effects. */
         if (instr->type == nir_instr_type_call)
            return false;

         /* Return and halt instructions can cause us to skip over other
          * side-effecting instructions after the loop, so consider them to
          * have side effects here.
          *
          * When the block is not inside a loop, break and continue might also
          * cause a skip.
          */
         if (instr->type == nir_instr_type_jump &&
             (!inside_loop ||
              nir_instr_as_jump(instr)->type == nir_jump_return ||
              nir_instr_as_jump(instr)->type == nir_jump_halt))
            return false;

         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            /* Intrinsics without CAN_ELIMINATE have side effects. */
            if (!(nir_intrinsic_infos[intrin->intrinsic].flags &
                  NIR_INTRINSIC_CAN_ELIMINATE))
               return false;

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_load_ssbo:
            case nir_intrinsic_load_global:
               /* If there's a memory barrier after the loop, a load might be
                * required to happen before some other instruction after the
                * barrier, so it is not valid to eliminate it -- unless we
                * know we can reorder it.
                *
                * Consider only loads that the result can be affected by other
                * invocations.
                */
               if (intrin->intrinsic == nir_intrinsic_load_deref) {
                  /* Derefs of purely invocation-local storage are safe. */
                  nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
                  if (!nir_deref_mode_may_be(deref, nir_var_mem_ssbo |
                                                       nir_var_mem_shared |
                                                       nir_var_mem_global |
                                                       nir_var_shader_out))
                     break;
               }
               if (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER)
                  break;
               return false;

            case nir_intrinsic_load_shared:
            case nir_intrinsic_load_output:
            case nir_intrinsic_load_per_vertex_output:
               /* Same as above loads. */
               return false;

            default:
               /* Do nothing. */
               break;
            }
         }

         /* Condition 3: no SSA def may escape the node. */
         if (!nir_foreach_ssa_def(instr, def_only_used_in_cf_node, node))
            return false;
      }
   }

   return true;
}
283
284 static bool
dead_cf_block(nir_block * block)285 dead_cf_block(nir_block *block)
286 {
287 nir_if *following_if = nir_block_get_following_if(block);
288 if (following_if) {
289 if (!nir_src_is_const(following_if->condition))
290 return false;
291
292 opt_constant_if(following_if, nir_src_as_bool(following_if->condition));
293 return true;
294 }
295
296 nir_loop *following_loop = nir_block_get_following_loop(block);
297 if (!following_loop)
298 return false;
299
300 if (!node_is_dead(&following_loop->cf_node))
301 return false;
302
303 nir_cf_node_remove(&following_loop->cf_node);
304 return true;
305 }
306
/* Walks one CF list, removing dead control flow as it goes.
 *
 * \param list               the CF list (function body, if branch, or loop
 *                           body) to process
 * \param list_ends_in_jump  out: set to true if the list is statically known
 *                           to end in a jump, so the caller can treat
 *                           anything after it as unreachable
 *
 * Returns true if any progress was made.  Note that the function returns
 * immediately after calling remove_after_cf_node(), because that call
 * invalidates the iteration state; the caller (opt_dead_cf_impl) re-runs the
 * pass until it settles elsewhere in the optimization loop.
 */
static bool
dead_cf_list(struct exec_list *list, bool *list_ends_in_jump)
{
   bool progress = false;
   *list_ends_in_jump = false;

   /* Track the previous node so we can recover our position after
    * dead_cf_block() deletes CF nodes around "cur".
    */
   nir_cf_node *prev = NULL;

   foreach_list_typed(nir_cf_node, cur, node, list) {
      switch (cur->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(cur);
         if (dead_cf_block(block)) {
            /* We just deleted the if or loop after this block, so we may have
             * deleted the block before or after it -- which one is an
             * implementation detail. Therefore, to recover the place we were
             * at, we have to use the previous cf_node.
             */

            if (prev) {
               cur = nir_cf_node_next(prev);
            } else {
               /* No previous node: restart from the head of the list. */
               cur = exec_node_data(nir_cf_node, exec_list_get_head(list),
                                    node);
            }

            block = nir_cf_node_as_block(cur);

            progress = true;
         }

         if (nir_block_ends_in_jump(block)) {
            *list_ends_in_jump = true;

            /* Anything after the jump is unreachable; delete it. */
            if (!exec_node_is_tail_sentinel(cur->node.next)) {
               remove_after_cf_node(cur);
               return true;
            }
         }

         break;
      }

      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(cur);
         bool then_ends_in_jump, else_ends_in_jump;
         progress |= dead_cf_list(&if_stmt->then_list, &then_ends_in_jump);
         progress |= dead_cf_list(&if_stmt->else_list, &else_ends_in_jump);

         /* If both branches jump away, nothing after the if can run. */
         if (then_ends_in_jump && else_ends_in_jump) {
            *list_ends_in_jump = true;
            nir_block *next = nir_cf_node_as_block(nir_cf_node_next(cur));
            /* Only report progress if there is actually something to
             * delete after the if.
             */
            if (!exec_list_is_empty(&next->instr_list) ||
                !exec_node_is_tail_sentinel(next->cf_node.node.next)) {
               remove_after_cf_node(cur);
               return true;
            }
         }

         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cur);
         bool dummy;
         progress |= dead_cf_list(&loop->body, &dummy);

         /* A loop whose successor block has no predecessors never exits
          * (infinite loop), so everything after it is unreachable.
          */
         nir_block *next = nir_cf_node_as_block(nir_cf_node_next(cur));
         if (next->predecessors->entries == 0 &&
             (!exec_list_is_empty(&next->instr_list) ||
              !exec_node_is_tail_sentinel(next->cf_node.node.next))) {
            remove_after_cf_node(cur);
            return true;
         }
         break;
      }

      default:
         unreachable("unknown cf node type");
      }

      prev = cur;
   }

   return progress;
}
393
394 static bool
opt_dead_cf_impl(nir_function_impl * impl)395 opt_dead_cf_impl(nir_function_impl *impl)
396 {
397 bool dummy;
398 bool progress = dead_cf_list(&impl->body, &dummy);
399
400 if (progress) {
401 nir_metadata_preserve(impl, nir_metadata_none);
402
403 /* The CF manipulation code called by this pass is smart enough to keep
404 * from breaking any SSA use/def chains by replacing any uses of removed
405 * instructions with SSA undefs. However, it's not quite smart enough
406 * to always preserve the dominance properties. In particular, if you
407 * remove the one break from a loop, stuff in the loop may still be used
408 * outside the loop even though there's no path between the two. We can
409 * easily fix these issues by calling nir_repair_ssa which will ensure
410 * that the dominance properties hold.
411 */
412 nir_repair_ssa_impl(impl);
413 } else {
414 nir_metadata_preserve(impl, nir_metadata_all);
415 }
416
417 return progress;
418 }
419
420 bool
nir_opt_dead_cf(nir_shader * shader)421 nir_opt_dead_cf(nir_shader *shader)
422 {
423 bool progress = false;
424
425 nir_foreach_function(function, shader)
426 if (function->impl)
427 progress |= opt_dead_cf_impl(function->impl);
428
429 return progress;
430 }
431