/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

#include "nir.h"
#include "nir_worklist.h"
#include "nir_vla.h"

/*
 * Basic liveness analysis.  This works only in SSA form.
 *
 * This liveness pass treats phi nodes as being melded to the space between
 * blocks, so that the destinations of the phis are in the live-in of the
 * block in which they reside and the sources are in the live-out of the
 * corresponding predecessor blocks.  By formulating the liveness information
 * in this way, we ensure that the definition of any variable dominates its
 * entire live range.  This is true because the only way the definition of an
 * SSA value may not dominate a use is if the use is in a phi node, and the
 * uses in a phi node are in the live-out of the corresponding predecessor
 * block but not in the live-in of the block containing the phi node.
 */
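
/* As a small illustration of the convention above (hypothetical CFG, not
 * drawn from any particular shader):
 *
 *    block A: a = ...          block B: b = ...
 *            \                        /
 *             +------> block C <-----+
 *                      c = phi(a from A, b from B)
 *                      use(c)
 *
 * Here c appears in the live-in of C, a appears in the live-out of A (but
 * not of B), and b appears in the live-out of B (but not of A), so every
 * definition dominates all blocks in which its value is live.
 */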

struct live_ssa_defs_state {
   unsigned bitset_words;

   /* Used in propagate_across_edge() */
   BITSET_WORD *tmp_live;

   nir_block_worklist worklist;
};

/* Initialize the liveness data to zero and add the given block to the
 * worklist.
 */
static void
init_liveness_block(nir_block *block,
                    struct live_ssa_defs_state *state)
{
   block->live_in = reralloc(block, block->live_in, BITSET_WORD,
                             state->bitset_words);
   memset(block->live_in, 0, state->bitset_words * sizeof(BITSET_WORD));

   block->live_out = reralloc(block, block->live_out, BITSET_WORD,
                              state->bitset_words);
   memset(block->live_out, 0, state->bitset_words * sizeof(BITSET_WORD));

   nir_block_worklist_push_head(&state->worklist, block);
}

static bool
set_src_live(nir_src *src, void *void_live)
{
   BITSET_WORD *live = void_live;

   if (!src->is_ssa)
      return true;

   if (nir_src_is_undef(*src))
      return true;   /* undefined variables are never live */

   BITSET_SET(live, src->ssa->index);

   return true;
}

static bool
set_ssa_def_dead(nir_ssa_def *def, void *void_live)
{
   BITSET_WORD *live = void_live;

   BITSET_CLEAR(live, def->index);

   return true;
}

/** Propagates the live in of succ across the edge to the live out of pred
 *
 * Phi nodes exist "between" blocks and all the phi nodes at the start of a
 * block act "in parallel".  When we propagate from the live_in of one
 * block to the live_out of the other, we have to kill any writes from phis
 * and make live any sources.
 *
 * Returns true if updating the live out of pred added anything.
 */
static bool
propagate_across_edge(nir_block *pred, nir_block *succ,
                      struct live_ssa_defs_state *state)
{
   BITSET_WORD *live = state->tmp_live;
   memcpy(live, succ->live_in, state->bitset_words * sizeof *live);

   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;
      nir_phi_instr *phi = nir_instr_as_phi(instr);

      assert(phi->dest.is_ssa);
      set_ssa_def_dead(&phi->dest.ssa, live);
   }

   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;
      nir_phi_instr *phi = nir_instr_as_phi(instr);

      nir_foreach_phi_src(src, phi) {
         if (src->pred == pred) {
            set_src_live(&src->src, live);
            break;
         }
      }
   }

   BITSET_WORD progress = 0;
   for (unsigned i = 0; i < state->bitset_words; ++i) {
      progress |= live[i] & ~pred->live_out[i];
      pred->live_out[i] |= live[i];
   }
   return progress != 0;
}
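
/* To illustrate propagate_across_edge() with the hypothetical phi
 *
 *    c = phi(a from pred, b from other_pred)
 *
 * at the top of succ: the copy of succ->live_in first has c cleared (the phi
 * write happens "on the edge"), then a set because it is the source reaching
 * succ from pred; b is left alone because it only matters on the edge from
 * other_pred.  The result is OR'ed into pred->live_out, and any newly set
 * bits count as progress.
 */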

void
nir_live_ssa_defs_impl(nir_function_impl *impl)
{
   struct live_ssa_defs_state state = {
      .bitset_words = BITSET_WORDS(impl->ssa_alloc),
   };
   state.tmp_live = rzalloc_array(impl, BITSET_WORD, state.bitset_words);

   /* Number the instructions so we can do cheap interference tests using the
    * instruction index.
    */
   nir_metadata_require(impl, nir_metadata_instr_index);

   nir_block_worklist_init(&state.worklist, impl->num_blocks, NULL);

   /* Allocate live_in and live_out sets and add all of the blocks to the
    * worklist.
    */
   nir_foreach_block(block, impl) {
      init_liveness_block(block, &state);
   }

   /* We're now ready to work through the worklist and update the liveness
    * sets of each of the blocks.  By the time we get to this point, every
    * block in the function implementation has been pushed onto the
    * worklist in reverse order.  As long as we keep the worklist
    * up-to-date as we go, everything will get covered.
    */
   while (!nir_block_worklist_is_empty(&state.worklist)) {
      /* We pop them off in the reverse order we pushed them on.  This way
       * the first walk of the instructions is backwards so we only walk
       * once in the case of no control flow.
       */
      nir_block *block = nir_block_worklist_pop_head(&state.worklist);

      memcpy(block->live_in, block->live_out,
             state.bitset_words * sizeof(BITSET_WORD));

      nir_if *following_if = nir_block_get_following_if(block);
      if (following_if)
         set_src_live(&following_if->condition, block->live_in);

      nir_foreach_instr_reverse(instr, block) {
         /* Phi nodes are handled separately, so we want to skip them.  Since
          * we are going backwards and they are at the beginning, we can just
          * break as soon as we see one.
          */
         if (instr->type == nir_instr_type_phi)
            break;

         nir_foreach_ssa_def(instr, set_ssa_def_dead, block->live_in);
         nir_foreach_src(instr, set_src_live, block->live_in);
      }

      /* Walk over all of the predecessors of the current block, updating
       * their live-out with the live-in of this one.  If anything has
       * changed, add the predecessor to the worklist so that we ensure
       * that the new information is used.
       */
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (propagate_across_edge(pred, block, &state))
            nir_block_worklist_push_tail(&state.worklist, pred);
      }
   }

   ralloc_free(state.tmp_live);
   nir_block_worklist_fini(&state.worklist);
}
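
/* Callers normally do not invoke nir_live_ssa_defs_impl() directly.  A
 * minimal sketch of the usual pattern (hypothetical pass code) is:
 *
 *    nir_metadata_require(impl, nir_metadata_live_ssa_defs);
 *    nir_foreach_block(block, impl) {
 *       if (BITSET_TEST(block->live_out, def->index)) {
 *          ... def is live at the end of this block ...
 *       }
 *    }
 */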

/** Return the live set at a cursor
 *
 * Note: The bitset returned may be the live_in or live_out from the block in
 *       which the instruction lives.  Do not ralloc_free() it directly;
 *       instead, provide a mem_ctx and free that.
 */
const BITSET_WORD *
nir_get_live_ssa_defs(nir_cursor cursor, void *mem_ctx)
{
   nir_block *block = nir_cursor_current_block(cursor);
   nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
   assert(impl->valid_metadata & nir_metadata_live_ssa_defs);

   switch (cursor.option) {
   case nir_cursor_before_block:
      return cursor.block->live_in;

   case nir_cursor_after_block:
      return cursor.block->live_out;

   case nir_cursor_before_instr:
      if (cursor.instr == nir_block_first_instr(cursor.instr->block))
         return cursor.instr->block->live_in;
      break;

   case nir_cursor_after_instr:
      if (cursor.instr == nir_block_last_instr(cursor.instr->block))
         return cursor.instr->block->live_out;
      break;
   }

   /* If we got here, we're an instruction cursor mid-block */
   const unsigned bitset_words = BITSET_WORDS(impl->ssa_alloc);
   BITSET_WORD *live = ralloc_array(mem_ctx, BITSET_WORD, bitset_words);
   memcpy(live, block->live_out, bitset_words * sizeof(BITSET_WORD));

   nir_foreach_instr_reverse(instr, block) {
      if (cursor.option == nir_cursor_after_instr && instr == cursor.instr)
         break;

      /* If someone asked for liveness in the middle of a bunch of phis,
       * that's an error.  Since we are going backwards and they are at the
       * beginning, we can just blow up as soon as we see one.
       */
      assert(instr->type != nir_instr_type_phi);
      if (instr->type == nir_instr_type_phi)
         break;

      nir_foreach_ssa_def(instr, set_ssa_def_dead, live);
      nir_foreach_src(instr, set_src_live, live);

      if (cursor.option == nir_cursor_before_instr && instr == cursor.instr)
         break;
   }

   return live;
}
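
/* Illustrative use of nir_get_live_ssa_defs() (hypothetical caller):
 *
 *    void *mem_ctx = ralloc_context(NULL);
 *    const BITSET_WORD *live =
 *       nir_get_live_ssa_defs(nir_before_instr(instr), mem_ctx);
 *    if (BITSET_TEST(live, def->index)) {
 *       ... def is live right before instr ...
 *    }
 *    ralloc_free(mem_ctx);
 *
 * The mem_ctx only owns the bitset when one had to be computed mid-block;
 * otherwise the block's own live_in/live_out is returned, which is why the
 * result must never be freed directly.
 */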

static bool
src_does_not_use_def(nir_src *src, void *def)
{
   return !src->is_ssa || src->ssa != (nir_ssa_def *)def;
}

static bool
search_for_use_after_instr(nir_instr *start, nir_ssa_def *def)
{
   /* Only look for a use strictly after the given instruction */
   struct exec_node *node = start->node.next;
   while (!exec_node_is_tail_sentinel(node)) {
      nir_instr *instr = exec_node_data(nir_instr, node, node);
      if (!nir_foreach_src(instr, src_does_not_use_def, def))
         return true;
      node = node->next;
   }

   /* If-statement conditions are considered to be uses in the block
    * immediately preceding the if, so we also need to check the following
    * if condition, if any.
    */
   nir_if *following_if = nir_block_get_following_if(start->block);
   if (following_if && following_if->condition.is_ssa &&
       following_if->condition.ssa == def)
      return true;

   return false;
}

/* Returns true if def is live at instr, assuming that def comes before
 * instr in a pre-order DFS search of the dominance tree.
 */
static bool
nir_ssa_def_is_live_at(nir_ssa_def *def, nir_instr *instr)
{
   if (BITSET_TEST(instr->block->live_out, def->index)) {
      /* Since def dominates instr, if def is in the live-out of the block,
       * it's live at instr.
       */
      return true;
   } else {
      if (BITSET_TEST(instr->block->live_in, def->index) ||
          def->parent_instr->block == instr->block) {
         /* Either it is live coming into instr's block or it is defined in
          * the same block.  In either case, we simply need to see if it is
          * used after instr.
          */
         return search_for_use_after_instr(instr, def);
      } else {
         return false;
      }
   }
}

bool
nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b)
{
   if (a->parent_instr == b->parent_instr) {
      /* Two variables defined at the same time interfere assuming at
       * least one isn't dead.
       */
      return true;
   } else if (a->parent_instr->type == nir_instr_type_ssa_undef ||
              b->parent_instr->type == nir_instr_type_ssa_undef) {
      /* If either variable is an ssa_undef, then there's no interference */
      return false;
   } else if (a->parent_instr->index < b->parent_instr->index) {
      return nir_ssa_def_is_live_at(a, b->parent_instr);
   } else {
      return nir_ssa_def_is_live_at(b, a->parent_instr);
   }
}
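
/* Illustrative use of nir_ssa_defs_interfere() (hypothetical backend code):
 * after nir_metadata_require(impl, nir_metadata_instr_index |
 * nir_metadata_live_ssa_defs), two defs a and b may be assigned the same
 * storage only if !nir_ssa_defs_interfere(a, b).
 */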

/* Expands an SSA def's live interval to cover its defining instruction and
 * all of its uses.  Control flow effects are handled separately.
 */
static bool def_cb(nir_ssa_def *def, void *state)
{
   nir_instr_liveness *liveness = state;
   nir_instr *instr = def->parent_instr;
   int index = def->index;

   liveness->defs[index].start = MIN2(liveness->defs[index].start, instr->index);

   nir_foreach_use(src, def) {
      liveness->defs[index].end = MAX2(liveness->defs[index].end,
                                       src->parent_instr->index);
   }

   return true;
}

nir_instr_liveness *
nir_live_ssa_defs_per_instr(nir_function_impl *impl)
{
   /* We'll use block-level live_ssa_defs to expand our per-instr ranges for
    * control flow.
    */
   nir_metadata_require(impl,
                        nir_metadata_block_index |
                        nir_metadata_instr_index |
                        nir_metadata_live_ssa_defs);

   /* Make our struct. */
   nir_instr_liveness *liveness = ralloc(NULL, nir_instr_liveness);
   liveness->defs = rzalloc_array(liveness, nir_liveness_bounds,
                                  impl->ssa_alloc);

   /* Set our starts so we can use MIN2() as we accumulate bounds. */
   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      liveness->defs[i].start = ~0;

   nir_foreach_block(block, impl) {
      unsigned index;
      BITSET_FOREACH_SET(index, block->live_in, impl->ssa_alloc) {
         liveness->defs[index].start = MIN2(liveness->defs[index].start,
                                            block->start_ip);
      }

      nir_foreach_instr(instr, block) {
         nir_foreach_ssa_def(instr, def_cb, liveness);
      }

      /* Track an if condition's use.  We need to make sure that our value is
       * live across the if reference, where we don't have an instr->index
       * representing the use.  Mark it as live through the end of the block.
       */
      nir_if *nif = nir_block_get_following_if(block);
      if (nif) {
         if (nif->condition.is_ssa) {
            liveness->defs[nif->condition.ssa->index].end = MAX2(
               liveness->defs[nif->condition.ssa->index].end, block->end_ip);
         }
      }

      BITSET_FOREACH_SET(index, block->live_out, impl->ssa_alloc) {
         liveness->defs[index].end = MAX2(liveness->defs[index].end,
                                          block->end_ip);
      }
   }

   return liveness;
}
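
/* Sketch of how the per-instruction ranges might be consumed (hypothetical
 * caller; ralloc_free() releases the whole structure since defs is a child
 * of the returned allocation):
 *
 *    nir_instr_liveness *liveness = nir_live_ssa_defs_per_instr(impl);
 *    for (unsigned i = 0; i < impl->ssa_alloc; i++) {
 *       if (liveness->defs[i].start <= liveness->defs[i].end) {
 *          ... def i is live over instruction indices
 *              [liveness->defs[i].start, liveness->defs[i].end] ...
 *       }
 *    }
 *    ralloc_free(liveness);
 */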