/*
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 */

#include "etnaviv_compiler_nir.h"
#include "compiler/nir/nir_worklist.h"
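
/* grow a def's live range so that it covers the given instruction index */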
static void
range_include(struct live_def *def, unsigned index)
{
   if (def->live_start > index)
      def->live_start = index;
   if (def->live_end < index)
      def->live_end = index;
}
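
/* working state shared by the liveness walk below */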
struct live_defs_state {
   unsigned num_defs;
   unsigned bitset_words;

   nir_function_impl *impl;
   nir_block *block; /* current block pointer */
   unsigned index; /* current live index */

   struct live_def *defs;
   unsigned *live_map; /* to map ssa/reg index into defs array */

   nir_block_worklist worklist;
};
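
/* allocate and clear a block's live_in/live_out bitsets and push the block
 * onto the worklist
 */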
static bool
init_liveness_block(nir_block *block,
                    struct live_defs_state *state)
{
   block->live_in = reralloc(block, block->live_in, BITSET_WORD,
                             state->bitset_words);
   memset(block->live_in, 0, state->bitset_words * sizeof(BITSET_WORD));

   block->live_out = reralloc(block, block->live_out, BITSET_WORD,
                              state->bitset_words);
   memset(block->live_out, 0, state->bitset_words * sizeof(BITSET_WORD));

   nir_block_worklist_push_head(&state->worklist, block);

   return true;
}
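
/* mark the def feeding this source as live: sysvals, derefs, constants and
 * undefs never occupy a register, and bypassed alu ops are resolved through
 * their own sources instead; everything else is added to the current block's
 * live_in set and its range is extended to the current live index
 */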
static bool
set_src_live(nir_src *src, void *void_state)
{
   struct live_defs_state *state = void_state;

   if (src->is_ssa) {
      nir_instr *instr = src->ssa->parent_instr;

      if (is_sysval(instr) || instr->type == nir_instr_type_deref)
         return true;

      switch (instr->type) {
      case nir_instr_type_load_const:
      case nir_instr_type_ssa_undef:
         return true;
      case nir_instr_type_alu: {
         /* alu op bypass */
         nir_alu_instr *alu = nir_instr_as_alu(instr);
         if (instr->pass_flags & BYPASS_SRC) {
            for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
               set_src_live(&alu->src[i].src, state);
            return true;
         }
      } break;
      default:
         break;
      }
   }

   unsigned i = state->live_map[src_index(state->impl, src)];
   assert(i != ~0u);

   BITSET_SET(state->block->live_in, i);
   range_include(&state->defs[i], state->index);

   return true;
}
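
/* OR the successor's live_in into the predecessor's live_out and report
 * whether any new bits were set
 */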
static bool
propagate_across_edge(nir_block *pred, nir_block *succ,
                      struct live_defs_state *state)
{
   BITSET_WORD progress = 0;
   for (unsigned i = 0; i < state->bitset_words; ++i) {
      progress |= succ->live_in[i] & ~pred->live_out[i];
      pred->live_out[i] |= succ->live_in[i];
   }
   return progress != 0;
}
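
/* compute live ranges for all defs in the function: fills the defs array and
 * live_map (indexed by ssa/reg index, expected to be pre-filled with ~0u by
 * the caller) and returns the number of defs
 */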
unsigned
etna_live_defs(nir_function_impl *impl, struct live_def *defs, unsigned *live_map)
{
   struct live_defs_state state;
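   /* live index of the first def in each block; the extra entry holds the
    * index one past the last def of the final block
    */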
   unsigned block_live_index[impl->num_blocks + 1];

   state.impl = impl;
   state.defs = defs;
   state.live_map = live_map;

   state.num_defs = 0;
   nir_foreach_block(block, impl) {
      block_live_index[block->index] = state.num_defs;
      nir_foreach_instr(instr, block) {
         nir_dest *dest = dest_for_instr(instr);
         if (!dest)
            continue;

         unsigned idx = dest_index(impl, dest);
         /* register is already in defs */
         if (live_map[idx] != ~0u)
            continue;

         defs[state.num_defs] = (struct live_def) {instr, dest, state.num_defs, 0};

         /* input live from the start */
         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic == nir_intrinsic_load_input ||
                intr->intrinsic == nir_intrinsic_load_instance_id)
               defs[state.num_defs].live_start = 0;
         }

         live_map[idx] = state.num_defs;
         state.num_defs++;
      }
   }
   block_live_index[impl->num_blocks] = state.num_defs;

   nir_block_worklist_init(&state.worklist, impl->num_blocks, NULL);

   /* We now know how many unique ssa definitions we have and we can go
    * ahead and allocate live_in and live_out sets and add all of the
    * blocks to the worklist.
    */
   state.bitset_words = BITSET_WORDS(state.num_defs);
   nir_foreach_block(block, impl) {
      init_liveness_block(block, &state);
   }

   /* We're now ready to work through the worklist and update the liveness
    * sets of each of the blocks.  By the time we get to this point, every
    * block in the function implementation has been pushed onto the
    * worklist in reverse order.  As long as we keep the worklist
    * up-to-date as we go, everything will get covered.
    */
   while (!nir_block_worklist_is_empty(&state.worklist)) {
      /* We pop them off in the reverse order we pushed them on.  This way
       * the first walk of the instructions is backwards so we only walk
       * once in the case of no control flow.
       */
      nir_block *block = nir_block_worklist_pop_head(&state.worklist);
      state.block = block;

      memcpy(block->live_in, block->live_out,
             state.bitset_words * sizeof(BITSET_WORD));

      state.index = block_live_index[block->index + 1];
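
      /* a following nir_if consumes its condition at the very end of this
       * block, so count that use before walking the instructions backwards
       */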
      nir_if *following_if = nir_block_get_following_if(block);
      if (following_if)
         set_src_live(&following_if->condition, &state);

      nir_foreach_instr_reverse(instr, block) {
         /* when we come across the next "live" instruction, decrement index */
         if (state.index && instr == defs[state.index - 1].instr) {
            state.index--;
            /* the only source of writes to registers is phis:
             * we don't expect any partial write_mask alus
             * so clearing live_in here is OK
             */
            BITSET_CLEAR(block->live_in, state.index);
         }

         /* don't set_src_live for not-emitted instructions */
         if (instr->pass_flags)
            continue;

         unsigned index = state.index;

         /* output live till the end */
         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic == nir_intrinsic_store_deref)
               state.index = ~0u;
         }

         nir_foreach_src(instr, set_src_live, &state);

         state.index = index;
      }
      assert(state.index == block_live_index[block->index]);

      /* Walk over all of the predecessors of the current block updating
       * their live in with the live out of this one.  If anything has
       * changed, add the predecessor to the work list so that we ensure
       * that the new information is used.
       */
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (propagate_across_edge(pred, block, &state))
            nir_block_worklist_push_tail(&state.worklist, pred);
      }
   }

   nir_block_worklist_fini(&state.worklist);

   /* apply live_in/live_out to ranges */

   nir_foreach_block(block, impl) {
      int i;

      BITSET_FOREACH_SET(i, block->live_in, state.num_defs)
         range_include(&state.defs[i], block_live_index[block->index]);

      BITSET_FOREACH_SET(i, block->live_out, state.num_defs)
         range_include(&state.defs[i], block_live_index[block->index + 1]);
   }

   return state.num_defs;
}