/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_cfg.h"
#include "brw_vec4_live_variables.h"

using namespace brw;

/** @file brw_vec4_live_variables.cpp
 *
 * Support for computing, at the basic-block level, which variables
 * (virtual GRFs in our case) are live at block entry and exit.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section
 * 14.1 (p444).
 */

/**
 * Sets up the use[] and def[] arrays.
 *
 * The basic-block-level live variable analysis needs to know which
 * variables get used before they're completely defined, and which
 * variables are completely defined before they're used.
 *
 * We independently track each channel of a vec4.  This is because we need to
 * be able to recognize a sequence like:
 *
 * ...
 * DP4 tmp.x a b;
 * DP4 tmp.y c d;
 * MUL result.xy tmp.xy e.xy;
 * ...
 *
 * as having tmp live only across that sequence (assuming it's used nowhere
 * else), because it's a common pattern.  A more conservative approach that
 * doesn't get tmp marked as defined in this block will tend to result in
 * spilling.
 */
void
vec4_live_variables::setup_def_use()
{
   int ip = 0;

   foreach_block (block, cfg) {
      assert(ip == block->start_ip);
      if (block->num > 0)
         assert(cfg->blocks[block->num - 1]->end_ip == ip - 1);

      foreach_inst_in_block(vec4_instruction, inst, block) {
         struct block_data *bd = &block_data[block->num];

         /* Set use[] for this instruction */
         for (unsigned int i = 0; i < 3; i++) {
            if (inst->src[i].file == VGRF) {
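               /* Walk each 16-byte half of the register region this source
                * reads (a region wider than 16 bytes, e.g. 64-bit data,
                * covers both halves) and record each channel as used unless
                * this block has already completely defined it above.
                */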
               for (unsigned j = 0; j < DIV_ROUND_UP(inst->size_read(i), 16); j++) {
                  for (int c = 0; c < 4; c++) {
                     const unsigned v = var_from_reg(alloc, inst->src[i], c, j);
                     if (!BITSET_TEST(bd->def, v))
                        BITSET_SET(bd->use, v);
                  }
               }
            }
         }
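         /* The flag register is tracked like a small four-channel variable
          * of its own: a flag channel read before this block writes it is a
          * use.
          */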
         for (unsigned c = 0; c < 4; c++) {
            if (inst->reads_flag(c) &&
                !BITSET_TEST(bd->flag_def, c)) {
               BITSET_SET(bd->flag_use, c);
            }
         }

         /* Check for unconditional writes.  These are the things that screen
          * off preceding definitions of a variable's channels, and thus
          * qualify for being in def[].  A predicated SEL still writes every
          * enabled channel of its destination (the predicate only selects
          * between the sources), so it counts as unconditional here.
          */
         if (inst->dst.file == VGRF &&
             (!inst->predicate || inst->opcode == BRW_OPCODE_SEL)) {
            for (unsigned i = 0; i < DIV_ROUND_UP(inst->size_written, 16); i++) {
               for (int c = 0; c < 4; c++) {
                  if (inst->dst.writemask & (1 << c)) {
                     const unsigned v = var_from_reg(alloc, inst->dst, c, i);
                     if (!BITSET_TEST(bd->use, v))
                        BITSET_SET(bd->def, v);
                  }
               }
            }
         }
         if (inst->writes_flag()) {
            for (unsigned c = 0; c < 4; c++) {
               if ((inst->dst.writemask & (1 << c)) &&
                   !BITSET_TEST(bd->flag_use, c)) {
                  BITSET_SET(bd->flag_def, c);
               }
            }
         }

         ip++;
      }
   }
}

/**
 * The algorithm incrementally sets bits in liveout and livein,
 * propagating them through control flow.  It will eventually terminate
 * because it only ever adds bits, and stops when no bits are added in
 * a pass.
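 *
 * Per block this is the standard backward-liveness data-flow step:
 *
 *   liveout(b) = union of livein(s) over every successor s of b
 *   livein(b)  = use(b) | (liveout(b) & ~def(b))
 *
 * with the flag register channels handled the same way as the GRF channels.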
 */
void
vec4_live_variables::compute_live_variables()
{
   bool cont = true;

   while (cont) {
      cont = false;

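      /* Liveness is a backward data-flow problem, so visiting the blocks in
       * reverse program order typically reaches the fixed point in fewer
       * passes.
       */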
      foreach_block_reverse (block, cfg) {
         struct block_data *bd = &block_data[block->num];

         /* Update liveout */
         foreach_list_typed(bblock_link, child_link, link, &block->children) {
            struct block_data *child_bd = &block_data[child_link->block->num];

            for (int i = 0; i < bitset_words; i++) {
               BITSET_WORD new_liveout = (child_bd->livein[i] &
                                          ~bd->liveout[i]);
               if (new_liveout) {
                  bd->liveout[i] |= new_liveout;
                  cont = true;
               }
            }
            BITSET_WORD new_liveout = (child_bd->flag_livein[0] &
                                       ~bd->flag_liveout[0]);
            if (new_liveout) {
               bd->flag_liveout[0] |= new_liveout;
               cont = true;
            }
         }

         /* Update livein */
         for (int i = 0; i < bitset_words; i++) {
            BITSET_WORD new_livein = (bd->use[i] |
                                      (bd->liveout[i] &
                                       ~bd->def[i]));
            if (new_livein & ~bd->livein[i]) {
               bd->livein[i] |= new_livein;
               cont = true;
            }
         }
         BITSET_WORD new_livein = (bd->flag_use[0] |
                                   (bd->flag_liveout[0] &
                                    ~bd->flag_def[0]));
         if (new_livein & ~bd->flag_livein[0]) {
            bd->flag_livein[0] |= new_livein;
            cont = true;
         }
      }
   }
}

vec4_live_variables::vec4_live_variables(const simple_allocator &alloc,
                                         cfg_t *cfg)
   : alloc(alloc), cfg(cfg)
{
   mem_ctx = ralloc_context(NULL);

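   /* Each allocated register slot is tracked as 8 separate variables: one
    * for each channel of each of its two 16-byte halves, matching the
    * indexing done by var_from_reg().
    */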
   num_vars = alloc.total_size * 8;
   block_data = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);

   bitset_words = BITSET_WORDS(num_vars);
   for (int i = 0; i < cfg->num_blocks; i++) {
      block_data[i].def = rzalloc_array(mem_ctx, BITSET_WORD, bitset_words);
      block_data[i].use = rzalloc_array(mem_ctx, BITSET_WORD, bitset_words);
      block_data[i].livein = rzalloc_array(mem_ctx, BITSET_WORD, bitset_words);
      block_data[i].liveout = rzalloc_array(mem_ctx, BITSET_WORD, bitset_words);

      block_data[i].flag_def[0] = 0;
      block_data[i].flag_use[0] = 0;
      block_data[i].flag_livein[0] = 0;
      block_data[i].flag_liveout[0] = 0;
   }

   setup_def_use();
   compute_live_variables();
}

vec4_live_variables::~vec4_live_variables()
{
   ralloc_free(mem_ctx);
}

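/* Sentinel "start" IP for a variable that is never accessed; unused variables
 * keep start == MAX_INSTRUCTION and end == -1.
 */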
#define MAX_INSTRUCTION (1 << 30)

/**
 * Computes a conservative start/end of the live intervals for each virtual GRF.
 *
 * We could expose per-channel live intervals to the consumer based on the
 * information we computed in vec4_live_variables, except that our only
 * current user is virtual_grf_interferes().  So we instead union the
 * per-channel ranges into a per-vgrf range for virtual_grf_start[] and
 * virtual_grf_end[].
 *
 * We could potentially have virtual_grf_interferes() do the test per-channel,
 * which would let some interesting register allocation occur (particularly on
 * code-generated GLSL sequences from the Cg compiler, which does register
 * allocation at the GLSL level and thus reuses components of the variable
 * with distinct lifetimes).  But right now the complexity of doing so doesn't
 * seem worth it, since having virtual_grf_interferes() be cheap is important
 * for register allocation performance.
 */
void
vec4_visitor::calculate_live_intervals()
{
   if (this->live_intervals)
      return;

   int *start = ralloc_array(mem_ctx, int, this->alloc.total_size * 8);
   int *end = ralloc_array(mem_ctx, int, this->alloc.total_size * 8);
   ralloc_free(this->virtual_grf_start);
   ralloc_free(this->virtual_grf_end);
   this->virtual_grf_start = start;
   this->virtual_grf_end = end;

   for (unsigned i = 0; i < this->alloc.total_size * 8; i++) {
      start[i] = MAX_INSTRUCTION;
      end[i] = -1;
   }

   /* Start by setting up the intervals with no knowledge of control
    * flow.
    */
   int ip = 0;
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            for (unsigned j = 0; j < DIV_ROUND_UP(inst->size_read(i), 16); j++) {
               for (int c = 0; c < 4; c++) {
                  const unsigned v = var_from_reg(alloc, inst->src[i], c, j);
                  start[v] = MIN2(start[v], ip);
                  end[v] = ip;
               }
            }
         }
      }

      if (inst->dst.file == VGRF) {
         for (unsigned i = 0; i < DIV_ROUND_UP(inst->size_written, 16); i++) {
            for (int c = 0; c < 4; c++) {
               if (inst->dst.writemask & (1 << c)) {
                  const unsigned v = var_from_reg(alloc, inst->dst, c, i);
                  start[v] = MIN2(start[v], ip);
                  end[v] = ip;
               }
            }
         }
      }

      ip++;
   }

   /* Now, extend those intervals using our analysis of control flow.
    *
    * The control flow-aware analysis was done at a channel level, while at
    * this point we're distilling it down to vgrfs.
    */
   this->live_intervals = new(mem_ctx) vec4_live_variables(alloc, cfg);

   foreach_block (block, cfg) {
      struct block_data *bd = &live_intervals->block_data[block->num];

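      /* Anything live into the block is live at its first instruction, and
       * anything live out of it is still live at its last, so widen each
       * per-channel interval to the block boundaries accordingly.
       */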
      for (int i = 0; i < live_intervals->num_vars; i++) {
         if (BITSET_TEST(bd->livein, i)) {
            start[i] = MIN2(start[i], block->start_ip);
            end[i] = MAX2(end[i], block->start_ip);
         }

         if (BITSET_TEST(bd->liveout, i)) {
            start[i] = MIN2(start[i], block->end_ip);
            end[i] = MAX2(end[i], block->end_ip);
         }
      }
   }
}

void
vec4_visitor::invalidate_live_intervals()
{
   ralloc_free(live_intervals);
   live_intervals = NULL;
}

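/* Earliest recorded start IP over the n consecutive per-channel variables
 * beginning at variable index v.
 */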
int
vec4_visitor::var_range_start(unsigned v, unsigned n) const
{
   int start = INT_MAX;

   for (unsigned i = 0; i < n; i++)
      start = MIN2(start, virtual_grf_start[v + i]);

   return start;
}

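/* Latest recorded end IP over the n consecutive per-channel variables
 * beginning at variable index v.
 */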
int
vec4_visitor::var_range_end(unsigned v, unsigned n) const
{
   int end = INT_MIN;

   for (unsigned i = 0; i < n; i++)
      end = MAX2(end, virtual_grf_end[v + i]);

   return end;
}

bool
vec4_visitor::virtual_grf_interferes(int a, int b)
{
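   /* Two vgrfs interfere unless one's live range ends before the other's
    * begins.  The factor of 8 converts the allocator's register-granular
    * offsets and sizes into the per-channel variable index space used by
    * virtual_grf_start[] and virtual_grf_end[].
    */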
   return !((var_range_end(8 * alloc.offsets[a], 8 * alloc.sizes[a]) <=
             var_range_start(8 * alloc.offsets[b], 8 * alloc.sizes[b])) ||
            (var_range_end(8 * alloc.offsets[b], 8 * alloc.sizes[b]) <=
             var_range_start(8 * alloc.offsets[a], 8 * alloc.sizes[a])));
}