/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file elk_fs_register_coalesce.cpp
 *
 * Implements register coalescing: Checks if the two registers involved in a
 * raw move don't interfere, in which case they can both be stored in the same
 * place and the MOV removed.
 *
 * To do this, all uses of the source of the MOV in the shader are replaced
 * with the destination of the MOV. For example:
 *
 * add vgrf3:F, vgrf1:F, vgrf2:F
 * mov vgrf4:F, vgrf3:F
 * mul vgrf5:F, vgrf5:F, vgrf4:F
 *
 * becomes
 *
 * add vgrf4:F, vgrf1:F, vgrf2:F
 * mul vgrf5:F, vgrf5:F, vgrf4:F
 */

#include "elk_fs.h"
#include "elk_cfg.h"
#include "elk_fs_live_variables.h"

using namespace elk;

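/**
 * Check whether a copy has no effect: a MOV whose destination already equals
 * its source, or a LOAD_PAYLOAD that reassembles the destination out of its
 * own consecutive chunks.
 */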
static bool
is_nop_mov(const elk_fs_inst *inst)
{
   if (inst->opcode == ELK_SHADER_OPCODE_LOAD_PAYLOAD) {
      elk_fs_reg dst = inst->dst;
      for (int i = 0; i < inst->sources; i++) {
         if (!dst.equals(inst->src[i])) {
            return false;
         }
         dst.offset += (i < inst->header_size ? REG_SIZE :
                        inst->exec_size * dst.stride *
                        type_sz(inst->src[i].type));
      }
      return true;
   } else if (inst->opcode == ELK_OPCODE_MOV) {
      return inst->dst.equals(inst->src[0]);
   }

   return false;
}

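/**
 * Check whether an instruction is a raw copy this pass can try to coalesce:
 * a full, unsaturated MOV or LOAD_PAYLOAD between VGRFs of the same type,
 * with no source modifiers, whose source fits inside its destination.
 */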
static bool
is_coalesce_candidate(const elk_fs_visitor *v, const elk_fs_inst *inst)
{
   if ((inst->opcode != ELK_OPCODE_MOV &&
        inst->opcode != ELK_SHADER_OPCODE_LOAD_PAYLOAD) ||
       inst->is_partial_write() ||
       inst->saturate ||
       inst->src[0].file != VGRF ||
       inst->src[0].negate ||
       inst->src[0].abs ||
       !inst->src[0].is_contiguous() ||
       inst->dst.file != VGRF ||
       inst->dst.type != inst->src[0].type) {
      return false;
   }

   if (v->alloc.sizes[inst->src[0].nr] >
       v->alloc.sizes[inst->dst.nr])
      return false;

   if (inst->opcode == ELK_SHADER_OPCODE_LOAD_PAYLOAD) {
      if (!is_coalescing_payload(v->alloc, inst)) {
         return false;
      }
   }

   return true;
}

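/**
 * Check whether the copy's source and destination can share a register even
 * though their live ranges interfere: one live range must contain the other,
 * and within their intersection neither register may be written other than
 * by the copy itself, except for the source-write case described in the big
 * comment below.
 */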
static bool
can_coalesce_vars(const fs_live_variables &live, const elk_cfg_t *cfg,
                  const elk_bblock_t *block, const elk_fs_inst *inst,
                  int dst_var, int src_var)
{
   if (!live.vars_interfere(src_var, dst_var))
      return true;

   int dst_start = live.start[dst_var];
   int dst_end = live.end[dst_var];
   int src_start = live.start[src_var];
   int src_end = live.end[src_var];

   /* Variables interfere and one live range isn't a subset of the other. */
   if ((dst_end > src_end && src_start < dst_start) ||
       (src_end > dst_end && dst_start < src_start))
      return false;

   /* Check for a write to either register in the intersection of their live
    * ranges.
    */
   int start_ip = MAX2(dst_start, src_start);
   int end_ip = MIN2(dst_end, src_end);

   foreach_block(scan_block, cfg) {
      if (scan_block->end_ip < start_ip)
         continue;

      int scan_ip = scan_block->start_ip - 1;

      bool seen_src_write = false;
      bool seen_copy = false;
      foreach_inst_in_block(elk_fs_inst, scan_inst, scan_block) {
         scan_ip++;

         /* Ignore anything before the intersection of the live ranges */
         if (scan_ip < start_ip)
            continue;

         /* Ignore the copying instruction itself */
         if (scan_inst == inst) {
            seen_copy = true;
            continue;
         }

         if (scan_ip > end_ip)
            return true; /* registers do not interfere */

         if (seen_src_write && !seen_copy) {
            /* In order to satisfy the guarantee of register coalescing, we
             * must ensure that the two registers always have the same value
             * during the intersection of their live ranges.  One way to do
             * this is to simply ensure that neither is ever written apart
             * from the one copy which syncs up the two registers.  However,
             * this can be overly conservative and only works in the case
             * where the destination live range is entirely contained in the
             * source live range.
             *
             * To handle the other case where the source is contained in the
             * destination, we allow writes to the source register as long as
             * they happen before the copy, in the same block as the copy, and
             * the destination is never read between the first such write and
             * the copy.  This effectively moves the write from the copy up.
             */
            for (int j = 0; j < scan_inst->sources; j++) {
               if (regions_overlap(scan_inst->src[j], scan_inst->size_read(j),
                                   inst->dst, inst->size_written))
                  return false; /* registers interfere */
            }
         }

         /* The MOV being coalesced had better be the only instruction which
          * writes to the coalesce destination in the intersection.
          */
         if (regions_overlap(scan_inst->dst, scan_inst->size_written,
                             inst->dst, inst->size_written))
            return false; /* registers interfere */

         /* See the big comment above */
         if (regions_overlap(scan_inst->dst, scan_inst->size_written,
                             inst->src[0], inst->size_read(0))) {
            if (seen_copy || scan_block != block ||
                (scan_inst->force_writemask_all && !inst->force_writemask_all))
               return false;
            seen_src_write = true;
         }
      }
   }

   return true;
}

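/**
 * The pass itself: find candidate copies, verify that the copies covering a
 * source VGRF can all be folded into their common destination, rewrite every
 * use of the source to the destination, and delete the dead MOVs.  Returns
 * true if anything changed.
 */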
bool
elk_fs_visitor::register_coalesce()
{
   bool progress = false;
   fs_live_variables &live = live_analysis.require();
   int src_size = 0;
   int channels_remaining = 0;
   unsigned src_reg = ~0u, dst_reg = ~0u;
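   /* Per REG_SIZE chunk of the source VGRF currently being considered: the
    * copy instruction that moves it, its offset within the destination, and
    * the live-variable indices for the destination and source chunks.
    */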
   int *dst_reg_offset = new int[MAX_VGRF_SIZE(devinfo)];
   elk_fs_inst **mov = new elk_fs_inst *[MAX_VGRF_SIZE(devinfo)];
   int *dst_var = new int[MAX_VGRF_SIZE(devinfo)];
   int *src_var = new int[MAX_VGRF_SIZE(devinfo)];

   foreach_block_and_inst(block, elk_fs_inst, inst, cfg) {
      if (!is_coalesce_candidate(this, inst))
         continue;

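      /* A copy of a register onto itself does nothing; turn it into a NOP to
       * be cleaned up at the end of the pass.
       */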
      if (is_nop_mov(inst)) {
         inst->opcode = ELK_OPCODE_NOP;
         progress = true;
         continue;
      }

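      /* First copy seen out of a new source VGRF: reset the per-source
       * bookkeeping.
       */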
      if (src_reg != inst->src[0].nr) {
         src_reg = inst->src[0].nr;

         src_size = alloc.sizes[inst->src[0].nr];
         assert(src_size <= MAX_VGRF_SIZE(devinfo));

         channels_remaining = src_size;
         memset(mov, 0, sizeof(*mov) * MAX_VGRF_SIZE(devinfo));

         dst_reg = inst->dst.nr;
      }

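      /* Only track copies that go to the same destination VGRF as the first
       * copy seen out of this source.
       */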
      if (dst_reg != inst->dst.nr)
         continue;

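      /* Record this copy and the destination offset of every source chunk it
       * moves.
       */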
      if (inst->opcode == ELK_SHADER_OPCODE_LOAD_PAYLOAD) {
         for (int i = 0; i < src_size; i++) {
            dst_reg_offset[i] = i;
         }
         mov[0] = inst;
         channels_remaining -= regs_written(inst);
      } else {
         const int offset = inst->src[0].offset / REG_SIZE;
         if (mov[offset]) {
            /* This is the second time that this offset in the register has
             * been set.  This means, in particular, that inst->dst was
             * live before this instruction and that the live ranges of
             * inst->dst and inst->src[0] overlap and we can't coalesce the
             * two variables.  Let's ensure that doesn't happen.
             */
            channels_remaining = -1;
            continue;
         }
         for (unsigned i = 0; i < MAX2(inst->size_written / REG_SIZE, 1); i++)
            dst_reg_offset[offset + i] = inst->dst.offset / REG_SIZE + i;
         mov[offset] = inst;
         channels_remaining -= regs_written(inst);
      }

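      /* Wait until the copies seen so far cover every chunk of the source
       * VGRF (channels_remaining goes negative above when coalescing this
       * source is impossible).
       */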
      if (channels_remaining)
         continue;

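      /* The copies must lay the source down at consecutive, in-order offsets
       * of the destination, and every chunk pair's live ranges must allow
       * merging.
       */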
      bool can_coalesce = true;
      for (int i = 0; i < src_size; i++) {
         if (dst_reg_offset[i] != dst_reg_offset[0] + i) {
            /* Registers are out-of-order. */
            can_coalesce = false;
            src_reg = ~0u;
            break;
         }

         dst_var[i] = live.var_from_vgrf[dst_reg] + dst_reg_offset[i];
         src_var[i] = live.var_from_vgrf[src_reg] + i;

         if (!can_coalesce_vars(live, cfg, block, inst, dst_var[i], src_var[i])) {
            can_coalesce = false;
            src_reg = ~0u;
            break;
         }
      }

      if (!can_coalesce)
         continue;

      progress = true;

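      /* The coalesce is happening: turn each copy into a NOP, or into a
       * null-destination MOV from the coalesced register if it carries a
       * conditional modifier that still has to be evaluated.
       */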
      for (int i = 0; i < src_size; i++) {
         if (!mov[i])
            continue;

         if (mov[i]->conditional_mod == ELK_CONDITIONAL_NONE) {
            mov[i]->opcode = ELK_OPCODE_NOP;
            mov[i]->dst = reg_undef;
            for (int j = 0; j < mov[i]->sources; j++) {
               mov[i]->src[j] = reg_undef;
            }
         } else {
            /* If we have a conditional modifier, rewrite the MOV to be a
             * MOV.cmod from the coalesced register.  Hopefully, cmod
             * propagation will clean this up and move it to the instruction
             * that writes the register.  If not, this keeps things correct
             * while still letting us coalesce.
             */
            assert(mov[i]->opcode == ELK_OPCODE_MOV);
            assert(mov[i]->sources == 1);
            mov[i]->src[0] = mov[i]->dst;
            mov[i]->dst = retype(elk_null_reg(), mov[i]->dst.type);
         }
      }

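      /* Rewrite every write and read of the source VGRF to use the
       * destination instead, translating register offsets through
       * dst_reg_offset[].
       */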
      foreach_block_and_inst(block, elk_fs_inst, scan_inst, cfg) {
         if (scan_inst->dst.file == VGRF &&
             scan_inst->dst.nr == src_reg) {
            scan_inst->dst.nr = dst_reg;
            scan_inst->dst.offset = scan_inst->dst.offset % REG_SIZE +
               dst_reg_offset[scan_inst->dst.offset / REG_SIZE] * REG_SIZE;
         }

         for (int j = 0; j < scan_inst->sources; j++) {
            if (scan_inst->src[j].file == VGRF &&
                scan_inst->src[j].nr == src_reg) {
               scan_inst->src[j].nr = dst_reg;
               scan_inst->src[j].offset = scan_inst->src[j].offset % REG_SIZE +
                  dst_reg_offset[scan_inst->src[j].offset / REG_SIZE] * REG_SIZE;
            }
         }
      }

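      /* Fold the source chunks' live ranges into the destination's so that
       * later candidates in this pass see consistent liveness information.
       */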
      for (int i = 0; i < src_size; i++) {
         live.start[dst_var[i]] = MIN2(live.start[dst_var[i]],
                                       live.start[src_var[i]]);
         live.end[dst_var[i]] = MAX2(live.end[dst_var[i]],
                                     live.end[src_var[i]]);
      }
      src_reg = ~0u;
   }

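   /* Remove the instructions that were turned into NOPs, fix up the block
    * IPs, and invalidate analyses that depend on the instruction list.
    */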
   if (progress) {
      foreach_block_and_inst_safe (block, elk_backend_instruction, inst, cfg) {
         if (inst->opcode == ELK_OPCODE_NOP) {
            inst->remove(block, true);
         }
      }

      cfg->adjust_block_ips();

      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
   }

   delete[] src_var;
   delete[] dst_var;
   delete[] mov;
   delete[] dst_reg_offset;

   return progress;
}