/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_register_coalesce.cpp
 *
 * Implements register coalescing: if the two registers involved in a raw
 * move don't interfere, they can both be stored in the same place and the
 * MOV removed.
 *
 * To do this, all uses of the source of the MOV in the shader are replaced
 * with the destination of the MOV. For example:
 *
 * add vgrf3:F, vgrf1:F, vgrf2:F
 * mov vgrf4:F, vgrf3:F
 * mul vgrf5:F, vgrf5:F, vgrf4:F
 *
 * becomes
 *
 * add vgrf4:F, vgrf1:F, vgrf2:F
 * mul vgrf5:F, vgrf5:F, vgrf4:F
 */

#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_fs_live_variables.h"

using namespace brw;

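/**
 * Check whether an instruction is a no-op copy: a MOV whose destination
 * equals its source, or a LOAD_PAYLOAD in which every source is already
 * stored at the corresponding offset of the destination.
 */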
static bool
is_nop_mov(const fs_inst *inst)
{
   if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
      fs_reg dst = inst->dst;
      for (int i = 0; i < inst->sources; i++) {
         if (!dst.equals(inst->src[i])) {
            return false;
         }
         dst.offset += (i < inst->header_size ? REG_SIZE :
                        inst->exec_size * dst.stride *
                        type_sz(inst->src[i].type));
      }
      return true;
   } else if (inst->opcode == BRW_OPCODE_MOV) {
      return inst->dst.equals(inst->src[0]);
   }

   return false;
}

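/**
 * Check whether an instruction is a candidate for coalescing: a raw
 * (unsaturated, unmodified, type-matched) MOV or coalescing LOAD_PAYLOAD
 * between two VGRFs, with a contiguous source no larger than the
 * destination.
 */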
static bool
is_coalesce_candidate(const fs_visitor *v, const fs_inst *inst)
{
   if ((inst->opcode != BRW_OPCODE_MOV &&
        inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD) ||
       inst->is_partial_write() ||
       inst->saturate ||
       inst->src[0].file != VGRF ||
       inst->src[0].negate ||
       inst->src[0].abs ||
       !inst->src[0].is_contiguous() ||
       inst->dst.file != VGRF ||
       inst->dst.type != inst->src[0].type) {
      return false;
   }

   if (v->alloc.sizes[inst->src[0].nr] >
       v->alloc.sizes[inst->dst.nr])
      return false;

   if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
      if (!is_coalescing_payload(v->alloc, inst)) {
         return false;
      }
   }

   return true;
}

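/**
 * Check whether the per-register variables src_var and dst_var can be
 * coalesced through the copy `inst` in `block`.  This is the case if the
 * variables don't interfere at all, or if one live range is contained in
 * the other and neither register is written within the intersection of
 * the two ranges except by the copy itself (and, for the source, by
 * writes the copy can safely be folded into; see the comment in the body).
 */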
static bool
can_coalesce_vars(const fs_live_variables &live, const cfg_t *cfg,
                  const bblock_t *block, const fs_inst *inst,
                  int dst_var, int src_var)
{
   if (!live.vars_interfere(src_var, dst_var))
      return true;

   int dst_start = live.start[dst_var];
   int dst_end = live.end[dst_var];
   int src_start = live.start[src_var];
   int src_end = live.end[src_var];

   /* Variables interfere and one live range isn't a subset of the other. */
   if ((dst_end > src_end && src_start < dst_start) ||
       (src_end > dst_end && dst_start < src_start))
      return false;

   /* Check for a write to either register in the intersection of their live
    * ranges.
    */
   int start_ip = MAX2(dst_start, src_start);
   int end_ip = MIN2(dst_end, src_end);

   foreach_block(scan_block, cfg) {
      if (scan_block->end_ip < start_ip)
         continue;

      int scan_ip = scan_block->start_ip - 1;

      bool seen_src_write = false;
      bool seen_copy = false;
      foreach_inst_in_block(fs_inst, scan_inst, scan_block) {
         scan_ip++;

         /* Ignore anything before the intersection of the live ranges */
         if (scan_ip < start_ip)
            continue;

         /* Ignore the copying instruction itself */
         if (scan_inst == inst) {
            seen_copy = true;
            continue;
         }

         if (scan_ip > end_ip)
            return true; /* registers do not interfere */

         if (seen_src_write && !seen_copy) {
            /* In order to satisfy the guarantee of register coalescing, we
             * must ensure that the two registers always have the same value
             * during the intersection of their live ranges.  One way to do
             * this is to simply ensure that neither is ever written apart
             * from the one copy which syncs up the two registers.  However,
             * this can be overly conservative and only works in the case
             * where the destination live range is entirely contained in the
             * source live range.
             *
             * To handle the other case where the source is contained in the
             * destination, we allow writes to the source register as long as
             * they happen before the copy, in the same block as the copy, and
             * the destination is never read between the first such write and
             * the copy.  This effectively moves the write from the copy up.
             */
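            /* For example (register numbers purely illustrative), the copy
             * below can still be coalesced even though vgrf4 is already
             * live when vgrf3 is written:
             *
             *    mov vgrf4:F, vgrf1:F            <- vgrf4 becomes live
             *    add vgrf3:F, vgrf2:F, vgrf2:F   <- write to the source
             *    mov vgrf4:F, vgrf3:F            <- the copy being coalesced
             *    mul vgrf5:F, vgrf4:F, vgrf4:F
             *
             * vgrf4 is never read between the add and the copy, so after
             * coalescing the add can simply write vgrf4 directly.
             */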
            for (int j = 0; j < scan_inst->sources; j++) {
               if (regions_overlap(scan_inst->src[j], scan_inst->size_read(j),
                                   inst->dst, inst->size_written))
                  return false; /* registers interfere */
            }
         }

         /* The MOV being coalesced had better be the only instruction which
          * writes to the coalesce destination in the intersection.
          */
         if (regions_overlap(scan_inst->dst, scan_inst->size_written,
                             inst->dst, inst->size_written))
            return false; /* registers interfere */

         /* See the big comment above */
         if (regions_overlap(scan_inst->dst, scan_inst->size_written,
                             inst->src[0], inst->size_read(0))) {
            if (seen_copy || scan_block != block)
               return false;
            seen_src_write = true;
         }
      }
   }

   return true;
}

bool
fs_visitor::register_coalesce()
{
   bool progress = false;
   fs_live_variables &live = live_analysis.require();
   int src_size = 0;
   int channels_remaining = 0;
   unsigned src_reg = ~0u, dst_reg = ~0u;
   int dst_reg_offset[MAX_VGRF_SIZE];
   fs_inst *mov[MAX_VGRF_SIZE];
   int dst_var[MAX_VGRF_SIZE];
   int src_var[MAX_VGRF_SIZE];

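   /* Walk the shader looking for candidate copies.  MOVs (or a single
    * LOAD_PAYLOAD) copying from the same source VGRF into the same
    * destination VGRF are accumulated in mov[], indexed by the source
    * register offset they copy, until every register of the source has
    * been covered (channels_remaining reaches 0); only then do we check
    * whether the accumulated copies can actually be coalesced.
    */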
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (!is_coalesce_candidate(this, inst))
         continue;

      if (is_nop_mov(inst)) {
         inst->opcode = BRW_OPCODE_NOP;
         progress = true;
         continue;
      }

      if (src_reg != inst->src[0].nr) {
         src_reg = inst->src[0].nr;

         src_size = alloc.sizes[inst->src[0].nr];
         assert(src_size <= MAX_VGRF_SIZE);

         channels_remaining = src_size;
         memset(mov, 0, sizeof(mov));

         dst_reg = inst->dst.nr;
      }

      if (dst_reg != inst->dst.nr)
         continue;

      if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
         for (int i = 0; i < src_size; i++) {
            dst_reg_offset[i] = i;
         }
         mov[0] = inst;
         channels_remaining -= regs_written(inst);
      } else {
         const int offset = inst->src[0].offset / REG_SIZE;
         if (mov[offset]) {
            /* This is the second time that this offset in the register has
             * been set.  This means, in particular, that inst->dst was
             * live before this instruction and that the live ranges of
             * inst->dst and inst->src[0] overlap and we can't coalesce the
             * two variables.  Let's ensure that doesn't happen.
             */
            channels_remaining = -1;
            continue;
         }
         for (unsigned i = 0; i < MAX2(inst->size_written / REG_SIZE, 1); i++)
            dst_reg_offset[offset + i] = inst->dst.offset / REG_SIZE + i;
         mov[offset] = inst;
         channels_remaining -= regs_written(inst);
      }

      if (channels_remaining)
         continue;

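      /* Every register of the source now has a copy.  Verify that the
       * copies write the destination contiguously and in order, and that
       * each pair of per-register variables may legally share storage.
       */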
      bool can_coalesce = true;
      for (int i = 0; i < src_size; i++) {
         if (dst_reg_offset[i] != dst_reg_offset[0] + i) {
            /* Registers are out-of-order. */
            can_coalesce = false;
            src_reg = ~0u;
            break;
         }

         dst_var[i] = live.var_from_vgrf[dst_reg] + dst_reg_offset[i];
         src_var[i] = live.var_from_vgrf[src_reg] + i;

         if (!can_coalesce_vars(live, cfg, block, inst, dst_var[i], src_var[i])) {
            can_coalesce = false;
            src_reg = ~0u;
            break;
         }
      }

      if (!can_coalesce)
         continue;

      progress = true;

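      /* Get rid of the copies themselves: turn them into NOPs, except that
       * a MOV carrying a conditional modifier must be kept as a MOV.cmod
       * from the coalesced register (see the comment below).
       */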
      for (int i = 0; i < src_size; i++) {
         if (!mov[i])
            continue;

         if (mov[i]->conditional_mod == BRW_CONDITIONAL_NONE) {
            mov[i]->opcode = BRW_OPCODE_NOP;
            mov[i]->dst = reg_undef;
            for (int j = 0; j < mov[i]->sources; j++) {
               mov[i]->src[j] = reg_undef;
            }
         } else {
            /* If we have a conditional modifier, rewrite the MOV to be a
             * MOV.cmod from the coalesced register.  Hopefully, cmod
             * propagation will clean this up and move it to the instruction
             * that writes the register.  If not, this keeps things correct
             * while still letting us coalesce.
             */
            assert(mov[i]->opcode == BRW_OPCODE_MOV);
            assert(mov[i]->sources == 1);
            mov[i]->src[0] = mov[i]->dst;
            mov[i]->dst = retype(brw_null_reg(), mov[i]->dst.type);
         }
      }

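      /* Rewrite every remaining reference to the source VGRF so that it
       * points at the destination VGRF, translating per-register offsets
       * through dst_reg_offset[].
       */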
      foreach_block_and_inst(block, fs_inst, scan_inst, cfg) {
         if (scan_inst->dst.file == VGRF &&
             scan_inst->dst.nr == src_reg) {
            scan_inst->dst.nr = dst_reg;
            scan_inst->dst.offset = scan_inst->dst.offset % REG_SIZE +
               dst_reg_offset[scan_inst->dst.offset / REG_SIZE] * REG_SIZE;
         }

         for (int j = 0; j < scan_inst->sources; j++) {
            if (scan_inst->src[j].file == VGRF &&
                scan_inst->src[j].nr == src_reg) {
               scan_inst->src[j].nr = dst_reg;
               scan_inst->src[j].offset = scan_inst->src[j].offset % REG_SIZE +
                  dst_reg_offset[scan_inst->src[j].offset / REG_SIZE] * REG_SIZE;
            }
         }
      }

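      /* Conservatively merge the source's live ranges into the
       * destination's so that later candidates in this pass see correct
       * liveness without recomputing the analysis.
       */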
      for (int i = 0; i < src_size; i++) {
         live.start[dst_var[i]] = MIN2(live.start[dst_var[i]],
                                       live.start[src_var[i]]);
         live.end[dst_var[i]] = MAX2(live.end[dst_var[i]],
                                     live.end[src_var[i]]);
      }
      src_reg = ~0u;
   }

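   /* Remove the NOPs left behind by coalescing, fix up the block start/end
    * IPs afterwards, and invalidate any analyses that depend on the
    * instruction list.
    */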
   if (progress) {
      foreach_block_and_inst_safe (block, backend_instruction, inst, cfg) {
         if (inst->opcode == BRW_OPCODE_NOP) {
            inst->remove(block, true);
         }
      }

      cfg->adjust_block_ips();

      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
   }

   return progress;
}