/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_register_coalesce.cpp
 *
 * Implements register coalescing: checks whether the two registers involved
 * in a raw move interfere and, if they don't, stores both in the same place
 * and removes the MOV.
 *
 * To do this, all uses of the source of the MOV in the shader are replaced
 * with the destination of the MOV. For example:
 *
 * add vgrf3:F, vgrf1:F, vgrf2:F
 * mov vgrf4:F, vgrf3:F
 * mul vgrf5:F, vgrf5:F, vgrf4:F
 *
 * becomes
 *
 * add vgrf4:F, vgrf1:F, vgrf2:F
 * mul vgrf5:F, vgrf5:F, vgrf4:F
 */

#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_fs_live_variables.h"

using namespace brw;

static bool
is_nop_mov(const fs_inst *inst)
{
   if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
      fs_reg dst = inst->dst;
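      /* A LOAD_PAYLOAD is a nop only if every source already equals the
       * piece of the destination it would be copied into.  Walk dst forward
       * past each source: one register for header sources, the full
       * SIMD-width payload size for the rest.
       */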
      for (int i = 0; i < inst->sources; i++) {
         if (!dst.equals(inst->src[i])) {
            return false;
         }
         dst.offset += (i < inst->header_size ? REG_SIZE :
                        inst->exec_size * dst.stride *
                        type_sz(inst->src[i].type));
      }
      return true;
   } else if (inst->opcode == BRW_OPCODE_MOV) {
      return inst->dst.equals(inst->src[0]);
   }

   return false;
}

static bool
is_coalesce_candidate(const fs_visitor *v, const fs_inst *inst)
{
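   /* Only raw, unsaturated, whole-register copies between VGRFs of the same
    * type are candidates; partial writes or source modifiers would change
    * the value and cannot simply be renamed away.
    */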
   if ((inst->opcode != BRW_OPCODE_MOV &&
        inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD) ||
       inst->is_partial_write() ||
       inst->saturate ||
       inst->src[0].file != VGRF ||
       inst->src[0].negate ||
       inst->src[0].abs ||
       !inst->src[0].is_contiguous() ||
       inst->dst.file != VGRF ||
       inst->dst.type != inst->src[0].type) {
      return false;
   }

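   /* Coalescing folds the source VGRF into the destination, so the
    * destination allocation must be at least as large as the source.
    */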
   if (v->alloc.sizes[inst->src[0].nr] >
       v->alloc.sizes[inst->dst.nr])
      return false;

   if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
      if (!is_coalescing_payload(v->alloc, inst)) {
         return false;
      }
   }

   return true;
}

static bool
can_coalesce_vars(const fs_live_variables &live, const cfg_t *cfg,
                  const bblock_t *block, const fs_inst *inst,
                  int dst_var, int src_var)
{
   if (!live.vars_interfere(src_var, dst_var))
      return true;

   int dst_start = live.start[dst_var];
   int dst_end = live.end[dst_var];
   int src_start = live.start[src_var];
   int src_end = live.end[src_var];

   /* Variables interfere and one live range isn't a subset of the other. */
   if ((dst_end > src_end && src_start < dst_start) ||
       (src_end > dst_end && dst_start < src_start))
      return false;

   /* Check for a write to either register in the intersection of their live
    * ranges.
    */
   int start_ip = MAX2(dst_start, src_start);
   int end_ip = MIN2(dst_end, src_end);

   foreach_block(scan_block, cfg) {
      if (scan_block->end_ip < start_ip)
         continue;

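      /* Start one instruction before the block so the increment at the top
       * of the loop below keeps scan_ip in step with each instruction's IP.
       */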
      int scan_ip = scan_block->start_ip - 1;

      bool seen_src_write = false;
      bool seen_copy = false;
      foreach_inst_in_block(fs_inst, scan_inst, scan_block) {
         scan_ip++;

         /* Ignore anything before the intersection of the live ranges */
         if (scan_ip < start_ip)
            continue;

         /* Ignore the copying instruction itself */
         if (scan_inst == inst) {
            seen_copy = true;
            continue;
         }

         if (scan_ip > end_ip)
            return true; /* registers do not interfere */

         if (seen_src_write && !seen_copy) {
            /* In order to satisfy the guarantee of register coalescing, we
             * must ensure that the two registers always have the same value
             * during the intersection of their live ranges.  One way to do
             * this is to simply ensure that neither is ever written apart
             * from the one copy which syncs up the two registers.  However,
             * this can be overly conservative and only works in the case
             * where the destination live range is entirely contained in the
             * source live range.
             *
             * To handle the other case where the source is contained in the
             * destination, we allow writes to the source register as long as
             * they happen before the copy, in the same block as the copy, and
             * the destination is never read between the first such write and
             * the copy.  This effectively moves the write from the copy up.
             */
            for (int j = 0; j < scan_inst->sources; j++) {
               if (regions_overlap(scan_inst->src[j], scan_inst->size_read(j),
                                   inst->dst, inst->size_written))
                  return false; /* registers interfere */
            }
         }

         /* The MOV being coalesced had better be the only instruction which
          * writes to the coalesce destination in the intersection.
          */
         if (regions_overlap(scan_inst->dst, scan_inst->size_written,
                             inst->dst, inst->size_written))
            return false; /* registers interfere */

         /* See the big comment above */
         if (regions_overlap(scan_inst->dst, scan_inst->size_written,
                             inst->src[0], inst->size_read(0))) {
            if (seen_copy || scan_block != block ||
                (scan_inst->force_writemask_all && !inst->force_writemask_all))
               return false;
            seen_src_write = true;
         }
      }
   }

   return true;
}

bool
brw_fs_opt_register_coalesce(fs_visitor &s)
{
   const intel_device_info *devinfo = s.devinfo;

   bool progress = false;
   fs_live_variables &live = s.live_analysis.require();
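   /* Per-candidate bookkeeping: for each register of the source VGRF, the
    * copy instruction that fills it (mov), where it lands in the destination
    * (dst_reg_offset), and the live-variable indices used for interference
    * checks (dst_var/src_var).  channels_remaining counts how many registers
    * of the source still lack a covering copy.
    */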
   int src_size = 0;
   int channels_remaining = 0;
   unsigned src_reg = ~0u, dst_reg = ~0u;
   int *dst_reg_offset = new int[MAX_VGRF_SIZE(devinfo)];
   fs_inst **mov = new fs_inst *[MAX_VGRF_SIZE(devinfo)];
   int *dst_var = new int[MAX_VGRF_SIZE(devinfo)];
   int *src_var = new int[MAX_VGRF_SIZE(devinfo)];

   foreach_block_and_inst(block, fs_inst, inst, s.cfg) {
      if (!is_coalesce_candidate(&s, inst))
         continue;

      if (is_nop_mov(inst)) {
         inst->opcode = BRW_OPCODE_NOP;
         progress = true;
         continue;
      }

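      /* A copy out of a new source VGRF starts a fresh candidate: reset the
       * bookkeeping and record which destination all of its copies must use.
       */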
      if (src_reg != inst->src[0].nr) {
         src_reg = inst->src[0].nr;

         src_size = s.alloc.sizes[inst->src[0].nr];
         assert(src_size <= MAX_VGRF_SIZE(devinfo));

         channels_remaining = src_size;
         memset(mov, 0, sizeof(*mov) * MAX_VGRF_SIZE(devinfo));

         dst_reg = inst->dst.nr;
      }

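      /* Every copy out of this source must target the same destination VGRF
       * or the two registers cannot be merged.
       */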
      if (dst_reg != inst->dst.nr)
         continue;

      if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
         for (int i = 0; i < src_size; i++) {
            dst_reg_offset[i] = i;
         }
         mov[0] = inst;
         channels_remaining -= regs_written(inst);
      } else {
         const int offset = inst->src[0].offset / REG_SIZE;
         if (mov[offset]) {
            /* This is the second time that this offset in the register has
             * been set.  This means, in particular, that inst->dst was
             * live before this instruction and that the live ranges of
             * inst->dst and inst->src[0] overlap and we can't coalesce the
             * two variables.  Let's ensure that doesn't happen.
             */
            channels_remaining = -1;
            continue;
         }
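         /* Record which destination registers this copy fills so the uses of
          * the source can be remapped later.
          */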
         for (unsigned i = 0; i < MAX2(inst->size_written / REG_SIZE, 1); i++)
            dst_reg_offset[offset + i] = inst->dst.offset / REG_SIZE + i;
         mov[offset] = inst;
         channels_remaining -= regs_written(inst);
      }

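      /* Keep scanning until copies covering every register of the source
       * VGRF have been seen.
       */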
      if (channels_remaining)
         continue;

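      /* Check that the copies land in order and contiguously in the
       * destination and that each pair of per-register live ranges can
       * actually be merged.
       */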
      bool can_coalesce = true;
      for (int i = 0; i < src_size; i++) {
         if (dst_reg_offset[i] != dst_reg_offset[0] + i) {
            /* Registers are out-of-order. */
            can_coalesce = false;
            src_reg = ~0u;
            break;
         }

         dst_var[i] = live.var_from_vgrf[dst_reg] + dst_reg_offset[i];
         src_var[i] = live.var_from_vgrf[src_reg] + i;

         if (!can_coalesce_vars(live, s.cfg, block, inst, dst_var[i], src_var[i])) {
            can_coalesce = false;
            src_reg = ~0u;
            break;
         }
      }

      if (!can_coalesce)
         continue;

      progress = true;

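      /* Remove the now-redundant copies.  Plain MOVs become NOPs and are
       * deleted at the end of the pass; MOVs carrying a conditional modifier
       * are handled specially below.
       */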
      for (int i = 0; i < src_size; i++) {
         if (!mov[i])
            continue;

         if (mov[i]->conditional_mod == BRW_CONDITIONAL_NONE) {
            mov[i]->opcode = BRW_OPCODE_NOP;
            mov[i]->dst = reg_undef;
            for (int j = 0; j < mov[i]->sources; j++) {
               mov[i]->src[j] = reg_undef;
            }
         } else {
            /* If we have a conditional modifier, rewrite the MOV to be a
             * MOV.cmod from the coalesced register.  Hopefully, cmod
             * propagation will clean this up and move it to the instruction
             * that writes the register.  If not, this keeps things correct
             * while still letting us coalesce.
             */
            assert(mov[i]->opcode == BRW_OPCODE_MOV);
            assert(mov[i]->sources == 1);
            mov[i]->src[0] = mov[i]->dst;
            mov[i]->dst = retype(brw_null_reg(), mov[i]->dst.type);
         }
      }

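      /* Rewrite every write and read of the source VGRF to reference the
       * destination instead, remapping per-register offsets through
       * dst_reg_offset.
       */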
      foreach_block_and_inst(block, fs_inst, scan_inst, s.cfg) {
         if (scan_inst->dst.file == VGRF &&
             scan_inst->dst.nr == src_reg) {
            scan_inst->dst.nr = dst_reg;
            scan_inst->dst.offset = scan_inst->dst.offset % REG_SIZE +
               dst_reg_offset[scan_inst->dst.offset / REG_SIZE] * REG_SIZE;
         }

         for (int j = 0; j < scan_inst->sources; j++) {
            if (scan_inst->src[j].file == VGRF &&
                scan_inst->src[j].nr == src_reg) {
               scan_inst->src[j].nr = dst_reg;
               scan_inst->src[j].offset = scan_inst->src[j].offset % REG_SIZE +
                  dst_reg_offset[scan_inst->src[j].offset / REG_SIZE] * REG_SIZE;
            }
         }
      }

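      /* Merge the live ranges so interference checks for later candidates in
       * this pass stay correct without recomputing liveness.
       */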
      for (int i = 0; i < src_size; i++) {
         live.start[dst_var[i]] = MIN2(live.start[dst_var[i]],
                                       live.start[src_var[i]]);
         live.end[dst_var[i]] = MAX2(live.end[dst_var[i]],
                                     live.end[src_var[i]]);
      }
      src_reg = ~0u;
   }

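   /* Delete the instructions that were turned into NOPs above and fix up the
    * block IPs afterward.
    */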
   if (progress) {
      foreach_block_and_inst_safe (block, backend_instruction, inst, s.cfg) {
         if (inst->opcode == BRW_OPCODE_NOP) {
            inst->remove(block, true);
         }
      }

      s.cfg->adjust_block_ips();

      s.invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
   }

   delete[] src_var;
   delete[] dst_var;
   delete[] mov;
   delete[] dst_reg_offset;

   return progress;
}