/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file
 *
 * Implements register coalescing: Checks if the two registers involved in a
 * raw move don't interfere, in which case they can both be stored in the same
 * place and the MOV removed.
 *
 * To do this, all uses of the source of the MOV in the shader are replaced
 * with the destination of the MOV. For example:
 *
 * add vgrf3:F, vgrf1:F, vgrf2:F
 * mov vgrf4:F, vgrf3:F
 * mul vgrf5:F, vgrf5:F, vgrf4:F
 *
 * becomes
 *
 * add vgrf4:F, vgrf1:F, vgrf2:F
 * mul vgrf5:F, vgrf5:F, vgrf4:F
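 *
 * The same replacement is done when a whole VGRF is copied by a group of
 * instructions rather than a single MOV: a series of MOVs that together
 * cover every register of the source, or a LOAD_PAYLOAD that amounts to
 * such a copy (see is_coalescing_payload).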
 */

#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_fs_live_variables.h"

using namespace brw;

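/**
 * Return true if the copy is a no-op: a MOV whose destination already
 * equals its source, or a LOAD_PAYLOAD that copies each source back onto
 * the location it was read from. Such instructions can simply be removed.
 */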
static bool
is_nop_mov(const fs_inst *inst)
{
   if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
      brw_reg dst = inst->dst;
      for (int i = 0; i < inst->sources; i++) {
         if (!dst.equals(inst->src[i])) {
            return false;
         }
         dst.offset += (i < inst->header_size ? REG_SIZE :
                        inst->exec_size * dst.stride *
                        brw_type_size_bytes(inst->src[i].type));
      }
      return true;
   } else if (inst->opcode == BRW_OPCODE_MOV) {
      return inst->dst.equals(inst->src[0]);
   }

   return false;
}

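/**
 * Return true if the instruction is a copy this pass knows how to coalesce:
 * a complete, unsaturated MOV or LOAD_PAYLOAD whose source is a contiguous,
 * unmodified VGRF with the same type as the destination and no larger than
 * it. LOAD_PAYLOADs must additionally look like a straight copy of the
 * payload (see is_coalescing_payload).
 */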
static bool
is_coalesce_candidate(const fs_visitor *v, const fs_inst *inst)
{
   if ((inst->opcode != BRW_OPCODE_MOV &&
        inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD) ||
       inst->is_partial_write() ||
       inst->saturate ||
       inst->src[0].file != VGRF ||
       inst->src[0].negate ||
       inst->src[0].abs ||
       !inst->src[0].is_contiguous() ||
       inst->dst.file != VGRF ||
       inst->dst.type != inst->src[0].type) {
      return false;
   }

   if (v->alloc.sizes[inst->src[0].nr] >
       v->alloc.sizes[inst->dst.nr])
      return false;

   if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
      if (!is_coalescing_payload(v->devinfo, v->alloc, inst)) {
         return false;
      }
   }

   return true;
}

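/**
 * Check whether one register-sized chunk of the source and destination of
 * the copy can safely share storage: either their live ranges don't
 * interfere at all, or one range is contained in the other and the only
 * writes to either register in the intersection of the ranges are the copy
 * itself and source writes that can be folded into it.
 */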
static bool
can_coalesce_vars(const intel_device_info *devinfo,
                  const fs_live_variables &live, const cfg_t *cfg,
                  const bblock_t *block, const fs_inst *inst,
                  int dst_var, int src_var)
{
   if (!live.vars_interfere(src_var, dst_var))
      return true;

   int dst_start = live.start[dst_var];
   int dst_end = live.end[dst_var];
   int src_start = live.start[src_var];
   int src_end = live.end[src_var];

   /* Variables interfere and one live range isn't a subset of the other. */
   if ((dst_end > src_end && src_start < dst_start) ||
       (src_end > dst_end && dst_start < src_start))
      return false;

   /* Check for a write to either register in the intersection of their live
    * ranges.
    */
   int start_ip = MAX2(dst_start, src_start);
   int end_ip = MIN2(dst_end, src_end);

   foreach_block(scan_block, cfg) {
      if (scan_block->end_ip < start_ip)
         continue;

      int scan_ip = scan_block->start_ip - 1;

      bool seen_src_write = false;
      bool seen_copy = false;
      foreach_inst_in_block(fs_inst, scan_inst, scan_block) {
         scan_ip++;

         /* Ignore anything before the intersection of the live ranges */
         if (scan_ip < start_ip)
            continue;

         /* Ignore the copying instruction itself */
         if (scan_inst == inst) {
            seen_copy = true;
            continue;
         }

         if (scan_ip > end_ip)
            return true; /* registers do not interfere */

         if (seen_src_write && !seen_copy) {
            /* In order to satisfy the guarantee of register coalescing, we
             * must ensure that the two registers always have the same value
             * during the intersection of their live ranges. One way to do
             * this is to simply ensure that neither is ever written apart
             * from the one copy which syncs up the two registers. However,
             * this can be overly conservative and only works in the case
             * where the destination live range is entirely contained in the
             * source live range.
             *
             * To handle the other case where the source is contained in the
             * destination, we allow writes to the source register as long as
             * they happen before the copy, in the same block as the copy,
             * and the destination is never read between the first such write
             * and the copy. This effectively moves the write from the copy
             * up.
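             *
             * For illustration (register numbers made up):
             *
             *    mov vgrf3, ...      <- write to the source, same block
             *    mov vgrf4, vgrf3    <- the copy being coalesced
             *
             * The write to vgrf3 is allowed as long as vgrf4 is neither
             * read nor written between the two MOVs; after coalescing, the
             * first MOV simply writes vgrf4 directly.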
             */
            for (int j = 0; j < scan_inst->sources; j++) {
               if (regions_overlap(scan_inst->src[j], scan_inst->size_read(devinfo, j),
                                   inst->dst, inst->size_written))
                  return false; /* registers interfere */
            }
         }

         /* The MOV being coalesced had better be the only instruction which
          * writes to the coalesce destination in the intersection.
          */
         if (regions_overlap(scan_inst->dst, scan_inst->size_written,
                             inst->dst, inst->size_written))
            return false; /* registers interfere */

         /* See the big comment above */
         if (regions_overlap(scan_inst->dst, scan_inst->size_written,
                             inst->src[0], inst->size_read(devinfo, 0))) {
            if (seen_copy || scan_block != block ||
                (scan_inst->force_writemask_all && !inst->force_writemask_all))
               return false;
            seen_src_write = true;
         }
      }
   }

   return true;
}

/**
 * Check if coalescing this register would expand the size of the last
 * SEND instruction's payload to more than would fit in g112-g127.
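 *
 * (The payload sources of an EOT message have to be allocated to that fixed
 * block of registers at the end of the register file, so letting a coalesce
 * grow them beyond what fits there would make register allocation
 * impossible.)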
 */
static bool
would_violate_eot_restriction(const brw::simple_allocator &alloc,
                              const cfg_t *cfg,
                              unsigned dst_reg, unsigned src_reg)
{
   if (alloc.sizes[dst_reg] > alloc.sizes[src_reg]) {
      foreach_inst_in_block_reverse(fs_inst, send, cfg->last_block()) {
         if (send->opcode != SHADER_OPCODE_SEND || !send->eot)
            continue;

         if ((send->src[2].file == VGRF && send->src[2].nr == src_reg) ||
             (send->sources >= 4 &&
              send->src[3].file == VGRF && send->src[3].nr == src_reg)) {
            const unsigned s2 =
               send->src[2].file == VGRF ? alloc.sizes[send->src[2].nr] : 0;
            const unsigned s3 = send->sources >= 4 &&
               send->src[3].file == VGRF ?
               alloc.sizes[send->src[3].nr] : 0;

            const unsigned increase =
               alloc.sizes[dst_reg] - alloc.sizes[src_reg];

            if (s2 + s3 + increase > 15)
               return true;
         }
         break;
      }
   }

   return false;
}

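/**
 * Walk the shader looking for coalescing candidates. Once every register of
 * a source VGRF has been seen copied into the same destination VGRF, check
 * interference for each register-sized chunk; if the copy is safe, rewrite
 * all uses of the source to the destination, drop the now-redundant copies
 * (or point them at the null register if they set a conditional modifier),
 * and extend the destination's live ranges to cover the source's.
 */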
bool
brw_opt_register_coalesce(fs_visitor &s)
{
   const intel_device_info *devinfo = s.devinfo;

   bool progress = false;
   fs_live_variables &live = s.live_analysis.require();
   int src_size = 0;
   int channels_remaining = 0;
   unsigned src_reg = ~0u, dst_reg = ~0u;
   int *dst_reg_offset = new int[MAX_VGRF_SIZE(devinfo)];
   fs_inst **mov = new fs_inst *[MAX_VGRF_SIZE(devinfo)];
   int *dst_var = new int[MAX_VGRF_SIZE(devinfo)];
   int *src_var = new int[MAX_VGRF_SIZE(devinfo)];

   foreach_block_and_inst(block, fs_inst, inst, s.cfg) {
      if (!is_coalesce_candidate(&s, inst))
         continue;

      if (is_nop_mov(inst)) {
         inst->opcode = BRW_OPCODE_NOP;
         progress = true;
         continue;
      }

      if (src_reg != inst->src[0].nr) {
         src_reg = inst->src[0].nr;

         src_size = s.alloc.sizes[inst->src[0].nr];
         assert(src_size <= MAX_VGRF_SIZE(devinfo));

         channels_remaining = src_size;
         memset(mov, 0, sizeof(*mov) * MAX_VGRF_SIZE(devinfo));

         dst_reg = inst->dst.nr;
      }

      if (dst_reg != inst->dst.nr)
         continue;

      if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
         for (int i = 0; i < src_size; i++) {
            dst_reg_offset[i] = inst->dst.offset / REG_SIZE + i;
         }
         mov[0] = inst;
         channels_remaining -= regs_written(inst);
      } else {
         const int offset = inst->src[0].offset / REG_SIZE;
         if (mov[offset]) {
            /* This is the second time that this offset in the register has
             * been set. This means, in particular, that inst->dst was
             * live before this instruction and that the live ranges of
             * inst->dst and inst->src[0] overlap and we can't coalesce the
             * two variables. Let's ensure that doesn't happen.
             */
            channels_remaining = -1;
            continue;
         }
         for (unsigned i = 0; i < MAX2(inst->size_written / REG_SIZE, 1); i++)
            dst_reg_offset[offset + i] = inst->dst.offset / REG_SIZE + i;
         mov[offset] = inst;
         channels_remaining -= regs_written(inst);
      }

      if (channels_remaining)
         continue;

      bool can_coalesce = true;
      for (int i = 0; i < src_size; i++) {
         if (dst_reg_offset[i] != dst_reg_offset[0] + i) {
            /* Registers are out-of-order. */
            can_coalesce = false;
            src_reg = ~0u;
            break;
         }

         dst_var[i] = live.var_from_vgrf[dst_reg] + dst_reg_offset[i];
         src_var[i] = live.var_from_vgrf[src_reg] + i;

         if (!can_coalesce_vars(devinfo, live, s.cfg, block, inst, dst_var[i], src_var[i]) ||
             would_violate_eot_restriction(s.alloc, s.cfg, dst_reg, src_reg)) {
            can_coalesce = false;
            src_reg = ~0u;
            break;
         }
      }

      if (!can_coalesce)
         continue;

      progress = true;

      for (int i = 0; i < src_size; i++) {
         if (!mov[i])
            continue;

         if (mov[i]->conditional_mod == BRW_CONDITIONAL_NONE) {
            mov[i]->opcode = BRW_OPCODE_NOP;
            mov[i]->dst = reg_undef;
            for (int j = 0; j < mov[i]->sources; j++) {
               mov[i]->src[j] = reg_undef;
            }
         } else {
            /* If we have a conditional modifier, rewrite the MOV to be a
             * MOV.cmod from the coalesced register. Hopefully, cmod
             * propagation will clean this up and move it to the instruction
             * that writes the register. If not, this keeps things correct
             * while still letting us coalesce.
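             *
             * For example (reusing the register numbers from the comment at
             * the top of the file), 'mov.nz vgrf4:F, vgrf3:F' would become
             * 'mov.nz null:F, vgrf4:F'.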
             */
            assert(mov[i]->opcode == BRW_OPCODE_MOV);
            assert(mov[i]->sources == 1);
            mov[i]->src[0] = mov[i]->dst;
            mov[i]->dst = retype(brw_null_reg(), mov[i]->dst.type);
         }
      }

      foreach_block_and_inst(block, fs_inst, scan_inst, s.cfg) {
         if (scan_inst->dst.file == VGRF &&
             scan_inst->dst.nr == src_reg) {
            scan_inst->dst.nr = dst_reg;
            scan_inst->dst.offset = scan_inst->dst.offset % REG_SIZE +
               dst_reg_offset[scan_inst->dst.offset / REG_SIZE] * REG_SIZE;
         }

         for (int j = 0; j < scan_inst->sources; j++) {
            if (scan_inst->src[j].file == VGRF &&
                scan_inst->src[j].nr == src_reg) {
               scan_inst->src[j].nr = dst_reg;
               scan_inst->src[j].offset = scan_inst->src[j].offset % REG_SIZE +
                  dst_reg_offset[scan_inst->src[j].offset / REG_SIZE] * REG_SIZE;
            }
         }
      }

      for (int i = 0; i < src_size; i++) {
         live.start[dst_var[i]] = MIN2(live.start[dst_var[i]],
                                       live.start[src_var[i]]);
         live.end[dst_var[i]] = MAX2(live.end[dst_var[i]],
                                     live.end[src_var[i]]);
      }
      src_reg = ~0u;
   }

   if (progress) {
      foreach_block_and_inst_safe (block, fs_inst, inst, s.cfg) {
         if (inst->opcode == BRW_OPCODE_NOP) {
            inst->remove(block, true);
         }
      }

      s.cfg->adjust_block_ips();

      s.invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
   }

   delete[] src_var;
   delete[] dst_var;
   delete[] mov;
   delete[] dst_reg_offset;

   return progress;
}