/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_fs.h"
#include "brw_fs_builder.h"
#include "brw_cfg.h"

/** @file brw_fs_cse.cpp
 *
 * Support for local common subexpression elimination.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section
 * 13.1 (p378).
 */
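
/* Schematically, for a block that computes the same expression twice
 * (register numbers below are illustrative only):
 *
 *    add(8)  vgrf1, vgrf0, vgrf2
 *    ...
 *    add(8)  vgrf3, vgrf0, vgrf2
 *
 * the pass redirects the first computation into a fresh temporary and turns
 * both original destinations into copies of it:
 *
 *    add(8)  vgrf4, vgrf0, vgrf2
 *    mov(8)  vgrf1, vgrf4
 *    ...
 *    mov(8)  vgrf3, vgrf4
 *
 * The extra copies are expected to be cleaned up by later copy propagation
 * and dead code elimination.
 */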

using namespace brw;

namespace {
struct aeb_entry : public exec_node {
   /** The instruction that generates the expression value. */
   fs_inst *generator;

   /** The temporary where the value is stored. */
   fs_reg tmp;
};
}

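/* Return true if inst computes a pure value that a later identical
 * instruction could reuse.  Sends only qualify if they have no side effects
 * and are not volatile, the math opcodes only qualify when their message
 * payload is at most one register, and LOAD_PAYLOAD is excluded when it is
 * merely coalescing its sources into its destination.
 */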
static bool
is_expression(const fs_visitor *v, const fs_inst *const inst)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_SEL:
   case BRW_OPCODE_NOT:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_CMP:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_FRC:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_LRP:
   case FS_OPCODE_FB_READ_LOGICAL:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
   case FS_OPCODE_LINTERP:
   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
   case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL:
   case SHADER_OPCODE_LOAD_LIVE_CHANNELS:
   case FS_OPCODE_LOAD_LIVE_CHANNELS:
   case SHADER_OPCODE_BROADCAST:
   case SHADER_OPCODE_MOV_INDIRECT:
   case SHADER_OPCODE_TEX_LOGICAL:
   case SHADER_OPCODE_TXD_LOGICAL:
   case SHADER_OPCODE_TXF_LOGICAL:
   case SHADER_OPCODE_TXL_LOGICAL:
   case SHADER_OPCODE_TXS_LOGICAL:
   case FS_OPCODE_TXB_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
   case SHADER_OPCODE_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_LOGICAL:
   case SHADER_OPCODE_TG4_BIAS_LOGICAL:
   case SHADER_OPCODE_TG4_EXPLICIT_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_IMPLICIT_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_BIAS_LOGICAL:
   case FS_OPCODE_PACK:
      return true;
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return inst->mlen < 2;
   case SHADER_OPCODE_LOAD_PAYLOAD:
      return !is_coalescing_payload(v->alloc, inst);
   default:
      return inst->is_send_from_grf() && !inst->has_side_effects() &&
             !inst->is_volatile();
   }
}

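/* Compare the source operands of two instructions, taking commutativity
 * into account.  For MAD only the two multiplicands may be swapped; for a
 * float MUL, source negations (and the sign of an immediate) are
 * temporarily stripped so that expressions differing only in sign can
 * match, with *negate telling the caller whether the reused value must be
 * negated when it is copied.
 */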
static bool
operands_match(const fs_inst *a, const fs_inst *b, bool *negate)
{
   fs_reg *xs = a->src;
   fs_reg *ys = b->src;

   if (a->opcode == BRW_OPCODE_MAD) {
      return xs[0].equals(ys[0]) &&
             ((xs[1].equals(ys[1]) && xs[2].equals(ys[2])) ||
              (xs[2].equals(ys[1]) && xs[1].equals(ys[2])));
   } else if (a->opcode == BRW_OPCODE_MUL && a->dst.type == BRW_REGISTER_TYPE_F) {
      bool xs0_negate = xs[0].negate;
      bool xs1_negate = xs[1].file == IMM ? xs[1].f < 0.0f
                                          : xs[1].negate;
      bool ys0_negate = ys[0].negate;
      bool ys1_negate = ys[1].file == IMM ? ys[1].f < 0.0f
                                          : ys[1].negate;
      float xs1_imm = xs[1].f;
      float ys1_imm = ys[1].f;

      xs[0].negate = false;
      xs[1].negate = false;
      ys[0].negate = false;
      ys[1].negate = false;
      xs[1].f = fabsf(xs[1].f);
      ys[1].f = fabsf(ys[1].f);

      bool ret = (xs[0].equals(ys[0]) && xs[1].equals(ys[1])) ||
                 (xs[1].equals(ys[0]) && xs[0].equals(ys[1]));

      xs[0].negate = xs0_negate;
      xs[1].negate = xs[1].file == IMM ? false : xs1_negate;
      ys[0].negate = ys0_negate;
      ys[1].negate = ys[1].file == IMM ? false : ys1_negate;
      xs[1].f = xs1_imm;
      ys[1].f = ys1_imm;

      *negate = (xs0_negate != xs1_negate) != (ys0_negate != ys1_negate);
      if (*negate && (a->saturate || b->saturate))
         return false;
      return ret;
   } else if (!a->is_commutative()) {
      bool match = true;
      for (int i = 0; i < a->sources; i++) {
         if (!xs[i].equals(ys[i])) {
            match = false;
            break;
         }
      }
      return match;
   } else {
      return (xs[0].equals(ys[0]) && xs[1].equals(ys[1])) ||
             (xs[1].equals(ys[0]) && xs[0].equals(ys[1]));
   }
}

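/* Two instructions compute the same value for CSE purposes if they have the
 * same opcode, execution controls, destination type, message parameters and
 * so on, and their operands match according to operands_match().
 */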
static bool
instructions_match(fs_inst *a, fs_inst *b, bool *negate)
{
   return a->opcode == b->opcode &&
          a->force_writemask_all == b->force_writemask_all &&
          a->exec_size == b->exec_size &&
          a->group == b->group &&
          a->saturate == b->saturate &&
          a->predicate == b->predicate &&
          a->predicate_inverse == b->predicate_inverse &&
          a->conditional_mod == b->conditional_mod &&
          a->flag_subreg == b->flag_subreg &&
          a->dst.type == b->dst.type &&
          a->offset == b->offset &&
          a->mlen == b->mlen &&
          a->ex_mlen == b->ex_mlen &&
          a->sfid == b->sfid &&
          a->desc == b->desc &&
          a->size_written == b->size_written &&
          a->check_tdr == b->check_tdr &&
          a->send_has_side_effects == b->send_has_side_effects &&
          a->eot == b->eot &&
          a->header_size == b->header_size &&
          a->shadow_compare == b->shadow_compare &&
          a->pi_noperspective == b->pi_noperspective &&
          a->target == b->target &&
          a->sources == b->sources &&
          operands_match(a, b, negate);
}

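/* Emit a copy of src into the destination of inst.  Results that span more
 * registers than a single logical component (and LOAD_PAYLOAD results) are
 * reassembled with a new LOAD_PAYLOAD; otherwise a plain MOV is emitted,
 * optionally negating the source.
 */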
static void
create_copy_instr(const fs_builder &bld, fs_inst *inst, fs_reg src, bool negate)
{
   unsigned written = regs_written(inst);
   unsigned dst_width =
      DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE);
   fs_inst *copy;

   if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD) {
      assert(src.file == VGRF);
      fs_reg *payload = ralloc_array(bld.shader->mem_ctx, fs_reg,
                                     inst->sources);
      for (int i = 0; i < inst->header_size; i++) {
         payload[i] = src;
         src.offset += REG_SIZE;
      }
      for (int i = inst->header_size; i < inst->sources; i++) {
         src.type = inst->src[i].type;
         payload[i] = src;
         src = offset(src, bld, 1);
      }
      copy = bld.LOAD_PAYLOAD(inst->dst, payload, inst->sources,
                              inst->header_size);
   } else if (written != dst_width) {
      assert(src.file == VGRF);
      assert(written % dst_width == 0);
      const int sources = written / dst_width;
      fs_reg *payload = ralloc_array(bld.shader->mem_ctx, fs_reg, sources);
      for (int i = 0; i < sources; i++) {
         payload[i] = src;
         src = offset(src, bld, 1);
      }
      copy = bld.LOAD_PAYLOAD(inst->dst, payload, sources, 0);
   } else {
      copy = bld.MOV(inst->dst, src);
      copy->group = inst->group;
      copy->force_writemask_all = inst->force_writemask_all;
      copy->src[0].negate = negate;
   }
   assert(regs_written(copy) == written);
}

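/* Perform local CSE on a single basic block, keeping the set of available
 * expressions in a local table (the "AEB").  When an instruction matches an
 * existing entry it is replaced with a copy of the generator's value, and
 * entries are dropped again when their flag dependencies or sources are
 * overwritten, or when a source register's live range ends.
 */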
static bool
brw_fs_opt_cse_local(fs_visitor &s, const fs_live_variables &live,
                     bblock_t *block, int &ip)
{
   const intel_device_info *devinfo = s.devinfo;
   bool progress = false;
   exec_list aeb;

   void *cse_ctx = ralloc_context(NULL);

   foreach_inst_in_block(fs_inst, inst, block) {
      /* Skip some cases. */
      if (is_expression(&s, inst) && !inst->is_partial_write() &&
          ((inst->dst.file != ARF && inst->dst.file != FIXED_GRF) ||
           inst->dst.is_null()))
      {
         bool found = false;
         bool negate = false;

         foreach_in_list_use_after(aeb_entry, entry, &aeb) {
            /* Match current instruction's expression against those in AEB. */
            if (!(entry->generator->dst.is_null() && !inst->dst.is_null()) &&
                instructions_match(inst, entry->generator, &negate)) {
               found = true;
               progress = true;
               break;
            }
         }

         if (!found) {
            if (inst->opcode != BRW_OPCODE_MOV ||
                (inst->opcode == BRW_OPCODE_MOV &&
                 inst->src[0].file == IMM &&
                 inst->src[0].type == BRW_REGISTER_TYPE_VF)) {
               /* Our first sighting of this expression. Create an entry. */
               aeb_entry *entry = ralloc(cse_ctx, aeb_entry);
               entry->tmp = reg_undef;
               entry->generator = inst;
               aeb.push_tail(entry);
            }
         } else {
            /* This is at least our second sighting of this expression.
             * If we don't have a temporary already, make one.
             */
            bool no_existing_temp = entry->tmp.file == BAD_FILE;
            if (no_existing_temp && !entry->generator->dst.is_null()) {
               const fs_builder ibld = fs_builder(&s, block, entry->generator)
                                       .at(block, entry->generator->next);
               int written = regs_written(entry->generator);

               entry->tmp = fs_reg(VGRF, s.alloc.allocate(written),
                                   entry->generator->dst.type);

               create_copy_instr(ibld, entry->generator, entry->tmp, false);

               entry->generator->dst = entry->tmp;
            }

            /* dest <- temp */
            if (!inst->dst.is_null()) {
               assert(inst->size_written == entry->generator->size_written);
               assert(inst->dst.type == entry->tmp.type);
               const fs_builder ibld(&s, block, inst);

               create_copy_instr(ibld, inst, entry->tmp, negate);
            }

            /* Set our iterator so that next time through the loop inst->next
             * will get the instruction in the basic block after the one we've
             * removed.
             */
            fs_inst *prev = (fs_inst *)inst->prev;

            inst->remove(block);
            inst = prev;
         }
      }

      /* Discard jumps aren't represented in the CFG unfortunately, so we need
       * to make sure that they behave as a CSE barrier, since we lack global
       * dataflow information. This is particularly likely to cause problems
       * with instructions dependent on the current execution mask like
       * SHADER_OPCODE_FIND_LIVE_CHANNEL.
       */
      if (inst->opcode == BRW_OPCODE_HALT ||
          inst->opcode == SHADER_OPCODE_HALT_TARGET)
         aeb.make_empty();

      foreach_in_list_safe(aeb_entry, entry, &aeb) {
         /* Kill all AEB entries that write a different value to or read from
          * the flag register if we just wrote it.
          */
         if (inst->flags_written(devinfo)) {
            bool negate; /* dummy */
            if (entry->generator->flags_read(devinfo) ||
                (entry->generator->flags_written(devinfo) &&
                 !instructions_match(inst, entry->generator, &negate))) {
               entry->remove();
               ralloc_free(entry);
               continue;
            }
         }

         for (int i = 0; i < entry->generator->sources; i++) {
            fs_reg *src_reg = &entry->generator->src[i];

            /* Kill all AEB entries that use the destination we just
             * overwrote.
             */
            if (regions_overlap(inst->dst, inst->size_written,
                                entry->generator->src[i],
                                entry->generator->size_read(i))) {
               entry->remove();
               ralloc_free(entry);
               break;
            }

            /* Kill any AEB entries using registers that don't get reused any
             * more -- a sure sign they'll fail operands_match().
             */
            if (src_reg->file == VGRF && live.vgrf_end[src_reg->nr] < ip) {
               entry->remove();
               ralloc_free(entry);
               break;
            }
         }
      }

      ip++;
   }

   ralloc_free(cse_ctx);

   return progress;
}

bool
brw_fs_opt_cse(fs_visitor &s)
{
   const fs_live_variables &live = s.live_analysis.require();
   bool progress = false;
   int ip = 0;

   foreach_block (block, s.cfg) {
      progress = brw_fs_opt_cse_local(s, live, block, ip) || progress;
   }

   if (progress)
      s.invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   return progress;
}