• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #define XXH_INLINE_ALL
25 #include "util/xxhash.h"
26 
27 #include "brw_fs.h"
28 #include "brw_fs_builder.h"
29 #include "brw_cfg.h"
30 
31 /** @file
32  *
33  * Support for SSA-based global Common Subexpression Elimination (CSE).
34  */
35 
36 using namespace brw;
37 
/**
 * Return true if \p inst is a pure "expression" for CSE purposes: an
 * instruction whose result depends only on its sources (and, for some
 * opcodes, channel-enable/flag state that the caller accounts for
 * separately), so that a dominating identical instruction's result can
 * be reused in its place.
 */
static bool
is_expression(const fs_visitor *v, const fs_inst *const inst)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_SEL:
   case BRW_OPCODE_NOT:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_ROR:
   case BRW_OPCODE_ROL:
   case BRW_OPCODE_CMP:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_CSEL:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_FRC:
   case BRW_OPCODE_LZD:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_ADD3:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_LRP:
   case FS_OPCODE_FB_READ_LOGICAL:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
   case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL:
   case SHADER_OPCODE_LOAD_LIVE_CHANNELS:
   case FS_OPCODE_LOAD_LIVE_CHANNELS:
   case SHADER_OPCODE_BROADCAST:
   case SHADER_OPCODE_SHUFFLE:
   case SHADER_OPCODE_QUAD_SWIZZLE:
   case SHADER_OPCODE_CLUSTER_BROADCAST:
   case SHADER_OPCODE_MOV_INDIRECT:
   case SHADER_OPCODE_TEX_LOGICAL:
   case SHADER_OPCODE_TXD_LOGICAL:
   case SHADER_OPCODE_TXF_LOGICAL:
   case SHADER_OPCODE_TXL_LOGICAL:
   case SHADER_OPCODE_TXS_LOGICAL:
   case FS_OPCODE_TXB_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
   case SHADER_OPCODE_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_LOGICAL:
   case SHADER_OPCODE_TG4_BIAS_LOGICAL:
   case SHADER_OPCODE_TG4_EXPLICIT_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_IMPLICIT_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_BIAS_LOGICAL:
   case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
   case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
   case SHADER_OPCODE_GET_BUFFER_SIZE:
   case FS_OPCODE_PACK:
   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_LOAD_SUBGROUP_INVOCATION:
      return true;
   case SHADER_OPCODE_MEMORY_LOAD_LOGICAL:
      /* Only loads from the constant memory mode are treated as pure
       * expressions; other modes can observe memory writes.
       */
      return inst->src[MEMORY_LOGICAL_MODE].ud == MEMORY_MODE_CONSTANT;
   case SHADER_OPCODE_LOAD_PAYLOAD:
      /* Payloads that register coalescing will eliminate anyway are not
       * worth deduplicating.
       */
      return !is_coalescing_payload(v->devinfo, v->alloc, inst);
   default:
      /* GRF-sourced sends are CSE-able as long as they have no side
       * effects and are not marked volatile.
       */
      return inst->is_send_from_grf() && !inst->has_side_effects() &&
         !inst->is_volatile();
   }
}
133 
134 /**
135  * True if the instruction should only be CSE'd within their local block.
136  */
137 bool
local_only(const fs_inst * inst)138 local_only(const fs_inst *inst)
139 {
140    switch (inst->opcode) {
141    case SHADER_OPCODE_FIND_LIVE_CHANNEL:
142    case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL:
143    case SHADER_OPCODE_LOAD_LIVE_CHANNELS:
144    case FS_OPCODE_LOAD_LIVE_CHANNELS:
145       /* These depend on the current channel enables, so the same opcode
146        * in another block will likely return a different value.
147        */
148       return true;
149    case BRW_OPCODE_MOV:
150       /* Global CSE of MOVs is likely not worthwhile.  It can increase
151        * register pressure by extending the lifetime of simple constants.
152        */
153       return true;
154    case SHADER_OPCODE_LOAD_PAYLOAD:
155       /* This is basically a MOV */
156       return inst->sources == 1;
157    case BRW_OPCODE_CMP:
158       /* Seems to increase spilling a lot without much benefit */
159       return true;
160    default:
161       return false;
162    }
163 }
164 
165 static bool
operands_match(const fs_inst * a,const fs_inst * b,bool * negate)166 operands_match(const fs_inst *a, const fs_inst *b, bool *negate)
167 {
168    brw_reg *xs = a->src;
169    brw_reg *ys = b->src;
170 
171    if (a->opcode == BRW_OPCODE_MAD) {
172       return xs[0].equals(ys[0]) &&
173              ((xs[1].equals(ys[1]) && xs[2].equals(ys[2])) ||
174               (xs[2].equals(ys[1]) && xs[1].equals(ys[2])));
175    } else if (a->opcode == BRW_OPCODE_MUL && a->dst.type == BRW_TYPE_F) {
176       bool xs0_negate = xs[0].negate;
177       bool xs1_negate = xs[1].file == IMM ? xs[1].f < 0.0f
178                                           : xs[1].negate;
179       bool ys0_negate = ys[0].negate;
180       bool ys1_negate = ys[1].file == IMM ? ys[1].f < 0.0f
181                                           : ys[1].negate;
182       float xs1_imm = xs[1].f;
183       float ys1_imm = ys[1].f;
184 
185       xs[0].negate = false;
186       xs[1].negate = false;
187       ys[0].negate = false;
188       ys[1].negate = false;
189       xs[1].f = fabsf(xs[1].f);
190       ys[1].f = fabsf(ys[1].f);
191 
192       bool ret = (xs[0].equals(ys[0]) && xs[1].equals(ys[1])) ||
193                  (xs[1].equals(ys[0]) && xs[0].equals(ys[1]));
194 
195       xs[0].negate = xs0_negate;
196       xs[1].negate = xs[1].file == IMM ? false : xs1_negate;
197       ys[0].negate = ys0_negate;
198       ys[1].negate = ys[1].file == IMM ? false : ys1_negate;
199       xs[1].f = xs1_imm;
200       ys[1].f = ys1_imm;
201 
202       *negate = (xs0_negate != xs1_negate) != (ys0_negate != ys1_negate);
203       if (*negate && (a->saturate || b->saturate))
204          return false;
205       return ret;
206    } else if (!a->is_commutative()) {
207       bool match = true;
208       for (int i = 0; i < a->sources; i++) {
209          if (!xs[i].equals(ys[i])) {
210             match = false;
211             break;
212          }
213       }
214       return match;
215    } else if (a->sources == 3) {
216       return (xs[0].equals(ys[0]) && xs[1].equals(ys[1]) && xs[2].equals(ys[2])) ||
217              (xs[0].equals(ys[0]) && xs[1].equals(ys[2]) && xs[2].equals(ys[1])) ||
218              (xs[0].equals(ys[1]) && xs[1].equals(ys[0]) && xs[2].equals(ys[2])) ||
219              (xs[0].equals(ys[1]) && xs[1].equals(ys[2]) && xs[2].equals(ys[1])) ||
220              (xs[0].equals(ys[2]) && xs[1].equals(ys[0]) && xs[2].equals(ys[1])) ||
221              (xs[0].equals(ys[2]) && xs[1].equals(ys[1]) && xs[2].equals(ys[0]));
222    } else {
223       return (xs[0].equals(ys[0]) && xs[1].equals(ys[1])) ||
224              (xs[1].equals(ys[0]) && xs[0].equals(ys[1]));
225    }
226 }
227 
228 static bool
instructions_match(fs_inst * a,fs_inst * b,bool * negate)229 instructions_match(fs_inst *a, fs_inst *b, bool *negate)
230 {
231    return a->opcode == b->opcode &&
232           a->exec_size == b->exec_size &&
233           a->group == b->group &&
234           a->predicate == b->predicate &&
235           a->conditional_mod == b->conditional_mod &&
236           a->dst.type == b->dst.type &&
237           a->offset == b->offset &&
238           a->mlen == b->mlen &&
239           a->ex_mlen == b->ex_mlen &&
240           a->sfid == b->sfid &&
241           a->desc == b->desc &&
242           a->ex_desc == b->ex_desc &&
243           a->size_written == b->size_written &&
244           a->check_tdr == b->check_tdr &&
245           a->header_size == b->header_size &&
246           a->target == b->target &&
247           a->sources == b->sources &&
248           a->bits == b->bits &&
249           operands_match(a, b, negate);
250 }
251 
252 /* -------------------------------------------------------------------- */
253 
254 #define HASH(hash, data) XXH32(&(data), sizeof(data), hash)
255 
256 uint32_t
hash_reg(uint32_t hash,const brw_reg & r)257 hash_reg(uint32_t hash, const brw_reg &r)
258 {
259    struct {
260       uint64_t u64;
261       uint32_t u32;
262       uint16_t u16a;
263       uint16_t u16b;
264    } data = {
265       .u64 = r.u64, .u32 = r.bits, .u16a = r.offset, .u16b = r.stride
266    };
267    STATIC_ASSERT(sizeof(data) == 16); /* ensure there's no padding */
268    hash = HASH(hash, data);
269    return hash;
270 }
271 
/**
 * Hash an instruction for the CSE set.  Must be consistent with
 * instructions_match()/operands_match(): any two instructions that
 * compare equal (including commutative reorderings and sign-canonical
 * float MULs) must produce the same hash.
 */
static uint32_t
hash_inst(const void *v)
{
   const fs_inst *inst = static_cast<const fs_inst *>(v);
   uint32_t hash = 0;

   /* Skip dst - that would make nothing ever match */

   /* Skip ir and annotation - we don't care for equivalency purposes. */

   const uint8_t u8data[] = {
      inst->sources,
      inst->exec_size,
      inst->group,
      inst->mlen,
      inst->ex_mlen,
      inst->sfid,
      inst->header_size,
      inst->target,

      inst->conditional_mod,
      inst->predicate,
   };
   const uint32_t u32data[] = {
      inst->desc,
      inst->ex_desc,
      inst->offset,
      inst->size_written,
      inst->opcode,
      inst->bits,
   };

   hash = HASH(hash, u8data);
   hash = HASH(hash, u32data);

   /* Skip hashing sched - we shouldn't be CSE'ing after that SWSB */

   if (inst->opcode == BRW_OPCODE_MAD) {
      /* Commutatively combine the hashes for the multiplicands:
       * multiplication of the per-source hashes is order-independent,
       * matching the src1/src2 swap allowed by operands_match().
       */
      hash = hash_reg(hash, inst->src[0]);
      uint32_t hash1 = hash_reg(hash, inst->src[1]);
      uint32_t hash2 = hash_reg(hash, inst->src[2]);
      hash = hash1 * hash2;
   } else if (inst->opcode == BRW_OPCODE_MUL &&
              inst->dst.type == BRW_TYPE_F) {
      /* Canonicalize negations on either source (or both) and commutatively
       * combine the hashes for both sources.
       */
      brw_reg src[2] = { inst->src[0], inst->src[1] };
      uint32_t src_hash[2];

      for (int i = 0; i < 2; i++) {
         src[i].negate = false;
         if (src[i].file == IMM)
            src[i].f = fabs(src[i].f);

         src_hash[i] = hash_reg(hash, src[i]);
      }

      hash = src_hash[0] * src_hash[1];
   } else if (inst->is_commutative()) {
      /* Commutatively combine the sources (multiplication is
       * order-independent); a missing third source contributes the
       * multiplicative identity.
       */
      uint32_t hash0 = hash_reg(hash, inst->src[0]);
      uint32_t hash1 = hash_reg(hash, inst->src[1]);
      uint32_t hash2 = inst->sources > 2 ? hash_reg(hash, inst->src[2]) : 1;
      hash = hash0 * hash1 * hash2;
   } else {
      /* Just hash all the sources */
      for (int i = 0; i < inst->sources; i++)
         hash = hash_reg(hash, inst->src[i]);
   }

   return hash;
}
346 
347 /* -------------------------------------------------------------------- */
348 
349 static bool
cmp_func(const void * data1,const void * data2)350 cmp_func(const void *data1, const void *data2)
351 {
352    bool negate;
353    return instructions_match((fs_inst *) data1, (fs_inst *) data2, &negate);
354 }
355 
356 /* We set bit 31 in remap_table entries if it needs to be negated. */
357 #define REMAP_NEGATE (0x80000000u)
358 
/**
 * Rewrite the VGRF sources of \p inst according to \p remap_table,
 * which maps an eliminated def's register number to the surviving
 * def's number (with REMAP_NEGATE set in bit 31 when the reused value
 * must additionally be negated).  Entries of ~0u mean "not remapped".
 */
static void
remap_sources(fs_visitor &s, const brw::def_analysis &defs,
              fs_inst *inst, unsigned *remap_table)
{
   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file == VGRF &&
          inst->src[i].nr < defs.count() &&
          remap_table[inst->src[i].nr] != ~0u) {
         const unsigned old_nr = inst->src[i].nr;
         unsigned new_nr = remap_table[old_nr];
         const bool need_negate = new_nr & REMAP_NEGATE;
         new_nr &= ~REMAP_NEGATE;
         inst->src[i].nr = new_nr;

         if (need_negate) {
            /* A negate source modifier only works on float-typed
             * sources (unless the instruction can retype them) and on
             * instructions that accept source modifiers at all.
             */
            if ((inst->src[i].type != BRW_TYPE_F &&
                 !inst->can_change_types()) ||
                !inst->can_do_source_mods(s.devinfo)) {
               /* We can't use the negate directly, resolve it just after the
                * def and use that for any future uses.
                */
               fs_inst *def = defs.get(inst->src[i]);
               bblock_t *def_block = defs.get_block(inst->src[i]);
               const fs_builder dbld =
                  fs_builder(&s, def_block, def).at(def_block, def->next);

               /* Resolve any deferred block IP changes before inserting */
               if (def_block->end_ip_delta)
                  s.cfg->adjust_block_ips();

               brw_reg neg = brw_vgrf(new_nr, BRW_TYPE_F);
               brw_reg tmp = dbld.MOV(negate(neg));
               inst->src[i].nr = tmp.nr;
               /* Point future remaps at the resolved MOV so the negate
                * is materialized only once.
                */
               remap_table[old_nr] = tmp.nr;
            } else {
               inst->src[i].negate = !inst->src[i].negate;
               inst->src[i].type = BRW_TYPE_F;
            }
         }
      }
   }
}
401 
402 bool
brw_opt_cse_defs(fs_visitor & s)403 brw_opt_cse_defs(fs_visitor &s)
404 {
405    const intel_device_info *devinfo = s.devinfo;
406    const idom_tree &idom = s.idom_analysis.require();
407    const brw::def_analysis &defs = s.def_analysis.require();
408    bool progress = false;
409    bool need_remaps = false;
410 
411    unsigned *remap_table = new unsigned[defs.count()];
412    memset(remap_table, ~0u, defs.count() * sizeof(int));
413    struct set *set = _mesa_set_create(NULL, NULL, cmp_func);
414 
415    foreach_block(block, s.cfg) {
416       fs_inst *last_flag_write = NULL;
417       fs_inst *last = NULL;
418 
419       foreach_inst_in_block_safe(fs_inst, inst, block) {
420          if (need_remaps)
421             remap_sources(s, defs, inst, remap_table);
422 
423          /* Updating last_flag_written should be at the bottom of the loop,
424           * but doing it this way lets us use "continue" more easily.
425           */
426          if (last && last->flags_written(devinfo))
427             last_flag_write = last;
428          last = inst;
429 
430          if (inst->dst.is_null()) {
431             bool ignored;
432             if (last_flag_write && !inst->writes_accumulator &&
433                 instructions_match(last_flag_write, inst, &ignored)) {
434                /* This instruction has no destination but has a flag write
435                 * which is redundant with the previous flag write in our
436                 * basic block.  So we can simply remove it.
437                 */
438                inst->remove(block, true);
439                last = NULL;
440                progress = true;
441             }
442          } else if (is_expression(&s, inst) && defs.get(inst->dst)) {
443             assert(!inst->writes_accumulator);
444             assert(!inst->reads_accumulator_implicitly());
445 
446             uint32_t hash = hash_inst(inst);
447             if (inst->flags_read(devinfo)) {
448                hash = last_flag_write ? HASH(hash, last_flag_write)
449                                       : HASH(hash, block);
450             }
451 
452             struct set_entry *e =
453                _mesa_set_search_or_add_pre_hashed(set, hash, inst, NULL);
454             if (!e) goto out; /* out of memory error */
455             fs_inst *match = (fs_inst *) e->key;
456 
457             /* If there was no match, move on */
458             if (match == inst)
459                continue;
460 
461             bblock_t *def_block = defs.get_block(match->dst);
462             if (block != def_block && (local_only(inst) ||
463                 !idom.dominates(def_block, block))) {
464                /* If `match` doesn't dominate `inst` then remove it from
465                 * the set and add `inst` instead so future lookups see that.
466                 */
467                e->key = inst;
468                continue;
469             }
470 
471             /* We can replace inst with match or negate(match). */
472             bool negate = false;
473             if (inst->opcode == BRW_OPCODE_MUL &&
474                 inst->dst.type == BRW_TYPE_F) {
475                /* Determine whether inst is actually negate(match) */
476                bool ops_must_match = operands_match(inst, match, &negate);
477                assert(ops_must_match);
478             }
479 
480             /* Some later instruction could depend on the flags written by
481              * this instruction. It can only be removed if the previous
482              * instruction that write the flags is identical.
483              */
484             if (inst->flags_written(devinfo)) {
485                bool ignored;
486 
487                if (last_flag_write == NULL ||
488                    !instructions_match(last_flag_write, inst, &ignored)) {
489                   continue;
490                }
491             }
492 
493             progress = true;
494             need_remaps = true;
495             remap_table[inst->dst.nr] =
496                match->dst.nr | (negate ? REMAP_NEGATE : 0);
497 
498             inst->remove(block, true);
499          }
500       }
501    }
502 
503 out:
504    delete [] remap_table;
505    _mesa_set_destroy(set, NULL);
506 
507    if (progress) {
508       s.cfg->adjust_block_ips();
509       s.invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
510                             DEPENDENCY_INSTRUCTION_DETAIL);
511    }
512 
513    return progress;
514 }
515 
516 #undef HASH
517