/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XXH_INLINE_ALL
#include "util/xxhash.h"

#include "brw_fs.h"
#include "brw_builder.h"
#include "brw_cfg.h"

/** @file
 *
 * Support for SSA-based global Common Subexpression Elimination (CSE).
 */

using namespace brw;

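/**
 * Bookkeeping for a def that has been found redundant.  One entry exists per
 * SSA def (indexed by VGRF number): \c inst is the redundant instruction and
 * \c block its basic block; \c type and \c nr describe the earlier matching
 * def that uses should be rewritten to; \c negate says whether rewritten
 * sources must be negated; and \c still_used is set when some use could not
 * be rewritten, in which case the instruction must be kept.
 */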
struct remap_entry {
   fs_inst *inst;
   bblock_t *block;
   enum brw_reg_type type;
   unsigned nr;
   bool negate;
   bool still_used;
};

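/**
 * Returns true if \p inst computes a value purely as a function of its
 * sources, i.e. it has no side effects and is not volatile, making it a
 * candidate for CSE.  Besides the ALU and sampler opcodes listed below, this
 * includes constant memory loads, non-coalescing LOAD_PAYLOADs, and
 * side-effect-free, non-volatile sends.
 */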
static bool
is_expression(const fs_visitor *v, const fs_inst *const inst)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_SEL:
   case BRW_OPCODE_NOT:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_ROR:
   case BRW_OPCODE_ROL:
   case BRW_OPCODE_CMP:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_CSEL:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_FRC:
   case BRW_OPCODE_LZD:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_ADD3:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_LRP:
   case FS_OPCODE_FB_READ_LOGICAL:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_LOGICAL:
   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
   case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL:
   case SHADER_OPCODE_LOAD_LIVE_CHANNELS:
   case FS_OPCODE_LOAD_LIVE_CHANNELS:
   case SHADER_OPCODE_BROADCAST:
   case SHADER_OPCODE_SHUFFLE:
   case SHADER_OPCODE_QUAD_SWIZZLE:
   case SHADER_OPCODE_CLUSTER_BROADCAST:
   case SHADER_OPCODE_MOV_INDIRECT:
   case SHADER_OPCODE_TEX_LOGICAL:
   case SHADER_OPCODE_TXD_LOGICAL:
   case SHADER_OPCODE_TXF_LOGICAL:
   case SHADER_OPCODE_TXL_LOGICAL:
   case SHADER_OPCODE_TXS_LOGICAL:
   case FS_OPCODE_TXB_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
   case SHADER_OPCODE_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_LOGICAL:
   case SHADER_OPCODE_TG4_BIAS_LOGICAL:
   case SHADER_OPCODE_TG4_EXPLICIT_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_IMPLICIT_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_BIAS_LOGICAL:
   case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
   case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
   case SHADER_OPCODE_GET_BUFFER_SIZE:
   case FS_OPCODE_PACK:
   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_LOAD_SUBGROUP_INVOCATION:
      return true;
   case SHADER_OPCODE_MEMORY_LOAD_LOGICAL:
      return inst->src[MEMORY_LOGICAL_MODE].ud == MEMORY_MODE_CONSTANT;
   case SHADER_OPCODE_LOAD_PAYLOAD:
      return !is_coalescing_payload(v->devinfo, v->alloc, inst);
   default:
      return inst->is_send_from_grf() && !inst->has_side_effects() &&
         !inst->is_volatile();
   }
}

/**
 * True if the instruction should only be CSE'd within its local block.
 */
bool
local_only(const fs_inst *inst)
{
   switch (inst->opcode) {
   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
   case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL:
   case SHADER_OPCODE_LOAD_LIVE_CHANNELS:
   case FS_OPCODE_LOAD_LIVE_CHANNELS:
      /* These depend on the current channel enables, so the same opcode
       * in another block will likely return a different value.
       */
      return true;
   case BRW_OPCODE_MOV:
      /* Global CSE of MOVs is likely not worthwhile.  It can increase
       * register pressure by extending the lifetime of simple constants.
       */
      return true;
   case SHADER_OPCODE_LOAD_PAYLOAD:
      /* This is basically a MOV */
      return inst->sources == 1;
   case BRW_OPCODE_CMP:
      /* Seems to increase spilling a lot without much benefit */
      return true;
   default:
      return false;
   }
}

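/**
 * Check whether the sources of \p a and \p b are equivalent, taking
 * commutativity into account.  For MAD the two multiplicands may be swapped;
 * for float MUL, negations on either source are canonicalized away before
 * comparing, and \p negate reports whether the two products differ only in
 * sign (in which case neither instruction may saturate).
 */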
static bool
operands_match(const fs_inst *a, const fs_inst *b, bool *negate)
{
   brw_reg *xs = a->src;
   brw_reg *ys = b->src;

   if (a->opcode == BRW_OPCODE_MAD) {
      return xs[0].equals(ys[0]) &&
             ((xs[1].equals(ys[1]) && xs[2].equals(ys[2])) ||
              (xs[2].equals(ys[1]) && xs[1].equals(ys[2])));
   } else if (a->opcode == BRW_OPCODE_MUL && a->dst.type == BRW_TYPE_F) {
      bool xs0_negate = xs[0].negate;
      bool xs1_negate = xs[1].file == IMM ? xs[1].f < 0.0f
                                          : xs[1].negate;
      bool ys0_negate = ys[0].negate;
      bool ys1_negate = ys[1].file == IMM ? ys[1].f < 0.0f
                                          : ys[1].negate;
      float xs1_imm = xs[1].f;
      float ys1_imm = ys[1].f;

      xs[0].negate = false;
      xs[1].negate = false;
      ys[0].negate = false;
      ys[1].negate = false;
      xs[1].f = fabsf(xs[1].f);
      ys[1].f = fabsf(ys[1].f);

      bool ret = (xs[0].equals(ys[0]) && xs[1].equals(ys[1])) ||
                 (xs[1].equals(ys[0]) && xs[0].equals(ys[1]));

      xs[0].negate = xs0_negate;
      xs[1].negate = xs[1].file == IMM ? false : xs1_negate;
      ys[0].negate = ys0_negate;
      ys[1].negate = ys[1].file == IMM ? false : ys1_negate;
      xs[1].f = xs1_imm;
      ys[1].f = ys1_imm;

      *negate = (xs0_negate != xs1_negate) != (ys0_negate != ys1_negate);
      if (*negate && (a->saturate || b->saturate))
         return false;
      return ret;
   } else if (!a->is_commutative()) {
      bool match = true;
      for (int i = 0; i < a->sources; i++) {
         if (!xs[i].equals(ys[i])) {
            match = false;
            break;
         }
      }
      return match;
   } else if (a->sources == 3) {
      return (xs[0].equals(ys[0]) && xs[1].equals(ys[1]) && xs[2].equals(ys[2])) ||
             (xs[0].equals(ys[0]) && xs[1].equals(ys[2]) && xs[2].equals(ys[1])) ||
             (xs[0].equals(ys[1]) && xs[1].equals(ys[0]) && xs[2].equals(ys[2])) ||
             (xs[0].equals(ys[1]) && xs[1].equals(ys[2]) && xs[2].equals(ys[0])) ||
             (xs[0].equals(ys[2]) && xs[1].equals(ys[0]) && xs[2].equals(ys[1])) ||
             (xs[0].equals(ys[2]) && xs[1].equals(ys[1]) && xs[2].equals(ys[0]));
   } else {
      return (xs[0].equals(ys[0]) && xs[1].equals(ys[1])) ||
             (xs[1].equals(ys[0]) && xs[0].equals(ys[1]));
   }
}

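/**
 * Check whether \p a and \p b are interchangeable: every field that affects
 * the computed value (opcode, execution size and group, predication,
 * conditional mod, destination type, message descriptors, etc.) must be
 * equal and the operands must match.  \p negate is set as described for
 * operands_match().
 */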
static bool
instructions_match(fs_inst *a, fs_inst *b, bool *negate)
{
   return a->opcode == b->opcode &&
          a->exec_size == b->exec_size &&
          a->group == b->group &&
          a->predicate == b->predicate &&
          a->conditional_mod == b->conditional_mod &&
          a->dst.type == b->dst.type &&
          a->offset == b->offset &&
          a->mlen == b->mlen &&
          a->ex_mlen == b->ex_mlen &&
          a->sfid == b->sfid &&
          a->desc == b->desc &&
          a->ex_desc == b->ex_desc &&
          a->size_written == b->size_written &&
          a->check_tdr == b->check_tdr &&
          a->header_size == b->header_size &&
          a->target == b->target &&
          a->sources == b->sources &&
          a->bits == b->bits &&
          operands_match(a, b, negate);
}

/* -------------------------------------------------------------------- */

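/* Fold \c data into the running \c hash, using the previous hash value as
 * the XXH32 seed so that successive fields chain together.
 */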
#define HASH(hash, data) XXH32(&(data), sizeof(data), hash)

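/**
 * Hash the register fields that matter for operand equality, packed into a
 * struct with no padding (checked by the STATIC_ASSERT) so that stray bytes
 * never feed into the hash.
 */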
uint32_t
hash_reg(uint32_t hash, const brw_reg &r)
{
   struct {
      uint64_t u64;
      uint32_t u32;
      uint16_t u16a;
      uint16_t u16b;
   } data = {
      .u64 = r.u64, .u32 = r.bits, .u16a = r.offset, .u16b = r.stride
   };
   STATIC_ASSERT(sizeof(data) == 16); /* ensure there's no padding */
   hash = HASH(hash, data);
   return hash;
}

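/**
 * Hash an instruction for the CSE set.  This must be consistent with
 * instructions_match(): any two instructions that match must hash to the
 * same value.  The destination is skipped (nothing would ever match
 * otherwise), and commutative sources are combined multiplicatively so the
 * hash is independent of their order, mirroring operands_match().
 */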
static uint32_t
hash_inst(const void *v)
{
   const fs_inst *inst = static_cast<const fs_inst *>(v);
   uint32_t hash = 0;

   /* Skip dst - that would make nothing ever match */

   /* Skip ir and annotation - we don't care for equivalency purposes. */

   const uint8_t u8data[] = {
      inst->sources,
      inst->exec_size,
      inst->group,
      inst->mlen,
      inst->ex_mlen,
      inst->sfid,
      inst->header_size,
      inst->target,

      inst->conditional_mod,
      inst->predicate,
   };
   const uint32_t u32data[] = {
      inst->desc,
      inst->ex_desc,
      inst->offset,
      inst->size_written,
      inst->opcode,
      inst->bits,
   };

   hash = HASH(hash, u8data);
   hash = HASH(hash, u32data);

   /* Skip hashing sched - we shouldn't be CSE'ing after SWSB scheduling */

   if (inst->opcode == BRW_OPCODE_MAD) {
      /* Commutatively combine the hashes for the multiplicands */
      hash = hash_reg(hash, inst->src[0]);
      uint32_t hash1 = hash_reg(hash, inst->src[1]);
      uint32_t hash2 = hash_reg(hash, inst->src[2]);
      hash = hash1 * hash2;
   } else if (inst->opcode == BRW_OPCODE_MUL &&
              inst->dst.type == BRW_TYPE_F) {
      /* Canonicalize negations on either source (or both) and commutatively
       * combine the hashes for both sources.
       */
      brw_reg src[2] = { inst->src[0], inst->src[1] };
      uint32_t src_hash[2];

      for (int i = 0; i < 2; i++) {
         src[i].negate = false;
         if (src[i].file == IMM)
            src[i].f = fabs(src[i].f);

         src_hash[i] = hash_reg(hash, src[i]);
      }

      hash = src_hash[0] * src_hash[1];
   } else if (inst->is_commutative()) {
      /* Commutatively combine the sources */
      uint32_t hash0 = hash_reg(hash, inst->src[0]);
      uint32_t hash1 = hash_reg(hash, inst->src[1]);
      uint32_t hash2 = inst->sources > 2 ? hash_reg(hash, inst->src[2]) : 1;
      hash = hash0 * hash1 * hash2;
   } else {
      /* Just hash all the sources */
      for (int i = 0; i < inst->sources; i++)
         hash = hash_reg(hash, inst->src[i]);
   }

   return hash;
}

/* -------------------------------------------------------------------- */

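/* Equality callback for the CSE set; the negate output is not needed here. */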
static bool
cmp_func(const void *data1, const void *data2)
{
   bool negate;
   return instructions_match((fs_inst *) data1, (fs_inst *) data2, &negate);
}

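/**
 * Rewrite any source of \p inst that refers to a CSE'd def so that it reads
 * the surviving def recorded in \p remap_table instead, toggling the negate
 * modifier when required.  If a needed negation cannot be expressed (type
 * mismatch or the instruction doesn't allow source modifiers), the source is
 * left alone and the entry is marked still_used so the original instruction
 * won't be deleted.  Returns true if any source was changed.
 */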
static bool
remap_sources(fs_visitor &s, const brw::def_analysis &defs,
              fs_inst *inst, struct remap_entry *remap_table)
{
   bool progress = false;

   for (int i = 0; i < inst->sources; i++) {
      if (inst->src[i].file == VGRF &&
          inst->src[i].nr < defs.count() &&
          remap_table[inst->src[i].nr].inst != NULL) {
         const unsigned old_nr = inst->src[i].nr;
         const unsigned new_nr = remap_table[old_nr].nr;
         const bool need_negate = remap_table[old_nr].negate;

         if (need_negate &&
             (remap_table[old_nr].type != inst->src[i].type ||
              !inst->can_do_source_mods(s.devinfo))) {
            remap_table[old_nr].still_used = true;
            continue;
         }

         inst->src[i].nr = new_nr;

         if (!inst->src[i].abs)
            inst->src[i].negate ^= need_negate;

         progress = true;
      }
   }

   return progress;
}

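/**
 * Global CSE over SSA defs.  Walks the CFG keeping a hash set of value
 * instructions seen so far; when an instruction matches an earlier one whose
 * block dominates the current block (checked via the idom tree), the later
 * def is recorded in a remap table and subsequent uses are rewritten to the
 * earlier def by remap_sources().  Redundant flag writes with a null
 * destination are dropped on the spot, and remapped instructions that are no
 * longer used are removed at the end.
 */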
bool
brw_opt_cse_defs(fs_visitor &s)
{
   const intel_device_info *devinfo = s.devinfo;
   const idom_tree &idom = s.idom_analysis.require();
   const brw::def_analysis &defs = s.def_analysis.require();
   bool progress = false;
   bool need_remaps = false;

   struct remap_entry *remap_table = new remap_entry[defs.count()];
   memset(remap_table, 0, defs.count() * sizeof(struct remap_entry));
   struct set *set = _mesa_set_create(NULL, NULL, cmp_func);

   foreach_block(block, s.cfg) {
      fs_inst *last_flag_write = NULL;
      fs_inst *last = NULL;

      foreach_inst_in_block_safe(fs_inst, inst, block) {
         if (need_remaps)
            progress |= remap_sources(s, defs, inst, remap_table);

         /* Updating last_flag_write should be at the bottom of the loop,
          * but doing it this way lets us use "continue" more easily.
          */
         if (last && last->flags_written(devinfo))
            last_flag_write = last;
         last = inst;

         if (inst->dst.is_null()) {
            bool ignored;
            if (last_flag_write && !inst->writes_accumulator &&
                instructions_match(last_flag_write, inst, &ignored)) {
               /* This instruction has no destination but has a flag write
                * which is redundant with the previous flag write in our
                * basic block.  So we can simply remove it.
                */
               inst->remove(block, true);
               last = NULL;
               progress = true;
            }
         } else if (is_expression(&s, inst) && defs.get(inst->dst)) {
            assert(!inst->writes_accumulator);
            assert(!inst->reads_accumulator_implicitly());

            uint32_t hash = hash_inst(inst);
            if (inst->flags_read(devinfo)) {
               hash = last_flag_write ? HASH(hash, last_flag_write)
                                      : HASH(hash, block);
            }

            struct set_entry *e =
               _mesa_set_search_or_add_pre_hashed(set, hash, inst, NULL);
            if (!e) goto out; /* out of memory error */
            fs_inst *match = (fs_inst *) e->key;

            /* If there was no match, move on */
            if (match == inst)
               continue;

            bblock_t *def_block = defs.get_block(match->dst);
            if (block != def_block && (local_only(inst) ||
                !idom.dominates(def_block, block))) {
               /* If `match` doesn't dominate `inst` then remove it from
                * the set and add `inst` instead so future lookups see that.
                */
               e->key = inst;
               continue;
            }

            /* We can replace inst with match or negate(match). */
            bool negate = false;
            if (inst->opcode == BRW_OPCODE_MUL &&
                inst->dst.type == BRW_TYPE_F) {
               /* Determine whether inst is actually negate(match) */
               bool ops_must_match = operands_match(inst, match, &negate);
               assert(ops_must_match);
            }

            /* Some later instruction could depend on the flags written by
             * this instruction. It can only be removed if the previous
             * instruction that writes the flags is identical.
             */
            if (inst->flags_written(devinfo)) {
               bool ignored;

               if (last_flag_write == NULL ||
                   !instructions_match(last_flag_write, inst, &ignored)) {
                  continue;
               }
            }

            need_remaps = true;
            remap_table[inst->dst.nr].inst = inst;
            remap_table[inst->dst.nr].block = block;
            remap_table[inst->dst.nr].type = match->dst.type;
            remap_table[inst->dst.nr].nr = match->dst.nr;
            remap_table[inst->dst.nr].negate = negate;
            remap_table[inst->dst.nr].still_used = false;
         }
      }
   }

   /* Remove instructions that are now unused. */
   for (unsigned i = 0; i < defs.count(); i++) {
      if (!remap_table[i].inst)
         continue;

      if (!remap_table[i].still_used) {
         remap_table[i].inst->remove(remap_table[i].block, true);
         progress = true;
      }
   }

out:
   delete [] remap_table;
   _mesa_set_destroy(set, NULL);

   if (progress) {
      s.cfg->adjust_block_ips();
      s.invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                            DEPENDENCY_INSTRUCTION_DETAIL);
   }

   return progress;
}

#undef HASH