/*
 * Copyright (C) 2021 Collabora, Ltd.
 * Copyright (C) 2014 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24
#include <string.h>

#include "compiler.h"
#include "bi_builder.h"

#define XXH_INLINE_ALL
#include "xxhash.h"
30
/* This pass handles CSE'ing repeated expressions created in the process of
 * translating from NIR. Also, currently this is intra-block only; to make it
 * work over multiple blocks we'd need to bring forward dominance calculation.
 */
35
/* Fold one word of data into a running hash value using xxHash-32,
 * seeding with the hash accumulated so far. */
static inline uint32_t
HASH(uint32_t seed, unsigned data)
{
   uint32_t folded = XXH32(&data, sizeof(data), seed);
   return folded;
}
41
42 static uint32_t
hash_index(uint32_t hash,bi_index index)43 hash_index(uint32_t hash, bi_index index)
44 {
45 hash = HASH(hash, index.value);
46 hash = HASH(hash, index.abs);
47 hash = HASH(hash, index.neg);
48 hash = HASH(hash, index.swizzle);
49 hash = HASH(hash, index.offset);
50 hash = HASH(hash, index.reg);
51 hash = HASH(hash, index.type);
52 return hash;
53 }
54
55 /* Hash an ALU instruction. */
56 static uint32_t
hash_instr(const void * data)57 hash_instr(const void *data)
58 {
59 const bi_instr *I = data;
60 uint32_t hash = 0;
61
62 hash = HASH(hash, I->op);
63
64 /* Explcitly skip destinations, except for size details */
65 bi_foreach_dest(I, d) {
66 hash = HASH(hash, I->dest[d].swizzle);
67 }
68
69 bi_foreach_src(I, s) {
70 hash = hash_index(hash, I->src[s]);
71 }
72
73 /* Explicitly skip branch, regfmt, vecsize, no_spill, tdd, table */
74 hash = HASH(hash, I->dest_mod);
75
76 /* Explicitly skip other immediates */
77 hash = HASH(hash, I->shift);
78
79 for (unsigned i = 0; i < ARRAY_SIZE(I->flags); ++i)
80 hash = HASH(hash, I->flags[i]);
81
82 return hash;
83 }
84
85 static bool
instrs_equal(const void * _i1,const void * _i2)86 instrs_equal(const void *_i1, const void *_i2)
87 {
88 const bi_instr *i1 = _i1, *i2 = _i2;
89
90 if (i1->op != i2->op)
91 return false;
92
93 /* Explicitly skip destinations */
94
95 bi_foreach_src(i1, s) {
96 bi_index s1 = i1->src[s], s2 = i2->src[s];
97
98 if (memcmp(&s1, &s2, sizeof(s1)) != 0)
99 return false;
100 }
101
102 if (i1->dest_mod != i2->dest_mod)
103 return false;
104
105 if (i1->shift != i2->shift)
106 return false;
107
108 for (unsigned i = 0; i < ARRAY_SIZE(i1->flags); ++i) {
109 if (i1->flags[i] != i2->flags[i])
110 return false;
111 }
112
113 return true;
114 }
115
116 /* Determines what instructions the above routines have to handle */
117
118 static bool
instr_can_cse(const bi_instr * I)119 instr_can_cse(const bi_instr *I)
120 {
121 switch (I->op) {
122 case BI_OPCODE_DTSEL_IMM:
123 case BI_OPCODE_DISCARD_F32:
124 return false;
125 default:
126 break;
127 }
128
129 /* Be conservative about which message-passing instructions we CSE,
130 * since most are not pure even within a thread.
131 */
132 if (bi_opcode_props[I->op].message && I->op != BI_OPCODE_LEA_BUF_IMM)
133 return false;
134
135 if (I->branch_target)
136 return false;
137
138 /* Refuse to CSE non-SSA destinations since the data flow analysis
139 * required is nontrivial */
140 bi_foreach_dest(I, d) {
141 if (!bi_is_null(I->dest[d]) && !bi_is_ssa(I->dest[d]))
142 return false;
143 }
144
145 /* Similar refuse to CSE non-SSA sources. We allow machine registers,
146 * since CSE runs before register allocation which means any registers
147 * encountered are preloaded and hence assumed constant.
148 */
149 bi_foreach_src(I, s) {
150 if (I->src[s].reg)
151 return false;
152 }
153
154 return true;
155 }
156
157 void
bi_opt_cse(bi_context * ctx)158 bi_opt_cse(bi_context *ctx)
159 {
160 struct set *instr_set = _mesa_set_create(NULL, hash_instr, instrs_equal);
161
162 bi_foreach_block(ctx, block) {
163 bi_index *replacement = calloc(sizeof(bi_index), ctx->ssa_alloc);
164 _mesa_set_clear(instr_set, NULL);
165
166 bi_foreach_instr_in_block(block, instr) {
167 /* Rewrite before trying to CSE anything so we converge
168 * locally in one iteration */
169 bi_foreach_src(instr, s) {
170 if (bi_is_staging_src(instr, s))
171 continue;
172
173 if (!bi_is_ssa(instr->src[s]))
174 continue;
175
176 bi_index repl = replacement[instr->src[s].value];
177 if (!bi_is_null(repl))
178 instr->src[s] = bi_replace_index(instr->src[s], repl);
179 }
180
181 if (!instr_can_cse(instr))
182 continue;
183
184 bool found;
185 struct set_entry *entry =
186 _mesa_set_search_or_add(instr_set, instr, &found);
187 if (found) {
188 const bi_instr *match = entry->key;
189
190 bi_foreach_dest(instr, d) {
191 if (!bi_is_null(instr->dest[d]))
192 replacement[instr->dest[d].value] = match->dest[d];
193 }
194 }
195 }
196
197 free(replacement);
198 }
199
200 _mesa_set_destroy(instr_set, NULL);
201 }
202