/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *    Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include "bi_builder.h"
#include "compiler.h"

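/* Returns whether the instruction reads the given index through any of its
 * sources, as determined by bi_is_equiv */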
bool
bi_has_arg(const bi_instr *ins, bi_index arg)
{
   if (!ins)
      return false;

   bi_foreach_src(ins, s) {
      if (bi_is_equiv(ins->src[s], arg))
         return true;
   }

   return false;
}

/* Precondition: valid 16-bit or 32-bit register format. Returns whether it is
 * 16-bit. Note AUTO reads to 32-bit registers even if the memory format is
 * 16-bit, so it is treated as 32-bit here */

bool
bi_is_regfmt_16(enum bi_register_format fmt)
{
   switch (fmt) {
   case BI_REGISTER_FORMAT_F16:
   case BI_REGISTER_FORMAT_S16:
   case BI_REGISTER_FORMAT_U16:
      return true;
   case BI_REGISTER_FORMAT_F32:
   case BI_REGISTER_FORMAT_S32:
   case BI_REGISTER_FORMAT_U32:
   case BI_REGISTER_FORMAT_AUTO:
      return false;
   default:
      unreachable("Invalid register format");
   }
}

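/* Number of staging registers accessed by an instruction, as described by its
 * opcode properties: a fixed count, a count derived from the register format
 * and vector size, the vector size itself, or the explicit sr_count field */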
static unsigned
bi_count_staging_registers(const bi_instr *ins)
{
   enum bi_sr_count count = bi_opcode_props[ins->op].sr_count;
   unsigned vecsize = ins->vecsize + 1; /* XXX: off-by-one */

   switch (count) {
   case BI_SR_COUNT_0 ... BI_SR_COUNT_4:
      return count;
   case BI_SR_COUNT_FORMAT:
      return bi_is_regfmt_16(ins->register_format) ? DIV_ROUND_UP(vecsize, 2)
                                                   : vecsize;
   case BI_SR_COUNT_VECSIZE:
      return vecsize;
   case BI_SR_COUNT_SR_COUNT:
      return ins->sr_count;
   }

   unreachable("Invalid sr_count");
}

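/* Number of consecutive 32-bit registers read by source s of an instruction,
 * accounting for staging registers and opcode-specific exceptions */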
unsigned
bi_count_read_registers(const bi_instr *ins, unsigned s)
{
   /* ATOM reads 1 but writes 2. Exception for ACMPXCHG */
   if (s == 0 && ins->op == BI_OPCODE_ATOM_RETURN_I32)
      return (ins->atom_opc == BI_ATOM_OPC_ACMPXCHG) ? 2 : 1;
   else if (s == 0 && bi_opcode_props[ins->op].sr_read)
      return bi_count_staging_registers(ins);
   else if (s == 4 && ins->op == BI_OPCODE_BLEND)
      return ins->sr_count_2; /* Dual source blending */
   else if (s == 0 && ins->op == BI_OPCODE_SPLIT_I32)
      return ins->nr_dests;
   else
      return 1;
}

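/* Number of consecutive 32-bit registers written by destination d of an
 * instruction, accounting for staging registers and opcode-specific
 * exceptions (texturing, atomics, SEG_ADD.i64, COLLECT.i32) */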
unsigned
bi_count_write_registers(const bi_instr *ins, unsigned d)
{
   if (d == 0 && bi_opcode_props[ins->op].sr_write) {
      switch (ins->op) {
      case BI_OPCODE_TEXC:
      case BI_OPCODE_TEXC_DUAL:
         if (ins->sr_count_2)
            return ins->sr_count;
         else
            return bi_is_regfmt_16(ins->register_format) ? 2 : 4;

      case BI_OPCODE_TEX_SINGLE:
      case BI_OPCODE_TEX_FETCH:
      case BI_OPCODE_TEX_GRADIENT:
      case BI_OPCODE_TEX_GATHER: {
         unsigned chans = util_bitcount(ins->write_mask);

         return bi_is_regfmt_16(ins->register_format) ? DIV_ROUND_UP(chans, 2)
                                                      : chans;
      }

      case BI_OPCODE_ACMPXCHG_I32:
         /* Reads 2 but writes 1 */
         return 1;

      case BI_OPCODE_ATOM1_RETURN_I32:
         /* Allow omitting the destination for plain ATOM1 */
         return bi_is_null(ins->dest[0]) ? 0 : ins->sr_count;
      default:
         return bi_count_staging_registers(ins);
      }
   } else if (ins->op == BI_OPCODE_SEG_ADD_I64) {
      return 2;
   } else if (ins->op == BI_OPCODE_TEXC_DUAL && d == 1) {
      return ins->sr_count_2;
   } else if (ins->op == BI_OPCODE_COLLECT_I32 && d == 0) {
      return ins->nr_srcs;
   }

   return 1;
}

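/* Mask of 32-bit registers written by destination d, shifted by the
 * destination's register offset. For example, a destination writing 2
 * registers at offset 1 yields 0b0110 */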
unsigned
bi_writemask(const bi_instr *ins, unsigned d)
{
   unsigned mask = BITFIELD_MASK(bi_count_write_registers(ins, d));
   unsigned shift = ins->dest[d].offset;
   return (mask << shift);
}

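/* Returns the clause following the given one in program order, crossing block
 * boundaries as needed, or NULL if there is none. Pass clause = NULL to start
 * from the beginning of the given block */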
bi_clause *
bi_next_clause(bi_context *ctx, bi_block *block, bi_clause *clause)
{
   if (!block && !clause)
      return NULL;

   /* Try the first clause in this block if we're starting from scratch */
   if (!clause && !list_is_empty(&block->clauses))
      return list_first_entry(&block->clauses, bi_clause, link);

   /* Try the next clause in this block */
   if (clause && clause->link.next != &block->clauses)
      return list_first_entry(&(clause->link), bi_clause, link);

   /* Try the next block, or the one after that if it's empty, etc. */
   bi_block *next_block = bi_next_block(block);

   bi_foreach_block_from(ctx, next_block, block) {
      if (!list_is_empty(&block->clauses))
         return list_first_entry(&block->clauses, bi_clause, link);
   }

   return NULL;
}

/* Does an instruction have a side effect not captured by its register
 * destination? Applies to certain message-passing instructions, +DISCARD, and
 * branching only; used in dead code elimination. Branches are caught via the
 * `last` property, which is also set for some atomics, +BARRIER and +BLEND,
 * all of which have side effects anyway, so there is no loss of generality */

bool
bi_side_effects(const bi_instr *I)
{
   if (bi_opcode_props[I->op].last)
      return true;

   switch (I->op) {
   case BI_OPCODE_DISCARD_F32:
   case BI_OPCODE_DISCARD_B32:
      return true;
   default:
      break;
   }

   switch (bi_opcode_props[I->op].message) {
   case BIFROST_MESSAGE_NONE:
   case BIFROST_MESSAGE_VARYING:
   case BIFROST_MESSAGE_ATTRIBUTE:
   case BIFROST_MESSAGE_TEX:
   case BIFROST_MESSAGE_VARTEX:
   case BIFROST_MESSAGE_LOAD:
   case BIFROST_MESSAGE_64BIT:
      return false;

   case BIFROST_MESSAGE_STORE:
   case BIFROST_MESSAGE_ATOMIC:
   case BIFROST_MESSAGE_BARRIER:
   case BIFROST_MESSAGE_BLEND:
   case BIFROST_MESSAGE_Z_STENCIL:
   case BIFROST_MESSAGE_ATEST:
   case BIFROST_MESSAGE_JOB:
      return true;

   case BIFROST_MESSAGE_TILE:
      return (I->op != BI_OPCODE_LD_TILE);
   }

   unreachable("Invalid message type");
}

/* Branch reconvergence is required when the execution mask may change
 * between adjacent instructions (clauses). This occurs for conditional
 * branches and for the last instruction (clause) in a block whose
 * fallthrough successor has multiple predecessors.
 */

bool
bi_reconverge_branches(bi_block *block)
{
   if (bi_num_successors(block) == 1)
      return bi_num_predecessors(block->successors[0]) > 1;
   else
      return true;
}

/*
 * When MUX.i32 or MUX.v2i16 is used to multiplex entire sources, they can be
 * replaced by CSEL as follows:
 *
 *    MUX.neg(x, y, b)      -> CSEL.s.lt(b, 0, x, y)
 *    MUX.int_zero(x, y, b) -> CSEL.i.eq(b, 0, x, y)
 *    MUX.fp_zero(x, y, b)  -> CSEL.f.eq(b, 0, x, y)
 *
 * MUX.bit cannot be transformed like this.
 *
 * Note that MUX.v2i16 has partial support for swizzles, which CSEL.v2i16 lacks.
 * So we must check the swizzles too.
 */
bool
bi_can_replace_with_csel(bi_instr *I)
{
   return ((I->op == BI_OPCODE_MUX_I32) || (I->op == BI_OPCODE_MUX_V2I16)) &&
          (I->mux != BI_MUX_BIT) && (I->src[0].swizzle == BI_SWIZZLE_H01) &&
          (I->src[1].swizzle == BI_SWIZZLE_H01) &&
          (I->src[2].swizzle == BI_SWIZZLE_H01);
}

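/* Returns the CSEL opcode implementing the given MUX mode, for either the
 * 32-bit or the v2i16 variant. There is no CSEL equivalent for MUX.bit */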
static enum bi_opcode
bi_csel_for_mux(bool must_sign, bool b32, enum bi_mux mux)
{
   switch (mux) {
   case BI_MUX_INT_ZERO:
      if (must_sign)
         return b32 ? BI_OPCODE_CSEL_U32 : BI_OPCODE_CSEL_V2U16;
      else
         return b32 ? BI_OPCODE_CSEL_I32 : BI_OPCODE_CSEL_V2I16;
   case BI_MUX_NEG:
      return b32 ? BI_OPCODE_CSEL_S32 : BI_OPCODE_CSEL_V2S16;
   case BI_MUX_FP_ZERO:
      return b32 ? BI_OPCODE_CSEL_F32 : BI_OPCODE_CSEL_V2F16;
   default:
      unreachable("No CSEL for MUX.bit");
   }
}

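/* Builds a CSEL equivalent to the given MUX, following the mapping above.
 * Intended for instructions satisfying bi_can_replace_with_csel */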
bi_instr *
bi_csel_from_mux(bi_builder *b, const bi_instr *I, bool must_sign)
{
   assert(I->op == BI_OPCODE_MUX_I32 || I->op == BI_OPCODE_MUX_V2I16);

   /* Build a new CSEL */
   enum bi_cmpf cmpf = (I->mux == BI_MUX_NEG) ? BI_CMPF_LT : BI_CMPF_EQ;
   bi_instr *csel = bi_csel_u32_to(b, I->dest[0], I->src[2], bi_zero(),
                                   I->src[0], I->src[1], cmpf);

   /* Fixup the opcode and use it */
   csel->op = bi_csel_for_mux(must_sign, I->op == BI_OPCODE_MUX_I32, I->mux);
   return csel;
}