/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

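/* Helpers for rewriting value indices in the MIR. Renaming a value means
 * patching every instruction that reads or writes it, so these come in
 * per-instruction and whole-shader (global) variants */
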
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        mir_foreach_src(ins, i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned *swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;
                mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }

        /* Implicitly written before the shader */
        if (ctx->blend_input == old)
                ctx->blend_input = new;

        if (ctx->blend_src1 == old)
                ctx->blend_src1 = new;
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

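/* Counts how many times a value is read across the shader, including the
 * implicit reads of the blend inputs */
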
unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        if (ctx->blend_input == value)
                ++used_count;

        if (ctx->blend_src1 == value)
                ++used_count;

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}

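/* Checks if source i carries a nontrivial modifier: a shift for integer ops,
 * negate/absolute-value for float ops, a type conversion, or (if
 * check_swizzle is set) a non-identity swizzle */
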
bool
mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
{
        bool is_int = midgard_is_integer_op(ins->op);

        if (is_int) {
                if (ins->src_shift[i]) return true;
        } else {
                if (ins->src_neg[i]) return true;
                if (ins->src_abs[i]) return true;
        }

        if (ins->dest_type != ins->src_types[i]) return true;

        if (check_swizzle) {
                for (unsigned c = 0; c < 16; ++c) {
                        if (!(ins->mask & (1 << c))) continue;
                        if (ins->swizzle[i][c] != c) return true;
                }
        }

        return false;
}

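/* Likewise for the output modifier: anything other than the identity
 * (keeplo for integer ops, none for float ops), or a destination/source
 * type mismatch, is nontrivial */
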
bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->op);
        unsigned mod = ins->outmod;

        if (ins->dest_type != ins->src_types[1])
                return true;

        if (is_int)
                return mod != midgard_outmod_keeplo;
        else
                return mod != midgard_outmod_none;
}

/* 128 / sz = exp2(log2(128 / sz))
 *          = exp2(log2(128) - log2(sz))
 *          = exp2(7 - log2(sz))
 *          = 1 << (7 - log2(sz))
 */

static unsigned
mir_components_for_bits(unsigned bits)
{
        return 1 << (7 - util_logbase2(bits));
}

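/* Number of components of a given NIR type that fit in a 128-bit vector */
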
unsigned
mir_components_for_type(nir_alu_type T)
{
        unsigned sz = nir_alu_type_get_type_size(T);
        return mir_components_for_bits(sz);
}

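/* Converts a per-byte mask back to a per-component mask, asserting that all
 * bytes within a component agree */
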
uint16_t
mir_from_bytemask(uint16_t bytemask, unsigned bits)
{
        unsigned value = 0;
        unsigned count = bits / 8;

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                /* All bytes of a component must agree with its first byte */
                for (unsigned q = c; q < (c + count); ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}

/* Rounds up a bytemask to fill a given component count. Iterates over each
 * component, checking if any bytes in the component are masked on */

uint16_t
mir_round_bytemask_up(uint16_t mask, unsigned bits)
{
        unsigned bytes = bits / 8;
        unsigned maxmask = mask_of(bytes);
        unsigned channels = mir_components_for_bits(bits);

        for (unsigned c = 0; c < channels; ++c) {
                unsigned submask = maxmask << (c * bytes);

                if (mask & submask)
                        mask |= submask;
        }

        return mask;
}

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        return pan_to_bytemask(type_size, ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        ins->mask = mir_from_bytemask(bytemask, type_size);
}

/* Checks if we should use an upper destination override, rather than the lower
 * one in the IR. Returns -1 if no override is applicable, 0 if the lower half
 * should be used, or the component shift if the upper half should be used */

signed
mir_upper_override(midgard_instruction *ins, unsigned inst_size)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);

        /* 8bit imovs are promoted to 16bit ones with .sext on the source and
         * .keeplo on the destination to accommodate non-identity swizzles.
         */
        if (ins->op == midgard_alu_op_imov && type_size == 8)
                return 0;

        /* If the sizes are the same, there's nothing to override */
        if (type_size == inst_size)
                return -1;

        /* There are 16 bytes per vector, so there are (16/bytes)
         * components per vector. So the magic half is half of
         * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
         */

        unsigned threshold = mir_components_for_bits(type_size) >> 1;

        /* How many components did we shift over? */
        unsigned zeroes = __builtin_ctz(ins->mask);

        /* Did we hit the threshold? */
        return (zeroes >= threshold) ? threshold : 0;
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 * fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, unsigned bits)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                if (!(inmask & (1 << c))) continue;
                cmask |= (1 << swizzle[c]);
        }

        return pan_to_bytemask(bits, cmask);
}

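/* Per-byte read mask for a single source of an instruction, taking the
 * writemask and any fixed channel count into account for ALU ops */
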
uint16_t
mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
{
        /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
        if (ins->compact_branch && ins->branch.conditional && (i == 0))
                return 0xF;

        /* ALU ops act componentwise, so we need to pay attention to their
         * mask. Texture/ldst ops do not, so we don't clamp their source
         * readmasks based on the writemask */
        unsigned qmask = ~0;

        /* Handle dot products and things */
        if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                unsigned props = alu_opcode_props[ins->op].props;

                unsigned channel_override = GET_CHANNEL_COUNT(props);

                if (channel_override)
                        qmask = mask_of(channel_override);
                else
                        qmask = ins->mask;
        }

        return mir_bytemask_of_read_components_single(ins->swizzle[i], qmask,
                                                      nir_alu_type_get_type_size(ins->src_types[i]));
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        if (node == ~0)
                return 0;

        mir_foreach_src(ins, i) {
                if (ins->src[i] != node) continue;
                mask |= mir_bytemask_of_read_components_index(ins, i);
        }

        return mask;
}

/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So, semantically, we just need to
 * insert the instruction into a new bundle before/after the bundle of the
 * instruction in question */

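/* Wraps an instruction in a new single-instruction bundle. Only moves are
 * expected here for ALU tags, as asserted below */
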
static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->op));
                u->unit = UNIT_VMUL;

                /* Pad the bundle out to a 16-byte boundary */
                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

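/* Finds the index of the scheduled bundle containing a given instruction
 * within a block */
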
static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

midgard_instruction *
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* Find the bundle to insert before, and grow the bundles array to
         * make room for the new one */
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Move the target bundle and everything after it ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;

        return new.instructions[0];
}

midgard_instruction *
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* All the bundles after that one, we move ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;

        return new.instructions[0];
}

/* Flips the first two arguments of a (binary) op. Currently ALU
 * only; no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
        assert(ins->type == TAG_ALU_4);

        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        temp = ins->src_types[0];
        ins->src_types[0] = ins->src_types[1];
        ins->src_types[1] = temp;

        temp = ins->src_abs[0];
        ins->src_abs[0] = ins->src_abs[1];
        ins->src_abs[1] = temp;

        temp = ins->src_neg[0];
        ins->src_neg[0] = ins->src_neg[1];
        ins->src_neg[1] = temp;

        temp = ins->src_invert[0];
        ins->src_invert[0] = ins->src_invert[1];
        ins->src_invert[1] = temp;

        unsigned temp_swizzle[16];
        memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return;

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}