/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019-2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void
mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old,
                             unsigned new)
{
   mir_foreach_src(ins, i) {
      if (ins->src[i] == old)
         ins->src[i] = new;
   }
}

void
mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old,
                             unsigned new)
{
   if (ins->dest == old)
      ins->dest = new;
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old,
                                     unsigned new, unsigned *swizzle)
{
   for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
      if (ins->src[i] != old)
         continue;

      ins->src[i] = new;
      mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
   }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
   mir_foreach_instr_global(ctx, ins) {
      mir_rewrite_index_src_single(ins, old, new);
   }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new,
                              unsigned *swizzle)
{
   mir_foreach_instr_global(ctx, ins) {
      mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
   }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
   mir_foreach_instr_global(ctx, ins) {
      mir_rewrite_index_dst_single(ins, old, new);
   }

   /* Implicitly written before the shader */
   if (ctx->blend_input == old)
      ctx->blend_input = new;

   if (ctx->blend_src1 == old)
      ctx->blend_src1 = new;
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
   mir_rewrite_index_src(ctx, old, new);
   mir_rewrite_index_dst(ctx, old, new);
}
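
/* e.g. a pass that replaces one value with another can rename every reader
 * and the writer of the old index in a single sweep */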

unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
   unsigned used_count = 0;

   mir_foreach_instr_global(ctx, ins) {
      if (mir_has_arg(ins, value))
         ++used_count;
   }

   if (ctx->blend_input == value)
      ++used_count;

   if (ctx->blend_src1 == value)
      ++used_count;

   return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
   /* We can replicate constants in places so who cares */
   if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
      return true;

   return mir_use_count(ctx, value) <= 1;
}

bool
mir_nontrivial_mod(const midgard_instruction *ins, unsigned i,
                   bool check_swizzle)
{
   bool is_int = midgard_is_integer_op(ins->op);

   if (is_int) {
      if (ins->src_shift[i])
         return true;
   } else {
      if (ins->src_neg[i])
         return true;
      if (ins->src_abs[i])
         return true;
   }

   if (ins->dest_type != ins->src_types[i])
      return true;

   if (check_swizzle) {
      for (unsigned c = 0; c < 16; ++c) {
         if (!(ins->mask & (1 << c)))
            continue;
         if (ins->swizzle[i][c] != c)
            return true;
      }
   }

   return false;
}

bool
mir_nontrivial_outmod(const midgard_instruction *ins)
{
   bool is_int = midgard_is_integer_op(ins->op);
   unsigned mod = ins->outmod;

   if (ins->dest_type != ins->src_types[1])
      return true;

   if (is_int)
      return mod != midgard_outmod_keeplo;
   else
      return mod != midgard_outmod_none;
}

/* 128 / sz = exp2(log2(128 / sz))
 *          = exp2(log2(128) - log2(sz))
 *          = exp2(7 - log2(sz))
 *          = 1 << (7 - log2(sz))
 */
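/* e.g. 32-bit components: 1 << (7 - log2(32)) = 1 << 2 = 4 per 128-bit vector */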

static unsigned
mir_components_for_bits(unsigned bits)
{
   return 1 << (7 - util_logbase2(bits));
}

unsigned
mir_components_for_type(nir_alu_type T)
{
   unsigned sz = nir_alu_type_get_type_size(T);
   return mir_components_for_bits(sz);
}

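/* Converts a per-byte mask to a per-component mask, given the component size
 * in bits; e.g. for 32-bit components, bytemask 0x00F0 (bytes 4-7) becomes
 * component mask 0x2 (component 1) */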
uint16_t
mir_from_bytemask(uint16_t bytemask, unsigned bits)
{
   unsigned value = 0;
   unsigned count = bits / 8;

   for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
      bool a = (bytemask & (1 << c)) != 0;

      /* The bytemask must cover whole components: every byte of the
       * component must match the first */
      for (unsigned q = c; q < c + count; ++q)
         assert(((bytemask & (1 << q)) != 0) == a);

      value |= (a << d);
   }

   return value;
}

/* Rounds up a bytemask to fill a given component count. Iterate each
 * component, and check if any bytes in the component are masked on */
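/* e.g. with 32-bit components (4 bytes each), mask 0x0010 rounds up to 0x00F0 */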

uint16_t
mir_round_bytemask_up(uint16_t mask, unsigned bits)
{
   unsigned bytes = bits / 8;
   unsigned maxmask = mask_of(bytes);
   unsigned channels = mir_components_for_bits(bits);

   for (unsigned c = 0; c < channels; ++c) {
      unsigned submask = maxmask << (c * bytes);

      if (mask & submask)
         mask |= submask;
   }

   return mask;
}

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(const midgard_instruction *ins)
{
   unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
   return pan_to_bytemask(type_size, ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
   unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
   ins->mask = mir_from_bytemask(bytemask, type_size);
}

/*
 * Checks if we should use an upper destination override, rather than the lower
 * one in the IR. If yes, returns the bytes to shift by. If no, returns zero
 * for a lower override and negative for no override.
 */
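/* e.g. a 16-bit destination within a 32-bit instruction gives 8 components
 * per vector; a writemask touching only the upper half (components 4-7)
 * selects the upper override */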
signed
mir_upper_override(const midgard_instruction *ins, unsigned inst_size)
{
   unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);

   /* If the sizes are the same, there's nothing to override */
   if (type_size == inst_size)
      return -1;

   /* There are 16 bytes per vector, so there are (16/bytes)
    * components per vector. So the magic half is half of
    * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
    */

   unsigned threshold = mir_components_for_bits(type_size) >> 1;

   /* How many components did we shift over? */
   unsigned zeroes = __builtin_ctz(ins->mask);

   /* Did we hit the threshold? */
   return (zeroes >= threshold) ? threshold : 0;
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 *    fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(const unsigned *swizzle, unsigned inmask,
                                       unsigned bits)
{
   unsigned cmask = 0;

   for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
      if (!(inmask & (1 << c)))
         continue;
      cmask |= (1 << swizzle[c]);
   }

   return pan_to_bytemask(bits, cmask);
}

uint16_t
mir_bytemask_of_read_components_index(const midgard_instruction *ins,
                                      unsigned i)
{
   /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi
    * branch??) */
   if (ins->compact_branch && ins->branch.conditional && (i == 0))
      return 0xF;

   /* ALU ops act componentwise, so we need to pay attention to their mask.
    * Texture/ldst ops do not, so we don't clamp source readmasks based on
    * the writemask */
   unsigned qmask = ~0;

   /* Handle dot products and things: ops with a fixed channel count read
    * that many channels regardless of the writemask */
   if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
      unsigned props = alu_opcode_props[ins->op].props;

      unsigned channel_override = GET_CHANNEL_COUNT(props);

      if (channel_override)
         qmask = mask_of(channel_override);
      else
         qmask = ins->mask;
   }

   return mir_bytemask_of_read_components_single(
      ins->swizzle[i], qmask, nir_alu_type_get_type_size(ins->src_types[i]));
}

uint16_t
mir_bytemask_of_read_components(const midgard_instruction *ins, unsigned node)
{
   uint16_t mask = 0;

   if (node == ~0)
      return 0;

   mir_foreach_src(ins, i) {
      if (ins->src[i] != node)
         continue;
      mask |= mir_bytemask_of_read_components_index(ins, i);
   }

   return mask;
}

/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So just semantically we need to insert
 * the instruction into a new bundle before/after the bundle of the instruction
 * in question */

static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, const midgard_instruction *ins)
{
   midgard_instruction *u = mir_upload_ins(ctx, ins);

   midgard_bundle bundle = {
      .tag = ins->type,
      .instruction_count = 1,
      .instructions = {u},
   };

   if (bundle.tag == TAG_ALU_4) {
      assert(OP_IS_MOVE(u->op));
      u->unit = UNIT_VMUL;

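      /* Compute the bytes the bundle occupies and pad it out to the next
       * 16-byte boundary (~(x - 1) & 0xF == -x mod 16) */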
      size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) +
                             sizeof(midgard_vector_alu);
      bundle.padding = ~(bytes_emitted - 1) & 0xF;
      bundle.control = ins->type | u->unit;
   }

   return bundle;
}

static unsigned
mir_bundle_idx_for_ins(const midgard_instruction *tag, midgard_block *block)
{
   midgard_bundle *bundles = (midgard_bundle *)block->bundles.data;

   size_t count = (block->bundles.size / sizeof(midgard_bundle));

   for (unsigned i = 0; i < count; ++i) {
      for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
         if (bundles[i].instructions[j] == tag)
            return i;
      }
   }

   mir_print_instruction(tag);
   unreachable("Instruction not scheduled in block");
}

midgard_instruction *
mir_insert_instruction_before_scheduled(compiler_context *ctx,
                                        midgard_block *block,
                                        const midgard_instruction *tag,
                                        const midgard_instruction *ins)
{
   unsigned before = mir_bundle_idx_for_ins(tag, block);
   size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
   UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

   midgard_bundle *bundles = (midgard_bundle *)block->bundles.data;
   memmove(bundles + before + 1, bundles + before,
           (count - before) * sizeof(midgard_bundle));
   midgard_bundle *before_bundle = bundles + before + 1;

   midgard_bundle new = mir_bundle_for_op(ctx, ins);
   memcpy(bundles + before, &new, sizeof(new));

   list_addtail(&new.instructions[0]->link,
                &before_bundle->instructions[0]->link);
   block->quadword_count += midgard_tag_props[new.tag].size;

   return new.instructions[0];
}

midgard_instruction *
mir_insert_instruction_after_scheduled(compiler_context *ctx,
                                       midgard_block *block,
                                       const midgard_instruction *tag,
                                       const midgard_instruction *ins)
{
   /* We need to grow the bundles array to add our new bundle */
   size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
   UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

   /* Find the bundle that we want to insert after */
   unsigned after = mir_bundle_idx_for_ins(tag, block);

   /* All the bundles after that one, we move ahead by one */
   midgard_bundle *bundles = (midgard_bundle *)block->bundles.data;
   memmove(bundles + after + 2, bundles + after + 1,
           (count - after - 1) * sizeof(midgard_bundle));
   midgard_bundle *after_bundle = bundles + after;

   midgard_bundle new = mir_bundle_for_op(ctx, ins);
   memcpy(bundles + after + 1, &new, sizeof(new));
   list_add(
      &new.instructions[0]->link,
      &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
   block->quadword_count += midgard_tag_props[new.tag].size;

   return new.instructions[0];
}

/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */
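/* (e.g. to move an inline constant into a canonical source slot for a
 * commutative op) */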

void
mir_flip(midgard_instruction *ins)
{
   unsigned temp = ins->src[0];
   ins->src[0] = ins->src[1];
   ins->src[1] = temp;

   assert(ins->type == TAG_ALU_4);

   temp = ins->src_types[0];
   ins->src_types[0] = ins->src_types[1];
   ins->src_types[1] = temp;

   temp = ins->src_abs[0];
   ins->src_abs[0] = ins->src_abs[1];
   ins->src_abs[1] = temp;

   temp = ins->src_neg[0];
   ins->src_neg[0] = ins->src_neg[1];
   ins->src_neg[1] = temp;

   temp = ins->src_invert[0];
   ins->src_invert[0] = ins->src_invert[1];
   ins->src_invert[1] = temp;

   unsigned temp_swizzle[16];
   memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
   memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
   memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
   unsigned max_index = 0;

   mir_foreach_instr_global(ctx, ins) {
      if (ins->dest < SSA_FIXED_MINIMUM)
         max_index = MAX2(max_index, ins->dest + 1);
   }

   if (ctx->blend_input != ~0)
      max_index = MAX2(max_index, ctx->blend_input + 1);

   if (ctx->blend_src1 != ~0)
      max_index = MAX2(max_index, ctx->blend_src1 + 1);

   ctx->temp_count = max_index;
}