1 /*
2 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3 * Copyright (C) 2019-2020 Collabora, Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include <err.h>
26 #include <fcntl.h>
27 #include <stdint.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <sys/mman.h>
31 #include <sys/stat.h>
32 #include <sys/types.h>
33
34 #include "compiler/glsl/glsl_to_nir.h"
35 #include "compiler/glsl_types.h"
36 #include "compiler/nir/nir_builder.h"
37 #include "util/half_float.h"
38 #include "util/list.h"
39 #include "util/u_debug.h"
40 #include "util/u_dynarray.h"
41 #include "util/u_math.h"
42
43 #include "compiler.h"
44 #include "helpers.h"
45 #include "midgard.h"
46 #include "midgard_compile.h"
47 #include "midgard_nir.h"
48 #include "midgard_ops.h"
49 #include "midgard_quirks.h"
50
51 #include "disassemble.h"
52
53 static const struct debug_named_value midgard_debug_options[] = {
54 {"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
55 {"shaderdb", MIDGARD_DBG_SHADERDB, "Prints shader-db statistics"},
56 {"inorder", MIDGARD_DBG_INORDER, "Disables out-of-order scheduling"},
57 {"verbose", MIDGARD_DBG_VERBOSE, "Dump shaders verbosely"},
58 {"internal", MIDGARD_DBG_INTERNAL, "Dump internal shaders"},
59 DEBUG_NAMED_VALUE_END};
60
61 DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG",
62 midgard_debug_options, 0)
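/* Flags are read once from the MIDGARD_MESA_DEBUG environment variable as a
 * comma-separated list, e.g. MIDGARD_MESA_DEBUG=shaders,shaderdb. */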
63
64 int midgard_debug = 0;
65
66 static midgard_block *
67 create_empty_block(compiler_context *ctx)
68 {
69 midgard_block *blk = rzalloc(ctx, midgard_block);
70
71 blk->base.predecessors =
72 _mesa_set_create(blk, _mesa_hash_pointer, _mesa_key_pointer_equal);
73
74 blk->base.name = ctx->block_source_count++;
75
76 return blk;
77 }
78
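/* Split the current block: subsequent instructions land in a fresh successor
 * block, so the (purely per-block) scheduler cannot move work across this
 * point. */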
79 static void
80 schedule_barrier(compiler_context *ctx)
81 {
82 midgard_block *temp = ctx->after_block;
83 ctx->after_block = create_empty_block(ctx);
84 ctx->block_count++;
85 list_addtail(&ctx->after_block->base.link, &ctx->blocks);
86 list_inithead(&ctx->after_block->base.instructions);
87 pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
88 ctx->current_block = ctx->after_block;
89 ctx->after_block = temp;
90 }
91
92 /* Helpers to generate midgard_instructions using macro magic, since every
93 * driver seems to do it that way */
94
95 #define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
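/* e.g. EMIT(branch, false, false) expands to
 * emit_mir_instruction(ctx, v_branch(false, false)); */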
96
97 #define M_LOAD_STORE(name, store, T) \
98 static midgard_instruction m_##name(unsigned ssa, unsigned address) \
99 { \
100 midgard_instruction i = { \
101 .type = TAG_LOAD_STORE_4, \
102 .mask = 0xF, \
103 .dest = ~0, \
104 .src = {~0, ~0, ~0, ~0}, \
105 .swizzle = SWIZZLE_IDENTITY_4, \
106 .op = midgard_op_##name, \
107 .load_store = \
108 { \
109 .signed_offset = address, \
110 }, \
111 }; \
112 \
113 if (store) { \
114 i.src[0] = ssa; \
115 i.src_types[0] = T; \
116 i.dest_type = T; \
117 } else { \
118 i.dest = ssa; \
119 i.dest_type = T; \
120 } \
121 return i; \
122 }
123
124 #define M_LOAD(name, T) M_LOAD_STORE(name, false, T)
125 #define M_STORE(name, T) M_LOAD_STORE(name, true, T)
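/* e.g. M_LOAD(ld_attr_32, nir_type_uint32) below defines
 * m_ld_attr_32(dest, address) returning a load that writes `dest`, while
 * M_STORE generates the store variant reading from src[0]. */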
126
127 M_LOAD(ld_attr_32, nir_type_uint32);
128 M_LOAD(ld_vary_32, nir_type_uint32);
129 M_LOAD(ld_ubo_u8, nir_type_uint32); /* mandatory extension to 32-bit */
130 M_LOAD(ld_ubo_u16, nir_type_uint32);
131 M_LOAD(ld_ubo_32, nir_type_uint32);
132 M_LOAD(ld_ubo_64, nir_type_uint32);
133 M_LOAD(ld_ubo_128, nir_type_uint32);
134 M_LOAD(ld_u8, nir_type_uint8);
135 M_LOAD(ld_u16, nir_type_uint16);
136 M_LOAD(ld_32, nir_type_uint32);
137 M_LOAD(ld_64, nir_type_uint32);
138 M_LOAD(ld_128, nir_type_uint32);
139 M_STORE(st_u8, nir_type_uint8);
140 M_STORE(st_u16, nir_type_uint16);
141 M_STORE(st_32, nir_type_uint32);
142 M_STORE(st_64, nir_type_uint32);
143 M_STORE(st_128, nir_type_uint32);
144 M_LOAD(ld_tilebuffer_raw, nir_type_uint32);
145 M_LOAD(ld_tilebuffer_16f, nir_type_float16);
146 M_LOAD(ld_tilebuffer_32f, nir_type_float32);
147 M_STORE(st_vary_32, nir_type_uint32);
148 M_LOAD(ld_cubemap_coords, nir_type_uint32);
149 M_LOAD(ldst_mov, nir_type_uint32);
150 M_LOAD(ld_image_32f, nir_type_float32);
151 M_LOAD(ld_image_16f, nir_type_float16);
152 M_LOAD(ld_image_32u, nir_type_uint32);
153 M_LOAD(ld_image_32i, nir_type_int32);
154 M_STORE(st_image_32f, nir_type_float32);
155 M_STORE(st_image_16f, nir_type_float16);
156 M_STORE(st_image_32u, nir_type_uint32);
157 M_STORE(st_image_32i, nir_type_int32);
158 M_LOAD(lea_image, nir_type_uint64);
159
160 #define M_IMAGE(op) \
161 static midgard_instruction op##_image(nir_alu_type type, unsigned val, \
162 unsigned address) \
163 { \
164 switch (type) { \
165 case nir_type_float32: \
166 return m_##op##_image_32f(val, address); \
167 case nir_type_float16: \
168 return m_##op##_image_16f(val, address); \
169 case nir_type_uint32: \
170 return m_##op##_image_32u(val, address); \
171 case nir_type_int32: \
172 return m_##op##_image_32i(val, address); \
173 default: \
174 unreachable("Invalid image type"); \
175 } \
176 }
177
178 M_IMAGE(ld);
179 M_IMAGE(st);
180
181 static midgard_instruction
182 v_branch(bool conditional, bool invert)
183 {
184 midgard_instruction ins = {
185 .type = TAG_ALU_4,
186 .unit = ALU_ENAB_BRANCH,
187 .compact_branch = true,
188 .branch =
189 {
190 .conditional = conditional,
191 .invert_conditional = invert,
192 },
193 .dest = ~0,
194 .src = {~0, ~0, ~0, ~0},
195 };
196
197 return ins;
198 }
199
200 static void
201 attach_constants(compiler_context *ctx, midgard_instruction *ins,
202 void *constants, int name)
203 {
204 ins->has_constants = true;
205 memcpy(&ins->constants, constants, 16);
206 }
207
208 static int
209 glsl_type_size(const struct glsl_type *type, bool bindless)
210 {
211 return glsl_count_attribute_slots(type, false);
212 }
213
214 static bool
215 midgard_nir_lower_global_load_instr(nir_builder *b, nir_intrinsic_instr *intr,
216 void *data)
217 {
218 if (intr->intrinsic != nir_intrinsic_load_global &&
219 intr->intrinsic != nir_intrinsic_load_shared)
220 return false;
221
222 unsigned compsz = intr->def.bit_size;
223 unsigned totalsz = compsz * intr->def.num_components;
224 /* 8, 16, 32, 64 and 128 bit loads don't need to be lowered */
225 if (util_bitcount(totalsz) < 2 && totalsz <= 128)
226 return false;
227
228 b->cursor = nir_before_instr(&intr->instr);
229
230 nir_def *addr = intr->src[0].ssa;
231
232 nir_def *comps[MIR_VEC_COMPONENTS];
233 unsigned ncomps = 0;
234
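   /* Split into power-of-two chunks, largest first: e.g. a 96-bit (32-bit
    * vec3) load becomes a 64-bit load followed by a 32-bit load at addr + 8. */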
235 while (totalsz) {
236 unsigned loadsz = MIN2(1 << (util_last_bit(totalsz) - 1), 128);
237 unsigned loadncomps = loadsz / compsz;
238
239 nir_def *load;
240 if (intr->intrinsic == nir_intrinsic_load_global) {
241 load = nir_load_global(b, addr, compsz / 8, loadncomps, compsz);
242 } else {
243 assert(intr->intrinsic == nir_intrinsic_load_shared);
244 nir_intrinsic_instr *shared_load =
245 nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_shared);
246 shared_load->num_components = loadncomps;
247 shared_load->src[0] = nir_src_for_ssa(addr);
248 nir_intrinsic_set_align(shared_load, compsz / 8, 0);
249 nir_intrinsic_set_base(shared_load, nir_intrinsic_base(intr));
250 nir_def_init(&shared_load->instr, &shared_load->def,
251 shared_load->num_components, compsz);
252 nir_builder_instr_insert(b, &shared_load->instr);
253 load = &shared_load->def;
254 }
255
256 for (unsigned i = 0; i < loadncomps; i++)
257 comps[ncomps++] = nir_channel(b, load, i);
258
259 totalsz -= loadsz;
260 addr = nir_iadd_imm(b, addr, loadsz / 8);
261 }
262
263 assert(ncomps == intr->def.num_components);
264 nir_def_rewrite_uses(&intr->def, nir_vec(b, comps, ncomps));
265
266 return true;
267 }
268
269 static bool
270 midgard_nir_lower_global_load(nir_shader *shader)
271 {
272 return nir_shader_intrinsics_pass(
273 shader, midgard_nir_lower_global_load_instr,
274 nir_metadata_control_flow, NULL);
275 }
276
277 static bool
278 mdg_should_scalarize(const nir_instr *instr, const void *_unused)
279 {
280 const nir_alu_instr *alu = nir_instr_as_alu(instr);
281
282 if (nir_src_bit_size(alu->src[0].src) == 64)
283 return true;
284
285 if (alu->def.bit_size == 64)
286 return true;
287
288 switch (alu->op) {
289 case nir_op_fdot2:
290 case nir_op_umul_high:
291 case nir_op_imul_high:
292 case nir_op_pack_half_2x16:
293 case nir_op_unpack_half_2x16:
294
295 /* The LUT unit is scalar */
296 case nir_op_fsqrt:
297 case nir_op_frcp:
298 case nir_op_frsq:
299 case nir_op_fsin_mdg:
300 case nir_op_fcos_mdg:
301 case nir_op_fexp2:
302 case nir_op_flog2:
303 return true;
304 default:
305 return false;
306 }
307 }
308
309 /* Only vectorize int64 up to vec2 */
310 static uint8_t
311 midgard_vectorize_filter(const nir_instr *instr, const void *data)
312 {
313 if (instr->type != nir_instr_type_alu)
314 return 0;
315
316 const nir_alu_instr *alu = nir_instr_as_alu(instr);
317 int src_bit_size = nir_src_bit_size(alu->src[0].src);
318 int dst_bit_size = alu->def.bit_size;
319
320 if (src_bit_size == 64 || dst_bit_size == 64)
321 return 2;
322
323 return 4;
324 }
325
326 static nir_mem_access_size_align
327 mem_access_size_align_cb(nir_intrinsic_op intrin, uint8_t bytes,
328 uint8_t bit_size, uint32_t align_mul,
329 uint32_t align_offset, bool offset_is_const,
330 enum gl_access_qualifier access, const void *cb_data)
331 {
332 uint32_t align = nir_combined_align(align_mul, align_offset);
333 assert(util_is_power_of_two_nonzero(align));
334
335 /* No more than 16 bytes at a time. */
336 bytes = MIN2(bytes, 16);
337
338 /* If the number of bytes is a multiple of 4, use 32-bit loads. Else if it's
339 * a multiple of 2, use 16-bit loads. Else use 8-bit loads.
340 *
341 * But if we're only aligned to 1 byte, use 8-bit loads. If we're only
342 * aligned to 2 bytes, use 16-bit loads, unless we needed 8-bit loads due to
343 * the size.
344 */
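   /* Worked example: 6 bytes at 2-byte alignment become a 3-component 16-bit
    * access; 12 bytes at 4-byte alignment (with a 32-bit source type) stay a
    * 3-component 32-bit access. */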
345 if ((bytes & 1) || (align == 1))
346 bit_size = 8;
347 else if ((bytes & 2) || (align == 2))
348 bit_size = 16;
349 else if (bit_size >= 32)
350 bit_size = 32;
351
352 unsigned num_comps = MIN2(bytes / (bit_size / 8), 4);
353
354 return (nir_mem_access_size_align){
355 .num_components = num_comps,
356 .bit_size = bit_size,
357 .align = bit_size / 8,
358 .shift = nir_mem_access_shift_method_scalar,
359 };
360 }
361
362 static uint8_t
363 lower_vec816_alu(const nir_instr *instr, const void *cb_data)
364 {
365 return 4;
366 }
367
368 void
369 midgard_preprocess_nir(nir_shader *nir, unsigned gpu_id)
370 {
371 unsigned quirks = midgard_get_quirks(gpu_id);
372
373 /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
374 * (so we don't accidentally duplicate the epilogue since mesa/st has
375 * messed with our I/O quite a bit already).
376 */
377 NIR_PASS(_, nir, nir_lower_vars_to_ssa);
378
379 if (nir->info.stage == MESA_SHADER_VERTEX) {
380 NIR_PASS(_, nir, pan_nir_lower_vertex_id);
381 NIR_PASS(_, nir, nir_lower_viewport_transform);
382 NIR_PASS(_, nir, nir_lower_point_size, 1.0, 0.0);
383 }
384
385 NIR_PASS(_, nir, nir_lower_var_copies);
386 NIR_PASS(_, nir, nir_lower_vars_to_ssa);
387 NIR_PASS(_, nir, nir_split_var_copies);
388 NIR_PASS(_, nir, nir_lower_var_copies);
389 NIR_PASS(_, nir, nir_lower_global_vars_to_local);
390 NIR_PASS(_, nir, nir_lower_var_copies);
391 NIR_PASS(_, nir, nir_lower_vars_to_ssa);
392
393 NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
394 glsl_type_size, nir_lower_io_use_interpolated_input_intrinsics);
395
396 if (nir->info.stage == MESA_SHADER_VERTEX) {
397 /* nir_lower[_explicit]_io is lazy and emits mul+add chains even
398 * for offsets it could figure out are constant. Do some
399 * constant folding before pan_nir_lower_store_component below.
400 */
401 NIR_PASS(_, nir, nir_opt_constant_folding);
402 NIR_PASS(_, nir, pan_nir_lower_store_component);
403 }
404
405    /* Could eventually be useful for Vulkan, but we don't expect it to have
406 * the support, so limit it to compute */
407 if (gl_shader_stage_is_compute(nir->info.stage)) {
408 nir_lower_mem_access_bit_sizes_options mem_size_options = {
409 .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
410 nir_var_mem_constant | nir_var_mem_task_payload |
411 nir_var_shader_temp | nir_var_function_temp |
412 nir_var_mem_global | nir_var_mem_shared,
413 .callback = mem_access_size_align_cb,
414 };
415
416 NIR_PASS(_, nir, nir_lower_mem_access_bit_sizes, &mem_size_options);
417 NIR_PASS(_, nir, nir_lower_alu_width, lower_vec816_alu, NULL);
418 NIR_PASS(_, nir, nir_lower_alu_vec8_16_srcs);
419 }
420
421 NIR_PASS(_, nir, nir_lower_ssbo, NULL);
422 NIR_PASS(_, nir, pan_nir_lower_zs_store);
423
424 NIR_PASS(_, nir, nir_lower_frexp);
425 NIR_PASS(_, nir, midgard_nir_lower_global_load);
426
427 nir_lower_idiv_options idiv_options = {
428 .allow_fp16 = true,
429 };
430
431 NIR_PASS(_, nir, nir_lower_idiv, &idiv_options);
432
433 nir_lower_tex_options lower_tex_options = {
434 .lower_txs_lod = true,
435 .lower_txp = ~0,
436 .lower_tg4_broadcom_swizzle = true,
437 .lower_txd = true,
438 .lower_invalid_implicit_lod = true,
439 };
440
441 NIR_PASS(_, nir, nir_lower_tex, &lower_tex_options);
442 NIR_PASS(_, nir, nir_lower_image_atomics_to_global);
443
444 /* TEX_GRAD fails to apply sampler descriptor settings on some
445 * implementations, requiring a lowering.
446 */
447 if (quirks & MIDGARD_BROKEN_LOD)
448 NIR_PASS(_, nir, midgard_nir_lod_errata);
449
450 /* lower MSAA image operations to 3D load before coordinate lowering */
451 NIR_PASS(_, nir, pan_nir_lower_image_ms);
452
453 /* Midgard image ops coordinates are 16-bit instead of 32-bit */
454 NIR_PASS(_, nir, midgard_nir_lower_image_bitsize);
455
456 if (nir->info.stage == MESA_SHADER_FRAGMENT)
457 NIR_PASS(_, nir, nir_lower_helper_writes, true);
458
459 NIR_PASS(_, nir, pan_lower_helper_invocation);
460 NIR_PASS(_, nir, pan_lower_sample_pos);
461 NIR_PASS(_, nir, midgard_nir_lower_algebraic_early);
462 NIR_PASS(_, nir, nir_lower_alu_to_scalar, mdg_should_scalarize, NULL);
463 NIR_PASS(_, nir, nir_lower_flrp, 16 | 32 | 64, false /* always_precise */);
464 NIR_PASS(_, nir, nir_lower_var_copies);
465 }
466
467 static void
468 optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
469 {
470 bool progress;
471
472 do {
473 progress = false;
474
475 NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
476
477 NIR_PASS(progress, nir, nir_copy_prop);
478 NIR_PASS(progress, nir, nir_opt_remove_phis);
479 NIR_PASS(progress, nir, nir_opt_dce);
480 NIR_PASS(progress, nir, nir_opt_dead_cf);
481 NIR_PASS(progress, nir, nir_opt_cse);
482 NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
483 NIR_PASS(progress, nir, nir_opt_algebraic);
484 NIR_PASS(progress, nir, nir_opt_constant_folding);
485 NIR_PASS(progress, nir, nir_opt_undef);
486 NIR_PASS(progress, nir, nir_lower_undef_to_zero);
487
488 NIR_PASS(progress, nir, nir_opt_loop_unroll);
489
490 NIR_PASS(progress, nir, nir_opt_vectorize, midgard_vectorize_filter,
491 NULL);
492 } while (progress);
493
494 NIR_PASS(_, nir, nir_lower_alu_to_scalar, mdg_should_scalarize, NULL);
495
496 /* Run after opts so it can hit more */
497 if (!is_blend)
498 NIR_PASS(progress, nir, nir_fuse_io_16);
499
500 do {
501 progress = false;
502
503 NIR_PASS(progress, nir, nir_opt_dce);
504 NIR_PASS(progress, nir, nir_opt_algebraic);
505 NIR_PASS(progress, nir, nir_opt_constant_folding);
506 NIR_PASS(progress, nir, nir_copy_prop);
507 } while (progress);
508
509 NIR_PASS(progress, nir, nir_opt_algebraic_late);
510 NIR_PASS(progress, nir, nir_opt_algebraic_distribute_src_mods);
511
512 /* We implement booleans as 32-bit 0/~0 */
513 NIR_PASS(progress, nir, nir_lower_bool_to_int32);
514
515    /* Now that booleans are lowered, we can run our late opts */
516 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
517 NIR_PASS(progress, nir, midgard_nir_cancel_inot);
518 NIR_PASS(_, nir, midgard_nir_type_csel);
519
520 /* Clean up after late opts */
521 do {
522 progress = false;
523
524 NIR_PASS(progress, nir, nir_opt_dce);
525 NIR_PASS(progress, nir, nir_opt_constant_folding);
526 NIR_PASS(progress, nir, nir_copy_prop);
527 } while (progress);
528
529 /* Backend scheduler is purely local, so do some global optimizations
530 * to reduce register pressure. */
531 nir_move_options move_all = nir_move_const_undef | nir_move_load_ubo |
532 nir_move_load_input | nir_move_comparisons |
533 nir_move_copies | nir_move_load_ssbo;
534
535 NIR_PASS(_, nir, nir_opt_sink, move_all);
536 NIR_PASS(_, nir, nir_opt_move, move_all);
537
538 /* Take us out of SSA */
539 NIR_PASS(progress, nir, nir_convert_from_ssa, true);
540
541 /* We are a vector architecture; write combine where possible */
542 NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest, false);
543 NIR_PASS(progress, nir, nir_lower_vec_to_regs, NULL, NULL);
544
545 NIR_PASS(progress, nir, nir_opt_dce);
546 nir_trivialize_registers(nir);
547 }
548
549 /* Do not actually emit a load; instead, cache the constant for inlining */
550
551 static void
552 emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
553 {
554 nir_def def = instr->def;
555
556 midgard_constants *consts = rzalloc(ctx, midgard_constants);
557
558 assert(instr->def.num_components * instr->def.bit_size <=
559 sizeof(*consts) * 8);
560
561 #define RAW_CONST_COPY(bits) \
562 nir_const_value_to_array(consts->u##bits, instr->value, \
563 instr->def.num_components, u##bits)
564
565 switch (instr->def.bit_size) {
566 case 64:
567 RAW_CONST_COPY(64);
568 break;
569 case 32:
570 RAW_CONST_COPY(32);
571 break;
572 case 16:
573 RAW_CONST_COPY(16);
574 break;
575 case 8:
576 RAW_CONST_COPY(8);
577 break;
578 default:
579 unreachable("Invalid bit_size for load_const instruction\n");
580 }
581
582 /* Shifted for SSA, +1 for off-by-one */
583 _mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1,
584 consts);
585 }
586
587 /* Normally constants are embedded implicitly, but for I/O and such we have to
588 * explicitly emit a move with the constant source */
589
590 static void
591 emit_explicit_constant(compiler_context *ctx, unsigned node)
592 {
593 void *constant_value =
594 _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
595
596 if (constant_value) {
597 midgard_instruction ins =
598 v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), node);
599 attach_constants(ctx, &ins, constant_value, node + 1);
600 emit_mir_instruction(ctx, ins);
601 }
602 }
603
604 static bool
605 nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
606 {
607 unsigned comp = src->swizzle[0];
608
609 for (unsigned c = 1; c < nr_components; ++c) {
610 if (src->swizzle[c] != comp)
611 return true;
612 }
613
614 return false;
615 }
616
617 #define ALU_CASE(nir, _op) \
618 case nir_op_##nir: \
619 op = midgard_alu_op_##_op; \
620 assert(src_bitsize == dst_bitsize); \
621 break;
622
623 #define ALU_CASE_RTZ(nir, _op) \
624 case nir_op_##nir: \
625 op = midgard_alu_op_##_op; \
626 roundmode = MIDGARD_RTZ; \
627 break;
628
629 #define ALU_CHECK_CMP() \
630 assert(src_bitsize == 16 || src_bitsize == 32 || src_bitsize == 64); \
631 assert(dst_bitsize == 16 || dst_bitsize == 32);
632
633 #define ALU_CASE_BCAST(nir, _op, count) \
634 case nir_op_##nir: \
635 op = midgard_alu_op_##_op; \
636 broadcast_swizzle = count; \
637 ALU_CHECK_CMP(); \
638 break;
639
640 #define ALU_CASE_CMP(nir, _op) \
641 case nir_op_##nir: \
642 op = midgard_alu_op_##_op; \
643 ALU_CHECK_CMP(); \
644 break;
645
646 static void
647 mir_copy_src(midgard_instruction *ins, nir_alu_instr *instr, unsigned i,
648 unsigned to, bool is_int, unsigned bcast_count)
649 {
650 nir_alu_src src = instr->src[i];
651 unsigned bits = nir_src_bit_size(src.src);
652
653 ins->src[to] = nir_src_index(NULL, &src.src);
654 ins->src_types[to] = nir_op_infos[instr->op].input_types[i] | bits;
655
656 /* Figure out which component we should fill unused channels with. This
657 * doesn't matter too much in the non-broadcast case, but it makes
658     * sure that scalar sources are packed with replicated swizzles,
659 * which works around issues seen with the combination of source
660 * expansion and destination shrinking.
661 */
662 unsigned replicate_c = 0;
663 if (bcast_count) {
664 replicate_c = bcast_count - 1;
665 } else {
666 for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c) {
667 if (nir_alu_instr_channel_used(instr, i, c))
668 replicate_c = c;
669 }
670 }
671
672 for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c) {
673 ins->swizzle[to][c] =
674 src.swizzle[((!bcast_count || c < bcast_count) &&
675 nir_alu_instr_channel_used(instr, i, c))
676 ? c
677 : replicate_c];
678 }
679 }
680
681 static void
682 emit_alu(compiler_context *ctx, nir_alu_instr *instr)
683 {
684 unsigned nr_components = instr->def.num_components;
685 unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
686 unsigned op = 0;
687
688 /* Number of components valid to check for the instruction (the rest
689 * will be forced to the last), or 0 to use as-is. Relevant as
690 * ball-type instructions have a channel count in NIR but are all vec4
691 * in Midgard */
692
693 unsigned broadcast_swizzle = 0;
694
695 /* Should we swap arguments? */
696 bool flip_src12 = false;
697
698 ASSERTED unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
699 unsigned dst_bitsize = instr->def.bit_size;
700
701 enum midgard_roundmode roundmode = MIDGARD_RTE;
702
703 switch (instr->op) {
704 ALU_CASE(fadd, fadd);
705 ALU_CASE(fmul, fmul);
706 ALU_CASE(fmin, fmin);
707 ALU_CASE(fmax, fmax);
708 ALU_CASE(imin, imin);
709 ALU_CASE(imax, imax);
710 ALU_CASE(umin, umin);
711 ALU_CASE(umax, umax);
712 ALU_CASE(ffloor, ffloor);
713 ALU_CASE(fround_even, froundeven);
714 ALU_CASE(ftrunc, ftrunc);
715 ALU_CASE(fceil, fceil);
716 ALU_CASE(fdot3, fdot3);
717 ALU_CASE(fdot4, fdot4);
718 ALU_CASE(iadd, iadd);
719 ALU_CASE(isub, isub);
720 ALU_CASE(iadd_sat, iaddsat);
721 ALU_CASE(isub_sat, isubsat);
722 ALU_CASE(uadd_sat, uaddsat);
723 ALU_CASE(usub_sat, usubsat);
724 ALU_CASE(imul, imul);
725 ALU_CASE(imul_high, imul);
726 ALU_CASE(umul_high, imul);
727 ALU_CASE(uclz, iclz);
728
729 /* Zero shoved as second-arg */
730 ALU_CASE(iabs, iabsdiff);
731
732 ALU_CASE(uabs_isub, iabsdiff);
733 ALU_CASE(uabs_usub, uabsdiff);
734
735 ALU_CASE(mov, imov);
736
737 ALU_CASE_CMP(feq32, feq);
738 ALU_CASE_CMP(fneu32, fne);
739 ALU_CASE_CMP(flt32, flt);
740 ALU_CASE_CMP(ieq32, ieq);
741 ALU_CASE_CMP(ine32, ine);
742 ALU_CASE_CMP(ilt32, ilt);
743 ALU_CASE_CMP(ult32, ult);
744
745 /* We don't have a native b2f32 instruction. Instead, like many
746 * GPUs, we exploit booleans as 0/~0 for false/true, and
747 * correspondingly AND
748 * by 1.0 to do the type conversion. For the moment, prime us
749 * to emit:
750 *
751 * iand [whatever], #0
752 *
753 * At the end of emit_alu (as MIR), we'll fix-up the constant
754 */
755
756 ALU_CASE_CMP(b2f32, iand);
757 ALU_CASE_CMP(b2f16, iand);
758 ALU_CASE_CMP(b2i32, iand);
759 ALU_CASE_CMP(b2i16, iand);
760
761 ALU_CASE(frcp, frcp);
762 ALU_CASE(frsq, frsqrt);
763 ALU_CASE(fsqrt, fsqrt);
764 ALU_CASE(fexp2, fexp2);
765 ALU_CASE(flog2, flog2);
766
767 ALU_CASE_RTZ(f2i64, f2i_rte);
768 ALU_CASE_RTZ(f2u64, f2u_rte);
769 ALU_CASE_RTZ(i2f64, i2f_rte);
770 ALU_CASE_RTZ(u2f64, u2f_rte);
771
772 ALU_CASE_RTZ(f2i32, f2i_rte);
773 ALU_CASE_RTZ(f2u32, f2u_rte);
774 ALU_CASE_RTZ(i2f32, i2f_rte);
775 ALU_CASE_RTZ(u2f32, u2f_rte);
776
777 ALU_CASE_RTZ(f2i8, f2i_rte);
778 ALU_CASE_RTZ(f2u8, f2u_rte);
779
780 ALU_CASE_RTZ(f2i16, f2i_rte);
781 ALU_CASE_RTZ(f2u16, f2u_rte);
782 ALU_CASE_RTZ(i2f16, i2f_rte);
783 ALU_CASE_RTZ(u2f16, u2f_rte);
784
785 ALU_CASE(fsin_mdg, fsinpi);
786 ALU_CASE(fcos_mdg, fcospi);
787
788 /* We'll get 0 in the second arg, so:
789 * ~a = ~(a | 0) = nor(a, 0) */
790 ALU_CASE(inot, inor);
791 ALU_CASE(iand, iand);
792 ALU_CASE(ior, ior);
793 ALU_CASE(ixor, ixor);
794 ALU_CASE(ishl, ishl);
795 ALU_CASE(ishr, iasr);
796 ALU_CASE(ushr, ilsr);
797
798 ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
799 ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
800 ALU_CASE_CMP(b32all_fequal4, fball_eq);
801
802 ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
803 ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
804 ALU_CASE_CMP(b32any_fnequal4, fbany_neq);
805
806 ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
807 ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
808 ALU_CASE_CMP(b32all_iequal4, iball_eq);
809
810 ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
811 ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
812 ALU_CASE_CMP(b32any_inequal4, ibany_neq);
813
814 /* Source mods will be shoved in later */
815 ALU_CASE(fabs, fmov);
816 ALU_CASE(fneg, fmov);
817 ALU_CASE(fsat, fmov);
818 ALU_CASE(fsat_signed, fmov);
819 ALU_CASE(fclamp_pos, fmov);
820
821 /* For size conversion, we use a move. Ideally though we would squash
822 * these ops together; maybe that has to happen after in NIR as part of
823 * propagation...? An earlier algebraic pass ensured we step down by
824 * only / exactly one size. If stepping down, we use a dest override to
825 * reduce the size; if stepping up, we use a larger-sized move with a
826 * half source and a sign/zero-extension modifier */
827
828 case nir_op_i2i8:
829 case nir_op_i2i16:
830 case nir_op_i2i32:
831 case nir_op_i2i64:
832 case nir_op_u2u8:
833 case nir_op_u2u16:
834 case nir_op_u2u32:
835 case nir_op_u2u64:
836 case nir_op_f2f16:
837 case nir_op_f2f32:
838 case nir_op_f2f64: {
839 if (instr->op == nir_op_f2f16 || instr->op == nir_op_f2f32 ||
840 instr->op == nir_op_f2f64)
841 op = midgard_alu_op_fmov;
842 else
843 op = midgard_alu_op_imov;
844
845 break;
846 }
847
848 /* For greater-or-equal, we lower to less-or-equal and flip the
849 * arguments */
850
851 case nir_op_fge:
852 case nir_op_fge32:
853 case nir_op_ige32:
854 case nir_op_uge32: {
855 op = instr->op == nir_op_fge ? midgard_alu_op_fle
856 : instr->op == nir_op_fge32 ? midgard_alu_op_fle
857 : instr->op == nir_op_ige32 ? midgard_alu_op_ile
858 : instr->op == nir_op_uge32 ? midgard_alu_op_ule
859 : 0;
860
861 flip_src12 = true;
862 ALU_CHECK_CMP();
863 break;
864 }
865
866 case nir_op_b32csel:
867 case nir_op_b32fcsel_mdg: {
868 bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
869 bool is_float = instr->op == nir_op_b32fcsel_mdg;
870 op = is_float ? (mixed ? midgard_alu_op_fcsel_v : midgard_alu_op_fcsel)
871 : (mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel);
872
873 int index = nir_src_index(ctx, &instr->src[0].src);
874 emit_explicit_constant(ctx, index);
875
876 break;
877 }
878
879 case nir_op_unpack_32_2x16:
880 case nir_op_unpack_32_4x8:
881 case nir_op_pack_32_2x16:
882 case nir_op_pack_32_4x8: {
883 op = midgard_alu_op_imov;
884 break;
885 }
886
887 case nir_op_unpack_64_2x32:
888 case nir_op_unpack_64_4x16:
889 case nir_op_pack_64_2x32:
890 case nir_op_pack_64_4x16: {
891 op = midgard_alu_op_imov;
892 break;
893 }
894
895 default:
896 mesa_loge("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
897 assert(0);
898 return;
899 }
900
901 /* Promote imov to fmov if it might help inline a constant */
902 if (op == midgard_alu_op_imov && nir_src_is_const(instr->src[0].src) &&
903 nir_src_bit_size(instr->src[0].src) == 32 &&
904 nir_is_same_comp_swizzle(instr->src[0].swizzle,
905 nir_src_num_components(instr->src[0].src))) {
906 op = midgard_alu_op_fmov;
907 }
908
909 /* Midgard can perform certain modifiers on output of an ALU op */
910
911 unsigned outmod = 0;
912 bool is_int = midgard_is_integer_op(op);
913
914 if (instr->op == nir_op_umul_high || instr->op == nir_op_imul_high) {
915 outmod = midgard_outmod_keephi;
916 } else if (midgard_is_integer_out_op(op)) {
917 outmod = midgard_outmod_keeplo;
918 } else if (instr->op == nir_op_fsat) {
919 outmod = midgard_outmod_clamp_0_1;
920 } else if (instr->op == nir_op_fsat_signed) {
921 outmod = midgard_outmod_clamp_m1_1;
922 } else if (instr->op == nir_op_fclamp_pos) {
923 outmod = midgard_outmod_clamp_0_inf;
924 }
925
926 /* Fetch unit, quirks, etc information */
927 unsigned opcode_props = alu_opcode_props[op].props;
928 bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
929
930 midgard_instruction ins = {
931 .type = TAG_ALU_4,
932 .dest_type = nir_op_infos[instr->op].output_type | dst_bitsize,
933 .roundmode = roundmode,
934 };
935
936 ins.dest = nir_def_index_with_mask(&instr->def, &ins.mask);
937
938 for (unsigned i = nr_inputs; i < ARRAY_SIZE(ins.src); ++i)
939 ins.src[i] = ~0;
940
941 if (quirk_flipped_r24) {
942 ins.src[0] = ~0;
943 mir_copy_src(&ins, instr, 0, 1, is_int, broadcast_swizzle);
944 } else {
945 for (unsigned i = 0; i < nr_inputs; ++i) {
946 unsigned to = i;
947
948 if (instr->op == nir_op_b32csel || instr->op == nir_op_b32fcsel_mdg) {
949 /* The condition is the first argument; move
950 * the other arguments up one to be a binary
951 * instruction for Midgard with the condition
952 * last */
953
954 if (i == 0)
955 to = 2;
956 else if (flip_src12)
957 to = 2 - i;
958 else
959 to = i - 1;
960 } else if (flip_src12) {
961 to = 1 - to;
962 }
963
964 mir_copy_src(&ins, instr, i, to, is_int, broadcast_swizzle);
965 }
966 }
967
968 if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
969 /* Lowered to move */
970 if (instr->op == nir_op_fneg)
971 ins.src_neg[1] ^= true;
972
973 if (instr->op == nir_op_fabs)
974 ins.src_abs[1] = true;
975 }
976
977 ins.op = op;
978 ins.outmod = outmod;
979
980 /* Late fixup for emulated instructions */
981
982 if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
983 /* Presently, our second argument is an inline #0 constant.
984 * Switch over to an embedded 1.0 constant (that can't fit
985 * inline, since we're 32-bit, not 16-bit like the inline
986 * constants) */
987
988 ins.has_inline_constant = false;
989 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
990 ins.src_types[1] = nir_type_float32;
991 ins.has_constants = true;
992
993 if (instr->op == nir_op_b2f32)
994 ins.constants.f32[0] = 1.0f;
995 else
996 ins.constants.i32[0] = 1;
997
998 for (unsigned c = 0; c < 16; ++c)
999 ins.swizzle[1][c] = 0;
1000 } else if (instr->op == nir_op_b2f16) {
1001 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1002 ins.src_types[1] = nir_type_float16;
1003 ins.has_constants = true;
1004 ins.constants.i16[0] = _mesa_float_to_half(1.0);
1005
1006 for (unsigned c = 0; c < 16; ++c)
1007 ins.swizzle[1][c] = 0;
1008 } else if (nr_inputs == 1 && !quirk_flipped_r24) {
1009 /* Lots of instructions need a 0 plonked in */
1010 ins.has_inline_constant = false;
1011 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1012 ins.src_types[1] = ins.src_types[0];
1013 ins.has_constants = true;
1014 ins.constants.u32[0] = 0;
1015
1016 for (unsigned c = 0; c < 16; ++c)
1017 ins.swizzle[1][c] = 0;
1018 } else if (instr->op == nir_op_pack_32_2x16) {
1019 ins.dest_type = nir_type_uint16;
1020 ins.mask = mask_of(nr_components * 2);
1021 ins.is_pack = true;
1022 } else if (instr->op == nir_op_pack_32_4x8) {
1023 ins.dest_type = nir_type_uint8;
1024 ins.mask = mask_of(nr_components * 4);
1025 ins.is_pack = true;
1026 } else if (instr->op == nir_op_unpack_32_2x16) {
1027 ins.dest_type = nir_type_uint32;
1028 ins.mask = mask_of(nr_components >> 1);
1029 ins.is_pack = true;
1030 } else if (instr->op == nir_op_unpack_32_4x8) {
1031 ins.dest_type = nir_type_uint32;
1032 ins.mask = mask_of(nr_components >> 2);
1033 ins.is_pack = true;
1034 }
1035
1036 emit_mir_instruction(ctx, ins);
1037 }
1038
1039 #undef ALU_CASE
1040
1041 static void
1042 mir_set_intr_mask(nir_instr *instr, midgard_instruction *ins, bool is_read)
1043 {
1044 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1045 unsigned nir_mask = 0;
1046 unsigned dsize = 0;
1047
1048 if (is_read) {
1049 nir_mask = mask_of(nir_intrinsic_dest_components(intr));
1050
1051 /* Extension is mandatory for 8/16-bit loads */
1052 dsize = intr->def.bit_size == 64 ? 64 : 32;
1053 } else {
1054 nir_mask = nir_intrinsic_write_mask(intr);
1055 dsize = OP_IS_COMMON_STORE(ins->op) ? nir_src_bit_size(intr->src[0]) : 32;
1056 }
1057
1058 /* Once we have the NIR mask, we need to normalize to work in 32-bit space */
1059 unsigned bytemask = pan_to_bytemask(dsize, nir_mask);
1060 ins->dest_type = nir_type_uint | dsize;
1061 mir_set_bytemask(ins, bytemask);
1062 }
1063
1064 /* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
1065 * optimized) versions of UBO #0 */
1066
1067 static midgard_instruction *
1068 emit_ubo_read(compiler_context *ctx, nir_instr *instr, unsigned dest,
1069 unsigned offset, nir_src *indirect_offset,
1070 unsigned indirect_shift, unsigned index, unsigned nr_comps)
1071 {
1072 midgard_instruction ins;
1073
1074 unsigned dest_size = (instr->type == nir_instr_type_intrinsic)
1075 ? nir_instr_as_intrinsic(instr)->def.bit_size
1076 : 32;
1077
1078 unsigned bitsize = dest_size * nr_comps;
1079
1080 /* Pick the smallest intrinsic to avoid out-of-bounds reads */
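   /* e.g. a 32-bit vec2 read (64 bits) uses ld_ubo_64, while a 32-bit vec3
    * read (96 bits) rounds up to ld_ubo_128. */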
1081 if (bitsize <= 8)
1082 ins = m_ld_ubo_u8(dest, 0);
1083 else if (bitsize <= 16)
1084 ins = m_ld_ubo_u16(dest, 0);
1085 else if (bitsize <= 32)
1086 ins = m_ld_ubo_32(dest, 0);
1087 else if (bitsize <= 64)
1088 ins = m_ld_ubo_64(dest, 0);
1089 else if (bitsize <= 128)
1090 ins = m_ld_ubo_128(dest, 0);
1091 else
1092 unreachable("Invalid UBO read size");
1093
1094 ins.constants.u32[0] = offset;
1095
1096 if (instr->type == nir_instr_type_intrinsic)
1097 mir_set_intr_mask(instr, &ins, true);
1098
1099 if (indirect_offset) {
1100 ins.src[2] = nir_src_index(ctx, indirect_offset);
1101 ins.src_types[2] = nir_type_uint32;
1102 ins.load_store.index_shift = indirect_shift;
1103
1104 /* X component for the whole swizzle to prevent register
1105 * pressure from ballooning from the extra components */
1106 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[2]); ++i)
1107 ins.swizzle[2][i] = 0;
1108 } else {
1109 ins.load_store.index_reg = REGISTER_LDST_ZERO;
1110 }
1111
1112 if (indirect_offset && !indirect_shift)
1113 mir_set_ubo_offset(&ins, indirect_offset, offset);
1114
1115 midgard_pack_ubo_index_imm(&ins.load_store, index);
1116
1117 return emit_mir_instruction(ctx, ins);
1118 }
1119
1120 /* Globals are like UBOs if you squint. And shared memory is like globals if
1121 * you squint even harder */
1122
1123 static void
1124 emit_global(compiler_context *ctx, nir_instr *instr, bool is_read,
1125 unsigned srcdest, nir_src *offset, unsigned seg)
1126 {
1127 midgard_instruction ins;
1128
1129 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1130 if (is_read) {
1131 unsigned bitsize = intr->def.bit_size * intr->def.num_components;
1132
1133 switch (bitsize) {
1134 case 8:
1135 ins = m_ld_u8(srcdest, 0);
1136 break;
1137 case 16:
1138 ins = m_ld_u16(srcdest, 0);
1139 break;
1140 case 32:
1141 ins = m_ld_32(srcdest, 0);
1142 break;
1143 case 64:
1144 ins = m_ld_64(srcdest, 0);
1145 break;
1146 case 128:
1147 ins = m_ld_128(srcdest, 0);
1148 break;
1149 default:
1150 unreachable("Invalid global read size");
1151 }
1152
1153 mir_set_intr_mask(instr, &ins, is_read);
1154
1155       /* For anything not aligned on 32 bits, make sure we write full
1156        * 32-bit registers. */
1157 if (bitsize & 31) {
1158 unsigned comps_per_32b = 32 / intr->def.bit_size;
1159
1160 for (unsigned c = 0; c < 4 * comps_per_32b; c += comps_per_32b) {
1161 if (!(ins.mask & BITFIELD_RANGE(c, comps_per_32b)))
1162 continue;
1163
1164 unsigned base = ~0;
1165 for (unsigned i = 0; i < comps_per_32b; i++) {
1166 if (ins.mask & BITFIELD_BIT(c + i)) {
1167 base = ins.swizzle[0][c + i];
1168 break;
1169 }
1170 }
1171
1172 assert(base != ~0);
1173
1174 for (unsigned i = 0; i < comps_per_32b; i++) {
1175 if (!(ins.mask & BITFIELD_BIT(c + i))) {
1176 ins.swizzle[0][c + i] = base + i;
1177 ins.mask |= BITFIELD_BIT(c + i);
1178 }
1179 assert(ins.swizzle[0][c + i] == base + i);
1180 }
1181 }
1182 }
1183 } else {
1184 unsigned bitsize =
1185 nir_src_bit_size(intr->src[0]) * nir_src_num_components(intr->src[0]);
1186
1187 if (bitsize == 8)
1188 ins = m_st_u8(srcdest, 0);
1189 else if (bitsize == 16)
1190 ins = m_st_u16(srcdest, 0);
1191 else if (bitsize <= 32)
1192 ins = m_st_32(srcdest, 0);
1193 else if (bitsize <= 64)
1194 ins = m_st_64(srcdest, 0);
1195 else if (bitsize <= 128)
1196 ins = m_st_128(srcdest, 0);
1197 else
1198 unreachable("Invalid global store size");
1199
1200 mir_set_intr_mask(instr, &ins, is_read);
1201 }
1202
1203 mir_set_offset(ctx, &ins, offset, seg);
1204
1205 /* Set a valid swizzle for masked out components */
1206 assert(ins.mask);
1207 unsigned first_component = __builtin_ffs(ins.mask) - 1;
1208
1209 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i) {
1210 if (!(ins.mask & (1 << i)))
1211 ins.swizzle[0][i] = first_component;
1212 }
1213
1214 emit_mir_instruction(ctx, ins);
1215 }
1216
1217 static midgard_load_store_op
1218 translate_atomic_op(nir_atomic_op op)
1219 {
1220 /* clang-format off */
1221 switch (op) {
1222 case nir_atomic_op_xchg: return midgard_op_atomic_xchg;
1223 case nir_atomic_op_cmpxchg: return midgard_op_atomic_cmpxchg;
1224 case nir_atomic_op_iadd: return midgard_op_atomic_add;
1225 case nir_atomic_op_iand: return midgard_op_atomic_and;
1226 case nir_atomic_op_imax: return midgard_op_atomic_imax;
1227 case nir_atomic_op_imin: return midgard_op_atomic_imin;
1228 case nir_atomic_op_ior: return midgard_op_atomic_or;
1229 case nir_atomic_op_umax: return midgard_op_atomic_umax;
1230 case nir_atomic_op_umin: return midgard_op_atomic_umin;
1231 case nir_atomic_op_ixor: return midgard_op_atomic_xor;
1232 default: unreachable("Unexpected atomic");
1233 }
1234 /* clang-format on */
1235 }
1236
1237 /* Emit an atomic to shared memory or global memory. */
1238 static void
1239 emit_atomic(compiler_context *ctx, nir_intrinsic_instr *instr)
1240 {
1241 midgard_load_store_op op =
1242 translate_atomic_op(nir_intrinsic_atomic_op(instr));
1243
1244 nir_alu_type type =
1245 (op == midgard_op_atomic_imin || op == midgard_op_atomic_imax)
1246 ? nir_type_int
1247 : nir_type_uint;
1248
1249 bool is_shared = (instr->intrinsic == nir_intrinsic_shared_atomic) ||
1250 (instr->intrinsic == nir_intrinsic_shared_atomic_swap);
1251
1252 unsigned dest = nir_def_index(&instr->def);
1253 unsigned val = nir_src_index(ctx, &instr->src[1]);
1254 unsigned bitsize = nir_src_bit_size(instr->src[1]);
1255 emit_explicit_constant(ctx, val);
1256
1257 midgard_instruction ins = {
1258 .type = TAG_LOAD_STORE_4,
1259 .mask = 0xF,
1260 .dest = dest,
1261 .src = {~0, ~0, ~0, val},
1262 .src_types = {0, 0, 0, type | bitsize},
1263 .op = op,
1264 };
1265
1266 nir_src *src_offset = nir_get_io_offset_src(instr);
1267
1268 if (op == midgard_op_atomic_cmpxchg) {
1269 unsigned xchg_val = nir_src_index(ctx, &instr->src[2]);
1270 emit_explicit_constant(ctx, xchg_val);
1271
1272 ins.src[2] = val;
1273 ins.src_types[2] = type | bitsize;
1274 ins.src[3] = xchg_val;
1275
1276 if (is_shared) {
1277 ins.load_store.arg_reg = REGISTER_LDST_LOCAL_STORAGE_PTR;
1278 ins.load_store.arg_comp = COMPONENT_Z;
1279 ins.load_store.bitsize_toggle = true;
1280 } else {
1281 for (unsigned i = 0; i < 2; ++i)
1282 ins.swizzle[1][i] = i;
1283
1284 ins.src[1] = nir_src_index(ctx, src_offset);
1285 ins.src_types[1] = nir_type_uint64;
1286 }
1287 } else
1288 mir_set_offset(ctx, &ins, src_offset,
1289 is_shared ? LDST_SHARED : LDST_GLOBAL);
1290
1291 mir_set_intr_mask(&instr->instr, &ins, true);
1292
1293 emit_mir_instruction(ctx, ins);
1294 }
1295
1296 static void
1297 emit_varying_read(compiler_context *ctx, unsigned dest, unsigned offset,
1298 unsigned nr_comp, unsigned component,
1299 nir_src *indirect_offset, nir_alu_type type, bool flat)
1300 {
1301 midgard_instruction ins = m_ld_vary_32(dest, PACK_LDST_ATTRIB_OFS(offset));
1302 ins.mask = mask_of(nr_comp);
1303 ins.dest_type = type;
1304
1305 if (type == nir_type_float16) {
1306 /* Ensure we are aligned so we can pack it later */
1307 ins.mask = mask_of(ALIGN_POT(nr_comp, 2));
1308 }
1309
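   /* Apply the NIR component offset to the read swizzle, clamping at .w:
    * e.g. component = 1 sources a vec3 varying from .yzw. */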
1310 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i)
1311 ins.swizzle[0][i] = MIN2(i + component, COMPONENT_W);
1312
1313 midgard_varying_params p = {
1314 .flat_shading = flat,
1315 .perspective_correction = 1,
1316 .interpolate_sample = true,
1317 };
1318 midgard_pack_varying_params(&ins.load_store, p);
1319
1320 if (indirect_offset) {
1321 ins.src[2] = nir_src_index(ctx, indirect_offset);
1322 ins.src_types[2] = nir_type_uint32;
1323 } else
1324 ins.load_store.index_reg = REGISTER_LDST_ZERO;
1325
1326 ins.load_store.arg_reg = REGISTER_LDST_ZERO;
1327 ins.load_store.index_format = midgard_index_address_u32;
1328
1329 /* For flat shading, for GPUs supporting auto32, we always use .u32 and
1330 * require 32-bit mode. For smooth shading, we use the appropriate
1331 * floating-point type.
1332 *
1333 * This could be optimized, but it makes it easy to check correctness.
1334 */
1335 if (ctx->quirks & MIDGARD_NO_AUTO32) {
1336 switch (type) {
1337 case nir_type_uint32:
1338 case nir_type_bool32:
1339 ins.op = midgard_op_ld_vary_32u;
1340 break;
1341 case nir_type_int32:
1342 ins.op = midgard_op_ld_vary_32i;
1343 break;
1344 case nir_type_float32:
1345 ins.op = midgard_op_ld_vary_32;
1346 break;
1347 case nir_type_float16:
1348 ins.op = midgard_op_ld_vary_16;
1349 break;
1350 default:
1351 unreachable("Attempted to load unknown type");
1352 break;
1353 }
1354 } else if (flat) {
1355 assert(nir_alu_type_get_type_size(type) == 32);
1356 ins.op = midgard_op_ld_vary_32u;
1357 } else {
1358 assert(nir_alu_type_get_base_type(type) == nir_type_float);
1359
1360 ins.op = (nir_alu_type_get_type_size(type) == 32) ? midgard_op_ld_vary_32
1361 : midgard_op_ld_vary_16;
1362 }
1363
1364 emit_mir_instruction(ctx, ins);
1365 }
1366
1367 static midgard_instruction
1368 emit_image_op(compiler_context *ctx, nir_intrinsic_instr *instr)
1369 {
1370 enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
1371 unsigned nr_dim = glsl_get_sampler_dim_coordinate_components(dim);
1372 bool is_array = nir_intrinsic_image_array(instr);
1373 bool is_store = instr->intrinsic == nir_intrinsic_image_store;
1374
1375 assert(dim != GLSL_SAMPLER_DIM_MS && "MSAA'd image not lowered");
1376
1377 unsigned coord_reg = nir_src_index(ctx, &instr->src[1]);
1378 emit_explicit_constant(ctx, coord_reg);
1379
1380 nir_src *index = &instr->src[0];
1381 bool is_direct = nir_src_is_const(*index);
1382
1383 /* For image opcodes, address is used as an index into the attribute
1384 * descriptor */
1385 unsigned address = is_direct ? nir_src_as_uint(*index) : 0;
1386
1387 midgard_instruction ins;
1388 if (is_store) { /* emit st_image_* */
1389 unsigned val = nir_src_index(ctx, &instr->src[3]);
1390 emit_explicit_constant(ctx, val);
1391
1392 nir_alu_type type = nir_intrinsic_src_type(instr);
1393 ins = st_image(type, val, PACK_LDST_ATTRIB_OFS(address));
1394 nir_alu_type base_type = nir_alu_type_get_base_type(type);
1395 ins.src_types[0] = base_type | nir_src_bit_size(instr->src[3]);
1396 } else if (instr->intrinsic == nir_intrinsic_image_texel_address) {
1397 ins =
1398 m_lea_image(nir_def_index(&instr->def), PACK_LDST_ATTRIB_OFS(address));
1399 ins.mask = mask_of(2); /* 64-bit memory address */
1400 } else { /* emit ld_image_* */
1401 nir_alu_type type = nir_intrinsic_dest_type(instr);
1402 ins = ld_image(type, nir_def_index(&instr->def),
1403 PACK_LDST_ATTRIB_OFS(address));
1404 ins.mask = mask_of(nir_intrinsic_dest_components(instr));
1405 ins.dest_type = type;
1406 }
1407
1408 /* Coord reg */
1409 ins.src[1] = coord_reg;
1410 ins.src_types[1] = nir_type_uint16;
1411 if (nr_dim == 3 || is_array) {
1412 ins.load_store.bitsize_toggle = true;
1413 }
1414
1415 /* Image index reg */
1416 if (!is_direct) {
1417 ins.src[2] = nir_src_index(ctx, index);
1418 ins.src_types[2] = nir_type_uint32;
1419 } else
1420 ins.load_store.index_reg = REGISTER_LDST_ZERO;
1421
1422 emit_mir_instruction(ctx, ins);
1423
1424 return ins;
1425 }
1426
1427 static void
1428 emit_attr_read(compiler_context *ctx, unsigned dest, unsigned offset,
1429 unsigned nr_comp, nir_alu_type t)
1430 {
1431 midgard_instruction ins = m_ld_attr_32(dest, PACK_LDST_ATTRIB_OFS(offset));
1432 ins.load_store.arg_reg = REGISTER_LDST_ZERO;
1433 ins.load_store.index_reg = REGISTER_LDST_ZERO;
1434 ins.mask = mask_of(nr_comp);
1435
1436 /* Use the type appropriate load */
1437 switch (t) {
1438 case nir_type_uint:
1439 case nir_type_bool:
1440 ins.op = midgard_op_ld_attr_32u;
1441 break;
1442 case nir_type_int:
1443 ins.op = midgard_op_ld_attr_32i;
1444 break;
1445 case nir_type_float:
1446 ins.op = midgard_op_ld_attr_32;
1447 break;
1448 default:
1449 unreachable("Attempted to load unknown type");
1450 break;
1451 }
1452
1453 emit_mir_instruction(ctx, ins);
1454 }
1455
1456 static unsigned
1457 compute_builtin_arg(nir_intrinsic_op op)
1458 {
1459 switch (op) {
1460 case nir_intrinsic_load_workgroup_id:
1461 return REGISTER_LDST_GROUP_ID;
1462 case nir_intrinsic_load_local_invocation_id:
1463 return REGISTER_LDST_LOCAL_THREAD_ID;
1464 case nir_intrinsic_load_global_invocation_id:
1465 return REGISTER_LDST_GLOBAL_THREAD_ID;
1466 default:
1467       unreachable("Invalid compute parameter loaded");
1468 }
1469 }
1470
1471 static void
1472 emit_fragment_store(compiler_context *ctx, unsigned src, unsigned src_z,
1473 unsigned src_s, enum midgard_rt_id rt, unsigned sample_iter)
1474 {
1475 assert(rt < ARRAY_SIZE(ctx->writeout_branch));
1476 assert(sample_iter < ARRAY_SIZE(ctx->writeout_branch[0]));
1477
1478 midgard_instruction *br = ctx->writeout_branch[rt][sample_iter];
1479
1480 assert(!br);
1481
1482 emit_explicit_constant(ctx, src);
1483
1484 struct midgard_instruction ins = v_branch(false, false);
1485
1486 bool depth_only = (rt == MIDGARD_ZS_RT);
1487
1488 ins.writeout = depth_only ? 0 : PAN_WRITEOUT_C;
1489
1490 /* Add dependencies */
1491 ins.src[0] = src;
1492 ins.src_types[0] = nir_type_uint32;
1493
1494 if (depth_only)
1495 ins.constants.u32[0] = 0xFF;
1496 else
1497 ins.constants.u32[0] = ((rt - MIDGARD_COLOR_RT0) << 8) | sample_iter;
1498
1499 for (int i = 0; i < 4; ++i)
1500 ins.swizzle[0][i] = i;
1501
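   /* Absent sources are passed as ~0, so ~src_z / ~src_s are nonzero only
    * when a depth / stencil value was actually provided. */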
1502 if (~src_z) {
1503 emit_explicit_constant(ctx, src_z);
1504 ins.src[2] = src_z;
1505 ins.src_types[2] = nir_type_uint32;
1506 ins.writeout |= PAN_WRITEOUT_Z;
1507 }
1508 if (~src_s) {
1509 emit_explicit_constant(ctx, src_s);
1510 ins.src[3] = src_s;
1511 ins.src_types[3] = nir_type_uint32;
1512 ins.writeout |= PAN_WRITEOUT_S;
1513 }
1514
1515 /* Emit the branch */
1516 br = emit_mir_instruction(ctx, ins);
1517 schedule_barrier(ctx);
1518 ctx->writeout_branch[rt][sample_iter] = br;
1519
1520 /* Push our current location = current block count - 1 = where we'll
1521 * jump to. Maybe a bit too clever for my own good */
1522
1523 br->branch.target_block = ctx->block_count - 1;
1524 }
1525
1526 static void
1527 emit_compute_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1528 {
1529 unsigned reg = nir_def_index(&instr->def);
1530 midgard_instruction ins = m_ldst_mov(reg, 0);
1531 ins.mask = mask_of(3);
1532 ins.swizzle[0][3] = COMPONENT_X; /* xyzx */
1533 ins.load_store.arg_reg = compute_builtin_arg(instr->intrinsic);
1534 emit_mir_instruction(ctx, ins);
1535 }
1536
1537 static unsigned
1538 vertex_builtin_arg(nir_intrinsic_op op)
1539 {
1540 switch (op) {
1541 case nir_intrinsic_load_raw_vertex_id_pan:
1542 return PAN_VERTEX_ID;
1543 case nir_intrinsic_load_instance_id:
1544 return PAN_INSTANCE_ID;
1545 default:
1546 unreachable("Invalid vertex builtin");
1547 }
1548 }
1549
1550 static void
1551 emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1552 {
1553 unsigned reg = nir_def_index(&instr->def);
1554 emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1,
1555 nir_type_int);
1556 }
1557
1558 static void
1559 emit_special(compiler_context *ctx, nir_intrinsic_instr *instr, unsigned idx)
1560 {
1561 unsigned reg = nir_def_index(&instr->def);
1562
1563 midgard_instruction ld = m_ld_tilebuffer_raw(reg, 0);
1564 ld.op = midgard_op_ld_special_32u;
1565 ld.load_store.signed_offset = PACK_LDST_SELECTOR_OFS(idx);
1566 ld.load_store.index_reg = REGISTER_LDST_ZERO;
1567
1568 for (int i = 0; i < 4; ++i)
1569 ld.swizzle[0][i] = COMPONENT_X;
1570
1571 emit_mir_instruction(ctx, ld);
1572 }
1573
1574 static void
1575 emit_control_barrier(compiler_context *ctx)
1576 {
1577 midgard_instruction ins = {
1578 .type = TAG_TEXTURE_4,
1579 .dest = ~0,
1580 .src = {~0, ~0, ~0, ~0},
1581 .op = midgard_tex_op_barrier,
1582 };
1583
1584 emit_mir_instruction(ctx, ins);
1585 }
1586
1587 static uint8_t
1588 output_load_rt_addr(compiler_context *ctx, nir_intrinsic_instr *instr)
1589 {
1590 unsigned loc = nir_intrinsic_io_semantics(instr).location;
1591
1592 if (loc >= FRAG_RESULT_DATA0)
1593 return loc - FRAG_RESULT_DATA0;
1594
1595 if (loc == FRAG_RESULT_DEPTH)
1596 return 0x1F;
1597 if (loc == FRAG_RESULT_STENCIL)
1598 return 0x1E;
1599
1600 unreachable("Invalid RT to load from");
1601 }
1602
1603 static void
1604 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
1605 {
1606 unsigned offset = 0, reg;
1607
1608 switch (instr->intrinsic) {
1609 case nir_intrinsic_decl_reg:
1610 case nir_intrinsic_store_reg:
1611 /* Always fully consumed */
1612 break;
1613
1614 case nir_intrinsic_load_reg: {
1615 /* NIR guarantees that, for typical isel, this will always be fully
1616 * consumed. However, we also do our own nir_scalar chasing for
1617 * address arithmetic, bypassing the source chasing helpers. So we can end
1618 * up with unconsumed load_register instructions. Translate them here. 99%
1619 * of the time, these moves will be DCE'd away.
1620 */
1621 nir_def *handle = instr->src[0].ssa;
1622
1623 midgard_instruction ins =
1624 v_mov(nir_reg_index(handle), nir_def_index(&instr->def));
1625
1626 ins.dest_type = ins.src_types[1] = nir_type_uint | instr->def.bit_size;
1627
1628 ins.mask = BITFIELD_MASK(instr->def.num_components);
1629 emit_mir_instruction(ctx, ins);
1630 break;
1631 }
1632
1633 case nir_intrinsic_terminate_if:
1634 case nir_intrinsic_terminate: {
1635 bool conditional = instr->intrinsic == nir_intrinsic_terminate_if;
1636 struct midgard_instruction discard = v_branch(conditional, false);
1637 discard.branch.target_type = TARGET_DISCARD;
1638
1639 if (conditional) {
1640 discard.src[0] = nir_src_index(ctx, &instr->src[0]);
1641 discard.src_types[0] = nir_type_uint32;
1642 }
1643
1644 emit_mir_instruction(ctx, discard);
1645 schedule_barrier(ctx);
1646
1647 break;
1648 }
1649
1650 case nir_intrinsic_image_load:
1651 case nir_intrinsic_image_store:
1652 case nir_intrinsic_image_texel_address:
1653 emit_image_op(ctx, instr);
1654 break;
1655
1656 case nir_intrinsic_load_ubo:
1657 case nir_intrinsic_load_global:
1658 case nir_intrinsic_load_global_constant:
1659 case nir_intrinsic_load_shared:
1660 case nir_intrinsic_load_scratch:
1661 case nir_intrinsic_load_input:
1662 case nir_intrinsic_load_interpolated_input: {
1663 bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
1664 bool is_global = instr->intrinsic == nir_intrinsic_load_global ||
1665 instr->intrinsic == nir_intrinsic_load_global_constant;
1666 bool is_shared = instr->intrinsic == nir_intrinsic_load_shared;
1667 bool is_scratch = instr->intrinsic == nir_intrinsic_load_scratch;
1668 bool is_flat = instr->intrinsic == nir_intrinsic_load_input;
1669 bool is_interp =
1670 instr->intrinsic == nir_intrinsic_load_interpolated_input;
1671
1672 /* Get the base type of the intrinsic */
1673 /* TODO: Infer type? Does it matter? */
1674 nir_alu_type t = (is_interp) ? nir_type_float
1675 : (is_flat) ? nir_intrinsic_dest_type(instr)
1676 : nir_type_uint;
1677
1678 t = nir_alu_type_get_base_type(t);
1679
1680 if (!(is_ubo || is_global || is_scratch)) {
1681 offset = nir_intrinsic_base(instr);
1682 }
1683
1684 unsigned nr_comp = nir_intrinsic_dest_components(instr);
1685
1686 nir_src *src_offset = nir_get_io_offset_src(instr);
1687
1688 bool direct = nir_src_is_const(*src_offset);
1689 nir_src *indirect_offset = direct ? NULL : src_offset;
1690
1691 if (direct)
1692 offset += nir_src_as_uint(*src_offset);
1693
1694 /* We may need to apply a fractional offset */
1695 int component =
1696 (is_flat || is_interp) ? nir_intrinsic_component(instr) : 0;
1697 reg = nir_def_index(&instr->def);
1698
1699 if (is_ubo) {
1700 nir_src index = instr->src[0];
1701
1702 /* TODO: Is indirect block number possible? */
1703 assert(nir_src_is_const(index));
1704
1705 uint32_t uindex = nir_src_as_uint(index);
1706 emit_ubo_read(ctx, &instr->instr, reg, offset, indirect_offset, 0,
1707 uindex, nr_comp);
1708 } else if (is_global || is_shared || is_scratch) {
1709 unsigned seg =
1710 is_global ? LDST_GLOBAL : (is_shared ? LDST_SHARED : LDST_SCRATCH);
1711 emit_global(ctx, &instr->instr, true, reg, src_offset, seg);
1712 } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->inputs->is_blend) {
1713 emit_varying_read(ctx, reg, offset, nr_comp, component,
1714 indirect_offset, t | instr->def.bit_size, is_flat);
1715 } else if (ctx->inputs->is_blend) {
1716 /* ctx->blend_input will be precoloured to r0/r2, where
1717 * the input is preloaded */
1718
1719 unsigned *input = offset ? &ctx->blend_src1 : &ctx->blend_input;
1720
1721 if (*input == ~0)
1722 *input = reg;
1723 else
1724 emit_mir_instruction(ctx, v_mov(*input, reg));
1725 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1726 emit_attr_read(ctx, reg, offset, nr_comp, t);
1727 } else {
1728 unreachable("Unknown load");
1729 }
1730
1731 break;
1732 }
1733
1734 /* Handled together with load_interpolated_input */
1735 case nir_intrinsic_load_barycentric_pixel:
1736 case nir_intrinsic_load_barycentric_centroid:
1737 case nir_intrinsic_load_barycentric_sample:
1738 break;
1739
1740    /* Reads a 128-bit value raw off the tilebuffer during blending, tasty */
1741
1742 case nir_intrinsic_load_raw_output_pan: {
1743 reg = nir_def_index(&instr->def);
1744
1745 /* T720 and below use different blend opcodes with slightly
1746 * different semantics than T760 and up */
1747
1748 midgard_instruction ld = m_ld_tilebuffer_raw(reg, 0);
1749
1750 unsigned target = output_load_rt_addr(ctx, instr);
1751 ld.load_store.index_comp = target & 0x3;
1752 ld.load_store.index_reg = target >> 2;
1753
1754 if (nir_src_is_const(instr->src[0])) {
1755 unsigned sample = nir_src_as_uint(instr->src[0]);
1756 ld.load_store.arg_comp = sample & 0x3;
1757 ld.load_store.arg_reg = sample >> 2;
1758 } else {
1759 /* Enable sample index via register. */
1760 ld.load_store.signed_offset |= 1;
1761 ld.src[1] = nir_src_index(ctx, &instr->src[0]);
1762 ld.src_types[1] = nir_type_int32;
1763 }
1764
1765 if (ctx->quirks & MIDGARD_OLD_BLEND) {
1766 ld.op = midgard_op_ld_special_32u;
1767 ld.load_store.signed_offset = PACK_LDST_SELECTOR_OFS(16);
1768 ld.load_store.index_reg = REGISTER_LDST_ZERO;
1769 }
1770
1771 emit_mir_instruction(ctx, ld);
1772 break;
1773 }
1774
1775 case nir_intrinsic_load_output: {
1776 reg = nir_def_index(&instr->def);
1777
1778 unsigned bits = instr->def.bit_size;
1779
1780 midgard_instruction ld;
1781 if (bits == 16)
1782 ld = m_ld_tilebuffer_16f(reg, 0);
1783 else
1784 ld = m_ld_tilebuffer_32f(reg, 0);
1785
1786 unsigned index = output_load_rt_addr(ctx, instr);
1787 ld.load_store.index_comp = index & 0x3;
1788 ld.load_store.index_reg = index >> 2;
1789
1790 for (unsigned c = 4; c < 16; ++c)
1791 ld.swizzle[0][c] = 0;
1792
1793 if (ctx->quirks & MIDGARD_OLD_BLEND) {
1794 if (bits == 16)
1795 ld.op = midgard_op_ld_special_16f;
1796 else
1797 ld.op = midgard_op_ld_special_32f;
1798 ld.load_store.signed_offset = PACK_LDST_SELECTOR_OFS(1);
1799 ld.load_store.index_reg = REGISTER_LDST_ZERO;
1800 }
1801
1802 emit_mir_instruction(ctx, ld);
1803 break;
1804 }
1805
1806 case nir_intrinsic_store_output:
1807 case nir_intrinsic_store_combined_output_pan:
1808 assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
1809
1810 reg = nir_src_index(ctx, &instr->src[0]);
1811
1812 if (ctx->stage == MESA_SHADER_FRAGMENT) {
1813 bool combined =
1814 instr->intrinsic == nir_intrinsic_store_combined_output_pan;
1815
1816 enum midgard_rt_id rt;
1817
1818 unsigned reg_z = ~0, reg_s = ~0, reg_2 = ~0;
1819 unsigned writeout = PAN_WRITEOUT_C;
1820 if (combined) {
1821 writeout = nir_intrinsic_component(instr);
1822 if (writeout & PAN_WRITEOUT_Z)
1823 reg_z = nir_src_index(ctx, &instr->src[2]);
1824 if (writeout & PAN_WRITEOUT_S)
1825 reg_s = nir_src_index(ctx, &instr->src[3]);
1826 if (writeout & PAN_WRITEOUT_2)
1827 reg_2 = nir_src_index(ctx, &instr->src[4]);
1828 }
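         /* For the combined intrinsic, the component index read above acts
          * as a writeout mask of PAN_WRITEOUT_* bits, selecting which of
          * the colour/Z/S/dual-source values are actually supplied. */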
1829
1830 if (writeout & PAN_WRITEOUT_C) {
1831 nir_io_semantics sem = nir_intrinsic_io_semantics(instr);
1832
1833 rt = MIDGARD_COLOR_RT0 + (sem.location - FRAG_RESULT_DATA0);
1834 } else {
1835 rt = MIDGARD_ZS_RT;
1836 }
1837
1838 /* Dual-source blend writeout is done by leaving the
1839 * value in r2 for the blend shader to use. */
1840 if (~reg_2) {
1841 emit_explicit_constant(ctx, reg_2);
1842
1843 unsigned out = make_compiler_temp(ctx);
1844
1845 midgard_instruction ins = v_mov(reg_2, out);
1846 emit_mir_instruction(ctx, ins);
1847
1848 ctx->blend_src1 = out;
1849 }
1850
1851 emit_fragment_store(ctx, reg, reg_z, reg_s, rt, 0);
1852 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1853 assert(instr->intrinsic == nir_intrinsic_store_output);
1854
1855 /* We should have been vectorized, though we don't
1856 * currently check that st_vary is emitted only once
1857 * per slot (this is relevant, since there's not a mask
1858 * parameter available on the store [set to 0 by the
1859 * blob]). We do respect the component by adjusting the
1860 * swizzle. If this is a constant source, we'll need to
1861 * emit that explicitly. */
1862
1863 emit_explicit_constant(ctx, reg);
1864
1865 offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
1866
1867 unsigned dst_component = nir_intrinsic_component(instr);
1868 unsigned nr_comp = nir_src_num_components(instr->src[0]);
1869
1870 /* ABI: Format controlled by the attribute descriptor.
1871 * This simplifies flat shading, although it prevents
1872 * certain (unimplemented) 16-bit optimizations.
1873 *
1874 * In particular, it lets the driver handle internal
1875 * TGSI shaders that set flat in the VS but smooth in
1876 * the FS. This matches our handling on Bifrost.
1877 */
1878 bool auto32 = true;
1879 assert(nir_alu_type_get_type_size(nir_intrinsic_src_type(instr)) ==
1880 32);
1881
1882 /* ABI: varyings in the secondary attribute table */
1883 bool secondary_table = true;
1884
1885 midgard_instruction st =
1886 m_st_vary_32(reg, PACK_LDST_ATTRIB_OFS(offset));
1887 st.load_store.arg_reg = REGISTER_LDST_ZERO;
1888 st.load_store.index_reg = REGISTER_LDST_ZERO;
1889
1890          /* The attribute instruction uses these 2 bits for the
1891           * a32 and table bits; pack them specially.
1892           */
1893 st.load_store.index_format =
1894 (auto32 ? (1 << 0) : 0) | (secondary_table ? (1 << 1) : 0);
1895
1896 /* nir_intrinsic_component(store_intr) encodes the
1897 * destination component start. Source component offset
1898 * adjustment is taken care of in
1899 * install_registers_instr(), when offset_swizzle() is
1900 * called.
1901 */
1902 unsigned src_component = COMPONENT_X;
1903
1904 assert(nr_comp > 0);
1905 for (unsigned i = 0; i < ARRAY_SIZE(st.swizzle); ++i) {
1906 st.swizzle[0][i] = src_component;
1907 if (i >= dst_component && i < dst_component + nr_comp - 1)
1908 src_component++;
1909 }
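         /* For example, dst_component = 2 with nr_comp = 2 yields the
          * swizzle (x, x, x, y), so the source .xy lands in the .zw
          * components of the varying slot. */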
1910
1911 emit_mir_instruction(ctx, st);
1912 } else {
1913 unreachable("Unknown store");
1914 }
1915
1916 break;
1917
1918 /* Special case of store_output for lowered blend shaders */
1919 case nir_intrinsic_store_raw_output_pan: {
1920 assert(ctx->stage == MESA_SHADER_FRAGMENT);
1921 reg = nir_src_index(ctx, &instr->src[0]);
1922
1923 nir_io_semantics sem = nir_intrinsic_io_semantics(instr);
1924 assert(sem.location >= FRAG_RESULT_DATA0);
1925 unsigned rt = sem.location - FRAG_RESULT_DATA0;
1926
1927 emit_fragment_store(ctx, reg, ~0, ~0, rt + MIDGARD_COLOR_RT0,
1928 nir_intrinsic_base(instr));
1929 break;
1930 }
1931
1932 case nir_intrinsic_store_global:
1933 case nir_intrinsic_store_shared:
1934 case nir_intrinsic_store_scratch:
1935 reg = nir_src_index(ctx, &instr->src[0]);
1936 emit_explicit_constant(ctx, reg);
1937
1938 unsigned seg;
1939 if (instr->intrinsic == nir_intrinsic_store_global)
1940 seg = LDST_GLOBAL;
1941 else if (instr->intrinsic == nir_intrinsic_store_shared)
1942 seg = LDST_SHARED;
1943 else
1944 seg = LDST_SCRATCH;
1945
1946 emit_global(ctx, &instr->instr, false, reg, &instr->src[1], seg);
1947 break;
1948
1949 case nir_intrinsic_load_workgroup_id:
1950 case nir_intrinsic_load_local_invocation_id:
1951 case nir_intrinsic_load_global_invocation_id:
1952 emit_compute_builtin(ctx, instr);
1953 break;
1954
1955 case nir_intrinsic_load_raw_vertex_id_pan:
1956 ctx->info->midgard.vs.reads_raw_vertex_id = true;
1957 FALLTHROUGH;
1958 case nir_intrinsic_load_instance_id:
1959 emit_vertex_builtin(ctx, instr);
1960 break;
1961
1962 case nir_intrinsic_load_sample_mask_in:
1963 emit_special(ctx, instr, 96);
1964 break;
1965
1966 case nir_intrinsic_load_sample_id:
1967 emit_special(ctx, instr, 97);
1968 break;
1969
1970 case nir_intrinsic_barrier:
1971 if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
1972 schedule_barrier(ctx);
1973 emit_control_barrier(ctx);
1974 schedule_barrier(ctx);
1975 } else if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE) {
1976 /* Midgard doesn't seem to want special handling, though we do need to
1977 * take care when scheduling to avoid incorrect reordering.
1978 *
1979 * Note this is an "else if" since the handling for the execution scope
1980 * case already covers the case when both scopes are present.
1981 */
1982 schedule_barrier(ctx);
1983 }
1984 break;
1985
1986 case nir_intrinsic_shared_atomic:
1987 case nir_intrinsic_shared_atomic_swap:
1988 case nir_intrinsic_global_atomic:
1989 case nir_intrinsic_global_atomic_swap:
1990 emit_atomic(ctx, instr);
1991 break;
1992
1993 case nir_intrinsic_ddx:
1994 case nir_intrinsic_ddy:
1995 midgard_emit_derivatives(ctx, instr);
1996 break;
1997
1998 default:
1999 fprintf(stderr, "Unhandled intrinsic %s\n",
2000 nir_intrinsic_infos[instr->intrinsic].name);
2001 assert(0);
2002 break;
2003 }
2004 }
2005
2006 /* Returns the dimension, with 0 special-casing cubemaps */
2007 static unsigned
2008 midgard_tex_format(enum glsl_sampler_dim dim)
2009 {
2010 switch (dim) {
2011 case GLSL_SAMPLER_DIM_1D:
2012 case GLSL_SAMPLER_DIM_BUF:
2013 return 1;
2014
2015 case GLSL_SAMPLER_DIM_2D:
2016 case GLSL_SAMPLER_DIM_MS:
2017 case GLSL_SAMPLER_DIM_EXTERNAL:
2018 case GLSL_SAMPLER_DIM_RECT:
2019 return 2;
2020
2021 case GLSL_SAMPLER_DIM_3D:
2022 return 3;
2023
2024 case GLSL_SAMPLER_DIM_CUBE:
2025 return 0;
2026
2027 default:
2028 unreachable("Unknown sampler dim type");
2029 }
2030 }
2031
2032 /* Tries to attach an explicit LOD or bias as a constant. Returns whether this
2033 * was successful */
2034
2035 static bool
2036 pan_attach_constant_bias(compiler_context *ctx, nir_src lod,
2037 midgard_texture_word *word)
2038 {
2039 /* To attach as constant, it has to *be* constant */
2040
2041 if (!nir_src_is_const(lod))
2042 return false;
2043
2044 float f = nir_src_as_float(lod);
2045
2046 /* Break into fixed-point */
2047 signed lod_int = f;
2048 float lod_frac = f - lod_int;
2049
2050 /* Carry over negative fractions */
2051 if (lod_frac < 0.0) {
2052 lod_int--;
2053 lod_frac += 1.0;
2054 }
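   /* e.g. lod = -1.5 truncates to lod_int = -1 with lod_frac = -0.5; the
    * carry above turns that into lod_int = -2, lod_frac = 0.5, which still
    * sums to -1.5 */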
2055
2056 /* Encode */
2057 word->bias = float_to_ubyte(lod_frac);
2058 word->bias_int = lod_int;
2059
2060 return true;
2061 }
2062
2063 static enum mali_texture_mode
2064 mdg_texture_mode(nir_tex_instr *instr)
2065 {
2066 if (instr->op == nir_texop_tg4 && instr->is_shadow)
2067 return TEXTURE_GATHER_SHADOW;
2068 else if (instr->op == nir_texop_tg4)
2069 return TEXTURE_GATHER_X + instr->component;
2070 else if (instr->is_shadow)
2071 return TEXTURE_SHADOW;
2072 else
2073 return TEXTURE_NORMAL;
2074 }
2075
2076 static void
2077 set_tex_coord(compiler_context *ctx, nir_tex_instr *instr,
2078 midgard_instruction *ins)
2079 {
2080 int coord_idx = nir_tex_instr_src_index(instr, nir_tex_src_coord);
2081
2082 assert(coord_idx >= 0);
2083
2084 int comparator_idx = nir_tex_instr_src_index(instr, nir_tex_src_comparator);
2085 int ms_idx = nir_tex_instr_src_index(instr, nir_tex_src_ms_index);
2086 assert(comparator_idx < 0 || ms_idx < 0);
2087 int ms_or_comparator_idx = ms_idx >= 0 ? ms_idx : comparator_idx;
2088
2089 unsigned coords = nir_src_index(ctx, &instr->src[coord_idx].src);
2090
2091 emit_explicit_constant(ctx, coords);
2092
2093 ins->src_types[1] = nir_tex_instr_src_type(instr, coord_idx) |
2094 nir_src_bit_size(instr->src[coord_idx].src);
2095
2096 unsigned nr_comps = instr->coord_components;
2097 unsigned written_mask = 0, write_mask = 0;
2098
2099 /* Initialize all components to coord.x which is expected to always be
2100 * present. Swizzle is updated below based on the texture dimension
2101 * and extra attributes that are packed in the coordinate argument.
2102 */
2103 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; c++)
2104 ins->swizzle[1][c] = COMPONENT_X;
2105
2106 /* Shadow ref value is part of the coordinates if there's no comparator
2107     * source; in that case it's always placed in the last component.
2108 * Midgard wants the ref value in coord.z.
2109 */
2110 if (instr->is_shadow && comparator_idx < 0) {
2111 ins->swizzle[1][COMPONENT_Z] = --nr_comps;
2112 write_mask |= 1 << COMPONENT_Z;
2113 }
2114
2115 /* The array index is the last component if there's no shadow ref value
2116 * or second last if there's one. We already decremented the number of
2117 * components to account for the shadow ref value above.
2118 * Midgard wants the array index in coord.w.
2119 */
2120 if (instr->is_array) {
2121 ins->swizzle[1][COMPONENT_W] = --nr_comps;
2122 write_mask |= 1 << COMPONENT_W;
2123 }
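   /* For example, a 2D array coordinate (x, y, layer) enters with
    * nr_comps == 3: the layer in component 2 is routed to coord.w here,
    * leaving nr_comps == 2 so only x/y receive identity swizzles below. */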
2124
2125 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
2126 /* texelFetch is undefined on samplerCube */
2127 assert(ins->op != midgard_tex_op_fetch);
2128
2129 ins->src[1] = make_compiler_temp_reg(ctx);
2130
2131 /* For cubemaps, we use a special ld/st op to select the face
2132 * and copy the xy into the texture register
2133 */
2134 midgard_instruction ld = m_ld_cubemap_coords(ins->src[1], 0);
2135 ld.src[1] = coords;
2136 ld.src_types[1] = ins->src_types[1];
2137 ld.mask = 0x3; /* xy */
2138 ld.load_store.bitsize_toggle = true;
2139 ld.swizzle[1][3] = COMPONENT_X;
2140 emit_mir_instruction(ctx, ld);
2141
2142          /* We packed the cube coordinates (X,Y,Z) into (X,Y); update the
2143 * written mask accordingly and decrement the number of
2144 * components
2145 */
2146 nr_comps--;
2147 written_mask |= 3;
2148 }
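   /* e.g. a non-array cube coordinate enters with nr_comps == 3; after the
    * ld_cubemap_coords above the face-relative x/y already live in the
    * temporary, so no extra move from the original coordinate is needed
    * for those lanes. */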
2149
2150 /* Now flag tex coord components that have not been written yet */
2151 write_mask |= mask_of(nr_comps) & ~written_mask;
2152 for (unsigned c = 0; c < nr_comps; c++)
2153 ins->swizzle[1][c] = c;
2154
2155 /* Sample index and shadow ref are expected in coord.z */
2156 if (ms_or_comparator_idx >= 0) {
2157 assert(!((write_mask | written_mask) & (1 << COMPONENT_Z)));
2158
2159 unsigned sample_or_ref =
2160 nir_src_index(ctx, &instr->src[ms_or_comparator_idx].src);
2161
2162 emit_explicit_constant(ctx, sample_or_ref);
2163
2164 if (ins->src[1] == ~0)
2165 ins->src[1] = make_compiler_temp_reg(ctx);
2166
2167 midgard_instruction mov = v_mov(sample_or_ref, ins->src[1]);
2168
2169 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; c++)
2170 mov.swizzle[1][c] = COMPONENT_X;
2171
2172 mov.mask = 1 << COMPONENT_Z;
2173 written_mask |= 1 << COMPONENT_Z;
2174 ins->swizzle[1][COMPONENT_Z] = COMPONENT_Z;
2175 emit_mir_instruction(ctx, mov);
2176 }
2177
2178    /* Texel fetch coordinates use all four elements (xyz/index) regardless
2179 * of texture dimensionality, which means it's necessary to zero the
2180 * unused components to keep everything happy.
2181 */
2182 if (ins->op == midgard_tex_op_fetch && (written_mask | write_mask) != 0xF) {
2183 if (ins->src[1] == ~0)
2184 ins->src[1] = make_compiler_temp_reg(ctx);
2185
2186 /* mov index.zw, #0, or generalized */
2187 midgard_instruction mov =
2188 v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), ins->src[1]);
2189 mov.has_constants = true;
2190 mov.mask = (written_mask | write_mask) ^ 0xF;
2191 emit_mir_instruction(ctx, mov);
2192 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; c++) {
2193 if (mov.mask & (1 << c))
2194 ins->swizzle[1][c] = c;
2195 }
2196 }
2197
2198 if (ins->src[1] == ~0) {
2199 /* No temporary reg created, use the src coords directly */
2200 ins->src[1] = coords;
2201 } else if (write_mask) {
2202 /* Move the remaining coordinates to the temporary reg */
2203 midgard_instruction mov = v_mov(coords, ins->src[1]);
2204
2205 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; c++) {
2206 if ((1 << c) & write_mask) {
2207 mov.swizzle[1][c] = ins->swizzle[1][c];
2208 ins->swizzle[1][c] = c;
2209 } else {
2210 mov.swizzle[1][c] = COMPONENT_X;
2211 }
2212 }
2213
2214 mov.mask = write_mask;
2215 emit_mir_instruction(ctx, mov);
2216 }
2217 }
2218
2219 static void
2220 emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
2221 unsigned midgard_texop)
2222 {
2223 int texture_index = instr->texture_index;
2224 int sampler_index = instr->sampler_index;
2225
2226 /* If txf is used, we assume there is a valid sampler bound at index 0. Use
2227 * it for txf operations, since there may be no other valid samplers. This is
2228 * a workaround: txf does not require a sampler in NIR (so sampler_index is
2229 * undefined) but we need one in the hardware. This is ABI with the driver.
2230 */
2231 if (!nir_tex_instr_need_sampler(instr))
2232 sampler_index = 0;
2233
2234 midgard_instruction ins = {
2235 .type = TAG_TEXTURE_4,
2236 .mask = 0xF,
2237 .dest = nir_def_index(&instr->def),
2238 .src = {~0, ~0, ~0, ~0},
2239 .dest_type = instr->dest_type,
2240 .swizzle = SWIZZLE_IDENTITY_4,
2241 .outmod = midgard_outmod_none,
2242 .op = midgard_texop,
2243 .texture = {
2244 .format = midgard_tex_format(instr->sampler_dim),
2245 .texture_handle = texture_index,
2246 .sampler_handle = sampler_index,
2247 .mode = mdg_texture_mode(instr),
2248 }};
2249
2250 if (instr->is_shadow && !instr->is_new_style_shadow &&
2251 instr->op != nir_texop_tg4)
2252 for (int i = 0; i < 4; ++i)
2253 ins.swizzle[0][i] = COMPONENT_X;
2254
2255 for (unsigned i = 0; i < instr->num_srcs; ++i) {
2256 int index = nir_src_index(ctx, &instr->src[i].src);
2257 unsigned sz = nir_src_bit_size(instr->src[i].src);
2258 nir_alu_type T = nir_tex_instr_src_type(instr, i) | sz;
2259
2260 switch (instr->src[i].src_type) {
2261 case nir_tex_src_coord:
2262 set_tex_coord(ctx, instr, &ins);
2263 break;
2264
2265 case nir_tex_src_bias:
2266 case nir_tex_src_lod: {
2267 /* Try as a constant if we can */
2268
2269 bool is_txf = midgard_texop == midgard_tex_op_fetch;
2270 if (!is_txf &&
2271 pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
2272 break;
2273
2274 ins.texture.lod_register = true;
2275 ins.src[2] = index;
2276 ins.src_types[2] = T;
2277
2278 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2279 ins.swizzle[2][c] = COMPONENT_X;
2280
2281 emit_explicit_constant(ctx, index);
2282
2283 break;
2284       }
2285
2286 case nir_tex_src_offset: {
2287 ins.texture.offset_register = true;
2288 ins.src[3] = index;
2289 ins.src_types[3] = T;
2290
2291 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2292 ins.swizzle[3][c] = (c > COMPONENT_Z) ? 0 : c;
2293
2294 emit_explicit_constant(ctx, index);
2295 break;
2296       }
2297
2298 case nir_tex_src_comparator:
2299 case nir_tex_src_ms_index:
2300 /* Nothing to do, handled in set_tex_coord() */
2301 break;
2302
2303 default: {
2304 fprintf(stderr, "Unknown texture source type: %d\n",
2305 instr->src[i].src_type);
2306 assert(0);
2307 }
2308 }
2309 }
2310
2311 emit_mir_instruction(ctx, ins);
2312 }
2313
2314 static void
2315 emit_tex(compiler_context *ctx, nir_tex_instr *instr)
2316 {
2317 switch (instr->op) {
2318 case nir_texop_tex:
2319 case nir_texop_txb:
2320 emit_texop_native(ctx, instr, midgard_tex_op_normal);
2321 break;
2322 case nir_texop_txl:
2323 case nir_texop_tg4:
2324 emit_texop_native(ctx, instr, midgard_tex_op_gradient);
2325 break;
2326 case nir_texop_txf:
2327 case nir_texop_txf_ms:
2328 emit_texop_native(ctx, instr, midgard_tex_op_fetch);
2329 break;
2330 default: {
2331 fprintf(stderr, "Unhandled texture op: %d\n", instr->op);
2332 assert(0);
2333 }
2334 }
2335 }
2336
2337 static void
2338 emit_jump(compiler_context *ctx, nir_jump_instr *instr)
2339 {
2340 switch (instr->type) {
2341 case nir_jump_break: {
2342 /* Emit a branch out of the loop */
2343 struct midgard_instruction br = v_branch(false, false);
2344 br.branch.target_type = TARGET_BREAK;
2345 br.branch.target_break = ctx->current_loop_depth;
2346 emit_mir_instruction(ctx, br);
2347 break;
2348 }
2349
2350 default:
2351 unreachable("Unhandled jump");
2352 }
2353 }
2354
2355 static void
2356 emit_instr(compiler_context *ctx, struct nir_instr *instr)
2357 {
2358 switch (instr->type) {
2359 case nir_instr_type_load_const:
2360 emit_load_const(ctx, nir_instr_as_load_const(instr));
2361 break;
2362
2363 case nir_instr_type_intrinsic:
2364 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2365 break;
2366
2367 case nir_instr_type_alu:
2368 emit_alu(ctx, nir_instr_as_alu(instr));
2369 break;
2370
2371 case nir_instr_type_tex:
2372 emit_tex(ctx, nir_instr_as_tex(instr));
2373 break;
2374
2375 case nir_instr_type_jump:
2376 emit_jump(ctx, nir_instr_as_jump(instr));
2377 break;
2378
2379 case nir_instr_type_undef:
2380 /* Spurious */
2381 break;
2382
2383 default:
2384 unreachable("Unhandled instruction type");
2385 }
2386 }
2387
2388 /* ALU instructions can inline or embed constants, which decreases register
2389 * pressure and saves space. */
2390
2391 #define CONDITIONAL_ATTACH(idx) \
2392 { \
2393 void *entry = \
2394 _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
2395 \
2396 if (entry) { \
2397 attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
2398 alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
2399 } \
2400 }
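/* In other words: if the SSA value feeding src[idx] is a known constant
 * (tracked in ctx->ssa_constants), fold it into the instruction's embedded
 * constants and point that source at the constant register. */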
2401
2402 static void
2403 inline_alu_constants(compiler_context *ctx, midgard_block *block)
2404 {
2405 mir_foreach_instr_in_block(block, alu) {
2406 /* Other instructions cannot inline constants */
2407 if (alu->type != TAG_ALU_4)
2408 continue;
2409 if (alu->compact_branch)
2410 continue;
2411
2412 /* If there is already a constant here, we can do nothing */
2413 if (alu->has_constants)
2414 continue;
2415
2416 CONDITIONAL_ATTACH(0);
2417
2418 if (!alu->has_constants) {
2419 CONDITIONAL_ATTACH(1)
2420 } else if (!alu->inline_constant) {
2421 /* Corner case: _two_ vec4 constants, for instance with a
2422 * csel. For this case, we can only use a constant
2423 * register for one, we'll have to emit a move for the
2424 * other. */
2425
2426 void *entry =
2427 _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
2428 unsigned scratch = make_compiler_temp(ctx);
2429
2430 if (entry) {
2431 midgard_instruction ins =
2432 v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
2433 attach_constants(ctx, &ins, entry, alu->src[1] + 1);
2434
2435 /* Set the source */
2436 alu->src[1] = scratch;
2437
2438 /* Inject us -before- the last instruction which set r31, if
2439 * possible.
2440 */
2441 midgard_instruction *first = list_first_entry(
2442 &block->base.instructions, midgard_instruction, link);
2443
2444 if (alu == first) {
2445 mir_insert_instruction_before(ctx, alu, ins);
2446 } else {
2447 mir_insert_instruction_before(ctx, mir_prev_op(alu), ins);
2448 }
2449 }
2450 }
2451 }
2452 }
2453
2454 unsigned
2455 max_bitsize_for_alu(midgard_instruction *ins)
2456 {
2457 unsigned max_bitsize = 0;
2458 for (int i = 0; i < MIR_SRC_COUNT; i++) {
2459 if (ins->src[i] == ~0)
2460 continue;
2461 unsigned src_bitsize = nir_alu_type_get_type_size(ins->src_types[i]);
2462 max_bitsize = MAX2(src_bitsize, max_bitsize);
2463 }
2464 unsigned dst_bitsize = nir_alu_type_get_type_size(ins->dest_type);
2465 max_bitsize = MAX2(dst_bitsize, max_bitsize);
2466
2467 /* We emulate 8-bit as 16-bit for simplicity of packing */
2468 max_bitsize = MAX2(max_bitsize, 16);
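   /* e.g. an all-8-bit integer op is treated as 16-bit here, matching
    * reg_mode_for_bitsize() below, which maps 8 to the 16-bit mode */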
2469
2470 /* We don't have fp16 LUTs, so we'll want to emit code like:
2471 *
2472 * vlut.fsinr hr0, hr0
2473 *
2474 * where both input and output are 16-bit but the operation is carried
2475 * out in 32-bit
2476 */
2477
2478 switch (ins->op) {
2479 case midgard_alu_op_fsqrt:
2480 case midgard_alu_op_frcp:
2481 case midgard_alu_op_frsqrt:
2482 case midgard_alu_op_fsinpi:
2483 case midgard_alu_op_fcospi:
2484 case midgard_alu_op_fexp2:
2485 case midgard_alu_op_flog2:
2486 max_bitsize = MAX2(max_bitsize, 32);
2487 break;
2488
2489 default:
2490 break;
2491 }
2492
2493    /* High implies computing at a higher bitsize, e.g. umul_high of 32-bit
2494 * requires computing at 64-bit */
2495 if (midgard_is_integer_out_op(ins->op) &&
2496 ins->outmod == midgard_outmod_keephi) {
2497 max_bitsize *= 2;
2498 assert(max_bitsize <= 64);
2499 }
2500
2501 return max_bitsize;
2502 }
2503
2504 midgard_reg_mode
2505 reg_mode_for_bitsize(unsigned bitsize)
2506 {
2507 switch (bitsize) {
2508    /* Use the 16-bit pipe for 8-bit since we don't support vec16 yet */
2509 case 8:
2510 case 16:
2511 return midgard_reg_mode_16;
2512 case 32:
2513 return midgard_reg_mode_32;
2514 case 64:
2515 return midgard_reg_mode_64;
2516 default:
2517 unreachable("invalid bit size");
2518 }
2519 }
2520
2521 /* Midgard supports two types of constants, embedded constants (128-bit) and
2522 * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
2523 * constants can be demoted to inline constants, for space savings and
2524 * sometimes a performance boost */
2525
2526 static void
2527 embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
2528 {
2529 mir_foreach_instr_in_block(block, ins) {
2530 if (!ins->has_constants)
2531 continue;
2532 if (ins->has_inline_constant)
2533 continue;
2534
2535 unsigned max_bitsize = max_bitsize_for_alu(ins);
2536
2537 /* We can inline 32-bit (sometimes) or 16-bit (usually) */
2538 bool is_16 = max_bitsize == 16;
2539 bool is_32 = max_bitsize == 32;
2540
2541 if (!(is_16 || is_32))
2542 continue;
2543
2544       /* Only src[1] can be demoted to an inline constant due to
2545        * encoding restrictions, so if the constant currently feeds
2546        * src[0] and the op commutes, we flip the arguments */
2547
2548 int op = ins->op;
2549
2550 if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
2551 alu_opcode_props[op].props & OP_COMMUTES) {
2552 mir_flip(ins);
2553 }
2554
2555 if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
2556          /* The component comes from the swizzle of the first enabled lane in the writemask */
2557 assert(ins->mask);
2558 unsigned first_comp = ffs(ins->mask) - 1;
2559 unsigned component = ins->swizzle[1][first_comp];
2560
2561 /* Scale constant appropriately, if we can legally */
2562 int16_t scaled_constant = 0;
2563
2564 if (is_16) {
2565 scaled_constant = ins->constants.u16[component];
2566 } else if (midgard_is_integer_op(op)) {
2567 scaled_constant = ins->constants.u32[component];
2568
2569 /* Constant overflow after resize */
2570 if (scaled_constant != ins->constants.u32[component])
2571 continue;
2572 } else {
2573 float original = ins->constants.f32[component];
2574 scaled_constant = _mesa_float_to_half(original);
2575
2576 /* Check for loss of precision. If this is
2577 * mediump, we don't care, but for a highp
2578 * shader, we need to pay attention. NIR
2579 * doesn't yet tell us which mode we're in!
2580 * Practically this prevents most constants
2581 * from being inlined, sadly. */
2582
2583 float fp32 = _mesa_half_to_float(scaled_constant);
2584
2585 if (fp32 != original)
2586 continue;
2587 }
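         /* e.g. 0.5f and 1.0f survive the fp32 -> fp16 -> fp32 round trip
          * exactly and can be inlined; 0.1f does not, so it stays as an
          * embedded constant. */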
2588
2589 /* Should've been const folded */
2590 if (ins->src_abs[1] || ins->src_neg[1])
2591 continue;
2592
2593 /* Make sure that the constant is not itself a vector
2594 * by checking if all accessed values are the same. */
2595
2596 const midgard_constants *cons = &ins->constants;
2597 uint32_t value = is_16 ? cons->u16[component] : cons->u32[component];
2598
2599 bool is_vector = false;
2600 unsigned mask = effective_writemask(ins->op, ins->mask);
2601
2602 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
2603 /* We only care if this component is actually used */
2604 if (!(mask & (1 << c)))
2605 continue;
2606
2607 uint32_t test = is_16 ? cons->u16[ins->swizzle[1][c]]
2608 : cons->u32[ins->swizzle[1][c]];
2609
2610 if (test != value) {
2611 is_vector = true;
2612 break;
2613 }
2614 }
2615
2616 if (is_vector)
2617 continue;
2618
2619 /* Get rid of the embedded constant */
2620 ins->has_constants = false;
2621 ins->src[1] = ~0;
2622 ins->has_inline_constant = true;
2623 ins->inline_constant = scaled_constant;
2624 }
2625 }
2626 }
2627
2628 /* Dead code elimination for branches at the end of a block - only one branch
2629 * per block is legal semantically */
2630
2631 static void
2632 midgard_cull_dead_branch(compiler_context *ctx, midgard_block *block)
2633 {
2634 bool branched = false;
2635
2636 mir_foreach_instr_in_block_safe(block, ins) {
2637 if (!midgard_is_branch_unit(ins->unit))
2638 continue;
2639
2640 if (branched)
2641 mir_remove_instruction(ins);
2642
2643 branched = true;
2644 }
2645 }
2646
2647 /* We want to force the invert on AND/OR to the second slot to legalize into
2648 * iandnot/iornot. The relevant patterns are for AND (and OR respectively)
2649 *
2650 * ~a & #b = ~a & ~(#~b)
2651 * ~a & b = b & ~a
2652 */
2653
2654 static void
2655 midgard_legalize_invert(compiler_context *ctx, midgard_block *block)
2656 {
2657 mir_foreach_instr_in_block(block, ins) {
2658 if (ins->type != TAG_ALU_4)
2659 continue;
2660
2661 if (ins->op != midgard_alu_op_iand && ins->op != midgard_alu_op_ior)
2662 continue;
2663
2664 if (ins->src_invert[1] || !ins->src_invert[0])
2665 continue;
2666
2667 if (ins->has_inline_constant) {
2668 /* ~(#~a) = ~(~#a) = a, so valid, and forces both
2669 * inverts on */
2670 ins->inline_constant = ~ins->inline_constant;
2671 ins->src_invert[1] = true;
2672 } else {
2673          /* Flip to the right invert order. Note that
2674           * has_inline_constant is false by assumption on this
2675           * branch, so flipping makes sense. */
2676 mir_flip(ins);
2677 }
2678 }
2679 }
2680
2681 static unsigned
2682 emit_fragment_epilogue(compiler_context *ctx, unsigned rt, unsigned sample_iter)
2683 {
2684 /* Loop to ourselves */
2685 midgard_instruction *br = ctx->writeout_branch[rt][sample_iter];
2686 struct midgard_instruction ins = v_branch(false, false);
2687 ins.writeout = br->writeout;
2688 ins.branch.target_block = ctx->block_count - 1;
2689 ins.constants.u32[0] = br->constants.u32[0];
2690 memcpy(&ins.src_types, &br->src_types, sizeof(ins.src_types));
2691 emit_mir_instruction(ctx, ins);
2692
2693 ctx->current_block->epilogue = true;
2694 schedule_barrier(ctx);
2695 return ins.branch.target_block;
2696 }
2697
2698 static midgard_block *
2699 emit_block_init(compiler_context *ctx)
2700 {
2701 midgard_block *this_block = ctx->after_block;
2702 ctx->after_block = NULL;
2703
2704 if (!this_block)
2705 this_block = create_empty_block(ctx);
2706
2707 list_addtail(&this_block->base.link, &ctx->blocks);
2708
2709 this_block->scheduled = false;
2710 ++ctx->block_count;
2711
2712 /* Set up current block */
2713 list_inithead(&this_block->base.instructions);
2714 ctx->current_block = this_block;
2715
2716 return this_block;
2717 }
2718
2719 static midgard_block *
2720 emit_block(compiler_context *ctx, nir_block *block)
2721 {
2722 midgard_block *this_block = emit_block_init(ctx);
2723
2724 nir_foreach_instr(instr, block) {
2725 emit_instr(ctx, instr);
2726 ++ctx->instruction_count;
2727 }
2728
2729 return this_block;
2730 }
2731
2732 static midgard_block *emit_cf_list(struct compiler_context *ctx,
2733 struct exec_list *list);
2734
2735 static void
2736 emit_if(struct compiler_context *ctx, nir_if *nif)
2737 {
2738 midgard_block *before_block = ctx->current_block;
2739
2740 /* Speculatively emit the branch, but we can't fill it in until later */
2741 EMIT(branch, true, true);
2742 midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
2743 then_branch->src[0] = nir_src_index(ctx, &nif->condition);
2744 then_branch->src_types[0] = nir_type_uint32;
2745
2746 /* Emit the two subblocks. */
2747 midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
2748 midgard_block *end_then_block = ctx->current_block;
2749
2750 /* Emit a jump from the end of the then block to the end of the else */
2751 EMIT(branch, false, false);
2752 midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2753
2754 /* Emit second block, and check if it's empty */
2755
2756 int else_idx = ctx->block_count;
2757 int count_in = ctx->instruction_count;
2758 midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
2759 midgard_block *end_else_block = ctx->current_block;
2760 int after_else_idx = ctx->block_count;
2761
2762 /* Now that we have the subblocks emitted, fix up the branches */
2763
2764 assert(then_block);
2765 assert(else_block);
2766
2767 if (ctx->instruction_count == count_in) {
2768 /* The else block is empty, so don't emit an exit jump */
2769 mir_remove_instruction(then_exit);
2770 then_branch->branch.target_block = after_else_idx;
2771 } else {
2772 then_branch->branch.target_block = else_idx;
2773 then_exit->branch.target_block = after_else_idx;
2774 }
2775
2776 /* Wire up the successors */
2777
2778 ctx->after_block = create_empty_block(ctx);
2779
2780 pan_block_add_successor(&before_block->base, &then_block->base);
2781 pan_block_add_successor(&before_block->base, &else_block->base);
2782
2783 pan_block_add_successor(&end_then_block->base, &ctx->after_block->base);
2784 pan_block_add_successor(&end_else_block->base, &ctx->after_block->base);
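   /* Resulting CFG sketch: before_block branches into then_block or
    * else_block, and both subtrees fall through to the fresh after_block,
    * which the next emit_block_init() picks up as the current block. */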
2785 }
2786
2787 static void
2788 emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2789 {
2790 assert(!nir_loop_has_continue_construct(nloop));
2791
2792 /* Remember where we are */
2793 midgard_block *start_block = ctx->current_block;
2794
2795 /* Allocate a loop number, growing the current inner loop depth */
2796 int loop_idx = ++ctx->current_loop_depth;
2797
2798 /* Get index from before the body so we can loop back later */
2799 int start_idx = ctx->block_count;
2800
2801 /* Emit the body itself */
2802 midgard_block *loop_block = emit_cf_list(ctx, &nloop->body);
2803
2804    /* Branch back to the top of the loop */
2805 struct midgard_instruction br_back = v_branch(false, false);
2806 br_back.branch.target_block = start_idx;
2807 emit_mir_instruction(ctx, br_back);
2808
2809 /* Mark down that branch in the graph. */
2810 pan_block_add_successor(&start_block->base, &loop_block->base);
2811 pan_block_add_successor(&ctx->current_block->base, &loop_block->base);
2812
2813 /* Find the index of the block about to follow us (note: we don't add
2814 * one; blocks are 0-indexed so we get a fencepost problem) */
2815 int break_block_idx = ctx->block_count;
2816
2817 /* Fix up the break statements we emitted to point to the right place,
2818 * now that we can allocate a block number for them */
2819 ctx->after_block = create_empty_block(ctx);
2820
2821 mir_foreach_block_from(ctx, start_block, _block) {
2822 mir_foreach_instr_in_block(((midgard_block *)_block), ins) {
2823 if (ins->type != TAG_ALU_4)
2824 continue;
2825 if (!ins->compact_branch)
2826 continue;
2827
2828 /* We found a branch -- check the type to see if we need to do anything
2829 */
2830 if (ins->branch.target_type != TARGET_BREAK)
2831 continue;
2832
2833 /* It's a break! Check if it's our break */
2834 if (ins->branch.target_break != loop_idx)
2835 continue;
2836
2837 /* Okay, cool, we're breaking out of this loop.
2838 * Rewrite from a break to a goto */
2839
2840 ins->branch.target_type = TARGET_GOTO;
2841 ins->branch.target_block = break_block_idx;
2842
2843 pan_block_add_successor(_block, &ctx->after_block->base);
2844 }
2845 }
2846
2847 /* Now that we've finished emitting the loop, free up the depth again
2848 * so we play nice with recursion amid nested loops */
2849 --ctx->current_loop_depth;
2850
2851 /* Dump loop stats */
2852 ++ctx->loop_count;
2853 }
2854
2855 static midgard_block *
2856 emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2857 {
2858 midgard_block *start_block = NULL;
2859
2860 foreach_list_typed(nir_cf_node, node, node, list) {
2861 switch (node->type) {
2862 case nir_cf_node_block: {
2863 midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2864
2865 if (!start_block)
2866 start_block = block;
2867
2868 break;
2869 }
2870
2871 case nir_cf_node_if:
2872 emit_if(ctx, nir_cf_node_as_if(node));
2873 break;
2874
2875 case nir_cf_node_loop:
2876 emit_loop(ctx, nir_cf_node_as_loop(node));
2877 break;
2878
2879 case nir_cf_node_function:
2880 assert(0);
2881 break;
2882 }
2883 }
2884
2885 return start_block;
2886 }
2887
2888 /* Due to lookahead, we need to report the first tag executed in the command
2889 * stream and in branch targets. An initial block might be empty, so iterate
2890 * until we find one that 'works' */
2891
2892 unsigned
2893 midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2894 {
2895 midgard_block *initial_block = mir_get_block(ctx, block_idx);
2896
2897 mir_foreach_block_from(ctx, initial_block, _v) {
2898 midgard_block *v = (midgard_block *)_v;
2899 if (v->quadword_count) {
2900 midgard_bundle *initial_bundle =
2901 util_dynarray_element(&v->bundles, midgard_bundle, 0);
2902
2903 return initial_bundle->tag;
2904 }
2905 }
2906
2907 /* Default to a tag 1 which will break from the shader, in case we jump
2908 * to the exit block (i.e. `return` in a compute shader) */
2909
2910 return 1;
2911 }
2912
2913 /* For each fragment writeout instruction, generate a writeout loop to
2914 * associate with it */
2915
2916 static void
2917 mir_add_writeout_loops(compiler_context *ctx)
2918 {
2919 for (unsigned rt = 0; rt < ARRAY_SIZE(ctx->writeout_branch); ++rt) {
2920 for (unsigned s = 0; s < MIDGARD_MAX_SAMPLE_ITER; ++s) {
2921 midgard_instruction *br = ctx->writeout_branch[rt][s];
2922 if (!br)
2923 continue;
2924
2925 unsigned popped = br->branch.target_block;
2926 pan_block_add_successor(&(mir_get_block(ctx, popped - 1)->base),
2927 &ctx->current_block->base);
2928 br->branch.target_block = emit_fragment_epilogue(ctx, rt, s);
2929 br->branch.target_type = TARGET_GOTO;
2930
2931 /* If we have more RTs, we'll need to restore back after our
2932 * loop terminates */
2933 midgard_instruction *next_br = NULL;
2934
2935 if ((s + 1) < MIDGARD_MAX_SAMPLE_ITER)
2936 next_br = ctx->writeout_branch[rt][s + 1];
2937
2938 if (!next_br && (rt + 1) < ARRAY_SIZE(ctx->writeout_branch))
2939 next_br = ctx->writeout_branch[rt + 1][0];
2940
2941 if (next_br) {
2942 midgard_instruction uncond = v_branch(false, false);
2943 uncond.branch.target_block = popped;
2944 uncond.branch.target_type = TARGET_GOTO;
2945 emit_mir_instruction(ctx, uncond);
2946 pan_block_add_successor(&ctx->current_block->base,
2947 &(mir_get_block(ctx, popped)->base));
2948 schedule_barrier(ctx);
2949 } else {
2950 /* We're last, so we can terminate here */
2951 br->last_writeout = true;
2952 }
2953 }
2954 }
2955 }
2956
2957 void
2958 midgard_compile_shader_nir(nir_shader *nir,
2959 const struct panfrost_compile_inputs *inputs,
2960 struct util_dynarray *binary,
2961 struct pan_shader_info *info)
2962 {
2963 midgard_debug = debug_get_option_midgard_debug();
2964
2965 /* TODO: Bound against what? */
2966 compiler_context *ctx = rzalloc(NULL, compiler_context);
2967
2968 ctx->inputs = inputs;
2969 ctx->nir = nir;
2970 ctx->info = info;
2971 ctx->stage = nir->info.stage;
2972 ctx->blend_input = ~0;
2973 ctx->blend_src1 = ~0;
2974 ctx->quirks = midgard_get_quirks(inputs->gpu_id);
2975
2976    /* Initialize hash tables at a global (not per-block) level */
2977
2978 ctx->ssa_constants = _mesa_hash_table_u64_create(ctx);
2979
2980 /* Collect varyings after lowering I/O */
2981 info->quirk_no_auto32 = (ctx->quirks & MIDGARD_NO_AUTO32);
2982 pan_nir_collect_varyings(nir, info);
2983
2984 /* Optimisation passes */
2985 optimise_nir(nir, ctx->quirks, inputs->is_blend);
2986
2987 bool skip_internal = nir->info.internal;
2988 skip_internal &= !(midgard_debug & MIDGARD_DBG_INTERNAL);
2989
2990 if (midgard_debug & MIDGARD_DBG_SHADERS && !skip_internal)
2991 nir_print_shader(nir, stdout);
2992
2993 info->tls_size = nir->scratch_size;
2994
2995 nir_foreach_function_with_impl(func, impl, nir) {
2996 list_inithead(&ctx->blocks);
2997 ctx->block_count = 0;
2998 ctx->func = func;
2999
3000 if (nir->info.outputs_read && !inputs->is_blend) {
3001 emit_block_init(ctx);
3002
3003 struct midgard_instruction wait = v_branch(false, false);
3004 wait.branch.target_type = TARGET_TILEBUF_WAIT;
3005
3006 emit_mir_instruction(ctx, wait);
3007
3008 ++ctx->instruction_count;
3009 }
3010
3011 emit_cf_list(ctx, &impl->body);
3012 break; /* TODO: Multi-function shaders */
3013 }
3014
3015 /* Per-block lowering before opts */
3016
3017 mir_foreach_block(ctx, _block) {
3018 midgard_block *block = (midgard_block *)_block;
3019 inline_alu_constants(ctx, block);
3020 embedded_to_inline_constant(ctx, block);
3021 }
3022 /* MIR-level optimizations */
3023
3024 bool progress = false;
3025
3026 do {
3027 progress = false;
3028 progress |= midgard_opt_dead_code_eliminate(ctx);
3029 progress |= midgard_opt_prop(ctx);
3030
3031 mir_foreach_block(ctx, _block) {
3032 midgard_block *block = (midgard_block *)_block;
3033 progress |= midgard_opt_copy_prop(ctx, block);
3034 progress |= midgard_opt_combine_projection(ctx, block);
3035 progress |= midgard_opt_varying_projection(ctx, block);
3036 }
3037 } while (progress);
3038
3039 mir_foreach_block(ctx, _block) {
3040 midgard_block *block = (midgard_block *)_block;
3041 midgard_lower_derivatives(ctx, block);
3042 midgard_legalize_invert(ctx, block);
3043 midgard_cull_dead_branch(ctx, block);
3044 }
3045
3046 if (ctx->stage == MESA_SHADER_FRAGMENT)
3047 mir_add_writeout_loops(ctx);
3048
3049 /* Analyze now that the code is known but before scheduling creates
3050 * pipeline registers which are harder to track */
3051 mir_analyze_helper_requirements(ctx);
3052
3053 if (midgard_debug & MIDGARD_DBG_SHADERS && !skip_internal)
3054 mir_print_shader(ctx);
3055
3056 /* Schedule! */
3057 midgard_schedule_program(ctx);
3058 mir_ra(ctx);
3059
3060 if (midgard_debug & MIDGARD_DBG_SHADERS && !skip_internal)
3061 mir_print_shader(ctx);
3062
3063 /* Analyze after scheduling since this is order-dependent */
3064 mir_analyze_helper_terminate(ctx);
3065
3066 /* Emit flat binary from the instruction arrays. Iterate each block in
3067 * sequence. Save instruction boundaries such that lookahead tags can
3068 * be assigned easily */
3069
3070 /* Cache _all_ bundles in source order for lookahead across failed branches */
3071
3072 int bundle_count = 0;
3073 mir_foreach_block(ctx, _block) {
3074 midgard_block *block = (midgard_block *)_block;
3075 bundle_count += block->bundles.size / sizeof(midgard_bundle);
3076 }
3077 midgard_bundle **source_order_bundles =
3078 malloc(sizeof(midgard_bundle *) * bundle_count);
3079 int bundle_idx = 0;
3080 mir_foreach_block(ctx, _block) {
3081 midgard_block *block = (midgard_block *)_block;
3082 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
3083 source_order_bundles[bundle_idx++] = bundle;
3084 }
3085 }
3086
3087 int current_bundle = 0;
3088
3089    /* Midgard prefetches instruction types, so during emission we
3090     * need to look ahead. Unless this is the last instruction, in
3091     * which case we return 1. */
3092
3093 mir_foreach_block(ctx, _block) {
3094 midgard_block *block = (midgard_block *)_block;
3095 mir_foreach_bundle_in_block(block, bundle) {
3096 int lookahead = 1;
3097
3098 if (!bundle->last_writeout && (current_bundle + 1 < bundle_count))
3099 lookahead = source_order_bundles[current_bundle + 1]->tag;
3100
3101 emit_binary_bundle(ctx, block, bundle, binary, lookahead);
3102 ++current_bundle;
3103 }
3104
3105 /* TODO: Free deeper */
3106 // util_dynarray_fini(&block->instructions);
3107 }
3108
3109 free(source_order_bundles);
3110
3111 /* Report the very first tag executed */
3112 info->midgard.first_tag = midgard_get_first_tag_from_block(ctx, 0);
3113
3114 info->ubo_mask = ctx->ubo_mask & ((1 << ctx->nir->info.num_ubos) - 1);
3115
3116 if (midgard_debug & MIDGARD_DBG_SHADERS && !skip_internal) {
3117 disassemble_midgard(stdout, binary->data, binary->size, inputs->gpu_id,
3118 midgard_debug & MIDGARD_DBG_VERBOSE);
3119 fflush(stdout);
3120 }
3121
3122 /* A shader ending on a 16MB boundary causes INSTR_INVALID_PC faults,
3123 * workaround by adding some padding to the end of the shader. (The
3124 * kernel makes sure shader BOs can't cross 16MB boundaries.) */
3125 if (binary->size)
3126 memset(util_dynarray_grow(binary, uint8_t, 16), 0, 16);
3127
3128 if ((midgard_debug & MIDGARD_DBG_SHADERDB || inputs->debug) &&
3129 !nir->info.internal) {
3130 unsigned nr_bundles = 0, nr_ins = 0;
3131
3132 /* Count instructions and bundles */
3133
3134 mir_foreach_block(ctx, _block) {
3135 midgard_block *block = (midgard_block *)_block;
3136 nr_bundles +=
3137 util_dynarray_num_elements(&block->bundles, midgard_bundle);
3138
3139 mir_foreach_bundle_in_block(block, bun)
3140 nr_ins += bun->instruction_count;
3141 }
3142
3143       /* Calculate the thread count: the number of work registers
3144        * determines cutoffs for how many threads can run */
3145
3146 unsigned nr_registers = info->work_reg_count;
3147
3148 unsigned nr_threads = (nr_registers <= 4) ? 4
3149 : (nr_registers <= 8) ? 2
3150 : 1;
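      /* e.g. a shader needing 6 work registers reports 2 threads, one
       * needing 12 reports 1 */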
3151
3152 char *shaderdb = NULL;
3153
3154 /* Dump stats */
3155
3156 asprintf(&shaderdb,
3157 "%s shader: "
3158 "%u inst, %u bundles, %u quadwords, "
3159 "%u registers, %u threads, %u loops, "
3160 "%u:%u spills:fills",
3161 ctx->inputs->is_blend ? "PAN_SHADER_BLEND"
3162 : gl_shader_stage_name(ctx->stage),
3163 nr_ins, nr_bundles, ctx->quadword_count, nr_registers,
3164 nr_threads, ctx->loop_count, ctx->spills, ctx->fills);
3165
3166 if (midgard_debug & MIDGARD_DBG_SHADERDB)
3167 fprintf(stderr, "SHADER-DB: %s\n", shaderdb);
3168
3169 if (inputs->debug)
3170 util_debug_message(inputs->debug, SHADER_INFO, "%s", shaderdb);
3171
3172 free(shaderdb);
3173 }
3174
3175 _mesa_hash_table_u64_destroy(ctx->ssa_constants);
3176 ralloc_free(ctx);
3177 }
3178