/*
 * Copyright (C) 2019-2020 Collabora, Ltd.
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MDG_COMPILER_H
#define _MDG_COMPILER_H

#include "helpers.h"
#include "midgard.h"
#include "midgard_compile.h"
#include "midgard_ops.h"

#include "util/hash_table.h"
#include "util/list.h"
#include "util/set.h"
#include "util/u_dynarray.h"
#include "util/u_math.h"

#include "compiler/glsl_types.h"
#include "compiler/nir/nir.h"
#include "panfrost/util/lcra.h"
#include "panfrost/util/pan_ir.h"

/* Forward declare */
struct midgard_block;

/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
 * the hardware), which is why that one must be zero. TARGET_DISCARD signals
 * that the instruction is actually a discard op. */

#define TARGET_GOTO         0
#define TARGET_BREAK        1
#define TARGET_CONTINUE     2
#define TARGET_DISCARD      3
#define TARGET_TILEBUF_WAIT 4

typedef struct midgard_branch {
   /* If conditional, the condition is specified in r31.w */
   bool conditional;

   /* For conditionals: if true, we branch on FALSE; if false, we branch on
    * TRUE. */
   bool invert_conditional;

   /* Branch targets: the start of a block, the start of a loop (continue),
    * the end of a loop (break). Value is one of the TARGET_* types above. */
   unsigned target_type;

   /* The actual target */
   union {
      int target_block;
      int target_break;
      int target_continue;
   };
} midgard_branch;

/* Generic in-memory data type representing a single logical instruction,
 * rather than a single instruction group. This is the preferred form for
 * code gen. Multiple midgard_instructions will later be combined during
 * scheduling, though this is not represented in this structure. Its format
 * bridges the low-level binary representation with the higher-level semantic
 * meaning.
 *
 * Notably, it allows registers to be specified as block-local SSA, for code
 * emitted before the register allocation pass.
 */

#define MIR_SRC_COUNT      4
#define MIR_VEC_COMPONENTS 16

typedef struct midgard_instruction {
   /* Must be first for casting */
   struct list_head link;

   unsigned type; /* ALU, load/store, texture */

   /* Instruction arguments represented as block-local SSA
    * indices, rather than registers. ~0 means unused. */
   unsigned src[MIR_SRC_COUNT];
   unsigned dest;

   /* vec16 swizzle, unpacked, per source */
   unsigned swizzle[MIR_SRC_COUNT][MIR_VEC_COMPONENTS];

   /* Types! */
   nir_alu_type src_types[MIR_SRC_COUNT];
   nir_alu_type dest_type;

   /* Packing ops have non-32-bit dest types even though they functionally
    * work at the 32-bit level; use this as a signal to disable copyprop.
    * We may need synthetic pack ops instead. */
   bool is_pack;

   /* Modifiers, depending on type */
   union {
      struct {
         bool src_abs[MIR_SRC_COUNT];
         bool src_neg[MIR_SRC_COUNT];
      };

      struct {
         bool src_shift[MIR_SRC_COUNT];
      };
   };

   /* Out of the union for csel (could maybe be fixed...) */
   bool src_invert[MIR_SRC_COUNT];

   /* If the op supports it */
   enum midgard_roundmode roundmode;

   /* For textures: should helpers execute this instruction (instead of
    * just helping with derivatives)? Should helpers terminate after? */
   bool helper_terminate;
   bool helper_execute;

   /* I.e. (1 << alu_bit) */
   int unit;

   bool has_constants;
   midgard_constants constants;
   uint16_t inline_constant;
   bool has_inline_constant;

   bool compact_branch;
   uint8_t writeout;
   bool last_writeout;

   /* Masks in a saneish format. One bit per channel, not packed fancy.
    * Use this instead of the op-specific ones, and switch over at emit
    * time */

   uint16_t mask;

   /* Hint for the register allocator not to spill the destination written
    * by this instruction (because it is a spill/unspill node itself).
    * Bitmask of spilled classes */

   unsigned no_spill;

   /* Generic hint for intra-pass use */
   bool hint;

   /* During scheduling, the backwards dependency graph
    * (DAG). nr_dependencies is the number of unscheduled
    * instructions that must still be scheduled after
    * (before) this instruction. dependents are which
    * instructions need to be scheduled before (after) this
    * instruction. */

   unsigned nr_dependencies;
   BITSET_WORD *dependents;

   /* Use this in conjunction with `type` */
   unsigned op;

   /* This refers to midgard_outmod_float or midgard_outmod_int.
    * In the case of an ALU op, use midgard_is_integer_out_op() to know
    * which one is used.
    * If it's a texture op, it's always midgard_outmod_float. */
   unsigned outmod;

   union {
      midgard_load_store_word load_store;
      midgard_texture_word texture;

      midgard_branch branch;
   };

   unsigned bundle_id;
} midgard_instruction;
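
/* For orientation, a rough sketch (not taken from the real emitter) of how a
 * two-operand float add could be expressed in this form, using hypothetical
 * indices `dst`, `a` and `b`. Which source slots a given op uses is
 * op-specific, so treat the slot assignment as illustrative only:
 *
 *    midgard_instruction ins = {
 *       .type = TAG_ALU_4,
 *       .op = midgard_alu_op_fadd,
 *       .dest = dst,
 *       .dest_type = nir_type_float32,
 *       .src = {a, b, ~0, ~0},
 *       .src_types = {nir_type_float32, nir_type_float32},
 *       .swizzle = SWIZZLE_IDENTITY,
 *       .mask = 0x3,
 *    };
 *
 * where mask is one bit per channel, so 0x3 writes .xy.
 */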

typedef struct midgard_block {
   pan_block base;

   bool scheduled;

   /* List of midgard_bundles emitted (after the scheduler has run) */
   struct util_dynarray bundles;

   /* Number of quadwords _actually_ emitted, as determined after scheduling */
   unsigned quadword_count;

   /* Indicates this is a fixed-function fragment epilogue block */
   bool epilogue;

   /* Are helper invocations required by this block? */
   bool helpers_in;
} midgard_block;

typedef struct midgard_bundle {
   /* Tag for the overall bundle */
   int tag;

   /* Instructions contained by the bundle. instruction_count <= 6 (vmul,
    * sadd, vadd, smul, vlut, branch) */
   int instruction_count;
   midgard_instruction *instructions[6];

   /* Bundle-wide ALU configuration */
   int padding;
   int control;
   bool has_embedded_constants;
   midgard_constants constants;
   bool last_writeout;
} midgard_bundle;

enum midgard_rt_id {
   MIDGARD_COLOR_RT0 = 0,
   MIDGARD_COLOR_RT1,
   MIDGARD_COLOR_RT2,
   MIDGARD_COLOR_RT3,
   MIDGARD_COLOR_RT4,
   MIDGARD_COLOR_RT5,
   MIDGARD_COLOR_RT6,
   MIDGARD_COLOR_RT7,
   MIDGARD_ZS_RT,
   MIDGARD_NUM_RTS,
};

#define MIDGARD_MAX_SAMPLE_ITER 16

typedef struct compiler_context {
   const struct panfrost_compile_inputs *inputs;
   nir_shader *nir;
   struct pan_shader_info *info;
   gl_shader_stage stage;

   /* Index to precolour to r0 for an input blend colour */
   unsigned blend_input;

   /* Index to precolour to r2 for a dual-source blend colour */
   unsigned blend_src1;

   /* Count of spills and fills for shaderdb */
   unsigned spills;
   unsigned fills;

   /* Current NIR function */
   nir_function *func;

   /* Allocated compiler temporary counter */
   unsigned temp_alloc;

   /* Unordered list of midgard_blocks */
   int block_count;
   struct list_head blocks;

   /* TODO merge with block_count? */
   unsigned block_source_count;

   /* List of midgard_instructions emitted for the current block */
   midgard_block *current_block;

   /* If non-NULL, use this preset block as the block following the current
    * one; otherwise, emit_block will create one */
   midgard_block *after_block;

   /* The current "depth" of the loop, for disambiguating breaks/continues
    * when using nested loops */
   int current_loop_depth;

   /* Total number of loops for shader-db */
   unsigned loop_count;

   /* Constants which have been loaded, for later inlining */
   struct hash_table_u64 *ssa_constants;

   int temp_count;
   int max_hash;

   /* Count of instructions emitted from NIR overall, across all blocks */
   int instruction_count;

   unsigned quadword_count;

   /* Bitmask of valid metadata */
   unsigned metadata;

   /* Model-specific quirk set */
   uint32_t quirks;

   /* Writeout instructions for each render target */
   midgard_instruction
      *writeout_branch[MIDGARD_NUM_RTS][MIDGARD_MAX_SAMPLE_ITER];

   /* Mask of UBOs that need to be uploaded */
   uint32_t ubo_mask;
} compiler_context;

/* Per-block live_in/live_out */
#define MIDGARD_METADATA_LIVENESS (1 << 0)

/* Helpers for manipulating the above structures (forming the driver IR) */

/* Append instruction to end of current block */

static inline midgard_instruction *
mir_upload_ins(struct compiler_context *ctx,
               const struct midgard_instruction *ins)
{
   midgard_instruction *heap = ralloc(ctx, struct midgard_instruction);
   memcpy(heap, ins, sizeof(*ins));
   return heap;
}

static inline midgard_instruction *
emit_mir_instruction(struct compiler_context *ctx,
                     const struct midgard_instruction *ins)
{
   midgard_instruction *u = mir_upload_ins(ctx, ins);
   list_addtail(&u->link, &ctx->current_block->base.instructions);
   return u;
}
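
/* Usage sketch: instructions are built by value on the stack and then
 * appended; emit_mir_instruction copies the argument onto the heap, so the
 * local can safely go out of scope afterwards. E.g., with v_mov (defined
 * below) and hypothetical indices `src` and `dest`:
 *
 *    midgard_instruction mov = v_mov(src, dest);
 *    emit_mir_instruction(ctx, &mov);
 */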

static inline struct midgard_instruction *
mir_insert_instruction_before(struct compiler_context *ctx,
                              struct midgard_instruction *tag,
                              const struct midgard_instruction *ins)
{
   struct midgard_instruction *u = mir_upload_ins(ctx, ins);
   list_addtail(&u->link, &tag->link);
   return u;
}

static inline void
mir_remove_instruction(struct midgard_instruction *ins)
{
   list_del(&ins->link);
}

#define mir_prev_op(ins) \
   list_last_entry(&((ins)->link), midgard_instruction, link)

#define mir_next_op(ins) \
   list_first_entry(&((ins)->link), midgard_instruction, link)

#define mir_foreach_block(ctx, v) \
   list_for_each_entry(pan_block, v, &ctx->blocks, link)

#define mir_foreach_block_from(ctx, from, v) \
   list_for_each_entry_from(pan_block, v, &from->base, &ctx->blocks, link)

#define mir_foreach_instr_in_block(block, v) \
   list_for_each_entry(struct midgard_instruction, v, \
                       &block->base.instructions, link)
#define mir_foreach_instr_in_block_rev(block, v) \
   list_for_each_entry_rev(struct midgard_instruction, v, \
                           &block->base.instructions, link)

#define mir_foreach_instr_in_block_safe(block, v) \
   list_for_each_entry_safe(struct midgard_instruction, v, \
                            &block->base.instructions, link)

#define mir_foreach_instr_in_block_safe_rev(block, v) \
   list_for_each_entry_safe_rev(struct midgard_instruction, v, \
                                &block->base.instructions, link)

#define mir_foreach_instr_in_block_from(block, v, from) \
   list_for_each_entry_from(struct midgard_instruction, v, from, \
                            &block->base.instructions, link)

#define mir_foreach_instr_in_block_from_rev(block, v, from) \
   list_for_each_entry_from_rev(struct midgard_instruction, v, from, \
                                &block->base.instructions, link)

#define mir_foreach_bundle_in_block(block, v) \
   util_dynarray_foreach(&block->bundles, midgard_bundle, v)

#define mir_foreach_bundle_in_block_rev(block, v) \
   util_dynarray_foreach_reverse(&block->bundles, midgard_bundle, v)

#define mir_foreach_instr_in_block_scheduled_rev(block, v) \
   midgard_instruction *v; \
   signed i = 0; \
   mir_foreach_bundle_in_block_rev(block, _bundle) \
      for (i = (_bundle->instruction_count - 1), v = _bundle->instructions[i]; \
           i >= 0; --i, v = (i >= 0) ? _bundle->instructions[i] : NULL)
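
/* Usage sketch: the iterator above visits a scheduled block in reverse,
 * starting from the last instruction of the last bundle. It declares `v`
 * and a counter at its expansion site, so it cannot appear twice in the
 * same scope:
 *
 *    mir_foreach_instr_in_block_scheduled_rev(block, ins) {
 *       ... inspect or rewrite *ins ...
 *    }
 */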

#define mir_foreach_instr_global(ctx, v) \
   mir_foreach_block(ctx, v_block) \
      mir_foreach_instr_in_block(((midgard_block *)v_block), v)

#define mir_foreach_instr_global_safe(ctx, v) \
   mir_foreach_block(ctx, v_block) \
      mir_foreach_instr_in_block_safe(((midgard_block *)v_block), v)

/* Based on set_foreach, expanded with automatic type casts */

#define mir_foreach_predecessor(blk, v) \
   struct set_entry *_entry_##v; \
   struct midgard_block *v; \
   for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
        v = (struct midgard_block *)(_entry_##v ? _entry_##v->key : NULL); \
        _entry_##v != NULL; \
        _entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
        v = (struct midgard_block *)(_entry_##v ? _entry_##v->key : NULL))

#define mir_foreach_src(ins, v) \
   for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
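
/* Usage sketch: iterate an instruction's source slots by index, skipping
 * unused (~0) slots:
 *
 *    mir_foreach_src(ins, s) {
 *       if (ins->src[s] == ~0u)
 *          continue;
 *       ... ins->src[s] is a live source index ...
 *    }
 */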

static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
   return list_last_entry(&block->base.instructions,
                          struct midgard_instruction, link);
}

static inline midgard_block *
mir_get_block(compiler_context *ctx, int idx)
{
   struct list_head *lst = &ctx->blocks;

   while ((idx--) + 1)
      lst = lst->next;

   return (struct midgard_block *)lst;
}

static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
   return IS_ALU(bundle->tag);
}

static inline unsigned
make_compiler_temp(compiler_context *ctx)
{
   return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
}

static inline unsigned
make_compiler_temp_reg(compiler_context *ctx)
{
   return ((ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1) | PAN_IS_REG;
}

static inline bool
mir_is_ssa(unsigned index)
{
   return (index < SSA_FIXED_MINIMUM) && !(index & PAN_IS_REG);
}

static inline unsigned
nir_ssa_index(nir_def *ssa)
{
   return (ssa->index << 1) | 0;
}

static inline unsigned
nir_reg_index(nir_def *handle)
{
   return (handle->index << 1) | PAN_IS_REG;
}
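
/* Worked example of the index encoding above: the low bit distinguishes NIR
 * registers from SSA defs, so SSA def 5 maps to (5 << 1) | 0 = 10, while
 * register 5 maps to (5 << 1) | PAN_IS_REG = 11. Compiler temporaries from
 * make_compiler_temp() are numbered starting at ssa_alloc, so they never
 * collide with indices derived from real NIR defs. */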

static inline unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
   nir_intrinsic_instr *load = nir_load_reg_for_def(src->ssa);

   if (load)
      return nir_reg_index(load->src[0].ssa);
   else
      return nir_ssa_index(src->ssa);
}

static inline unsigned
nir_def_index_with_mask(nir_def *def, uint16_t *write_mask)
{
   nir_intrinsic_instr *store = nir_store_reg_for_def(def);

   if (store) {
      *write_mask = nir_intrinsic_write_mask(store);
      return nir_reg_index(store->src[1].ssa);
   } else {
      *write_mask = (uint16_t)BITFIELD_MASK(def->num_components);
      return nir_ssa_index(def);
   }
}

static inline unsigned
nir_def_index(nir_def *def)
{
   uint16_t write_mask = 0;
   return nir_def_index_with_mask(def, &write_mask);
}

/* MIR manipulation */

void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old,
                                  unsigned new);
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old,
                                  unsigned new);
void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old,
                                   unsigned new, unsigned *swizzle);
bool mir_single_use(compiler_context *ctx, unsigned value);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
uint16_t mir_bytemask_of_read_components(const midgard_instruction *ins,
                                         unsigned node);
uint16_t mir_bytemask_of_read_components_index(const midgard_instruction *ins,
                                               unsigned i);
uint16_t mir_from_bytemask(uint16_t bytemask, unsigned bits);
uint16_t mir_bytemask(const midgard_instruction *ins);
uint16_t mir_round_bytemask_up(uint16_t mask, unsigned bits);
void mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask);
signed mir_upper_override(const midgard_instruction *ins, unsigned inst_size);
unsigned mir_components_for_type(nir_alu_type T);
unsigned max_bitsize_for_alu(const midgard_instruction *ins);
midgard_reg_mode reg_mode_for_bitsize(unsigned bitsize);

/* MIR printing */

void mir_print_instruction(const midgard_instruction *ins);
void mir_print_bundle(const midgard_bundle *ctx);
void mir_print_block(const midgard_block *block);
void mir_print_shader(const compiler_context *ctx);
bool mir_nontrivial_mod(const midgard_instruction *ins, unsigned i,
                        bool check_swizzle);
bool mir_nontrivial_outmod(const midgard_instruction *ins);

midgard_instruction *mir_insert_instruction_before_scheduled(
   compiler_context *ctx, midgard_block *block, const midgard_instruction *tag,
   const midgard_instruction *ins);
midgard_instruction *mir_insert_instruction_after_scheduled(
   compiler_context *ctx, midgard_block *block, const midgard_instruction *tag,
   const midgard_instruction *ins);
void mir_flip(midgard_instruction *ins);
void mir_compute_temp_count(compiler_context *ctx);

#define LDST_GLOBAL  (REGISTER_LDST_ZERO << 2)
#define LDST_SHARED  ((REGISTER_LDST_LOCAL_STORAGE_PTR << 2) | COMPONENT_Z)
#define LDST_SCRATCH ((REGISTER_LDST_PC_SP << 2) | COMPONENT_Z)

void mir_set_offset(compiler_context *ctx, midgard_instruction *ins,
                    nir_src *offset, unsigned seg);
void mir_set_ubo_offset(midgard_instruction *ins, nir_src *src, unsigned bias);

/* 'Intrinsic' move for aliasing */

static inline midgard_instruction
v_mov(unsigned src, unsigned dest)
{
   midgard_instruction ins = {
      .type = TAG_ALU_4,
      .mask = 0xF,
      .src = {~0, src, ~0, ~0},
      .src_types = {0, nir_type_uint32},
      .swizzle = SWIZZLE_IDENTITY,
      .dest = dest,
      .dest_type = nir_type_uint32,
      .op = midgard_alu_op_imov,
      .outmod = midgard_outmod_keeplo,
   };

   return ins;
}
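
/* Usage sketch, with hypothetical indices `value` and `alias`: alias one
 * index under another by emitting a copy. Note the move's source lives in
 * src[1], matching the initializer above:
 *
 *    midgard_instruction mov = v_mov(value, alias);
 *    emit_mir_instruction(ctx, &mov);
 */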

/* Broad types of register classes so we can handle special
 * registers */

#define REG_CLASS_WORK 0
#define REG_CLASS_LDST 1
#define REG_CLASS_TEXR 3
#define REG_CLASS_TEXW 4

/* Like a move, but to thread local storage! */

static inline midgard_instruction
v_load_store_scratch(unsigned srcdest, unsigned index, bool is_store,
                     unsigned mask)
{
   /* We index by 32-bit vec4s */
   unsigned byte = (index * 4 * 4);

   midgard_instruction ins = {
      .type = TAG_LOAD_STORE_4,
      .mask = mask,
      .dest_type = nir_type_uint32,
      .dest = ~0,
      .src = {~0, ~0, ~0, ~0},
      .swizzle = SWIZZLE_IDENTITY_4,
      .op = is_store ? midgard_op_st_128 : midgard_op_ld_128,
      .load_store =
         {
            /* For register spilling - to thread local storage */
            .arg_reg = REGISTER_LDST_LOCAL_STORAGE_PTR,
            .arg_comp = COMPONENT_X,
            .bitsize_toggle = true,
            .index_format = midgard_index_address_u32,
            .index_reg = REGISTER_LDST_ZERO,
         },

      /* If we spill an unspill, RA goes into an infinite loop */
      .no_spill = (1 << REG_CLASS_WORK),
   };

   ins.constants.u32[0] = byte;

   if (is_store) {
      ins.src[0] = srcdest;
      ins.src_types[0] = nir_type_uint32;

      /* Ensure we are tightly swizzled so liveness analysis is
       * correct */

      for (unsigned i = 0; i < 4; ++i) {
         if (!(mask & (1 << i)))
            ins.swizzle[0][i] = COMPONENT_X;
      }
   } else
      ins.dest = srcdest;

   return ins;
}
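
/* Usage sketch for a spill/fill pair, with hypothetical indices and a
 * hypothetical TLS slot `slot`: store the full vec4 to scratch, then read
 * it back later under a fresh index:
 *
 *    midgard_instruction st =
 *       v_load_store_scratch(spilled_index, slot, true, 0xF);
 *    midgard_instruction ld =
 *       v_load_store_scratch(filled_index, slot, false, 0xF);
 */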

static inline bool
mir_has_arg(const midgard_instruction *ins, unsigned arg)
{
   if (!ins)
      return false;

   mir_foreach_src(ins, i) {
      if (ins->src[i] == arg)
         return true;
   }

   return false;
}

/* Scheduling */

void midgard_schedule_program(compiler_context *ctx);

void mir_ra(compiler_context *ctx);
void mir_squeeze_index(compiler_context *ctx);
void mir_lower_special_reads(compiler_context *ctx);
void mir_liveness_ins_update(uint16_t *live, const midgard_instruction *ins,
                             unsigned max);
void mir_compute_liveness(compiler_context *ctx);
void mir_invalidate_liveness(compiler_context *ctx);
bool mir_is_live_after(compiler_context *ctx, const midgard_block *block,
                       const midgard_instruction *start, int src);

void mir_create_pipeline_registers(compiler_context *ctx);
void midgard_promote_uniforms(compiler_context *ctx);

void midgard_emit_derivatives(compiler_context *ctx,
                              nir_intrinsic_instr *instr);

void midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);

bool mir_op_computes_derivatives(gl_shader_stage stage, unsigned op);

void mir_analyze_helper_terminate(compiler_context *ctx);
void mir_analyze_helper_requirements(compiler_context *ctx);

/* Final emission */

void emit_binary_bundle(compiler_context *ctx, midgard_block *block,
                        midgard_bundle *bundle, struct util_dynarray *emission,
                        int next_tag);

bool nir_fuse_io_16(nir_shader *shader);

bool midgard_nir_lod_errata(nir_shader *shader);

unsigned midgard_get_first_tag_from_block(compiler_context *ctx,
                                          unsigned block_idx);

/* Optimizations */

bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
bool midgard_opt_prop(compiler_context *ctx);
bool midgard_opt_combine_projection(compiler_context *ctx,
                                    midgard_block *block);
bool midgard_opt_varying_projection(compiler_context *ctx,
                                    midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx,
                                     midgard_block *block);

#endif