/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#ifndef __BIFROST_COMPILER_H
#define __BIFROST_COMPILER_H

#include "bifrost.h"
#include "compiler/nir/nir.h"
#include "panfrost/util/pan_ir.h"

/* Bifrost opcodes are tricky -- the same op may exist on both FMA and
 * ADD with two completely different opcodes, and opcodes can vary in
 * length in some cases. Then we have different opcodes for int vs. float,
 * and sometimes even for different type sizes. Further, virtually every
 * op has a number of flags which depend on the op. In contrast to
 * Midgard, where there is a strict ALU/LDST/TEX division, and within ALU
 * a strict int/float division and that's it... here it's a *lot* more
 * involved. As such, we use something much higher level for our IR,
 * encoding "classes" of operations, letting the opcode details get
 * sorted out at emit time.
 *
 * Please keep this list alphabetized. Please use a dictionary if you
 * don't know how to do that.
 */

enum bi_class {
        BI_ADD,
        BI_ATEST,
        BI_BRANCH,
        BI_CMP,
        BI_BLEND,
        BI_BITWISE,
        BI_COMBINE,
        BI_CONVERT,
        BI_CSEL,
        BI_DISCARD,
        BI_FMA,
        BI_FMOV,
        BI_FREXP,
        BI_IMATH,
        BI_LOAD,
        BI_LOAD_UNIFORM,
        BI_LOAD_ATTR,
        BI_LOAD_VAR,
        BI_LOAD_VAR_ADDRESS,
        BI_LOAD_TILE,
        BI_MINMAX,
        BI_MOV,
        BI_REDUCE_FMA,
        BI_SELECT,
        BI_STORE,
        BI_STORE_VAR,
        BI_SPECIAL_ADD, /* _FAST on supported GPUs */
        BI_SPECIAL_FMA, /* _FAST on supported GPUs */
        BI_TABLE,
        BI_TEXS,
        BI_TEXC,
        BI_TEXC_DUAL,
        BI_ROUND,
        BI_IMUL,
        BI_ZS_EMIT,
        BI_NUM_CLASSES
};

/* Properties of a class... */
extern unsigned bi_class_props[BI_NUM_CLASSES];

/* abs/neg/outmod valid for a float op */
#define BI_MODS (1 << 0)

/* Accepts a bi_cond */
#define BI_CONDITIONAL (1 << 1)

/* Accepts a bifrost_roundmode */
#define BI_ROUNDMODE (1 << 2)

/* Can be scheduled to FMA */
#define BI_SCHED_FMA (1 << 3)

/* Can be scheduled to ADD */
#define BI_SCHED_ADD (1 << 4)

/* Most ALU ops can do either, actually */
#define BI_SCHED_ALL (BI_SCHED_FMA | BI_SCHED_ADD)

/* Along with setting BI_SCHED_ADD, eats up the entire cycle, so FMA must be
 * nopped out. Used for _FAST operations. */
#define BI_SCHED_SLOW (1 << 5)

/* Swizzling allowed for the 8/16-bit source */
#define BI_SWIZZLABLE (1 << 6)

/* For scheduling purposes this is a high latency instruction and must be at
 * the end of a clause. Implies ADD */
#define BI_SCHED_HI_LATENCY (1 << 7)

/* Intrinsic is vectorized and acts with `vector_channels` components */
#define BI_VECTOR (1 << 8)

/* Use a data register for src0/dest respectively, bypassing the usual
 * register accessor. */
#define BI_DATA_REG_SRC (1 << 9)
#define BI_DATA_REG_DEST (1 << 10)

/* Quirk: cannot encode multiple abs on FMA in fp16 mode */
#define BI_NO_ABS_ABS_FP16_FMA (1 << 11)
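
/* Sketch (hypothetical helper, not part of the upstream API): property
 * queries are simple mask tests against bi_class_props, e.g. whether a
 * class may be scheduled to the FMA unit: */

static inline bool
bi_class_can_fma_example(enum bi_class c)
{
        return bi_class_props[c] & BI_SCHED_FMA;
}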

/* It can't get any worse than csel4... can it? */
#define BIR_SRC_COUNT 4

/* BI_LD_VARY */
struct bi_load_vary {
        enum bifrost_interp_mode interp_mode;
        bool reuse;
        bool flat;
};

/* BI_BRANCH encodes the details of the branch itself as well as a pointer to
 * the target. We forward declare bi_block since this is mildly circular (not
 * strictly, but this ordering of the file makes more sense, I think).
 *
 * We define our own enum of conditions since the conditions in the hardware
 * are packed in crazy ways that would make manipulation unwieldy (their
 * meaning changes based on slot swapping, etc.), so we defer dealing with
 * that until emit time. Likewise, we expose NIR types instead of the crazy
 * branch types, although the restrictions do eventually apply, of course. */

struct bi_block;

/* Sync with gen-pack.py */
enum bi_cond {
        BI_COND_ALWAYS = 0,
        BI_COND_LT,
        BI_COND_LE,
        BI_COND_GE,
        BI_COND_GT,
        BI_COND_EQ,
        BI_COND_NE,
};

/* Segments, as synced with the ISA. Used as an immediate in LOAD/STORE
 * instructions for address calculation, and directly in SEG_ADD/SEG_SUB
 * instructions. */

enum bi_segment {
        /* No segment (use global addressing, offset from GPU VA 0x0) */
        BI_SEGMENT_NONE = 1,

        /* Within workgroup local memory (shared memory). Relative to
         * wls_base_pointer in the draw's thread storage descriptor */
        BI_SEGMENT_WLS = 2,

        /* Within one of the bound uniform buffers. Low 32-bits are the index
         * within the uniform buffer; high 32-bits are the index of the uniform
         * buffer itself. Relative to the uniform_array_pointer found in the
         * draw's uniform remap table at the index given by the high 32-bits. */
        BI_SEGMENT_UBO = 4,

        /* Within thread local storage (for spilling). Relative to
         * tls_base_pointer in the draw's thread storage descriptor */
        BI_SEGMENT_TLS = 7
};
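
/* For illustration (hypothetical helper): per the UBO comment above, a
 * 64-bit BI_SEGMENT_UBO address combines the buffer index and the index
 * within that buffer as follows. */

static inline uint64_t
bi_ubo_address_example(uint32_t ubo_index, uint32_t index_in_ubo)
{
        return ((uint64_t) ubo_index << 32) | index_in_ubo;
}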

/* Opcodes within a class */
enum bi_minmax_op {
        BI_MINMAX_MIN,
        BI_MINMAX_MAX
};

enum bi_bitwise_op {
        BI_BITWISE_AND,
        BI_BITWISE_OR,
        BI_BITWISE_XOR,
        BI_BITWISE_ARSHIFT,
};

enum bi_imath_op {
        BI_IMATH_ADD,
        BI_IMATH_SUB,
};

enum bi_imul_op {
        BI_IMUL_IMUL,
};

enum bi_table_op {
        /* fp32 log2() with low precision, suitable for GL or half_log2() in
         * CL. In the first argument, takes x. Letting u be such that x =
         * 2^{-m} u with m integer and 0.75 <= u < 1.5, returns
         * log2(u) / (u - 1). */

        BI_TABLE_LOG2_U_OVER_U_1_LOW,
};

enum bi_reduce_op {
        /* Takes two fp32 arguments and returns x + frexp(y). Used in
         * low-precision log2 argument reduction on newer models. */

        BI_REDUCE_ADD_FREXPM,
};
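
/* Tying the reductions together (a sketch inferred from the comments
 * above, not lifted from the implementation): with x = 2^{-m} u and the
 * table value t(u) = log2(u) / (u - 1),
 *
 *      log2(x) = (u - 1) * t(u) - m
 *
 * so the table supplies the mantissa term while ADD_FREXPM folds in the
 * exponent. */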

enum bi_frexp_op {
        BI_FREXPE_LOG,
};

enum bi_special_op {
        BI_SPECIAL_FRCP,
        BI_SPECIAL_FRSQ,

        /* fp32 exp2() with low precision, suitable for half_exp2() in CL or
         * exp2() in GL. In the first argument, it takes f2i_rte(x * 2^24). In
         * the second, it takes x itself. */
        BI_SPECIAL_EXP2_LOW,
        BI_SPECIAL_IABS,

        /* cubemap coordinate extraction helpers */
        BI_SPECIAL_CUBEFACE1,
        BI_SPECIAL_CUBEFACE2,
        BI_SPECIAL_CUBE_SSEL,
        BI_SPECIAL_CUBE_TSEL,
};

struct bi_bitwise {
        bool dest_invert;
        bool src1_invert;
        bool rshift; /* false for lshift */
};

struct bi_texture {
        /* Constant indices. Indirect indices would need to go in src[..] like
         * normal sources; we can reserve some sentinels there for that in the
         * future. */
        unsigned texture_index, sampler_index;

        /* Should the LOD be computed based on neighboring pixels? Only valid
         * in fragment shaders. */
        bool compute_lod;
};

typedef struct {
        struct list_head link; /* Must be first */
        enum bi_class type;

        /* Indices, see pan_ssa_index etc. Note zero is special cased
         * to "no argument" */
        unsigned dest;
        unsigned src[BIR_SRC_COUNT];

        /* 32-bit word offset for destination, added to the register number in
         * RA when lowering combines */
        unsigned dest_offset;

        /* If one of the sources has BIR_INDEX_CONSTANT */
        union {
                uint64_t u64;
                uint32_t u32;
                uint16_t u16[2];
                uint8_t u8[4];
        } constant;

        /* Floating-point modifiers, type/class permitting. If not
         * allowed for the type/class, these are ignored. */
        enum bifrost_outmod outmod;
        bool src_abs[BIR_SRC_COUNT];
        bool src_neg[BIR_SRC_COUNT];

        /* Round mode (requires BI_ROUNDMODE) */
        enum bifrost_roundmode roundmode;

        /* Destination type. Usually the type of the instruction
         * itself, but if sources and destination have different
         * types, the type of the destination wins (so f2i would be
         * int). Zero if there is no destination. Bitsize included */
        nir_alu_type dest_type;

        /* Source types if required by the class */
        nir_alu_type src_types[BIR_SRC_COUNT];

        /* register_format if applicable */
        nir_alu_type format;

        /* If the source type is 8-bit or 16-bit such that SIMD is possible,
         * and the class has BI_SWIZZLABLE, this is a swizzle in the usual
         * sense. On non-SIMD instructions, it can be used for component
         * selection, so we don't have to special case extraction. */
        uint8_t swizzle[BIR_SRC_COUNT][NIR_MAX_VEC_COMPONENTS];

        /* For VECTOR ops, how many channels are written? */
        unsigned vector_channels;

        /* For texture ops, the skip bit. Set if helper invocations can skip
         * the operation. That is, set if the result of this texture operation
         * is never used for cross-lane operation (including texture
         * coordinates and derivatives) as determined by data flow analysis
         * (like Midgard) */
        bool skip;

        /* The comparison op. BI_COND_ALWAYS may not be valid. */
        enum bi_cond cond;

        /* For memory ops, base address */
        enum bi_segment segment;

        /* Can we spill the value written here? Used to prevent
         * useless double fills */
        bool no_spill;

        /* A class-specific op from which the actual opcode can be derived
         * (along with the above information) */

        union {
                enum bi_minmax_op minmax;
                enum bi_bitwise_op bitwise;
                enum bi_special_op special;
                enum bi_reduce_op reduce;
                enum bi_table_op table;
                enum bi_frexp_op frexp;
                enum bi_imath_op imath;
                enum bi_imul_op imul;

                /* For FMA/ADD, should we add a biased exponent? */
                bool mscale;
        } op;

        /* Union for class-specific information */
        union {
                enum bifrost_minmax_mode minmax;
                struct bi_load_vary load_vary;
                struct bi_block *branch_target;

                /* For BLEND -- the location 0-7 */
                unsigned blend_location;

                struct bi_bitwise bitwise;
                struct bi_texture texture;
        };
} bi_instruction;

/* Represents the assignment of slots for a given bi_bundle */

typedef struct {
        /* Register to assign to each slot */
        unsigned slot[4];

        /* Read slots can be disabled */
        bool enabled[2];

        /* Configuration for slots 2/3 */
        struct bifrost_reg_ctrl_23 slot23;

        /* Fast-Access-Uniform RAM index */
        uint8_t fau_idx;

        /* Whether writes are actually for the last instruction */
        bool first_instruction;
} bi_registers;

/* A bi_bundle contains two paired instruction pointers. If a slot is unfilled,
 * leave it NULL; the emitter will fill in a nop. Instructions reference
 * registers via slots which are assigned per bundle.
 */

typedef struct {
        uint8_t fau_idx;
        bi_registers regs;
        bi_instruction *fma;
        bi_instruction *add;
} bi_bundle;

struct bi_block;

typedef struct {
        struct list_head link;

        /* Link back up for branch calculations */
        struct bi_block *block;

        /* A clause can have 8 instructions in the bundled FMA/ADD sense, so
         * there can be 8 bundles. */

        unsigned bundle_count;
        bi_bundle bundles[8];

        /* For scoreboarding -- the clause ID (this is not globally unique!)
         * and its dependencies in terms of other clauses, computed during
         * scheduling and used when emitting code. Dependencies expressed as a
         * bitfield matching the hardware, except shifted by a clause (the
         * shift back to the ISA's off-by-one encoding is worked out when
         * emitting clauses) */
        unsigned scoreboard_id;
        uint8_t dependencies;

        /* See the ISA header for a description */
        enum bifrost_flow flow_control;

        /* Can we prefetch the next clause? Usually it makes sense, except for
         * clauses ending in unconditional branches */
        bool next_clause_prefetch;

        /* Assigned data register */
        unsigned staging_register;

        /* Corresponds to the usual bit but shifted by a clause */
        bool staging_barrier;

        /* Constants read by this clause. ISA limit. Must satisfy:
         *
         *      constant_count + bundle_count <= 13
         *
         * Also implicitly constant_count <= bundle_count, since a bundle only
         * reads a single constant.
         */
        uint64_t constants[8];
        unsigned constant_count;

        /* Branches encode a constant offset relative to the program counter
         * with some magic flags. By convention, if there is a branch, its
         * constant will be last. Set this flag to indicate this is required.
         */
        bool branch_constant;

        /* What type of high latency instruction is here, basically */
        unsigned message_type;
} bi_clause;
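
/* Sketch of the budget above (hypothetical helper; bi_can_insert_bundle,
 * declared later in this header, is the real interface): adding one
 * bundle, optionally with a new constant, must stay within ISA limits. */

static inline bool
bi_clause_has_room_example(const bi_clause *clause, bool needs_constant)
{
        unsigned constants = clause->constant_count + (needs_constant ? 1 : 0);
        unsigned bundles = clause->bundle_count + 1;

        return bundles <= 8 && (constants + bundles) <= 13;
}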

typedef struct bi_block {
        pan_block base; /* must be first */

        /* If true, uses clauses; if false, uses instructions */
        bool scheduled;
        struct list_head clauses; /* list of bi_clause */
} bi_block;

typedef struct {
        nir_shader *nir;
        gl_shader_stage stage;
        struct list_head blocks; /* list of bi_block */
        struct panfrost_sysvals sysvals;
        uint32_t quirks;
        unsigned tls_size;

        /* Is internally a blend shader? Depends on stage == FRAGMENT */
        bool is_blend;

        /* Blend constants */
        float blend_constants[4];

        /* Blend return offsets */
        uint32_t blend_ret_offsets[8];

        /* Blend tile buffer conversion desc */
        uint64_t blend_desc;

        /* During NIR->BIR */
        nir_function_impl *impl;
        bi_block *current_block;
        bi_block *after_block;
        bi_block *break_block;
        bi_block *continue_block;
        bool emitted_atest;
        nir_alu_type *blend_types;

        /* For creating temporaries */
        unsigned temp_alloc;

        /* Analysis results */
        bool has_liveness;

        /* Stats for shader-db */
        unsigned instruction_count;
        unsigned loop_count;
        unsigned spills;
        unsigned fills;
} bi_context;

static inline bi_instruction *
bi_emit(bi_context *ctx, bi_instruction ins)
{
        bi_instruction *u = rzalloc(ctx, bi_instruction);
        memcpy(u, &ins, sizeof(ins));
        list_addtail(&u->link, &ctx->current_block->base.instructions);
        return u;
}

static inline bi_instruction *
bi_emit_before(bi_context *ctx, bi_instruction *tag, bi_instruction ins)
{
        bi_instruction *u = rzalloc(ctx, bi_instruction);
        memcpy(u, &ins, sizeof(ins));
        list_addtail(&u->link, &tag->link);
        return u;
}

static inline void
bi_remove_instruction(bi_instruction *ins)
{
        list_del(&ins->link);
}
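
/* Usage sketch (hypothetical helper, not part of the upstream API):
 * since bi_emit copies its argument, instructions can be built by value
 * on the stack. A 32-bit move assembled from the fields defined above: */

static inline bi_instruction *
bi_mov32_example(bi_context *ctx, unsigned dest, unsigned src)
{
        bi_instruction mov = {
                .type = BI_MOV,
                .dest = dest,
                .src = { src },
                .dest_type = nir_type_uint32,
                .src_types = { nir_type_uint32 },
        };

        return bi_emit(ctx, mov);
}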

/* If the high bits of an index are set, it names a special rather than an SSA
 * value or register, with the low bits indexing within the special class as
 * needed.
 *
 *  Fixed register: do not allocate a register, do not collect $200.
 *  Uniform: access a uniform register given by low bits.
 *  Constant: access the specified constant (specifies a bit offset / shift)
 *  Zero: special cased to avoid wasting a constant
 *  Passthrough: a bifrost_packed_src to passthrough T/T0/T1
 */

#define BIR_INDEX_REGISTER (1 << 31)
#define BIR_INDEX_UNIFORM  (1 << 30)
#define BIR_INDEX_CONSTANT (1 << 29)
#define BIR_INDEX_ZERO     (1 << 28)
#define BIR_INDEX_PASS     (1 << 27)
#define BIR_INDEX_BLEND    (1 << 26)

/* Please keep this synced so we can check src & BIR_SPECIAL */

#define BIR_SPECIAL        (BIR_INDEX_REGISTER | BIR_INDEX_UNIFORM | \
                            BIR_INDEX_CONSTANT | BIR_INDEX_ZERO | \
                            BIR_INDEX_PASS | BIR_INDEX_BLEND)
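
/* Example (hypothetical helper): classification is a single mask test,
 * and a constant source carries its bit offset into ->constant in the
 * low bits, e.g. ins->src[1] = BIR_INDEX_CONSTANT | 32 would select the
 * high word of a 64-bit immediate. */

static inline bool
bir_is_special_example(unsigned index)
{
        return (index & BIR_SPECIAL) != 0;
}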

static inline unsigned
bi_max_temp(bi_context *ctx)
{
        unsigned alloc = MAX2(ctx->impl->reg_alloc, ctx->impl->ssa_alloc);
        return ((alloc + 2 + ctx->temp_alloc) << 1);
}

static inline unsigned
bi_make_temp(bi_context *ctx)
{
        return (ctx->impl->ssa_alloc + 1 + ctx->temp_alloc++) << 1;
}

static inline unsigned
bi_make_temp_reg(bi_context *ctx)
{
        return ((ctx->impl->reg_alloc + ctx->temp_alloc++) << 1) | PAN_IS_REG;
}
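
/* Sketch of the index encoding assumed above: indices are shifted left
 * once, and the low bit (PAN_IS_REG, from pan_ir.h) tags NIR-register
 * temporaries as opposed to SSA ones. A hypothetical classifier: */

static inline bool
bi_is_reg_index_example(unsigned index)
{
        return index & PAN_IS_REG;
}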

/* Iterators for Bifrost IR */

#define bi_foreach_block(ctx, v) \
        list_for_each_entry(pan_block, v, &ctx->blocks, link)

#define bi_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(pan_block, v, from, &ctx->blocks, link)

#define bi_foreach_block_from_rev(ctx, from, v) \
        list_for_each_entry_from_rev(pan_block, v, from, &ctx->blocks, link)

#define bi_foreach_instr_in_block(block, v) \
        list_for_each_entry(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(bi_instruction, v, from, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(bi_instruction, v, from, &(block)->base.instructions, link)

#define bi_foreach_clause_in_block(block, v) \
        list_for_each_entry(bi_clause, v, &(block)->clauses, link)

#define bi_foreach_clause_in_block_safe(block, v) \
        list_for_each_entry_safe(bi_clause, v, &(block)->clauses, link)

#define bi_foreach_clause_in_block_from(block, v, from) \
        list_for_each_entry_from(bi_clause, v, from, &(block)->clauses, link)

#define bi_foreach_clause_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(bi_clause, v, from, &(block)->clauses, link)

#define bi_foreach_instr_global(ctx, v) \
        bi_foreach_block(ctx, v_block) \
                bi_foreach_instr_in_block((bi_block *) v_block, v)

#define bi_foreach_instr_global_safe(ctx, v) \
        bi_foreach_block(ctx, v_block) \
                bi_foreach_instr_in_block_safe((bi_block *) v_block, v)

/* Based on set_foreach, expanded with automatic type casts */

#define bi_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        bi_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
                v = (bi_block *) (_entry_##v ? _entry_##v->key : NULL);  \
                _entry_##v != NULL; \
                _entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
                v = (bi_block *) (_entry_##v ? _entry_##v->key : NULL))

#define bi_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
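
/* Example (hypothetical pass skeleton, not part of the upstream API):
 * the iterators compose, so a whole-program walk is just nested loops.
 * Counting instructions across all blocks: */

static inline unsigned
bi_count_instructions_example(bi_context *ctx)
{
        unsigned count = 0;

        bi_foreach_instr_global(ctx, ins)
                count++;

        return count;
}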

static inline bi_instruction *
bi_prev_op(bi_instruction *ins)
{
        return list_last_entry(&(ins->link), bi_instruction, link);
}

static inline bi_instruction *
bi_next_op(bi_instruction *ins)
{
        return list_first_entry(&(ins->link), bi_instruction, link);
}

static inline pan_block *
pan_next_block(pan_block *block)
{
        return list_first_entry(&(block->link), pan_block, link);
}

/* Special functions */

void bi_emit_fexp2(bi_context *ctx, nir_alu_instr *instr);
void bi_emit_flog2(bi_context *ctx, nir_alu_instr *instr);

/* BIR manipulation */

bool bi_has_outmod(bi_instruction *ins);
bool bi_has_source_mods(bi_instruction *ins);
bool bi_is_src_swizzled(bi_instruction *ins, unsigned s);
bool bi_has_arg(bi_instruction *ins, unsigned arg);
uint16_t bi_from_bytemask(uint16_t bytemask, unsigned bytes);
unsigned bi_get_component_count(bi_instruction *ins, signed s);
uint16_t bi_bytemask_of_read_components(bi_instruction *ins, unsigned node);
uint64_t bi_get_immediate(bi_instruction *ins, unsigned index);
bool bi_writes_component(bi_instruction *ins, unsigned comp);
unsigned bi_writemask(bi_instruction *ins);
void bi_rewrite_uses(bi_context *ctx, unsigned old, unsigned oldc, unsigned new, unsigned newc);

/* BIR passes */

void bi_lower_combine(bi_context *ctx, bi_block *block);
bool bi_opt_dead_code_eliminate(bi_context *ctx, bi_block *block);
void bi_schedule(bi_context *ctx);
void bi_register_allocate(bi_context *ctx);

bi_clause *bi_make_singleton(void *memctx, bi_instruction *ins,
                bi_block *block,
                unsigned scoreboard_id,
                unsigned dependencies,
                bool osrb);

/* Liveness */

void bi_compute_liveness(bi_context *ctx);
void bi_liveness_ins_update(uint16_t *live, bi_instruction *ins, unsigned max);
void bi_invalidate_liveness(bi_context *ctx);
bool bi_is_live_after(bi_context *ctx, bi_block *block, bi_instruction *start, int src);

/* Layout */

bool bi_can_insert_bundle(bi_clause *clause, bool constant);
unsigned bi_clause_quadwords(bi_clause *clause);
signed bi_block_offset(bi_context *ctx, bi_clause *start, bi_block *target);

/* Code emit */

void bi_pack(bi_context *ctx, struct util_dynarray *emission);

#endif