/*
 * Copyright © 2015-2018 Rob Clark <robclark@freedesktop.org>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
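/* Typical usage is setting a flag only when a condition holds, e.g. (the
 * condition name here is just illustrative; IR3_REG_HALF is from ir3.h):
 *
 *    flags |= COND(is_half, IR3_REG_HALF);
 */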

#define DBG(fmt, ...)                                                          \
   do {                                                                        \
      mesa_logd("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__);             \
   } while (0)

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
   struct ir3_compiler *compiler;
   const struct ir3_context_funcs *funcs;

   struct nir_shader *s;

   struct nir_instr *cur_instr; /* current instruction, just for debug */

   struct ir3 *ir;
   struct ir3_shader_variant *so;

   /* Tables of scalar inputs/outputs. Because of the way varying packing
    * works, we could have inputs w/ fractional location, which is a bit
    * awkward to deal with unless we keep track of the split scalar in/
    * out components.
    *
    * These *only* have inputs/outputs that are touched by load_*input and
    * store_output.
    */
   unsigned ninputs, noutputs;
   struct ir3_instruction **inputs;
   struct ir3_instruction **outputs;

   struct ir3_block *block; /* the current block */
   struct ir3_builder build;
   struct ir3_block *in_block; /* block created for shader inputs */

   nir_function_impl *impl;

   /* For fragment shaders, varyings are not actual shader inputs;
    * instead the hw passes an ij coord which is used with
    * bary.f.
    *
    * But NIR doesn't know that, it still declares varyings as
    * inputs. So we do all the input tracking normally and fix
    * things up after compile_instructions().
    */
   struct ir3_instruction *ij[IJ_COUNT];

   /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
   struct ir3_instruction *frag_face, *frag_coord;

   /* For vertex shaders, keep track of the system value sources */
   struct ir3_instruction *vertex_id, *basevertex, *instance_id, *base_instance,
      *draw_id, *view_index, *is_indexed_draw;

   /* For fragment shaders: */
   struct ir3_instruction *samp_id, *samp_mask_in;

   /* For geometry shaders: */
   struct ir3_instruction *primitive_id;
   struct ir3_instruction *gs_header;

   /* For tessellation shaders: */
   struct ir3_instruction *tcs_header;
   struct ir3_instruction *tess_coord;
   struct ir3_instruction *rel_patch_id;

   /* Compute shader inputs: */
   struct ir3_instruction *local_invocation_id, *work_group_id;

   struct ir3_instruction *frag_shading_rate;

   /* mapping from nir_register to defining instruction: */
   struct hash_table *def_ht;

   unsigned num_arrays;

   unsigned loop_depth;

   /* A common pattern for indirect addressing is to request the
    * same address register multiple times. To avoid generating
    * duplicate instruction sequences (which our backend does not
    * try to clean up, since that should be done at the NIR stage)
    * we cache the address value generated for a given src value.
    *
    * Note that we have to cache these per alignment, since the same
    * src used for an array of vec1 cannot also be used for an
    * array of vec4.
    */
   struct hash_table *addr0_ht[4];

   /* The same for a1.x. We only support immediate values for a1.x, as this
    * is the only use so far.
    */
   struct hash_table_u64 *addr1_ht;

   struct hash_table *sel_cond_conversions;
   struct hash_table *predicate_conversions;

   /* last dst array; for indirect writes we need to insert a var-store.
    */
   struct ir3_instruction **last_dst;
   unsigned last_dst_n;

   /* maps nir_block to ir3_block, mostly for the purposes of
    * figuring out the block's successors
    */
   struct hash_table *block_ht;

   /* maps nir_block at the top of a loop to ir3_block collecting continue
    * edges.
    */
   struct hash_table *continue_block_ht;

   /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
   unsigned astc_srgb;

   /* on a4xx, per-sampler per-component swizzles, for tg4: */
   uint16_t sampler_swizzles[16];

   unsigned samples; /* bitmask of x,y sample shifts */

   unsigned max_texture_index;

   unsigned prefetch_limit;

   bool has_relative_load_const_ir3;

   /* set if we encounter something we can't handle yet, so we
    * can bail cleanly and fall back to the TGSI compiler f/e
    */
   bool error;
};

struct ir3_context_funcs {
   void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx,
                                    nir_intrinsic_instr *intr,
                                    struct ir3_instruction **dst);
   void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_ssbo)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_load_image)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_store_image)(struct ir3_context *ctx,
                                      nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_image)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_image_size)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_load_global_ir3)(struct ir3_context *ctx,
                                          nir_intrinsic_instr *intr,
                                          struct ir3_instruction **dst);
   void (*emit_intrinsic_store_global_ir3)(struct ir3_context *ctx,
                                           nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_global)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
};

extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;

struct ir3_context *ir3_context_init(struct ir3_compiler *compiler,
                                     struct ir3_shader *shader,
                                     struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);

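/* Make 'block' the current block and position the instruction builder
 * before the block's terminator, so subsequently emitted instructions
 * land at the end of that block.
 */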
static inline void
ir3_context_set_block(struct ir3_context *ctx, struct ir3_block *block)
{
   ctx->block = block;
   ctx->build = ir3_builder_at(ir3_before_terminator(block));
}

struct ir3_instruction **ir3_get_dst_ssa(struct ir3_context *ctx,
                                         nir_def *dst, unsigned n);
struct ir3_instruction **ir3_get_def(struct ir3_context *ctx, nir_def *def,
                                     unsigned n);
struct ir3_instruction *const *ir3_get_src_maybe_shared(struct ir3_context *ctx,
                                                        nir_src *src);
struct ir3_instruction *const *ir3_get_src_shared(struct ir3_context *ctx,
                                                  nir_src *src, bool shared);

static inline struct ir3_instruction *const *
ir3_get_src(struct ir3_context *ctx, nir_src *src)
{
   return ir3_get_src_shared(ctx, src, false);
}

void ir3_put_def(struct ir3_context *ctx, nir_def *def);
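/* ir3_create_collect gathers an array of scalar ir3 values into a single
 * vector (collect) instruction; ir3_split_dest does the inverse, splitting
 * the components of a vector-producing instruction back out into scalars.
 */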
struct ir3_instruction *ir3_create_collect(struct ir3_builder *build,
                                           struct ir3_instruction *const *arr,
                                           unsigned arrsz);
void ir3_split_dest(struct ir3_builder *build, struct ir3_instruction **dst,
                    struct ir3_instruction *src, unsigned base, unsigned n);
void ir3_handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc);
void ir3_handle_nonuniform(struct ir3_instruction *instr,
                           nir_intrinsic_instr *intrin);
void emit_intrinsic_image_size_tex(struct ir3_context *ctx,
                                   nir_intrinsic_instr *intr,
                                   struct ir3_instruction **dst);

#define ir3_collect(build, ...)                                                \
   ({                                                                          \
      struct ir3_instruction *__arr[] = {__VA_ARGS__};                         \
      ir3_create_collect(build, __arr, ARRAY_SIZE(__arr));                     \
   })
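/* Convenience wrapper so callers can pass the components directly, e.g.
 * (variable names here are just illustrative):
 *
 *    struct ir3_instruction *xy = ir3_collect(&ctx->build, x, y);
 */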

NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format,
                                ...);

#define compile_assert(ctx, cond)                                              \
   do {                                                                        \
      if (!(cond))                                                             \
         ir3_context_error((ctx), "failed assert: " #cond "\n");               \
   } while (0)

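/* Helpers to move a value into the a0.x/a1.x address registers (for
 * indirect addressing) or into a predicate register (for branches),
 * cached via the addr0_ht/addr1_ht/predicate_conversions tables above.
 */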
struct ir3_instruction *ir3_get_addr0(struct ir3_context *ctx,
                                      struct ir3_instruction *src, int align);
struct ir3_instruction *ir3_get_addr1(struct ir3_context *ctx,
                                      unsigned const_val);
struct ir3_instruction *ir3_get_predicate(struct ir3_context *ctx,
                                          struct ir3_instruction *src);

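/* Helpers for NIR registers that are lowered to ir3 arrays: declare the
 * array for a decl_reg intrinsic, look it up by its nir_def, and emit
 * (optionally indirect, via 'address') per-element loads and stores.
 */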
void ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl);
struct ir3_array *ir3_get_array(struct ir3_context *ctx, nir_def *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
                                              struct ir3_array *arr, int n,
                                              struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr,
                            int n, struct ir3_instruction *src,
                            struct ir3_instruction *address);
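/* Split an intrinsic's offset source into a register offset plus an
 * immediate offset small enough to fit in imm_offset_bits bits of the
 * instruction encoding.
 */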
void ir3_lower_imm_offset(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                          nir_src *offset_src, unsigned imm_offset_bits,
                          struct ir3_instruction **offset,
                          unsigned *imm_offset);

static inline type_t
utype_for_size(unsigned bit_size)
{
   switch (bit_size) {
   case 32:
      return TYPE_U32;
   case 16:
      return TYPE_U16;
   case 8:
      return TYPE_U8;
   default:
      unreachable("bad bitsize");
      return ~0;
   }
}

static inline type_t
utype_src(nir_src src)
{
   return utype_for_size(nir_src_bit_size(src));
}

static inline type_t
utype_def(nir_def *def)
{
   return utype_for_size(def->bit_size);
}

/**
 * Convert nir bitsize to ir3 bitsize, handling the special case of 1b bools
 * which can be 16b or 32b depending on gen.
 */
static inline unsigned
ir3_bitsize(struct ir3_context *ctx, unsigned nir_bitsize)
{
   if (nir_bitsize == 1)
      return type_size(ctx->compiler->bool_type);
   return nir_bitsize;
}

#endif /* IR3_CONTEXT_H_ */