/*
 * Copyright (C) 2015-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
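
/* Illustrative use (operand names are hypothetical): OR a flag into a
 * bitmask only when a condition holds, e.g.
 *
 *    flags |= COND(use_half, IR3_REG_HALF);
 *
 * which evaluates to IR3_REG_HALF when use_half is true and to 0 otherwise.
 */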

#define DBG(fmt, ...) \
   do { \
      mesa_logd("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__); \
   } while (0)
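
/* Illustrative use: DBG("unhandled bit_size: %u", bit_size) logs the message
 * through mesa_logd(), prefixed with the calling function name and line
 * number.
 */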

/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
   struct ir3_compiler *compiler;
   const struct ir3_context_funcs *funcs;

   struct nir_shader *s;

   struct nir_instr *cur_instr; /* current instruction, just for debug */

   struct ir3 *ir;
   struct ir3_shader_variant *so;

   /* Tables of scalar inputs/outputs. Because of the way varying packing
    * works, we could have inputs w/ fractional location, which is a bit
    * awkward to deal with unless we keep track of the split scalar in/
    * out components.
    *
    * These *only* have inputs/outputs that are touched by load_*input and
    * store_output.
    */
   unsigned ninputs, noutputs;
   struct ir3_instruction **inputs;
   struct ir3_instruction **outputs;

   struct ir3_block *block;    /* the current block */
   struct ir3_block *in_block; /* block created for shader inputs */

   nir_function_impl *impl;

   /* For fragment shaders, varyings are not actual shader inputs,
    * instead the hw passes an ij coord which is used with
    * bary.f.
    *
    * But NIR doesn't know that, it still declares varyings as
    * inputs. So we do all the input tracking normally and fix
    * things up after compile_instructions().
    */
   struct ir3_instruction *ij[IJ_COUNT];

   /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
   struct ir3_instruction *frag_face, *frag_coord;

   /* For vertex shaders, keep track of the system value sources: */
   struct ir3_instruction *vertex_id, *basevertex, *instance_id, *base_instance,
      *draw_id, *view_index, *is_indexed_draw;

   /* For fragment shaders: */
   struct ir3_instruction *samp_id, *samp_mask_in;

   /* For geometry shaders: */
   struct ir3_instruction *primitive_id;
   struct ir3_instruction *gs_header;

   /* For tessellation shaders: */
   struct ir3_instruction *tcs_header;
   struct ir3_instruction *tess_coord;
   struct ir3_instruction *rel_patch_id;

   /* Compute shader inputs: */
   struct ir3_instruction *local_invocation_id, *work_group_id;

   /* mapping from nir_register to defining instruction: */
   struct hash_table *def_ht;

   unsigned num_arrays;

   unsigned loop_id;
   unsigned loop_depth;

   /* a common pattern for indirect addressing is to request the
    * same address register multiple times. To avoid generating
    * duplicate instruction sequences (which our backend does not
    * try to clean up, since that should be done at the NIR stage)
    * we cache the address value generated for a given src value:
    *
    * Note that we have to cache these per alignment, since the same
    * src used for an array of vec1 cannot also be used for an
    * array of vec4.
    */
   struct hash_table *addr0_ht[4];
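   /* E.g. (illustrative) ir3_get_addr0(ctx, src, 1) and ir3_get_addr0(ctx,
    * src, 4) for the same src must produce distinct address values, since
    * the generated address depends on the element alignment as well as on
    * the src; hence the separate table per alignment above.
    */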

   /* The same for a1.x. We only support immediate values for a1.x, as this
    * is the only use so far.
    */
   struct hash_table_u64 *addr1_ht;

   struct hash_table *sel_cond_conversions;

   /* last dst array, for indirect we need to insert a var-store.
    */
   struct ir3_instruction **last_dst;
   unsigned last_dst_n;

   /* maps nir_block to ir3_block, mostly for the purposes of
    * figuring out the block's successors
    */
   struct hash_table *block_ht;

   /* maps nir_block at the top of a loop to ir3_block collecting continue
    * edges.
    */
   struct hash_table *continue_block_ht;

   /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
   unsigned astc_srgb;

   /* on a4xx, per-sampler per-component swizzles, for tg4: */
   uint16_t sampler_swizzles[16];

   unsigned samples; /* bitmask of x,y sample shifts */

   unsigned max_texture_index;

   unsigned prefetch_limit;

   /* set if we encounter something we can't handle yet, so we
    * can bail cleanly and fall back to the TGSI compiler f/e
    */
   bool error;
};

struct ir3_context_funcs {
   void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx,
                                    nir_intrinsic_instr *intr,
                                    struct ir3_instruction **dst);
   void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_ssbo)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_load_image)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_store_image)(struct ir3_context *ctx,
                                      nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_image)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_image_size)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_load_global_ir3)(struct ir3_context *ctx,
                                          nir_intrinsic_instr *intr,
                                          struct ir3_instruction **dst);
   void (*emit_intrinsic_store_global_ir3)(struct ir3_context *ctx,
                                           nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_global)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
};

extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;
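
/* Presumably the generation-appropriate table is installed in ctx->funcs when
 * the context is set up, and backend-specific intrinsics are dispatched
 * through it, e.g. (illustrative):
 *
 *    ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
 */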

struct ir3_context *ir3_context_init(struct ir3_compiler *compiler,
                                     struct ir3_shader *shader,
                                     struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);

struct ir3_instruction **ir3_get_dst_ssa(struct ir3_context *ctx,
                                         nir_def *dst, unsigned n);
struct ir3_instruction **ir3_get_def(struct ir3_context *ctx, nir_def *def,
                                     unsigned n);
struct ir3_instruction *const *ir3_get_src(struct ir3_context *ctx,
                                           nir_src *src);
void ir3_put_def(struct ir3_context *ctx, nir_def *def);
struct ir3_instruction *ir3_create_collect(struct ir3_block *block,
                                           struct ir3_instruction *const *arr,
                                           unsigned arrsz);
void ir3_split_dest(struct ir3_block *block, struct ir3_instruction **dst,
                    struct ir3_instruction *src, unsigned base, unsigned n);
void ir3_handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc);
void ir3_handle_nonuniform(struct ir3_instruction *instr,
                           nir_intrinsic_instr *intrin);
void emit_intrinsic_image_size_tex(struct ir3_context *ctx,
                                   nir_intrinsic_instr *intr,
                                   struct ir3_instruction **dst);

#define ir3_collect(block, ...) \
   ({ \
      struct ir3_instruction *__arr[] = {__VA_ARGS__}; \
      ir3_create_collect(block, __arr, ARRAY_SIZE(__arr)); \
   })
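
/* Usage sketch (operands are hypothetical): gather scalar components into a
 * single vecN collect instruction, e.g.
 *
 *    struct ir3_instruction *xy = ir3_collect(block, coord_x, coord_y);
 *
 * ir3_split_dest() above is the inverse, fanning a vector result back out
 * into per-component instructions.
 */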

NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format,
                                ...);

#define compile_assert(ctx, cond) \
   do { \
      if (!(cond)) \
         ir3_context_error((ctx), "failed assert: " #cond "\n"); \
   } while (0)
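
/* Example (hypothetical condition): abort the compile cleanly via
 * ir3_context_error() instead of crashing when an assumption about the
 * incoming NIR does not hold:
 *
 *    compile_assert(ctx, bit_size == 32);
 */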

struct ir3_instruction *ir3_get_addr0(struct ir3_context *ctx,
                                      struct ir3_instruction *src, int align);
struct ir3_instruction *ir3_get_addr1(struct ir3_context *ctx,
                                      unsigned const_val);
struct ir3_instruction *ir3_get_predicate(struct ir3_context *ctx,
                                          struct ir3_instruction *src);

void ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl);
struct ir3_array *ir3_get_array(struct ir3_context *ctx, nir_def *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
                                              struct ir3_array *arr, int n,
                                              struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr,
                            int n, struct ir3_instruction *src,
                            struct ir3_instruction *address);

static inline type_t
utype_for_size(unsigned bit_size)
{
   switch (bit_size) {
   case 32:
      return TYPE_U32;
   case 16:
      return TYPE_U16;
   case 8:
      return TYPE_U8;
   default:
      unreachable("bad bitsize");
      return ~0;
   }
}

static inline type_t
utype_src(nir_src src)
{
   return utype_for_size(nir_src_bit_size(src));
}

static inline type_t
utype_def(nir_def *def)
{
   return utype_for_size(def->bit_size);
}

/**
 * Convert nir bitsize to ir3 bitsize, handling the special case of 1b bools
 * which can be 16b or 32b depending on gen.
 */
static inline unsigned
ir3_bitsize(struct ir3_context *ctx, unsigned nir_bitsize)
{
   if (nir_bitsize == 1)
      return type_size(ctx->compiler->bool_type);
   return nir_bitsize;
}
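
/* E.g. ir3_bitsize(ctx, 1) above resolves to 16 or 32 depending on
 * ctx->compiler->bool_type; all other NIR bit sizes pass through unchanged.
 */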

#endif /* IR3_CONTEXT_H_ */