• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright © 2015-2018 Rob Clark <robclark@freedesktop.org>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_CONTEXT_H_
#define IR3_CONTEXT_H_

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)

/* Debug-log helper: prefixes the message with the emitting function and
 * line so compiler-debug output can be traced back to its origin.
 * (##__VA_ARGS__ is the GCC/Clang extension that swallows the trailing
 * comma when no varargs are passed.)
 */
#define DBG(fmt, ...)                                                          \
   do {                                                                        \
      mesa_logd("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__);             \
   } while (0)
23 
/**
 * The context for compilation of a single shader.
 */
struct ir3_context {
   /* The backend compiler this compile is running against: */
   struct ir3_compiler *compiler;
   /* Per-generation callbacks (see struct ir3_context_funcs): */
   const struct ir3_context_funcs *funcs;

   /* The NIR shader being translated: */
   struct nir_shader *s;

   struct nir_instr *cur_instr; /* current instruction, just for debug */

   /* The ir3 being built, and the shader variant it is being built for: */
   struct ir3 *ir;
   struct ir3_shader_variant *so;

   /* Tables of scalar inputs/outputs.  Because of the way varying packing
    * works, we could have inputs w/ fractional location, which is a bit
    * awkward to deal with unless we keep track of the split scalar in/
    * out components.
    *
    * These *only* have inputs/outputs that are touched by load_*input and
    * store_output.
    */
   unsigned ninputs, noutputs;
   struct ir3_instruction **inputs;
   struct ir3_instruction **outputs;

   struct ir3_block *block;    /* the current block */
   struct ir3_builder build;   /* builder positioned in the current block
                                * (see ir3_context_set_block())
                                */
   struct ir3_block *in_block; /* block created for shader inputs */

   /* NIR function impl being compiled: */
   nir_function_impl *impl;

   /* For fragment shaders, varyings are not actual shader inputs,
    * instead the hw passes a ij coord which is used with
    * bary.f.
    *
    * But NIR doesn't know that, it still declares varyings as
    * inputs.  So we do all the input tracking normally and fix
    * things up after compile_instructions()
    */
   struct ir3_instruction *ij[IJ_COUNT];

   /* for fragment shaders, for gl_FrontFacing and gl_FragCoord: */
   struct ir3_instruction *frag_face, *frag_coord;

   /* For vertex shaders, keep track of the system values sources */
   struct ir3_instruction *vertex_id, *basevertex, *instance_id, *base_instance,
      *draw_id, *view_index, *is_indexed_draw;

   /* For fragment shaders: */
   struct ir3_instruction *samp_id, *samp_mask_in;

   /* For geometry shaders: */
   struct ir3_instruction *primitive_id;
   struct ir3_instruction *gs_header;

   /* For tessellation shaders: */
   struct ir3_instruction *tcs_header;
   struct ir3_instruction *tess_coord;
   struct ir3_instruction *rel_patch_id;

   /* Compute shader inputs: */
   struct ir3_instruction *local_invocation_id, *work_group_id;

   /* For fragment shaders, gl_ShadingRate (presumably — see system-value
    * handling in the compiler f/e):
    */
   struct ir3_instruction *frag_shading_rate;

   /* mapping from nir_register to defining instruction: */
   struct hash_table *def_ht;

   /* Count of declared register arrays (see ir3_declare_array()): */
   unsigned num_arrays;

   /* Current loop nesting depth while emitting control flow (presumably;
    * confirm against where it is incremented/decremented):
    */
   unsigned loop_depth;

   /* a common pattern for indirect addressing is to request the
    * same address register multiple times.  To avoid generating
    * duplicate instruction sequences (which our backend does not
    * try to clean up, since that should be done as the NIR stage)
    * we cache the address value generated for a given src value:
    *
    * Note that we have to cache these per alignment, since same
    * src used for an array of vec1 cannot be also used for an
    * array of vec4.
    */
   struct hash_table *addr0_ht[4];

   /* The same for a1.x. We only support immediate values for a1.x, as this
    * is the only use so far.
    */
   struct hash_table_u64 *addr1_ht;

   /* Caches to avoid emitting duplicate conversion sequences (see the
    * corresponding lookups in the .c file):
    */
   struct hash_table *sel_cond_conversions;
   struct hash_table *predicate_conversions;

   /* last dst array, for indirect we need to insert a var-store.
    */
   struct ir3_instruction **last_dst;
   unsigned last_dst_n;

   /* maps nir_block to ir3_block, mostly for the purposes of
    * figuring out the blocks successors
    */
   struct hash_table *block_ht;

   /* maps nir_block at the top of a loop to ir3_block collecting continue
    * edges.
    */
   struct hash_table *continue_block_ht;

   /* on a4xx, bitmask of samplers which need astc+srgb workaround: */
   unsigned astc_srgb;

   /* on a4xx, per-sampler per-component swizzles, for tg4: */
   uint16_t sampler_swizzles[16];

   unsigned samples; /* bitmask of x,y sample shifts */

   unsigned max_texture_index;

   /* Limit on tex-prefetch instructions (presumably a per-gen cap —
    * confirm where this is set):
    */
   unsigned prefetch_limit;

   /* Whether the relative-addressed load_const_ir3 path is available
    * (presumably gen-dependent — confirm at the point of use):
    */
   bool has_relative_load_const_ir3;

   /* set if we encounter something we can't handle yet, so we
    * can bail cleanly and fallback to TGSI compiler f/e
    */
   bool error;
};
151 
/**
 * Per-GPU-generation callbacks for emitting memory-access intrinsics
 * (SSBO/UAV/image/global loads, stores, and atomics), whose encodings
 * differ between generations — see ir3_a4xx_funcs / ir3_a6xx_funcs.
 *
 * The load hooks write one ir3_instruction per component into *dst;
 * the atomic hooks return the instruction producing the atomic result.
 */
struct ir3_context_funcs {
   void (*emit_intrinsic_load_ssbo)(struct ir3_context *ctx,
                                    nir_intrinsic_instr *intr,
                                    struct ir3_instruction **dst);
   void (*emit_intrinsic_load_uav)(struct ir3_context *ctx,
                                   nir_intrinsic_instr *intr,
                                   struct ir3_instruction **dst);
   void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_ssbo)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_load_image)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_store_image)(struct ir3_context *ctx,
                                      nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_image)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
   void (*emit_intrinsic_image_size)(struct ir3_context *ctx,
                                     nir_intrinsic_instr *intr,
                                     struct ir3_instruction **dst);
   void (*emit_intrinsic_load_global_ir3)(struct ir3_context *ctx,
                                          nir_intrinsic_instr *intr,
                                          struct ir3_instruction **dst);
   void (*emit_intrinsic_store_global_ir3)(struct ir3_context *ctx,
                                           nir_intrinsic_instr *intr);
   struct ir3_instruction *(*emit_intrinsic_atomic_global)(
      struct ir3_context *ctx, nir_intrinsic_instr *intr);
};
181 
/* Per-generation implementations of the hooks above: */
extern const struct ir3_context_funcs ir3_a4xx_funcs;
extern const struct ir3_context_funcs ir3_a6xx_funcs;

/* Create/destroy the compile context for one variant of a shader: */
struct ir3_context *ir3_context_init(struct ir3_compiler *compiler,
                                     struct ir3_shader *shader,
                                     struct ir3_shader_variant *so);
void ir3_context_free(struct ir3_context *ctx);
189 
190 static inline void
ir3_context_set_block(struct ir3_context * ctx,struct ir3_block * block)191 ir3_context_set_block(struct ir3_context *ctx, struct ir3_block *block)
192 {
193    ctx->block = block;
194    ctx->build = ir3_builder_at(ir3_before_terminator(block));
195 }
196 
/* Get the table of 'n' per-component instructions defining a NIR SSA
 * def (allocating/registering it as needed — see the .c file):
 */
struct ir3_instruction **ir3_get_dst_ssa(struct ir3_context *ctx,
                                         nir_def *dst, unsigned n);
struct ir3_instruction **ir3_get_def(struct ir3_context *ctx, nir_def *def,
                                     unsigned n);
/* Resolve a NIR src to its per-component ir3 instructions; the 'shared'
 * flag selects the shared-register variant (exact semantics in the .c
 * file):
 */
struct ir3_instruction *const *ir3_get_src_maybe_shared(struct ir3_context *ctx,
                                                        nir_src *src);
struct ir3_instruction *const *ir3_get_src_shared(struct ir3_context *ctx,
                                                  nir_src *src, bool shared);
205 
206 static inline struct ir3_instruction *const *
ir3_get_src(struct ir3_context * ctx,nir_src * src)207 ir3_get_src(struct ir3_context *ctx, nir_src *src)
208 {
209    return ir3_get_src_shared(ctx, src, false);
210 }
211 
/* Finish the def started by ir3_get_def() (see the .c file): */
void ir3_put_def(struct ir3_context *ctx, nir_def *def);
/* Gather 'arrsz' scalar values into a single collect (vector) value: */
struct ir3_instruction *ir3_create_collect(struct ir3_builder *build,
                                           struct ir3_instruction *const *arr,
                                           unsigned arrsz);
/* Split 'n' scalar components, starting at 'base', out of vector 'src'
 * into dst[0..n):
 */
void ir3_split_dest(struct ir3_builder *build, struct ir3_instruction **dst,
                    struct ir3_instruction *src, unsigned base, unsigned n);
/* Fixups applied to cat6 (memory) instructions for bindless resources
 * and non-uniform access, respectively:
 */
void ir3_handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc);
void ir3_handle_nonuniform(struct ir3_instruction *instr,
                           nir_intrinsic_instr *intrin);
void emit_intrinsic_image_size_tex(struct ir3_context *ctx,
                                   nir_intrinsic_instr *intr,
                                   struct ir3_instruction **dst);
224 
/* Convenience wrapper around ir3_create_collect() for a compile-time
 * list of srcs.  Uses a statement expression (GCC/Clang extension) so
 * it can appear wherever an expression is expected.
 */
#define ir3_collect(build, ...)                                                \
   ({                                                                          \
      struct ir3_instruction *__arr[] = {__VA_ARGS__};                         \
      ir3_create_collect(build, __arr, ARRAY_SIZE(__arr));                     \
   })
230 
/* Report a fatal compile error; does not return: */
NORETURN void ir3_context_error(struct ir3_context *ctx, const char *format,
                                ...);

/* Assertion that aborts the current compile via ir3_context_error()
 * (rather than crashing the whole process):
 */
#define compile_assert(ctx, cond)                                              \
   do {                                                                        \
      if (!(cond))                                                             \
         ir3_context_error((ctx), "failed assert: " #cond "\n");               \
   } while (0)
239 
/* Address-register helpers; results are cached in ctx->addr0_ht[] /
 * ctx->addr1_ht (see the comments on those fields):
 */
struct ir3_instruction *ir3_get_addr0(struct ir3_context *ctx,
                                      struct ir3_instruction *src, int align);
struct ir3_instruction *ir3_get_addr1(struct ir3_context *ctx,
                                      unsigned const_val);
/* Get a predicate-register value for 'src' (cached via
 * ctx->predicate_conversions):
 */
struct ir3_instruction *ir3_get_predicate(struct ir3_context *ctx,
                                          struct ir3_instruction *src);

/* Register-array (indirectly addressable temporary) handling: */
void ir3_declare_array(struct ir3_context *ctx, nir_intrinsic_instr *decl);
struct ir3_array *ir3_get_array(struct ir3_context *ctx, nir_def *reg);
struct ir3_instruction *ir3_create_array_load(struct ir3_context *ctx,
                                              struct ir3_array *arr, int n,
                                              struct ir3_instruction *address);
void ir3_create_array_store(struct ir3_context *ctx, struct ir3_array *arr,
                            int n, struct ir3_instruction *src,
                            struct ir3_instruction *address);
/* Split an intrinsic's offset src into an instruction part (*offset) and
 * an immediate part (*imm_offset) fitting in 'imm_offset_bits' bits
 * (presumably — confirm against the implementation):
 */
void ir3_lower_imm_offset(struct ir3_context *ctx, nir_intrinsic_instr *intr,
                          nir_src *offset_src, unsigned imm_offset_bits,
                          struct ir3_instruction **offset,
                          unsigned *imm_offset);
259 
260 static inline type_t
utype_for_size(unsigned bit_size)261 utype_for_size(unsigned bit_size)
262 {
263    switch (bit_size) {
264    case 32:
265       return TYPE_U32;
266    case 16:
267       return TYPE_U16;
268    case 8:
269       return TYPE_U8;
270    default:
271       unreachable("bad bitsize");
272       return ~0;
273    }
274 }
275 
276 static inline type_t
utype_src(nir_src src)277 utype_src(nir_src src)
278 {
279    return utype_for_size(nir_src_bit_size(src));
280 }
281 
282 static inline type_t
utype_def(nir_def * def)283 utype_def(nir_def *def)
284 {
285    return utype_for_size(def->bit_size);
286 }
287 
288 /**
289  * Convert nir bitsize to ir3 bitsize, handling the special case of 1b bools
290  * which can be 16b or 32b depending on gen.
291  */
292 static inline unsigned
ir3_bitsize(struct ir3_context * ctx,unsigned nir_bitsize)293 ir3_bitsize(struct ir3_context *ctx, unsigned nir_bitsize)
294 {
295    if (nir_bitsize == 1)
296       return type_size(ctx->compiler->bool_type);
297    return nir_bitsize;
298 }
299 
#endif /* IR3_CONTEXT_H_ */