/*
 * Copyright © 2013 Rob Clark <robclark@freedesktop.org>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef IR3_COMPILER_H_
#define IR3_COMPILER_H_

#include "compiler/nir/nir.h"
#include "util/disk_cache.h"
#include "util/log.h"
#include "util/perf/cpu_trace.h"

#include "freedreno_dev_info.h"

#include "ir3.h"

BEGINC;

struct ir3_ra_reg_set;
struct ir3_shader;

struct ir3_compiler_options {
   /* If true, promote UBOs (except for constant data) to constants using ldc.k
    * in the preamble. The driver should ignore everything in ubo_state except
    * for the constant data UBO, which is excluded because the command pushing
    * constants for it can be pre-baked when compiling the shader.
    */
   bool push_ubo_with_preamble;

   /* If true, disable the shader cache. The driver is then responsible for
    * caching.
    */
   bool disable_cache;

   /* If >= 0, this specifies the bindless descriptor set + descriptor to use
    * for txf_ms_fb
    */
   int bindless_fb_read_descriptor;
   int bindless_fb_read_slot;

   /* True if 16-bit descriptors are available. */
   bool storage_16bit;
   /* True if 8-bit descriptors are available. */
   bool storage_8bit;

   /* If base_vertex should be lowered in nir */
   bool lower_base_vertex;

   bool shared_push_consts;

   /* "dual_color_blend_by_location" workaround is enabled: */
   bool dual_color_blend_by_location;
};

struct ir3_compiler {
   struct fd_device *dev;
   const struct fd_dev_id *dev_id;
   uint8_t gen;
   uint32_t shader_count;

   struct disk_cache *disk_cache;

   struct nir_shader_compiler_options nir_options;

   /*
    * Configuration options for things handled differently by turnip vs
    * gallium
    */
   struct ir3_compiler_options options;

   /*
    * Configuration options for things that are handled differently on
    * different generations:
    */

   bool is_64bit;

   /* a4xx (and later) drops SP_FS_FLAT_SHAD_MODE_REG_* for flat-interpolate
    * so we need to use ldlv.u32 to load the varying directly:
    */
   bool flat_bypass;

   /* on a3xx, we need to add one to # of array levels:
    */
   bool levels_add_one;

   /* on a3xx, we need to scale up integer coords for isaml based
    * on LoD:
    */
   bool unminify_coords;

   /* on a3xx do txf_ms w/ isaml and scaled coords: */
   bool txf_ms_with_isaml;

   /* on a4xx, for array textures we need to add 0.5 to the array
    * index coordinate:
    */
   bool array_index_add_half;

   /* on a6xx, rewrite samgp to sequence of samgq0-3 in vertex shaders:
    */
   bool samgq_workaround;

   /* on a650, vertex shader <-> tess control io uses LDL/STL */
   bool tess_use_shared;

   /* The maximum number of constants, in vec4's, across the entire graphics
    * pipeline.
    */
   uint16_t max_const_pipeline;

   /* The maximum number of constants, in vec4's, for VS+HS+DS+GS. */
   uint16_t max_const_geom;

   /* The maximum number of constants, in vec4's, for FS. */
   uint16_t max_const_frag;

   /* A "safe" max constlen that can be applied to each shader in the
    * pipeline which we guarantee will never exceed any combined limits.
    */
   uint16_t max_const_safe;

   /* The maximum number of constants, in vec4's, for compute shaders. */
   uint16_t max_const_compute;

   /* The alignment, in instructions, of a shader's base address and length
    * (instrlen is the instruction count divided by this).
    */
   uint32_t instr_align;

   /* on a3xx, the unit of indirect const load is higher than later gens (in
    * vec4 units):
    */
   uint32_t const_upload_unit;

   /* The base number of threads per wave. Some stages may be able to double
    * this.
    */
   uint32_t threadsize_base;

   /* On at least a6xx, waves are always launched in pairs. In calculations
    * about occupancy, we pretend that each wave pair is actually one wave,
    * which simplifies many of the calculations, but means we have to
    * multiply threadsize_base by this number.
    */
   uint32_t wave_granularity;

   /* The maximum number of simultaneous waves per core. */
   uint32_t max_waves;

   /* This is the theoretical maximum number of vec4 registers that one wave
    * of the base threadsize could use. To get the actual size of the register
    * file in bytes one would need to compute:
    *
    * reg_size_vec4 * threadsize_base * wave_granularity * 16 (bytes per vec4)
    *
    * However this number is more often what we actually need. For example, a
    * max_reg of more than half of this makes a doubled threadsize impossible
    * (because double-sized waves take up twice as many registers). Also, the
    * formula for the occupancy given a particular register footprint is
    * simpler.
    *
    * It is in vec4 units because the register file is allocated
    * with vec4 granularity, so it's in the same units as max_reg.
    */
   uint32_t reg_size_vec4;

   /* The size of local memory in bytes */
   uint32_t local_mem_size;

   /* The number of total branch stack entries, divided by wave_granularity. */
   uint32_t branchstack_size;

   /* The byte increment of MEMSIZEPERITEM, the private memory per-fiber allocation. */
   uint32_t pvtmem_per_fiber_align;

   /* Whether clip+cull distances are supported */
   bool has_clip_cull;

   /* Whether private memory is supported */
   bool has_pvtmem;

   /* Whether SSBOs have descriptors for sampling with ISAM */
   bool has_isam_ssbo;

   /* Whether isam.v is supported to sample multiple components from SSBOs */
   bool has_isam_v;

   /* Whether isam/stib/ldib have immediate offsets. */
   bool has_ssbo_imm_offsets;

   /* True if the getfiberid, getlast.w8, brcst.active, and quad_shuffle
    * instructions are supported; these are necessary for subgroup quad and
    * arithmetic operations.
    */
   bool has_getfiberid;

   /* True if the shfl instruction is supported. Needed for subgroup rotate and
    * (more efficient) shuffle.
    */
   bool has_shfl;

   /* True if the bitwise triops (sh[lr][gm]/andg) are supported. */
   bool has_bitwise_triops;

   /* Number of available predicate registers (p0.c) */
   uint32_t num_predicates;

   /* True if bitops (and.b, or.b, xor.b, not.b) can write to p0.c */
   bool bitops_can_write_predicates;

   /* True if braa/brao are available. */
   bool has_branch_and_or;

   /* True if predt/predf/prede are supported. */
   bool has_predication;
   bool predtf_nop_quirk;
   bool prede_nop_quirk;

   /* MAX_COMPUTE_VARIABLE_GROUP_INVOCATIONS_ARB */
   uint32_t max_variable_workgroup_size;

   bool has_dp2acc;
   bool has_dp4acc;
   bool has_compliant_dp4acc;

   /* Type to use for 1b nir bools: */
   type_t bool_type;

   /* Whether compute invocation params are passed in via shared regfile or
    * constbuf. a5xx+ has the shared regfile.
    */
   bool has_shared_regfile;

   /* True if preamble instructions (shps, shpe, etc.) are supported */
   bool has_preamble;

   /* Where the shared consts start in constants file, in vec4's. */
   uint16_t shared_consts_base_offset;

   /* The size of shared consts for CS and FS (in vec4's).
    * Also the size that is actually used on geometry stages (on a6xx).
    */
   uint64_t shared_consts_size;

   /* A shared-consts size found on a6xx for geometry stages that differs
    * from the size actually used.
    *
    * TODO: Keep an eye on this for next gens.
    */
   uint64_t geom_shared_consts_size_quirk;

   bool has_fs_tex_prefetch;

   bool stsc_duplication_quirk;

   bool load_shader_consts_via_preamble;
   bool load_inline_uniforms_via_preamble_ldgk;

   /* True if there is a scalar ALU capable of executing a subset of
    * cat2-cat4 instructions with a shared register destination. This also
    * implies expanded MOV/COV capability when writing to shared registers,
    * as MOV/COV is now executed on the scalar ALU except when reading from a
    * normal register, as well as the ability for ldc to write to a shared
    * register.
    */
   bool has_scalar_alu;

   bool fs_must_have_non_zero_constlen_quirk;

   /* On all generations that support scalar ALU, there is also a copy of the
    * scalar ALU and some other HW units in HLSQ that can execute preambles
    * before work is dispatched to the SPs, called "early preamble". We detect
    * whether the shader can use early preamble in ir3.
    */
   bool has_early_preamble;

   /* True if (rptN) is supported for bary.f. */
   bool has_rpt_bary_f;

   bool reading_shading_rate_requires_smask_quirk;
};
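
/* The two helpers below are an illustrative sketch (not part of the upstream
 * ir3 API; the names are made up for this example). They simply apply the
 * formulas documented above for the total register-file size in bytes and the
 * effective wave width used in occupancy calculations, assuming the fields
 * were filled in by ir3_compiler_create().
 */
static inline uint32_t
ir3_compiler_reg_file_size_bytes(const struct ir3_compiler *compiler)
{
   /* reg_size_vec4 is per wave of the base threadsize, in vec4 units, so
    * scale by the wave-pair granularity and 16 bytes per vec4:
    */
   return compiler->reg_size_vec4 * compiler->threadsize_base *
          compiler->wave_granularity * 16;
}

static inline uint32_t
ir3_compiler_effective_wave_width(const struct ir3_compiler *compiler)
{
   /* Waves launch in groups of wave_granularity, which occupancy math treats
    * as a single wider wave:
    */
   return compiler->threadsize_base * compiler->wave_granularity;
}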

void ir3_compiler_destroy(struct ir3_compiler *compiler);
struct ir3_compiler *ir3_compiler_create(struct fd_device *dev,
                                         const struct fd_dev_id *dev_id,
                                         const struct fd_dev_info *dev_info,
                                         const struct ir3_compiler_options *options);
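
/* Sketch of typical usage (hypothetical driver code; "screen" and its fields
 * are placeholders for whatever device handles the driver already holds):
 *
 *    struct ir3_compiler_options opts = {
 *       .disable_cache = false,
 *       .bindless_fb_read_descriptor = -1,   // < 0: no bindless txf_ms_fb
 *       .bindless_fb_read_slot = -1,
 *    };
 *    struct ir3_compiler *compiler =
 *       ir3_compiler_create(screen->dev, screen->dev_id, screen->dev_info, &opts);
 *    ...
 *    ir3_compiler_destroy(compiler);
 */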

void ir3_disk_cache_init(struct ir3_compiler *compiler);
void ir3_disk_cache_init_shader_key(struct ir3_compiler *compiler,
                                    struct ir3_shader *shader);
struct ir3_shader_variant *ir3_retrieve_variant(struct blob_reader *blob,
                                                struct ir3_compiler *compiler,
                                                void *mem_ctx);
void ir3_store_variant(struct blob *blob, const struct ir3_shader_variant *v);
bool ir3_disk_cache_retrieve(struct ir3_shader *shader,
                             struct ir3_shader_variant *v);
void ir3_disk_cache_store(struct ir3_shader *shader,
                          struct ir3_shader_variant *v);
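
/* Rough sketch of how a driver can serialize a compiled variant and read it
 * back (e.g. for its own pipeline cache), using the util/blob API; variable
 * names here are illustrative only:
 *
 *    struct blob blob;
 *    blob_init(&blob);
 *    ir3_store_variant(&blob, variant);
 *    // persist blob.data / blob.size somewhere...
 *
 *    struct blob_reader reader;
 *    blob_reader_init(&reader, data, size);
 *    struct ir3_shader_variant *v = ir3_retrieve_variant(&reader, compiler, mem_ctx);
 *    blob_finish(&blob);
 */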

const nir_shader_compiler_options *
ir3_get_compiler_options(struct ir3_compiler *compiler);

int ir3_compile_shader_nir(struct ir3_compiler *compiler,
                           struct ir3_shader *shader,
                           struct ir3_shader_variant *so);

/* gpu pointer size in units of 32bit registers/slots */
static inline unsigned
ir3_pointer_size(struct ir3_compiler *compiler)
{
   return compiler->is_64bit ? 2 : 1;
}

enum ir3_shader_debug {
   IR3_DBG_SHADER_VS = BITFIELD_BIT(0),
   IR3_DBG_SHADER_TCS = BITFIELD_BIT(1),
   IR3_DBG_SHADER_TES = BITFIELD_BIT(2),
   IR3_DBG_SHADER_GS = BITFIELD_BIT(3),
   IR3_DBG_SHADER_FS = BITFIELD_BIT(4),
   IR3_DBG_SHADER_CS = BITFIELD_BIT(5),
   IR3_DBG_DISASM = BITFIELD_BIT(6),
   IR3_DBG_OPTMSGS = BITFIELD_BIT(7),
   IR3_DBG_FORCES2EN = BITFIELD_BIT(8),
   IR3_DBG_NOUBOOPT = BITFIELD_BIT(9),
   IR3_DBG_NOFP16 = BITFIELD_BIT(10),
   IR3_DBG_NOCACHE = BITFIELD_BIT(11),
   IR3_DBG_SPILLALL = BITFIELD_BIT(12),
   IR3_DBG_NOPREAMBLE = BITFIELD_BIT(13),
   IR3_DBG_SHADER_INTERNAL = BITFIELD_BIT(14),
   IR3_DBG_FULLSYNC = BITFIELD_BIT(15),
   IR3_DBG_FULLNOP = BITFIELD_BIT(16),
   IR3_DBG_NOEARLYPREAMBLE = BITFIELD_BIT(17),
   IR3_DBG_NODESCPREFETCH = BITFIELD_BIT(18),
   IR3_DBG_EXPANDRPT = BITFIELD_BIT(19),

   /* MESA_DEBUG-only options: */
   IR3_DBG_SCHEDMSGS = BITFIELD_BIT(20),
   IR3_DBG_RAMSGS = BITFIELD_BIT(21),
};

extern enum ir3_shader_debug ir3_shader_debug;
extern const char *ir3_shader_override_path;

static inline bool
shader_debug_enabled(gl_shader_stage type, bool internal)
{
   if (internal)
      return !!(ir3_shader_debug & IR3_DBG_SHADER_INTERNAL);

   if (ir3_shader_debug & IR3_DBG_DISASM)
      return true;

   switch (type) {
   case MESA_SHADER_VERTEX:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_VS);
   case MESA_SHADER_TESS_CTRL:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_TCS);
   case MESA_SHADER_TESS_EVAL:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_TES);
   case MESA_SHADER_GEOMETRY:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_GS);
   case MESA_SHADER_FRAGMENT:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_FS);
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return !!(ir3_shader_debug & IR3_DBG_SHADER_CS);
   default:
      assert(0);
      return false;
   }
}

static inline void
ir3_debug_print(struct ir3 *ir, const char *when)
{
   if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
      mesa_logi("%s:", when);
      ir3_print(ir);
   }
}

/* Return the debug flags that influence shader codegen and should be included
 * in the hash key. Note that we use a deny list so that we don't accidentally
 * forget to include new flags.
 */
static inline enum ir3_shader_debug
ir3_shader_debug_hash_key()
{
   return (enum ir3_shader_debug)(
      ir3_shader_debug &
      ~(IR3_DBG_SHADER_VS | IR3_DBG_SHADER_TCS | IR3_DBG_SHADER_TES |
        IR3_DBG_SHADER_GS | IR3_DBG_SHADER_FS | IR3_DBG_SHADER_CS |
        IR3_DBG_DISASM | IR3_DBG_OPTMSGS | IR3_DBG_NOCACHE |
        IR3_DBG_SHADER_INTERNAL | IR3_DBG_SCHEDMSGS | IR3_DBG_RAMSGS));
}

ENDC;

#endif /* IR3_COMPILER_H_ */