1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "ac_exp_param.h"
26 #include "ac_rtld.h"
27 #include "compiler/nir/nir.h"
28 #include "compiler/nir/nir_serialize.h"
29 #include "si_pipe.h"
30 #include "si_shader_internal.h"
31 #include "sid.h"
32 #include "tgsi/tgsi_from_mesa.h"
33 #include "tgsi/tgsi_strings.h"
34 #include "util/u_memory.h"
35
36 static const char scratch_rsrc_dword0_symbol[] = "SCRATCH_RSRC_DWORD0";
37
38 static const char scratch_rsrc_dword1_symbol[] = "SCRATCH_RSRC_DWORD1";
39
40 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
41
42 /** Whether the shader runs as a combination of multiple API shaders */
si_is_multi_part_shader(struct si_shader * shader)43 bool si_is_multi_part_shader(struct si_shader *shader)
44 {
45 if (shader->selector->screen->info.chip_class <= GFX8)
46 return false;
47
48 return shader->key.as_ls || shader->key.as_es ||
49 shader->selector->info.stage == MESA_SHADER_TESS_CTRL ||
50 shader->selector->info.stage == MESA_SHADER_GEOMETRY;
51 }
52
53 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
si_is_merged_shader(struct si_shader * shader)54 bool si_is_merged_shader(struct si_shader *shader)
55 {
56 return shader->key.as_ngg || si_is_multi_part_shader(shader);
57 }
58
59 /**
60 * Returns a unique index for a per-patch semantic name and index. The index
61 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
62 * can be calculated.
63 */
si_shader_io_get_unique_index_patch(unsigned semantic)64 unsigned si_shader_io_get_unique_index_patch(unsigned semantic)
65 {
66 switch (semantic) {
67 case VARYING_SLOT_TESS_LEVEL_OUTER:
68 return 0;
69 case VARYING_SLOT_TESS_LEVEL_INNER:
70 return 1;
71 default:
72 if (semantic >= VARYING_SLOT_PATCH0 && semantic < VARYING_SLOT_PATCH0 + 30)
73 return 2 + (semantic - VARYING_SLOT_PATCH0);
74
75 assert(!"invalid semantic");
76 return 0;
77 }
78 }
79
80 /**
81 * Returns a unique index for a semantic name and index. The index must be
82 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
83 * calculated.
84 */
si_shader_io_get_unique_index(unsigned semantic,bool is_varying)85 unsigned si_shader_io_get_unique_index(unsigned semantic, bool is_varying)
86 {
87 switch (semantic) {
88 case VARYING_SLOT_POS:
89 return 0;
90 default:
91 /* Since some shader stages use the the highest used IO index
92 * to determine the size to allocate for inputs/outputs
93 * (in LDS, tess and GS rings). GENERIC should be placed right
94 * after POSITION to make that size as small as possible.
95 */
96 if (semantic >= VARYING_SLOT_VAR0 &&
97 semantic < VARYING_SLOT_VAR0 + SI_MAX_IO_GENERIC)
98 return 1 + (semantic - VARYING_SLOT_VAR0);
99
100 assert(!"invalid generic index");
101 return 0;
102 case VARYING_SLOT_FOGC:
103 return SI_MAX_IO_GENERIC + 1;
104 case VARYING_SLOT_COL0:
105 return SI_MAX_IO_GENERIC + 2;
106 case VARYING_SLOT_COL1:
107 return SI_MAX_IO_GENERIC + 3;
108 case VARYING_SLOT_BFC0:
109 /* If it's a varying, COLOR and BCOLOR alias. */
110 if (is_varying)
111 return SI_MAX_IO_GENERIC + 2;
112 else
113 return SI_MAX_IO_GENERIC + 4;
114 case VARYING_SLOT_BFC1:
115 if (is_varying)
116 return SI_MAX_IO_GENERIC + 3;
117 else
118 return SI_MAX_IO_GENERIC + 5;
119 case VARYING_SLOT_TEX0:
120 case VARYING_SLOT_TEX1:
121 case VARYING_SLOT_TEX2:
122 case VARYING_SLOT_TEX3:
123 case VARYING_SLOT_TEX4:
124 case VARYING_SLOT_TEX5:
125 case VARYING_SLOT_TEX6:
126 case VARYING_SLOT_TEX7:
127 return SI_MAX_IO_GENERIC + 6 + (semantic - VARYING_SLOT_TEX0);
128
129 /* These are rarely used between LS and HS or ES and GS. */
130 case VARYING_SLOT_CLIP_DIST0:
131 return SI_MAX_IO_GENERIC + 6 + 8;
132 case VARYING_SLOT_CLIP_DIST1:
133 return SI_MAX_IO_GENERIC + 6 + 8 + 1;
134 case VARYING_SLOT_CLIP_VERTEX:
135 return SI_MAX_IO_GENERIC + 6 + 8 + 2;
136 case VARYING_SLOT_PSIZ:
137 return SI_MAX_IO_GENERIC + 6 + 8 + 3;
138
139 /* These can't be written by LS, HS, and ES. */
140 case VARYING_SLOT_LAYER:
141 return SI_MAX_IO_GENERIC + 6 + 8 + 4;
142 case VARYING_SLOT_VIEWPORT:
143 return SI_MAX_IO_GENERIC + 6 + 8 + 5;
144 case VARYING_SLOT_PRIMITIVE_ID:
145 STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
146 return SI_MAX_IO_GENERIC + 6 + 8 + 6;
147 }
148 }
149
si_dump_streamout(struct pipe_stream_output_info * so)150 static void si_dump_streamout(struct pipe_stream_output_info *so)
151 {
152 unsigned i;
153
154 if (so->num_outputs)
155 fprintf(stderr, "STREAMOUT\n");
156
157 for (i = 0; i < so->num_outputs; i++) {
158 unsigned mask = ((1 << so->output[i].num_components) - 1) << so->output[i].start_component;
159 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n", i, so->output[i].output_buffer,
160 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
161 so->output[i].register_index, mask & 1 ? "x" : "", mask & 2 ? "y" : "",
162 mask & 4 ? "z" : "", mask & 8 ? "w" : "");
163 }
164 }
165
declare_streamout_params(struct si_shader_context * ctx,struct pipe_stream_output_info * so)166 static void declare_streamout_params(struct si_shader_context *ctx,
167 struct pipe_stream_output_info *so)
168 {
169 if (ctx->screen->use_ngg_streamout) {
170 if (ctx->stage == MESA_SHADER_TESS_EVAL)
171 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
172 return;
173 }
174
175 /* Streamout SGPRs. */
176 if (so->num_outputs) {
177 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
178 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
179 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
180 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
181 }
182
183 /* A streamout buffer offset is loaded if the stride is non-zero. */
184 for (int i = 0; i < 4; i++) {
185 if (!so->stride[i])
186 continue;
187
188 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
189 }
190 }
191
si_get_max_workgroup_size(const struct si_shader * shader)192 unsigned si_get_max_workgroup_size(const struct si_shader *shader)
193 {
194 switch (shader->selector->info.stage) {
195 case MESA_SHADER_VERTEX:
196 case MESA_SHADER_TESS_EVAL:
197 return shader->key.as_ngg ? 128 : 0;
198
199 case MESA_SHADER_TESS_CTRL:
200 /* Return this so that LLVM doesn't remove s_barrier
201 * instructions on chips where we use s_barrier. */
202 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
203
204 case MESA_SHADER_GEOMETRY:
205 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
206
207 case MESA_SHADER_COMPUTE:
208 break; /* see below */
209
210 default:
211 return 0;
212 }
213
214 /* Compile a variable block size using the maximum variable size. */
215 if (shader->selector->info.base.cs.local_size_variable)
216 return SI_MAX_VARIABLE_THREADS_PER_BLOCK;
217
218 uint16_t *local_size = shader->selector->info.base.cs.local_size;
219 unsigned max_work_group_size = (uint32_t)local_size[0] *
220 (uint32_t)local_size[1] *
221 (uint32_t)local_size[2];
222 assert(max_work_group_size);
223 return max_work_group_size;
224 }
225
declare_const_and_shader_buffers(struct si_shader_context * ctx,bool assign_params)226 static void declare_const_and_shader_buffers(struct si_shader_context *ctx, bool assign_params)
227 {
228 enum ac_arg_type const_shader_buf_type;
229
230 if (ctx->shader->selector->info.base.num_ubos == 1 &&
231 ctx->shader->selector->info.base.num_ssbos == 0)
232 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
233 else
234 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
235
236 ac_add_arg(
237 &ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
238 assign_params ? &ctx->const_and_shader_buffers : &ctx->other_const_and_shader_buffers);
239 }
240
declare_samplers_and_images(struct si_shader_context * ctx,bool assign_params)241 static void declare_samplers_and_images(struct si_shader_context *ctx, bool assign_params)
242 {
243 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
244 assign_params ? &ctx->samplers_and_images : &ctx->other_samplers_and_images);
245 }
246
declare_per_stage_desc_pointers(struct si_shader_context * ctx,bool assign_params)247 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx, bool assign_params)
248 {
249 declare_const_and_shader_buffers(ctx, assign_params);
250 declare_samplers_and_images(ctx, assign_params);
251 }
252
declare_global_desc_pointers(struct si_shader_context * ctx)253 static void declare_global_desc_pointers(struct si_shader_context *ctx)
254 {
255 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->rw_buffers);
256 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
257 &ctx->bindless_samplers_and_images);
258 }
259
declare_vs_specific_input_sgprs(struct si_shader_context * ctx)260 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
261 {
262 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
263 if (!ctx->shader->is_gs_copy_shader) {
264 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
265 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
266 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
267 }
268 }
269
declare_vb_descriptor_input_sgprs(struct si_shader_context * ctx)270 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
271 {
272 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
273
274 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
275 if (num_vbos_in_user_sgprs) {
276 unsigned user_sgprs = ctx->args.num_sgprs_used;
277
278 if (si_is_merged_shader(ctx->shader))
279 user_sgprs -= 8;
280 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
281
282 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
283 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
284 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
285
286 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
287 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
288 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
289 }
290 }
291
declare_vs_input_vgprs(struct si_shader_context * ctx,unsigned * num_prolog_vgprs)292 static void declare_vs_input_vgprs(struct si_shader_context *ctx, unsigned *num_prolog_vgprs)
293 {
294 struct si_shader *shader = ctx->shader;
295
296 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
297 if (shader->key.as_ls) {
298 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
299 if (ctx->screen->info.chip_class >= GFX10) {
300 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
301 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
302 } else {
303 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
304 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
305 }
306 } else if (ctx->screen->info.chip_class >= GFX10) {
307 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
308 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
309 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
310 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
311 } else {
312 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
313 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
314 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
315 }
316
317 if (!shader->is_gs_copy_shader) {
318 /* Vertex load indices. */
319 if (shader->selector->info.num_inputs) {
320 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vertex_index0);
321 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
322 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
323 }
324 *num_prolog_vgprs += shader->selector->info.num_inputs;
325 }
326 }
327
declare_vs_blit_inputs(struct si_shader_context * ctx,unsigned vs_blit_property)328 static void declare_vs_blit_inputs(struct si_shader_context *ctx, unsigned vs_blit_property)
329 {
330 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_blit_inputs); /* i16 x1, y1 */
331 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x1, y1 */
332 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
333
334 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
335 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
336 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
337 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
338 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
339 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
340 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
341 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
342 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
343 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
344 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
345 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
346 }
347 }
348
declare_tes_input_vgprs(struct si_shader_context * ctx)349 static void declare_tes_input_vgprs(struct si_shader_context *ctx)
350 {
351 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
352 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
353 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
354 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
355 }
356
357 enum
358 {
359 /* Convenient merged shader definitions. */
360 SI_SHADER_MERGED_VERTEX_TESSCTRL = MESA_ALL_SHADER_STAGES,
361 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
362 };
363
si_add_arg_checked(struct ac_shader_args * args,enum ac_arg_regfile file,unsigned registers,enum ac_arg_type type,struct ac_arg * arg,unsigned idx)364 void si_add_arg_checked(struct ac_shader_args *args, enum ac_arg_regfile file, unsigned registers,
365 enum ac_arg_type type, struct ac_arg *arg, unsigned idx)
366 {
367 assert(args->arg_count == idx);
368 ac_add_arg(args, file, registers, type, arg);
369 }
370
si_create_function(struct si_shader_context * ctx,bool ngg_cull_shader)371 void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader)
372 {
373 struct si_shader *shader = ctx->shader;
374 LLVMTypeRef returns[AC_MAX_ARGS];
375 unsigned i, num_return_sgprs;
376 unsigned num_returns = 0;
377 unsigned num_prolog_vgprs = 0;
378 unsigned stage = ctx->stage;
379
380 memset(&ctx->args, 0, sizeof(ctx->args));
381
382 /* Set MERGED shaders. */
383 if (ctx->screen->info.chip_class >= GFX9) {
384 if (shader->key.as_ls || stage == MESA_SHADER_TESS_CTRL)
385 stage = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
386 else if (shader->key.as_es || shader->key.as_ngg || stage == MESA_SHADER_GEOMETRY)
387 stage = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
388 }
389
390 switch (stage) {
391 case MESA_SHADER_VERTEX:
392 declare_global_desc_pointers(ctx);
393
394 if (shader->selector->info.base.vs.blit_sgprs_amd) {
395 declare_vs_blit_inputs(ctx, shader->selector->info.base.vs.blit_sgprs_amd);
396
397 /* VGPRs */
398 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
399 break;
400 }
401
402 declare_per_stage_desc_pointers(ctx, true);
403 declare_vs_specific_input_sgprs(ctx);
404 if (!shader->is_gs_copy_shader)
405 declare_vb_descriptor_input_sgprs(ctx);
406
407 if (shader->key.as_es) {
408 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
409 } else if (shader->key.as_ls) {
410 /* no extra parameters */
411 } else {
412 /* The locations of the other parameters are assigned dynamically. */
413 declare_streamout_params(ctx, &shader->selector->so);
414 }
415
416 /* VGPRs */
417 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
418
419 /* Return values */
420 if (shader->key.opt.vs_as_prim_discard_cs) {
421 for (i = 0; i < 4; i++)
422 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
423 }
424 break;
425
426 case MESA_SHADER_TESS_CTRL: /* GFX6-GFX8 */
427 declare_global_desc_pointers(ctx);
428 declare_per_stage_desc_pointers(ctx, true);
429 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
430 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
431 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
432 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
433 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
434 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
435
436 /* VGPRs */
437 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
438 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
439
440 /* param_tcs_offchip_offset and param_tcs_factor_offset are
441 * placed after the user SGPRs.
442 */
443 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
444 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
445 for (i = 0; i < 11; i++)
446 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
447 break;
448
449 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
450 /* Merged stages have 8 system SGPRs at the beginning. */
451 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
452 declare_per_stage_desc_pointers(ctx, ctx->stage == MESA_SHADER_TESS_CTRL);
453 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
454 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
455 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
456 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
457 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
458 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
459
460 declare_global_desc_pointers(ctx);
461 declare_per_stage_desc_pointers(ctx, ctx->stage == MESA_SHADER_VERTEX);
462 declare_vs_specific_input_sgprs(ctx);
463
464 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
465 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
466 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
467 declare_vb_descriptor_input_sgprs(ctx);
468
469 /* VGPRs (first TCS, then VS) */
470 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
471 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
472
473 if (ctx->stage == MESA_SHADER_VERTEX) {
474 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
475
476 /* LS return values are inputs to the TCS main shader part. */
477 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
478 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
479 for (i = 0; i < 2; i++)
480 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
481 } else {
482 /* TCS return values are inputs to the TCS epilog.
483 *
484 * param_tcs_offchip_offset, param_tcs_factor_offset,
485 * param_tcs_offchip_layout, and param_rw_buffers
486 * should be passed to the epilog.
487 */
488 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
489 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
490 for (i = 0; i < 11; i++)
491 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
492 }
493 break;
494
495 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
496 /* Merged stages have 8 system SGPRs at the beginning. */
497 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
498 declare_per_stage_desc_pointers(ctx, ctx->stage == MESA_SHADER_GEOMETRY);
499
500 if (ctx->shader->key.as_ngg)
501 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
502 else
503 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
504
505 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
506 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
507 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
508 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
509 &ctx->small_prim_cull_info); /* SPI_SHADER_PGM_LO_GS << 8 */
510 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
511 NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
512
513 declare_global_desc_pointers(ctx);
514 if (ctx->stage != MESA_SHADER_VERTEX || !shader->selector->info.base.vs.blit_sgprs_amd) {
515 declare_per_stage_desc_pointers(
516 ctx, (ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL));
517 }
518
519 if (ctx->stage == MESA_SHADER_VERTEX) {
520 if (shader->selector->info.base.vs.blit_sgprs_amd)
521 declare_vs_blit_inputs(ctx, shader->selector->info.base.vs.blit_sgprs_amd);
522 else
523 declare_vs_specific_input_sgprs(ctx);
524 } else {
525 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
526 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
527 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
528 /* Declare as many input SGPRs as the VS has. */
529 }
530
531 if (ctx->stage == MESA_SHADER_VERTEX)
532 declare_vb_descriptor_input_sgprs(ctx);
533
534 /* VGPRs (first GS, then VS/TES) */
535 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
536 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
537 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
538 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
539 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
540
541 if (ctx->stage == MESA_SHADER_VERTEX) {
542 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
543 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
544 declare_tes_input_vgprs(ctx);
545 }
546
547 if ((ctx->shader->key.as_es || ngg_cull_shader) &&
548 (ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL)) {
549 unsigned num_user_sgprs, num_vgprs;
550
551 if (ctx->stage == MESA_SHADER_VERTEX) {
552 /* For the NGG cull shader, add 1 SGPR to hold
553 * the vertex buffer pointer.
554 */
555 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR + ngg_cull_shader;
556
557 if (ngg_cull_shader && shader->selector->num_vbos_in_user_sgprs) {
558 assert(num_user_sgprs <= 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
559 num_user_sgprs =
560 SI_SGPR_VS_VB_DESCRIPTOR_FIRST + shader->selector->num_vbos_in_user_sgprs * 4;
561 }
562 } else {
563 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
564 }
565
566 /* The NGG cull shader has to return all 9 VGPRs.
567 *
568 * The normal merged ESGS shader only has to return the 5 VGPRs
569 * for the GS stage.
570 */
571 num_vgprs = ngg_cull_shader ? 9 : 5;
572
573 /* ES return values are inputs to GS. */
574 for (i = 0; i < 8 + num_user_sgprs; i++)
575 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
576 for (i = 0; i < num_vgprs; i++)
577 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
578 }
579 break;
580
581 case MESA_SHADER_TESS_EVAL:
582 declare_global_desc_pointers(ctx);
583 declare_per_stage_desc_pointers(ctx, true);
584 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
585 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
586 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
587
588 if (shader->key.as_es) {
589 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
590 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
591 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
592 } else {
593 declare_streamout_params(ctx, &shader->selector->so);
594 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
595 }
596
597 /* VGPRs */
598 declare_tes_input_vgprs(ctx);
599 break;
600
601 case MESA_SHADER_GEOMETRY:
602 declare_global_desc_pointers(ctx);
603 declare_per_stage_desc_pointers(ctx, true);
604 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
605 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
606
607 /* VGPRs */
608 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
609 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
610 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
611 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
612 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
613 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
614 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
615 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
616 break;
617
618 case MESA_SHADER_FRAGMENT:
619 declare_global_desc_pointers(ctx);
620 declare_per_stage_desc_pointers(ctx, true);
621 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL, SI_PARAM_ALPHA_REF);
622 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.prim_mask,
623 SI_PARAM_PRIM_MASK);
624
625 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
626 SI_PARAM_PERSP_SAMPLE);
627 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_center,
628 SI_PARAM_PERSP_CENTER);
629 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_centroid,
630 SI_PARAM_PERSP_CENTROID);
631 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT, NULL, SI_PARAM_PERSP_PULL_MODEL);
632 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.linear_sample,
633 SI_PARAM_LINEAR_SAMPLE);
634 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.linear_center,
635 SI_PARAM_LINEAR_CENTER);
636 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.linear_centroid,
637 SI_PARAM_LINEAR_CENTROID);
638 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT, NULL, SI_PARAM_LINE_STIPPLE_TEX);
639 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.frag_pos[0],
640 SI_PARAM_POS_X_FLOAT);
641 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.frag_pos[1],
642 SI_PARAM_POS_Y_FLOAT);
643 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.frag_pos[2],
644 SI_PARAM_POS_Z_FLOAT);
645 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.frag_pos[3],
646 SI_PARAM_POS_W_FLOAT);
647 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
648 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.front_face,
649 SI_PARAM_FRONT_FACE);
650 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
651 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.ancillary,
652 SI_PARAM_ANCILLARY);
653 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->args.sample_coverage,
654 SI_PARAM_SAMPLE_COVERAGE);
655 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->pos_fixed_pt,
656 SI_PARAM_POS_FIXED_PT);
657
658 /* Color inputs from the prolog. */
659 if (shader->selector->info.colors_read) {
660 unsigned num_color_elements = util_bitcount(shader->selector->info.colors_read);
661
662 for (i = 0; i < num_color_elements; i++)
663 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
664
665 num_prolog_vgprs += num_color_elements;
666 }
667
668 /* Outputs for the epilog. */
669 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
670 num_returns = num_return_sgprs + util_bitcount(shader->selector->info.colors_written) * 4 +
671 shader->selector->info.writes_z + shader->selector->info.writes_stencil +
672 shader->selector->info.writes_samplemask + 1 /* SampleMaskIn */;
673
674 num_returns = MAX2(num_returns, num_return_sgprs + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
675
676 for (i = 0; i < num_return_sgprs; i++)
677 returns[i] = ctx->ac.i32;
678 for (; i < num_returns; i++)
679 returns[i] = ctx->ac.f32;
680 break;
681
682 case MESA_SHADER_COMPUTE:
683 declare_global_desc_pointers(ctx);
684 declare_per_stage_desc_pointers(ctx, true);
685 if (shader->selector->info.uses_grid_size)
686 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->args.num_work_groups);
687 if (shader->selector->info.uses_variable_block_size)
688 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
689
690 unsigned cs_user_data_dwords =
691 shader->selector->info.base.cs.user_data_components_amd;
692 if (cs_user_data_dwords) {
693 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT, &ctx->cs_user_data);
694 }
695
696 /* Some descriptors can be in user SGPRs. */
697 /* Shader buffers in user SGPRs. */
698 for (unsigned i = 0; i < shader->selector->cs_num_shaderbufs_in_user_sgprs; i++) {
699 while (ctx->args.num_sgprs_used % 4 != 0)
700 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
701
702 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->cs_shaderbuf[i]);
703 }
704 /* Images in user SGPRs. */
705 for (unsigned i = 0; i < shader->selector->cs_num_images_in_user_sgprs; i++) {
706 unsigned num_sgprs = shader->selector->info.base.image_buffers & (1 << i) ? 4 : 8;
707
708 while (ctx->args.num_sgprs_used % num_sgprs != 0)
709 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
710
711 ac_add_arg(&ctx->args, AC_ARG_SGPR, num_sgprs, AC_ARG_INT, &ctx->cs_image[i]);
712 }
713
714 /* Hardware SGPRs. */
715 for (i = 0; i < 3; i++) {
716 if (shader->selector->info.uses_block_id[i]) {
717 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.workgroup_ids[i]);
718 }
719 }
720 if (shader->selector->info.uses_subgroup_info)
721 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
722
723 /* Hardware VGPRs. */
724 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT, &ctx->args.local_invocation_ids);
725 break;
726 default:
727 assert(0 && "unimplemented shader");
728 return;
729 }
730
731 si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main", returns, num_returns,
732 si_get_max_workgroup_size(shader));
733
734 /* Reserve register locations for VGPR inputs the PS prolog may need. */
735 if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
736 ac_llvm_add_target_dep_function_attr(
737 ctx->main_fn, "InitialPSInputAddr",
738 S_0286D0_PERSP_SAMPLE_ENA(1) | S_0286D0_PERSP_CENTER_ENA(1) |
739 S_0286D0_PERSP_CENTROID_ENA(1) | S_0286D0_LINEAR_SAMPLE_ENA(1) |
740 S_0286D0_LINEAR_CENTER_ENA(1) | S_0286D0_LINEAR_CENTROID_ENA(1) |
741 S_0286D0_FRONT_FACE_ENA(1) | S_0286D0_ANCILLARY_ENA(1) | S_0286D0_POS_FIXED_PT_ENA(1));
742 }
743
744 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
745 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
746
747 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
748 shader->info.num_input_vgprs -= num_prolog_vgprs;
749
750 if (shader->key.as_ls || ctx->stage == MESA_SHADER_TESS_CTRL) {
751 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
752 /* The LSHS size is not known until draw time, so we append it
753 * at the end of whatever LDS use there may be in the rest of
754 * the shader (currently none, unless LLVM decides to do its
755 * own LDS-based lowering).
756 */
757 ctx->ac.lds = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
758 "__lds_end", AC_ADDR_SPACE_LDS);
759 LLVMSetAlignment(ctx->ac.lds, 256);
760 } else {
761 ac_declare_lds_as_pointer(&ctx->ac);
762 }
763 }
764
765 /* Unlike radv, we override these arguments in the prolog, so to the
766 * API shader they appear as normal arguments.
767 */
768 if (ctx->stage == MESA_SHADER_VERTEX) {
769 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
770 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
771 } else if (ctx->stage == MESA_SHADER_FRAGMENT) {
772 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
773 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
774 }
775 }
776
777 /* For the UMR disassembler. */
778 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
779 #define DEBUGGER_NUM_MARKERS 5
780
si_shader_binary_open(struct si_screen * screen,struct si_shader * shader,struct ac_rtld_binary * rtld)781 static bool si_shader_binary_open(struct si_screen *screen, struct si_shader *shader,
782 struct ac_rtld_binary *rtld)
783 {
784 const struct si_shader_selector *sel = shader->selector;
785 const char *part_elfs[5];
786 size_t part_sizes[5];
787 unsigned num_parts = 0;
788
789 #define add_part(shader_or_part) \
790 if (shader_or_part) { \
791 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
792 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
793 num_parts++; \
794 }
795
796 add_part(shader->prolog);
797 add_part(shader->previous_stage);
798 add_part(shader->prolog2);
799 add_part(shader);
800 add_part(shader->epilog);
801
802 #undef add_part
803
804 struct ac_rtld_symbol lds_symbols[2];
805 unsigned num_lds_symbols = 0;
806
807 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
808 (sel->info.stage == MESA_SHADER_GEOMETRY || shader->key.as_ngg)) {
809 /* We add this symbol even on LLVM <= 8 to ensure that
810 * shader->config.lds_size is set correctly below.
811 */
812 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
813 sym->name = "esgs_ring";
814 sym->size = shader->gs_info.esgs_ring_size * 4;
815 sym->align = 64 * 1024;
816 }
817
818 if (shader->key.as_ngg && sel->info.stage == MESA_SHADER_GEOMETRY) {
819 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
820 sym->name = "ngg_emit";
821 sym->size = shader->ngg.ngg_emit_size * 4;
822 sym->align = 4;
823 }
824
825 bool ok = ac_rtld_open(
826 rtld, (struct ac_rtld_open_info){.info = &screen->info,
827 .options =
828 {
829 .halt_at_entry = screen->options.halt_shaders,
830 },
831 .shader_type = sel->info.stage,
832 .wave_size = si_get_shader_wave_size(shader),
833 .num_parts = num_parts,
834 .elf_ptrs = part_elfs,
835 .elf_sizes = part_sizes,
836 .num_shared_lds_symbols = num_lds_symbols,
837 .shared_lds_symbols = lds_symbols});
838
839 if (rtld->lds_size > 0) {
840 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
841 shader->config.lds_size = align(rtld->lds_size, alloc_granularity) / alloc_granularity;
842 }
843
844 return ok;
845 }
846
si_get_shader_binary_size(struct si_screen * screen,struct si_shader * shader)847 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
848 {
849 struct ac_rtld_binary rtld;
850 si_shader_binary_open(screen, shader, &rtld);
851 return rtld.exec_size;
852 }
853
si_get_external_symbol(void * data,const char * name,uint64_t * value)854 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
855 {
856 uint64_t *scratch_va = data;
857
858 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
859 *value = (uint32_t)*scratch_va;
860 return true;
861 }
862 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
863 /* Enable scratch coalescing. */
864 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1);
865 return true;
866 }
867
868 return false;
869 }
870
si_shader_binary_upload(struct si_screen * sscreen,struct si_shader * shader,uint64_t scratch_va)871 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
872 uint64_t scratch_va)
873 {
874 struct ac_rtld_binary binary;
875 if (!si_shader_binary_open(sscreen, shader, &binary))
876 return false;
877
878 si_resource_reference(&shader->bo, NULL);
879 shader->bo = si_aligned_buffer_create(
880 &sscreen->b,
881 (sscreen->info.cpdma_prefetch_writes_memory ?
882 0 : SI_RESOURCE_FLAG_READ_ONLY) | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
883 PIPE_USAGE_IMMUTABLE, align(binary.rx_size, SI_CPDMA_ALIGNMENT), 256);
884 if (!shader->bo)
885 return false;
886
887 /* Upload. */
888 struct ac_rtld_upload_info u = {};
889 u.binary = &binary;
890 u.get_external_symbol = si_get_external_symbol;
891 u.cb_data = &scratch_va;
892 u.rx_va = shader->bo->gpu_address;
893 u.rx_ptr = sscreen->ws->buffer_map(
894 shader->bo->buf, NULL,
895 PIPE_MAP_READ_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_MAP_TEMPORARY);
896 if (!u.rx_ptr)
897 return false;
898
899 bool ok = ac_rtld_upload(&u);
900
901 sscreen->ws->buffer_unmap(shader->bo->buf);
902 ac_rtld_close(&binary);
903
904 return ok;
905 }
906
si_shader_dump_disassembly(struct si_screen * screen,const struct si_shader_binary * binary,gl_shader_stage stage,unsigned wave_size,struct pipe_debug_callback * debug,const char * name,FILE * file)907 static void si_shader_dump_disassembly(struct si_screen *screen,
908 const struct si_shader_binary *binary,
909 gl_shader_stage stage, unsigned wave_size,
910 struct pipe_debug_callback *debug, const char *name,
911 FILE *file)
912 {
913 struct ac_rtld_binary rtld_binary;
914
915 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
916 .info = &screen->info,
917 .shader_type = stage,
918 .wave_size = wave_size,
919 .num_parts = 1,
920 .elf_ptrs = &binary->elf_buffer,
921 .elf_sizes = &binary->elf_size}))
922 return;
923
924 const char *disasm;
925 size_t nbytes;
926
927 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
928 goto out;
929
930 if (nbytes > INT_MAX)
931 goto out;
932
933 if (debug && debug->debug_message) {
934 /* Very long debug messages are cut off, so send the
935 * disassembly one line at a time. This causes more
936 * overhead, but on the plus side it simplifies
937 * parsing of resulting logs.
938 */
939 pipe_debug_message(debug, SHADER_INFO, "Shader Disassembly Begin");
940
941 uint64_t line = 0;
942 while (line < nbytes) {
943 int count = nbytes - line;
944 const char *nl = memchr(disasm + line, '\n', nbytes - line);
945 if (nl)
946 count = nl - (disasm + line);
947
948 if (count) {
949 pipe_debug_message(debug, SHADER_INFO, "%.*s", count, disasm + line);
950 }
951
952 line += count + 1;
953 }
954
955 pipe_debug_message(debug, SHADER_INFO, "Shader Disassembly End");
956 }
957
958 if (file) {
959 fprintf(file, "Shader %s disassembly:\n", name);
960 fprintf(file, "%*s", (int)nbytes, disasm);
961 }
962
963 out:
964 ac_rtld_close(&rtld_binary);
965 }
966
si_calculate_max_simd_waves(struct si_shader * shader)967 static void si_calculate_max_simd_waves(struct si_shader *shader)
968 {
969 struct si_screen *sscreen = shader->selector->screen;
970 struct ac_shader_config *conf = &shader->config;
971 unsigned num_inputs = shader->selector->info.num_inputs;
972 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
973 unsigned lds_per_wave = 0;
974 unsigned max_simd_waves;
975
976 max_simd_waves = sscreen->info.max_wave64_per_simd;
977
978 /* Compute LDS usage for PS. */
979 switch (shader->selector->info.stage) {
980 case MESA_SHADER_FRAGMENT:
981 /* The minimum usage per wave is (num_inputs * 48). The maximum
982 * usage is (num_inputs * 48 * 16).
983 * We can get anything in between and it varies between waves.
984 *
985 * The 48 bytes per input for a single primitive is equal to
986 * 4 bytes/component * 4 components/input * 3 points.
987 *
988 * Other stages don't know the size at compile time or don't
989 * allocate LDS per wave, but instead they do it per thread group.
990 */
991 lds_per_wave = conf->lds_size * lds_increment + align(num_inputs * 48, lds_increment);
992 break;
993 case MESA_SHADER_COMPUTE: {
994 unsigned max_workgroup_size = si_get_max_workgroup_size(shader);
995 lds_per_wave = (conf->lds_size * lds_increment) /
996 DIV_ROUND_UP(max_workgroup_size, sscreen->compute_wave_size);
997 }
998 break;
999 default:;
1000 }
1001
1002 /* Compute the per-SIMD wave counts. */
1003 if (conf->num_sgprs) {
1004 max_simd_waves =
1005 MIN2(max_simd_waves, sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
1006 }
1007
1008 if (conf->num_vgprs) {
1009 /* Always print wave limits as Wave64, so that we can compare
1010 * Wave32 and Wave64 with shader-db fairly. */
1011 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
1012 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
1013 }
1014
1015 unsigned max_lds_per_simd = sscreen->info.lds_size_per_workgroup / 4;
1016 if (lds_per_wave)
1017 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
1018
1019 shader->info.max_simd_waves = max_simd_waves;
1020 }
1021
si_shader_dump_stats_for_shader_db(struct si_screen * screen,struct si_shader * shader,struct pipe_debug_callback * debug)1022 void si_shader_dump_stats_for_shader_db(struct si_screen *screen, struct si_shader *shader,
1023 struct pipe_debug_callback *debug)
1024 {
1025 const struct ac_shader_config *conf = &shader->config;
1026
1027 if (screen->options.debug_disassembly)
1028 si_shader_dump_disassembly(screen, &shader->binary, shader->selector->info.stage,
1029 si_get_shader_wave_size(shader), debug, "main", NULL);
1030
1031 pipe_debug_message(debug, SHADER_INFO,
1032 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
1033 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
1034 "Spilled VGPRs: %d PrivMem VGPRs: %d",
1035 conf->num_sgprs, conf->num_vgprs, si_get_shader_binary_size(screen, shader),
1036 conf->lds_size, conf->scratch_bytes_per_wave, shader->info.max_simd_waves,
1037 conf->spilled_sgprs, conf->spilled_vgprs, shader->info.private_mem_vgprs);
1038 }
1039
si_shader_dump_stats(struct si_screen * sscreen,struct si_shader * shader,FILE * file,bool check_debug_option)1040 static void si_shader_dump_stats(struct si_screen *sscreen, struct si_shader *shader, FILE *file,
1041 bool check_debug_option)
1042 {
1043 const struct ac_shader_config *conf = &shader->config;
1044
1045 if (!check_debug_option || si_can_dump_shader(sscreen, shader->selector->info.stage)) {
1046 if (shader->selector->info.stage == MESA_SHADER_FRAGMENT) {
1047 fprintf(file,
1048 "*** SHADER CONFIG ***\n"
1049 "SPI_PS_INPUT_ADDR = 0x%04x\n"
1050 "SPI_PS_INPUT_ENA = 0x%04x\n",
1051 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
1052 }
1053
1054 fprintf(file,
1055 "*** SHADER STATS ***\n"
1056 "SGPRS: %d\n"
1057 "VGPRS: %d\n"
1058 "Spilled SGPRs: %d\n"
1059 "Spilled VGPRs: %d\n"
1060 "Private memory VGPRs: %d\n"
1061 "Code Size: %d bytes\n"
1062 "LDS: %d blocks\n"
1063 "Scratch: %d bytes per wave\n"
1064 "Max Waves: %d\n"
1065 "********************\n\n\n",
1066 conf->num_sgprs, conf->num_vgprs, conf->spilled_sgprs, conf->spilled_vgprs,
1067 shader->info.private_mem_vgprs, si_get_shader_binary_size(sscreen, shader),
1068 conf->lds_size, conf->scratch_bytes_per_wave, shader->info.max_simd_waves);
1069 }
1070 }
1071
si_get_shader_name(const struct si_shader * shader)1072 const char *si_get_shader_name(const struct si_shader *shader)
1073 {
1074 switch (shader->selector->info.stage) {
1075 case MESA_SHADER_VERTEX:
1076 if (shader->key.as_es)
1077 return "Vertex Shader as ES";
1078 else if (shader->key.as_ls)
1079 return "Vertex Shader as LS";
1080 else if (shader->key.opt.vs_as_prim_discard_cs)
1081 return "Vertex Shader as Primitive Discard CS";
1082 else if (shader->key.as_ngg)
1083 return "Vertex Shader as ESGS";
1084 else
1085 return "Vertex Shader as VS";
1086 case MESA_SHADER_TESS_CTRL:
1087 return "Tessellation Control Shader";
1088 case MESA_SHADER_TESS_EVAL:
1089 if (shader->key.as_es)
1090 return "Tessellation Evaluation Shader as ES";
1091 else if (shader->key.as_ngg)
1092 return "Tessellation Evaluation Shader as ESGS";
1093 else
1094 return "Tessellation Evaluation Shader as VS";
1095 case MESA_SHADER_GEOMETRY:
1096 if (shader->is_gs_copy_shader)
1097 return "GS Copy Shader as VS";
1098 else
1099 return "Geometry Shader";
1100 case MESA_SHADER_FRAGMENT:
1101 return "Pixel Shader";
1102 case MESA_SHADER_COMPUTE:
1103 return "Compute Shader";
1104 default:
1105 return "Unknown Shader";
1106 }
1107 }
1108
si_shader_dump(struct si_screen * sscreen,struct si_shader * shader,struct pipe_debug_callback * debug,FILE * file,bool check_debug_option)1109 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
1110 struct pipe_debug_callback *debug, FILE *file, bool check_debug_option)
1111 {
1112 gl_shader_stage stage = shader->selector->info.stage;
1113
1114 if (!check_debug_option || si_can_dump_shader(sscreen, stage))
1115 si_dump_shader_key(shader, file);
1116
1117 if (!check_debug_option && shader->binary.llvm_ir_string) {
1118 if (shader->previous_stage && shader->previous_stage->binary.llvm_ir_string) {
1119 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n", si_get_shader_name(shader));
1120 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
1121 }
1122
1123 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n", si_get_shader_name(shader));
1124 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
1125 }
1126
1127 if (!check_debug_option ||
1128 (si_can_dump_shader(sscreen, stage) && !(sscreen->debug_flags & DBG(NO_ASM)))) {
1129 unsigned wave_size = si_get_shader_wave_size(shader);
1130
1131 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
1132
1133 if (shader->prolog)
1134 si_shader_dump_disassembly(sscreen, &shader->prolog->binary, stage, wave_size, debug,
1135 "prolog", file);
1136 if (shader->previous_stage)
1137 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary, stage,
1138 wave_size, debug, "previous stage", file);
1139 if (shader->prolog2)
1140 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary, stage, wave_size,
1141 debug, "prolog2", file);
1142
1143 si_shader_dump_disassembly(sscreen, &shader->binary, stage, wave_size, debug, "main",
1144 file);
1145
1146 if (shader->epilog)
1147 si_shader_dump_disassembly(sscreen, &shader->epilog->binary, stage, wave_size, debug,
1148 "epilog", file);
1149 fprintf(file, "\n");
1150 }
1151
1152 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
1153 }
1154
si_dump_shader_key_vs(const struct si_shader_key * key,const struct si_vs_prolog_bits * prolog,const char * prefix,FILE * f)1155 static void si_dump_shader_key_vs(const struct si_shader_key *key,
1156 const struct si_vs_prolog_bits *prolog, const char *prefix,
1157 FILE *f)
1158 {
1159 fprintf(f, " %s.instance_divisor_is_one = %u\n", prefix, prolog->instance_divisor_is_one);
1160 fprintf(f, " %s.instance_divisor_is_fetched = %u\n", prefix,
1161 prolog->instance_divisor_is_fetched);
1162 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n", prefix,
1163 prolog->unpack_instance_id_from_vertex_id);
1164 fprintf(f, " %s.ls_vgpr_fix = %u\n", prefix, prolog->ls_vgpr_fix);
1165
1166 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
1167 fprintf(f, " mono.vs.fix_fetch = {");
1168 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
1169 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
1170 if (i)
1171 fprintf(f, ", ");
1172 if (!fix.bits)
1173 fprintf(f, "0");
1174 else
1175 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size, fix.u.num_channels_m1,
1176 fix.u.format);
1177 }
1178 fprintf(f, "}\n");
1179 }
1180
si_dump_shader_key(const struct si_shader * shader,FILE * f)1181 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
1182 {
1183 const struct si_shader_key *key = &shader->key;
1184 gl_shader_stage stage = shader->selector->info.stage;
1185
1186 fprintf(f, "SHADER KEY\n");
1187
1188 switch (stage) {
1189 case MESA_SHADER_VERTEX:
1190 si_dump_shader_key_vs(key, &key->part.vs.prolog, "part.vs.prolog", f);
1191 fprintf(f, " as_es = %u\n", key->as_es);
1192 fprintf(f, " as_ls = %u\n", key->as_ls);
1193 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1194 fprintf(f, " mono.u.vs_export_prim_id = %u\n", key->mono.u.vs_export_prim_id);
1195 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n", key->opt.vs_as_prim_discard_cs);
1196 fprintf(f, " opt.cs_prim_type = %s\n", tgsi_primitive_names[key->opt.cs_prim_type]);
1197 fprintf(f, " opt.cs_indexed = %u\n", key->opt.cs_indexed);
1198 fprintf(f, " opt.cs_instancing = %u\n", key->opt.cs_instancing);
1199 fprintf(f, " opt.cs_primitive_restart = %u\n", key->opt.cs_primitive_restart);
1200 fprintf(f, " opt.cs_provoking_vertex_first = %u\n", key->opt.cs_provoking_vertex_first);
1201 fprintf(f, " opt.cs_need_correct_orientation = %u\n", key->opt.cs_need_correct_orientation);
1202 fprintf(f, " opt.cs_cull_front = %u\n", key->opt.cs_cull_front);
1203 fprintf(f, " opt.cs_cull_back = %u\n", key->opt.cs_cull_back);
1204 fprintf(f, " opt.cs_cull_z = %u\n", key->opt.cs_cull_z);
1205 fprintf(f, " opt.cs_halfz_clip_space = %u\n", key->opt.cs_halfz_clip_space);
1206 break;
1207
1208 case MESA_SHADER_TESS_CTRL:
1209 if (shader->selector->screen->info.chip_class >= GFX9) {
1210 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog, "part.tcs.ls_prolog", f);
1211 }
1212 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
1213 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%" PRIx64 "\n",
1214 key->mono.u.ff_tcs_inputs_to_copy);
1215 break;
1216
1217 case MESA_SHADER_TESS_EVAL:
1218 fprintf(f, " as_es = %u\n", key->as_es);
1219 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1220 fprintf(f, " mono.u.vs_export_prim_id = %u\n", key->mono.u.vs_export_prim_id);
1221 break;
1222
1223 case MESA_SHADER_GEOMETRY:
1224 if (shader->is_gs_copy_shader)
1225 break;
1226
1227 if (shader->selector->screen->info.chip_class >= GFX9 &&
1228 key->part.gs.es->info.stage == MESA_SHADER_VERTEX) {
1229 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog, "part.gs.vs_prolog", f);
1230 }
1231 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n",
1232 key->part.gs.prolog.tri_strip_adj_fix);
1233 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
1234 fprintf(f, " as_ngg = %u\n", key->as_ngg);
1235 break;
1236
1237 case MESA_SHADER_COMPUTE:
1238 break;
1239
1240 case MESA_SHADER_FRAGMENT:
1241 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
1242 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
1243 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
1244 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n",
1245 key->part.ps.prolog.force_persp_sample_interp);
1246 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n",
1247 key->part.ps.prolog.force_linear_sample_interp);
1248 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n",
1249 key->part.ps.prolog.force_persp_center_interp);
1250 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n",
1251 key->part.ps.prolog.force_linear_center_interp);
1252 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n",
1253 key->part.ps.prolog.bc_optimize_for_persp);
1254 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n",
1255 key->part.ps.prolog.bc_optimize_for_linear);
1256 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n",
1257 key->part.ps.prolog.samplemask_log_ps_iter);
1258 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n",
1259 key->part.ps.epilog.spi_shader_col_format);
1260 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
1261 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
1262 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
1263 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
1264 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
1265 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n",
1266 key->part.ps.epilog.poly_line_smoothing);
1267 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
1268 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n",
1269 key->mono.u.ps.interpolate_at_sample_force_center);
1270 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
1271 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
1272 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
1273 break;
1274
1275 default:
1276 assert(0);
1277 }
1278
1279 if ((stage == MESA_SHADER_GEOMETRY || stage == MESA_SHADER_TESS_EVAL ||
1280 stage == MESA_SHADER_VERTEX) &&
1281 !key->as_es && !key->as_ls) {
1282 fprintf(f, " opt.kill_outputs = 0x%" PRIx64 "\n", key->opt.kill_outputs);
1283 fprintf(f, " opt.kill_clip_distances = 0x%x\n", key->opt.kill_clip_distances);
1284 if (stage != MESA_SHADER_GEOMETRY)
1285 fprintf(f, " opt.ngg_culling = 0x%x\n", key->opt.ngg_culling);
1286 }
1287 }
1288
si_optimize_vs_outputs(struct si_shader_context * ctx)1289 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
1290 {
1291 struct si_shader *shader = ctx->shader;
1292 struct si_shader_info *info = &shader->selector->info;
1293 unsigned skip_vs_optim_mask = 0;
1294
1295 if ((ctx->stage != MESA_SHADER_VERTEX && ctx->stage != MESA_SHADER_TESS_EVAL) ||
1296 shader->key.as_ls || shader->key.as_es)
1297 return;
1298
1299 /* Optimizing these outputs is not possible, since they might be overriden
1300 * at runtime with S_028644_PT_SPRITE_TEX. */
1301 for (int i = 0; i < info->num_outputs; i++) {
1302 if (info->output_semantic[i] == VARYING_SLOT_PNTC ||
1303 (info->output_semantic[i] >= VARYING_SLOT_TEX0 &&
1304 info->output_semantic[i] <= VARYING_SLOT_TEX7)) {
1305 skip_vs_optim_mask |= 1u << shader->info.vs_output_param_offset[i];
1306 }
1307 }
1308
1309 ac_optimize_vs_outputs(&ctx->ac, ctx->main_fn, shader->info.vs_output_param_offset,
1310 info->num_outputs, skip_vs_optim_mask,
1311 &shader->info.nr_param_exports);
1312 }
1313
si_vs_needs_prolog(const struct si_shader_selector * sel,const struct si_vs_prolog_bits * prolog_key,const struct si_shader_key * key,bool ngg_cull_shader)1314 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
1315 const struct si_vs_prolog_bits *prolog_key,
1316 const struct si_shader_key *key, bool ngg_cull_shader)
1317 {
1318 /* VGPR initialization fixup for Vega10 and Raven is always done in the
1319 * VS prolog. */
1320 return sel->vs_needs_prolog || prolog_key->ls_vgpr_fix ||
1321 prolog_key->unpack_instance_id_from_vertex_id ||
1322 (ngg_cull_shader && key->opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL);
1323 }
1324
si_build_main_function(struct si_shader_context * ctx,struct si_shader * shader,struct nir_shader * nir,bool free_nir,bool ngg_cull_shader)1325 static bool si_build_main_function(struct si_shader_context *ctx, struct si_shader *shader,
1326 struct nir_shader *nir, bool free_nir, bool ngg_cull_shader)
1327 {
1328 struct si_shader_selector *sel = shader->selector;
1329 const struct si_shader_info *info = &sel->info;
1330
1331 ctx->shader = shader;
1332 ctx->stage = sel->info.stage;
1333
1334 ctx->num_const_buffers = info->base.num_ubos;
1335 ctx->num_shader_buffers = info->base.num_ssbos;
1336
1337 ctx->num_samplers = util_last_bit(info->base.textures_used);
1338 ctx->num_images = info->base.num_images;
1339
1340 si_llvm_init_resource_callbacks(ctx);
1341
1342 switch (ctx->stage) {
1343 case MESA_SHADER_VERTEX:
1344 si_llvm_init_vs_callbacks(ctx, ngg_cull_shader);
1345 break;
1346 case MESA_SHADER_TESS_CTRL:
1347 si_llvm_init_tcs_callbacks(ctx);
1348 break;
1349 case MESA_SHADER_TESS_EVAL:
1350 si_llvm_init_tes_callbacks(ctx, ngg_cull_shader);
1351 break;
1352 case MESA_SHADER_GEOMETRY:
1353 si_llvm_init_gs_callbacks(ctx);
1354 break;
1355 case MESA_SHADER_FRAGMENT:
1356 si_llvm_init_ps_callbacks(ctx);
1357 break;
1358 case MESA_SHADER_COMPUTE:
1359 ctx->abi.load_local_group_size = si_llvm_get_block_size;
1360 break;
1361 default:
1362 assert(!"Unsupported shader type");
1363 return false;
1364 }
1365
1366 si_create_function(ctx, ngg_cull_shader);
1367
1368 if (ctx->shader->key.as_es || ctx->stage == MESA_SHADER_GEOMETRY)
1369 si_preload_esgs_ring(ctx);
1370
1371 if (ctx->stage == MESA_SHADER_GEOMETRY)
1372 si_preload_gs_rings(ctx);
1373 else if (ctx->stage == MESA_SHADER_TESS_EVAL)
1374 si_llvm_preload_tes_rings(ctx);
1375
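   /* When every TCS invocation writes the tess factors, keep invocation 0's
    * values in these allocas so they can be written out just once per patch
    * later on. */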
1376 if (ctx->stage == MESA_SHADER_TESS_CTRL && sel->info.tessfactors_are_def_in_all_invocs) {
1377 for (unsigned i = 0; i < 6; i++) {
1378 ctx->invoc0_tess_factors[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
1379 }
1380 }
1381
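   /* Per-stream GS state: gs_next_vertex counts the vertices emitted so far
    * for each of the 4 streams; the NGG-only counters below track the vertices
    * of the current primitive and the number of generated primitives per stream. */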
1382 if (ctx->stage == MESA_SHADER_GEOMETRY) {
1383 for (unsigned i = 0; i < 4; i++) {
1384 ctx->gs_next_vertex[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1385 }
1386 if (shader->key.as_ngg) {
1387 for (unsigned i = 0; i < 4; ++i) {
1388 ctx->gs_curprim_verts[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1389 ctx->gs_generated_prims[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
1390 }
1391
1392 assert(!ctx->gs_ngg_scratch);
1393 LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, gfx10_ngg_get_scratch_dw_size(shader));
1394 ctx->gs_ngg_scratch =
1395 LLVMAddGlobalInAddressSpace(ctx->ac.module, ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1396 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
1397 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1398
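         /* LDS staging area for emitted GS vertices in NGG mode; declared with a
          * zero-length type and external linkage so its size can be provided
          * when the LDS layout is finalized. */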
1399 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(
1400 ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
1401 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
1402 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
1403 }
1404 }
1405
1406 if (ctx->stage != MESA_SHADER_GEOMETRY && (shader->key.as_ngg && !shader->key.as_es)) {
1407 /* Unconditionally declare scratch space base for streamout and
1408 * vertex compaction. Whether space is actually allocated is
1409 * determined during linking / PM4 creation.
1410 *
1411 * Add an extra dword per vertex to ensure an odd stride, which
1412 * avoids bank conflicts for SoA accesses.
1413 */
1414 if (!gfx10_is_ngg_passthrough(shader))
1415 si_llvm_declare_esgs_ring(ctx);
1416
1417       /* This is really only needed when streamout and/or vertex
1418 * compaction is enabled.
1419 */
1420 if (!ctx->gs_ngg_scratch && (sel->so.num_outputs || shader->key.opt.ngg_culling)) {
1421 LLVMTypeRef asi32 = LLVMArrayType(ctx->ac.i32, gfx10_ngg_get_scratch_dw_size(shader));
1422 ctx->gs_ngg_scratch =
1423 LLVMAddGlobalInAddressSpace(ctx->ac.module, asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
1424 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
1425 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
1426 }
1427 }
1428
1429 /* For GFX9 merged shaders:
1430 * - Set EXEC for the first shader. If the prolog is present, set
1431 * EXEC there instead.
1432 * - Add a barrier before the second shader.
1433 * - In the second shader, reset EXEC to ~0 and wrap the main part in
1434 * an if-statement. This is required for correctness in geometry
1435 * shaders, to ensure that empty GS waves do not send GS_EMIT and
1436 * GS_CUT messages.
1437 *
1438 * For monolithic merged shaders, the first shader is wrapped in an
1439 * if-block together with its prolog in si_build_wrapper_function.
1440 *
1441 * NGG vertex and tess eval shaders running as the last
1442 * vertex/geometry stage handle execution explicitly using
1443 * if-statements.
1444 */
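   /* Illustrative shape of the generated code for a non-monolithic second half
    * of a merged shader (see the comment above):
    *
    *    EXEC = ~0
    *    if (thread_enabled) {
    *       optional s_barrier
    *       ...main shader part...
    *    }
    *    return
    */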
1445 if (ctx->screen->info.chip_class >= GFX9) {
1446 if (!shader->is_monolithic && (shader->key.as_es || shader->key.as_ls) &&
1447 (ctx->stage == MESA_SHADER_TESS_EVAL ||
1448 (ctx->stage == MESA_SHADER_VERTEX &&
1449 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, ngg_cull_shader)))) {
1450 si_init_exec_from_input(ctx, ctx->merged_wave_info, 0);
1451 } else if (ctx->stage == MESA_SHADER_TESS_CTRL || ctx->stage == MESA_SHADER_GEOMETRY ||
1452 (shader->key.as_ngg && !shader->key.as_es)) {
1453 LLVMValueRef thread_enabled;
1454 bool nested_barrier;
1455
1456 if (!shader->is_monolithic || (ctx->stage == MESA_SHADER_TESS_EVAL && shader->key.as_ngg &&
1457 !shader->key.as_es && !shader->key.opt.ngg_culling))
1458 ac_init_exec_full_mask(&ctx->ac);
1459
1460 if ((ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL) &&
1461 shader->key.as_ngg && !shader->key.as_es && !shader->key.opt.ngg_culling) {
1462 gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);
1463
1464 /* Build the primitive export at the beginning
1465 * of the shader if possible.
1466 */
1467 if (gfx10_ngg_export_prim_early(shader))
1468 gfx10_ngg_build_export_prim(ctx, NULL, NULL);
1469 }
1470
1471 if (ctx->stage == MESA_SHADER_TESS_CTRL || ctx->stage == MESA_SHADER_GEOMETRY) {
1472 if (ctx->stage == MESA_SHADER_GEOMETRY && shader->key.as_ngg) {
1473 gfx10_ngg_gs_emit_prologue(ctx);
1474 nested_barrier = false;
1475 } else {
1476 nested_barrier = true;
1477 }
1478
1479 thread_enabled = si_is_gs_thread(ctx);
1480 } else {
1481 thread_enabled = si_is_es_thread(ctx);
1482 nested_barrier = false;
1483 }
1484
1485 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
1486 ctx->merged_wrap_if_label = 11500;
1487 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
1488
1489 if (nested_barrier) {
1490 /* Execute a barrier before the second shader in
1491 * a merged shader.
1492 *
1493 * Execute the barrier inside the conditional block,
1494 * so that empty waves can jump directly to s_endpgm,
1495 * which will also signal the barrier.
1496 *
1497 * This is possible in gfx9, because an empty wave
1498 * for the second shader does not participate in
1499 * the epilogue. With NGG, empty waves may still
1500 * be required to export data (e.g. GS output vertices),
1501 * so we cannot let them exit early.
1502 *
1503 * If the shader is TCS and the TCS epilog is present
1504 * and contains a barrier, it will wait there and then
1505 * reach s_endpgm.
1506 */
1507 si_llvm_emit_barrier(ctx);
1508 }
1509 }
1510 }
1511
1512 bool success = si_nir_build_llvm(ctx, nir);
1513 if (free_nir)
1514 ralloc_free(nir);
1515 if (!success) {
1516 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
1517 return false;
1518 }
1519
1520 si_llvm_build_ret(ctx, ctx->return_value);
1521 return true;
1522 }
1523
1524 /**
1525 * Compute the VS prolog key, which contains all the information needed to
1526 * build the VS prolog function, and set shader->info bits where needed.
1527 *
1528 * \param info Shader info of the vertex shader.
1529 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
1530  * \param ngg_cull_shader  Whether the prolog is built for the NGG cull shader part.
1531 * \param prolog_key Key of the VS prolog
1532 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
1533 * \param key Output shader part key.
1534 */
1535 static void si_get_vs_prolog_key(const struct si_shader_info *info, unsigned num_input_sgprs,
1536 bool ngg_cull_shader, const struct si_vs_prolog_bits *prolog_key,
1537 struct si_shader *shader_out, union si_shader_part_key *key)
1538 {
1539 memset(key, 0, sizeof(*key));
1540 key->vs_prolog.states = *prolog_key;
1541 key->vs_prolog.num_input_sgprs = num_input_sgprs;
1542 key->vs_prolog.num_inputs = info->num_inputs;
1543 key->vs_prolog.as_ls = shader_out->key.as_ls;
1544 key->vs_prolog.as_es = shader_out->key.as_es;
1545 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
1546 key->vs_prolog.as_prim_discard_cs = shader_out->key.opt.vs_as_prim_discard_cs;
1547
1548 if (ngg_cull_shader) {
1549 key->vs_prolog.gs_fast_launch_tri_list =
1550 !!(shader_out->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST);
1551 key->vs_prolog.gs_fast_launch_tri_strip =
1552 !!(shader_out->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP);
1553 }
1554
1555 if (shader_out->selector->info.stage == MESA_SHADER_TESS_CTRL) {
1556 key->vs_prolog.as_ls = 1;
1557 key->vs_prolog.num_merged_next_stage_vgprs = 2;
1558 } else if (shader_out->selector->info.stage == MESA_SHADER_GEOMETRY) {
1559 key->vs_prolog.as_es = 1;
1560 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1561 } else if (shader_out->key.as_ngg) {
1562 key->vs_prolog.num_merged_next_stage_vgprs = 5;
1563 }
1564
1565 /* Only one of these combinations can be set. as_ngg can be set with as_es. */
1566 assert(key->vs_prolog.as_ls + key->vs_prolog.as_ngg +
1567 (key->vs_prolog.as_es && !key->vs_prolog.as_ngg) + key->vs_prolog.as_prim_discard_cs <=
1568 1);
1569
1570 /* Enable loading the InstanceID VGPR. */
1571 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
1572
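   /* If any enabled vertex input uses an instance divisor (a constant 1 or one
    * fetched from a buffer), the prolog needs InstanceID to compute the fetch
    * index, so record that the shader uses it. */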
1573 if ((key->vs_prolog.states.instance_divisor_is_one |
1574 key->vs_prolog.states.instance_divisor_is_fetched) &
1575 input_mask)
1576 shader_out->info.uses_instanceid = true;
1577 }
1578
1579 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
1580 struct si_shader_selector *sel)
1581 {
1582 if (!compiler->low_opt_passes)
1583 return false;
1584
1585 /* Assume a slow CPU. */
1586 assert(!sel->screen->info.has_dedicated_vram && sel->screen->info.chip_class <= GFX8);
1587
1588    /* Use fewer optimization passes for a pathological dEQP test containing
1589     * 2597 memory opcodes, mostly buffer stores. */
1590 return sel->info.stage == MESA_SHADER_COMPUTE && sel->info.num_memory_stores > 1000;
1591 }
1592
1593 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
1594 const struct si_shader_key *key,
1595 bool *free_nir)
1596 {
1597 nir_shader *nir;
1598 *free_nir = false;
1599
1600 if (sel->nir) {
1601 nir = sel->nir;
1602 } else if (sel->nir_binary) {
1603 struct pipe_screen *screen = &sel->screen->b;
1604 const void *options = screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
1605 pipe_shader_type_from_mesa(sel->info.stage));
1606
1607 struct blob_reader blob_reader;
1608 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
1609 *free_nir = true;
1610 nir = nir_deserialize(NULL, options, &blob_reader);
1611 } else {
1612 return NULL;
1613 }
1614
1615 if (key && key->opt.inline_uniforms) {
1616 assert(*free_nir);
1617
1618       /* Most places use shader information from the default variant, not
1619        * the optimized variant. The lists below describe what the driver looks at
1620        * in optimized variants and what still remains to be done.
1621 *
1622 * The driver takes into account these things if they suddenly disappear
1623 * from the shader code:
1624 * - Register usage and code size decrease (obvious)
1625 * - Eliminated PS system values are disabled by LLVM
1626 * (FragCoord, FrontFace, barycentrics)
1627 * - VS/TES/GS outputs feeding PS are eliminated if outputs are undef.
1628 * (thanks to an LLVM pass in Mesa - TODO: move it to NIR)
1629 * The storage for eliminated outputs is also not allocated.
1630 * - VS/TCS/TES/GS/PS input loads are eliminated (VS relies on DCE in LLVM)
1631 * - TCS output stores are eliminated
1632 *
1633 * TODO: These are things the driver ignores in the final shader code
1634 * and relies on the default shader info.
1635 * - Other system values are not eliminated
1636 * - PS.NUM_INTERP = bitcount64(inputs_read), renumber inputs
1637 * to remove holes
1638 * - uses_discard - if it changed to false
1639 * - writes_memory - if it changed to false
1640 * - VS->TCS, VS->GS, TES->GS output stores for the former stage are not
1641 * eliminated
1642 * - Eliminated VS/TCS/TES outputs are still allocated. (except when feeding PS)
1643 * GS outputs are eliminated except for the temporary LDS.
1644 * Clip distances, gl_PointSize, and PS outputs are eliminated based
1645 * on current states, so we don't care about the shader code.
1646 *
1647 * TODO: Merged shaders don't inline uniforms for the first stage.
1648 * VS-GS: only GS inlines uniforms; VS-TCS: only TCS; TES-GS: only GS.
1649 * (key == NULL for the first stage here)
1650 *
1651 * TODO: Compute shaders don't support inlinable uniforms, because they
1652 * don't have shader variants.
1653 *
1654 * TODO: The driver uses a linear search to find a shader variant. This
1655 * can be really slow if we get too many variants due to uniform inlining.
1656 */
1657 NIR_PASS_V(nir, nir_inline_uniforms,
1658 nir->info.num_inlinable_uniforms,
1659 key->opt.inlined_uniform_values,
1660 nir->info.inlinable_uniform_dw_offsets);
1661
1662 si_nir_opts(sel->screen, nir, true);
1663
1664 /* This must be done again. */
1665 NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in |
1666 nir_var_shader_out);
1667 }
1668
1669 return nir;
1670 }
1671
1672 static bool si_llvm_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
1673 struct si_shader *shader, struct pipe_debug_callback *debug,
1674 struct nir_shader *nir, bool free_nir)
1675 {
1676 struct si_shader_selector *sel = shader->selector;
1677 struct si_shader_context ctx;
1678
1679 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
1680
1681 LLVMValueRef ngg_cull_main_fn = NULL;
1682 if (shader->key.opt.ngg_culling) {
1683 if (!si_build_main_function(&ctx, shader, nir, false, true)) {
1684 si_llvm_dispose(&ctx);
1685 return false;
1686 }
1687 ngg_cull_main_fn = ctx.main_fn;
1688 ctx.main_fn = NULL;
1689 }
1690
1691 if (!si_build_main_function(&ctx, shader, nir, free_nir, false)) {
1692 si_llvm_dispose(&ctx);
1693 return false;
1694 }
1695
1696 if (shader->is_monolithic && ctx.stage == MESA_SHADER_VERTEX) {
1697 LLVMValueRef parts[4];
1698 unsigned num_parts = 0;
1699 bool has_prolog = false;
1700 LLVMValueRef main_fn = ctx.main_fn;
1701
1702 if (ngg_cull_main_fn) {
1703 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, true)) {
1704 union si_shader_part_key prolog_key;
1705 si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, true,
1706 &shader->key.part.vs.prolog, shader, &prolog_key);
1707 prolog_key.vs_prolog.is_monolithic = true;
1708 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1709 parts[num_parts++] = ctx.main_fn;
1710 has_prolog = true;
1711 }
1712 parts[num_parts++] = ngg_cull_main_fn;
1713 }
1714
1715 if (si_vs_needs_prolog(sel, &shader->key.part.vs.prolog, &shader->key, false)) {
1716 union si_shader_part_key prolog_key;
1717 si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, false,
1718 &shader->key.part.vs.prolog, shader, &prolog_key);
1719 prolog_key.vs_prolog.is_monolithic = true;
1720 si_llvm_build_vs_prolog(&ctx, &prolog_key);
1721 parts[num_parts++] = ctx.main_fn;
1722 has_prolog = true;
1723 }
1724 parts[num_parts++] = main_fn;
1725
1726 si_build_wrapper_function(&ctx, parts, num_parts, has_prolog ? 1 : 0, 0);
1727
1728 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
1729 si_build_prim_discard_compute_shader(&ctx);
1730 } else if (shader->is_monolithic && ctx.stage == MESA_SHADER_TESS_EVAL && ngg_cull_main_fn) {
1731 LLVMValueRef parts[2];
1732
1733 parts[0] = ngg_cull_main_fn;
1734 parts[1] = ctx.main_fn;
1735
1736 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1737 } else if (shader->is_monolithic && ctx.stage == MESA_SHADER_TESS_CTRL) {
1738 if (sscreen->info.chip_class >= GFX9) {
1739 struct si_shader_selector *ls = shader->key.part.tcs.ls;
1740 LLVMValueRef parts[4];
1741 bool vs_needs_prolog =
1742 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog, &shader->key, false);
1743
1744 /* TCS main part */
1745 parts[2] = ctx.main_fn;
1746
1747 /* TCS epilog */
1748 union si_shader_part_key tcs_epilog_key;
1749 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
1750 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1751 si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
1752 parts[3] = ctx.main_fn;
1753
1754 /* VS as LS main part */
1755 nir = get_nir_shader(ls, NULL, &free_nir);
1756 struct si_shader shader_ls = {};
1757 shader_ls.selector = ls;
1758 shader_ls.key.as_ls = 1;
1759 shader_ls.key.mono = shader->key.mono;
1760 shader_ls.key.opt = shader->key.opt;
1761 shader_ls.is_monolithic = true;
1762
1763 if (!si_build_main_function(&ctx, &shader_ls, nir, free_nir, false)) {
1764 si_llvm_dispose(&ctx);
1765 return false;
1766 }
1767 shader->info.uses_instanceid |= ls->info.uses_instanceid;
1768 parts[1] = ctx.main_fn;
1769
1770 /* LS prolog */
1771 if (vs_needs_prolog) {
1772 union si_shader_part_key vs_prolog_key;
1773 si_get_vs_prolog_key(&ls->info, shader_ls.info.num_input_sgprs, false,
1774 &shader->key.part.tcs.ls_prolog, shader, &vs_prolog_key);
1775 vs_prolog_key.vs_prolog.is_monolithic = true;
1776 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1777 parts[0] = ctx.main_fn;
1778 }
1779
1780 /* Reset the shader context. */
1781 ctx.shader = shader;
1782 ctx.stage = MESA_SHADER_TESS_CTRL;
1783
1784 si_build_wrapper_function(&ctx, parts + !vs_needs_prolog, 4 - !vs_needs_prolog,
1785 vs_needs_prolog, vs_needs_prolog ? 2 : 1);
1786 } else {
1787 LLVMValueRef parts[2];
1788 union si_shader_part_key epilog_key;
1789
1790 parts[0] = ctx.main_fn;
1791
1792 memset(&epilog_key, 0, sizeof(epilog_key));
1793 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
1794 si_llvm_build_tcs_epilog(&ctx, &epilog_key);
1795 parts[1] = ctx.main_fn;
1796
1797 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
1798 }
1799 } else if (shader->is_monolithic && ctx.stage == MESA_SHADER_GEOMETRY) {
1800 if (ctx.screen->info.chip_class >= GFX9) {
1801 struct si_shader_selector *es = shader->key.part.gs.es;
1802 LLVMValueRef es_prolog = NULL;
1803 LLVMValueRef es_main = NULL;
1804 LLVMValueRef gs_prolog = NULL;
1805 LLVMValueRef gs_main = ctx.main_fn;
1806
1807 /* GS prolog */
1808 union si_shader_part_key gs_prolog_key;
1809 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
1810 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1811 gs_prolog_key.gs_prolog.is_monolithic = true;
1812 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
1813 si_llvm_build_gs_prolog(&ctx, &gs_prolog_key);
1814 gs_prolog = ctx.main_fn;
1815
1816 /* ES main part */
1817 nir = get_nir_shader(es, NULL, &free_nir);
1818 struct si_shader shader_es = {};
1819 shader_es.selector = es;
1820 shader_es.key.as_es = 1;
1821 shader_es.key.as_ngg = shader->key.as_ngg;
1822 shader_es.key.mono = shader->key.mono;
1823 shader_es.key.opt = shader->key.opt;
1824 shader_es.is_monolithic = true;
1825
1826 if (!si_build_main_function(&ctx, &shader_es, nir, free_nir, false)) {
1827 si_llvm_dispose(&ctx);
1828 return false;
1829 }
1830 shader->info.uses_instanceid |= es->info.uses_instanceid;
1831 es_main = ctx.main_fn;
1832
1833 /* ES prolog */
1834 if (es->info.stage == MESA_SHADER_VERTEX &&
1835 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog, &shader->key, false)) {
1836 union si_shader_part_key vs_prolog_key;
1837 si_get_vs_prolog_key(&es->info, shader_es.info.num_input_sgprs, false,
1838 &shader->key.part.gs.vs_prolog, shader, &vs_prolog_key);
1839 vs_prolog_key.vs_prolog.is_monolithic = true;
1840 si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
1841 es_prolog = ctx.main_fn;
1842 }
1843
1844 /* Reset the shader context. */
1845 ctx.shader = shader;
1846 ctx.stage = MESA_SHADER_GEOMETRY;
1847
1848 /* Prepare the array of shader parts. */
1849 LLVMValueRef parts[4];
1850 unsigned num_parts = 0, main_part, next_first_part;
1851
1852 if (es_prolog)
1853 parts[num_parts++] = es_prolog;
1854
1855 parts[main_part = num_parts++] = es_main;
1856 parts[next_first_part = num_parts++] = gs_prolog;
1857 parts[num_parts++] = gs_main;
1858
1859 si_build_wrapper_function(&ctx, parts, num_parts, main_part, next_first_part);
1860 } else {
1861 LLVMValueRef parts[2];
1862 union si_shader_part_key prolog_key;
1863
1864 parts[1] = ctx.main_fn;
1865
1866 memset(&prolog_key, 0, sizeof(prolog_key));
1867 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
1868 si_llvm_build_gs_prolog(&ctx, &prolog_key);
1869 parts[0] = ctx.main_fn;
1870
1871 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
1872 }
1873 } else if (shader->is_monolithic && ctx.stage == MESA_SHADER_FRAGMENT) {
1874 si_llvm_build_monolithic_ps(&ctx, shader);
1875 }
1876
1877 si_llvm_optimize_module(&ctx);
1878
1879 /* Post-optimization transformations and analysis. */
1880 si_optimize_vs_outputs(&ctx);
1881
1882 if ((debug && debug->debug_message) || si_can_dump_shader(sscreen, ctx.stage)) {
1883 ctx.shader->info.private_mem_vgprs = ac_count_scratch_private_memory(ctx.main_fn);
1884 }
1885
1886 /* Make sure the input is a pointer and not integer followed by inttoptr. */
1887 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) == LLVMPointerTypeKind);
1888
1889 /* Compile to bytecode. */
1890 if (!si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler, &ctx.ac, debug,
1891 ctx.stage, si_get_shader_name(shader),
1892 si_should_optimize_less(compiler, shader->selector))) {
1893 si_llvm_dispose(&ctx);
1894 fprintf(stderr, "LLVM failed to compile shader\n");
1895 return false;
1896 }
1897
1898 si_llvm_dispose(&ctx);
1899 return true;
1900 }
1901
1902 bool si_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
1903 struct si_shader *shader, struct pipe_debug_callback *debug)
1904 {
1905 struct si_shader_selector *sel = shader->selector;
1906 bool free_nir;
1907 struct nir_shader *nir = get_nir_shader(sel, &shader->key, &free_nir);
1908
1909 /* Dump NIR before doing NIR->LLVM conversion in case the
1910 * conversion fails. */
1911 if (si_can_dump_shader(sscreen, sel->info.stage) &&
1912 !(sscreen->debug_flags & DBG(NO_NIR))) {
1913 nir_print_shader(nir, stderr);
1914 si_dump_streamout(&sel->so);
1915 }
1916
1917 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
1918 sizeof(shader->info.vs_output_param_offset));
1919
1920 shader->info.uses_instanceid = sel->info.uses_instanceid;
1921
1922 /* TODO: ACO could compile non-monolithic shaders here (starting
1923 * with PS and NGG VS), but monolithic shaders should be compiled
1924 * by LLVM due to more complicated compilation.
1925 */
1926 if (!si_llvm_compile_shader(sscreen, compiler, shader, debug, nir, free_nir))
1927 return false;
1928
1929    /* Validate SGPR and VGPR usage for compute shaders to detect compiler bugs.
1930     * LLVM 3.9svn had such a bug.
1931     */
1932 if (sel->info.stage == MESA_SHADER_COMPUTE) {
1933 unsigned wave_size = sscreen->compute_wave_size;
1934 unsigned max_vgprs =
1935 sscreen->info.num_physical_wave64_vgprs_per_simd * (wave_size == 32 ? 2 : 1);
1936 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
1937 unsigned max_sgprs_per_wave = 128;
1938 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
1939 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
1940 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
1941 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
1942
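      /* The physical register files are shared by all waves resident on a SIMD,
       * so the per-wave limits are the physical pools divided by the number of
       * waves this workgroup needs on each SIMD (SGPRs additionally capped per wave). */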
1943 max_vgprs = max_vgprs / waves_per_simd;
1944 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
1945
1946 if (shader->config.num_sgprs > max_sgprs || shader->config.num_vgprs > max_vgprs) {
1947 fprintf(stderr,
1948 "LLVM failed to compile a shader correctly: "
1949 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
1950 shader->config.num_sgprs, shader->config.num_vgprs, max_sgprs, max_vgprs);
1951
1952 /* Just terminate the process, because dependent
1953 * shaders can hang due to bad input data, but use
1954 * the env var to allow shader-db to work.
1955 */
1956 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
1957 abort();
1958 }
1959 }
1960
1961 /* Add the scratch offset to input SGPRs. */
1962 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(shader))
1963 shader->info.num_input_sgprs += 1; /* scratch byte offset */
1964
1965 /* Calculate the number of fragment input VGPRs. */
1966 if (sel->info.stage == MESA_SHADER_FRAGMENT) {
1967 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(
1968 &shader->config, &shader->info.face_vgpr_index, &shader->info.ancillary_vgpr_index);
1969 }
1970
1971 si_calculate_max_simd_waves(shader);
1972 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
1973 return true;
1974 }
1975
1976 /**
1977 * Create, compile and return a shader part (prolog or epilog).
1978 *
1979 * \param sscreen screen
1980 * \param list list of shader parts of the same category
1981  * \param stage	shader stage
1982  * \param key		shader part key
1983  * \param prolog	whether the part being requested is a prolog
1984  * \param compiler	LLVM compiler
1985  * \param debug	debug callback
1986  * \param build	the callback responsible for building the shader part
1987 * \return non-NULL on success
1988 */
1989 static struct si_shader_part *
1990 si_get_shader_part(struct si_screen *sscreen, struct si_shader_part **list,
1991 gl_shader_stage stage, bool prolog, union si_shader_part_key *key,
1992 struct ac_llvm_compiler *compiler, struct pipe_debug_callback *debug,
1993 void (*build)(struct si_shader_context *, union si_shader_part_key *),
1994 const char *name)
1995 {
1996 struct si_shader_part *result;
1997
1998 simple_mtx_lock(&sscreen->shader_parts_mutex);
1999
2000 /* Find existing. */
2001 for (result = *list; result; result = result->next) {
2002 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
2003 simple_mtx_unlock(&sscreen->shader_parts_mutex);
2004 return result;
2005 }
2006 }
2007
2008 /* Compile a new one. */
2009 result = CALLOC_STRUCT(si_shader_part);
2010 result->key = *key;
2011
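   /* Set up a dummy selector/shader pair so the part can be compiled standalone,
    * carrying only the key bits that the build callback cares about. */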
2012 struct si_shader_selector sel = {};
2013 sel.screen = sscreen;
2014
2015 struct si_shader shader = {};
2016 shader.selector = &sel;
2017
2018 switch (stage) {
2019 case MESA_SHADER_VERTEX:
2020 shader.key.as_ls = key->vs_prolog.as_ls;
2021 shader.key.as_es = key->vs_prolog.as_es;
2022 shader.key.as_ngg = key->vs_prolog.as_ngg;
2023 shader.key.opt.ngg_culling =
2024 (key->vs_prolog.gs_fast_launch_tri_list ? SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST : 0) |
2025 (key->vs_prolog.gs_fast_launch_tri_strip ? SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP : 0);
2026 shader.key.opt.vs_as_prim_discard_cs = key->vs_prolog.as_prim_discard_cs;
2027 break;
2028 case MESA_SHADER_TESS_CTRL:
2029 assert(!prolog);
2030 shader.key.part.tcs.epilog = key->tcs_epilog.states;
2031 break;
2032 case MESA_SHADER_GEOMETRY:
2033 assert(prolog);
2034 shader.key.as_ngg = key->gs_prolog.as_ngg;
2035 break;
2036 case MESA_SHADER_FRAGMENT:
2037 if (prolog)
2038 shader.key.part.ps.prolog = key->ps_prolog.states;
2039 else
2040 shader.key.part.ps.epilog = key->ps_epilog.states;
2041 break;
2042 default:
2043 unreachable("bad shader part");
2044 }
2045
2046 struct si_shader_context ctx;
2047 si_llvm_context_init(&ctx, sscreen, compiler,
2048 si_get_wave_size(sscreen, stage,
2049 shader.key.as_ngg, shader.key.as_es,
2050 shader.key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL,
2051 shader.key.opt.vs_as_prim_discard_cs));
2052 ctx.shader = &shader;
2053 ctx.stage = stage;
2054
2055 build(&ctx, key);
2056
2057 /* Compile. */
2058 si_llvm_optimize_module(&ctx);
2059
2060 if (!si_compile_llvm(sscreen, &result->binary, &result->config, compiler, &ctx.ac, debug,
2061 ctx.stage, name, false)) {
2062 FREE(result);
2063 result = NULL;
2064 goto out;
2065 }
2066
2067 result->next = *list;
2068 *list = result;
2069
2070 out:
2071 si_llvm_dispose(&ctx);
2072 simple_mtx_unlock(&sscreen->shader_parts_mutex);
2073 return result;
2074 }
2075
2076 static bool si_get_vs_prolog(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2077 struct si_shader *shader, struct pipe_debug_callback *debug,
2078 struct si_shader *main_part, const struct si_vs_prolog_bits *key)
2079 {
2080 struct si_shader_selector *vs = main_part->selector;
2081
2082 if (!si_vs_needs_prolog(vs, key, &shader->key, false))
2083 return true;
2084
2085 /* Get the prolog. */
2086 union si_shader_part_key prolog_key;
2087 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs, false, key, shader,
2088 &prolog_key);
2089
2090 shader->prolog =
2091 si_get_shader_part(sscreen, &sscreen->vs_prologs, MESA_SHADER_VERTEX, true, &prolog_key,
2092 compiler, debug, si_llvm_build_vs_prolog, "Vertex Shader Prolog");
2093 return shader->prolog != NULL;
2094 }
2095
2096 /**
2097 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
2098 */
2099 static bool si_shader_select_vs_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2100 struct si_shader *shader, struct pipe_debug_callback *debug)
2101 {
2102 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader, &shader->key.part.vs.prolog);
2103 }
2104
2105 /**
2106 * Select and compile (or reuse) TCS parts (epilog).
2107 */
2108 static bool si_shader_select_tcs_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2109 struct si_shader *shader, struct pipe_debug_callback *debug)
2110 {
2111 if (sscreen->info.chip_class >= GFX9) {
2112 struct si_shader *ls_main_part = shader->key.part.tcs.ls->main_shader_part_ls;
2113
2114 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
2115 &shader->key.part.tcs.ls_prolog))
2116 return false;
2117
2118 shader->previous_stage = ls_main_part;
2119 }
2120
2121 /* Get the epilog. */
2122 union si_shader_part_key epilog_key;
2123 memset(&epilog_key, 0, sizeof(epilog_key));
2124 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
2125
2126 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs, MESA_SHADER_TESS_CTRL, false,
2127 &epilog_key, compiler, debug, si_llvm_build_tcs_epilog,
2128 "Tessellation Control Shader Epilog");
2129 return shader->epilog != NULL;
2130 }
2131
2132 /**
2133 * Select and compile (or reuse) GS parts (prolog).
2134 */
2135 static bool si_shader_select_gs_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2136 struct si_shader *shader, struct pipe_debug_callback *debug)
2137 {
2138 if (sscreen->info.chip_class >= GFX9) {
2139 struct si_shader *es_main_part;
2140
2141 if (shader->key.as_ngg)
2142 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
2143 else
2144 es_main_part = shader->key.part.gs.es->main_shader_part_es;
2145
2146 if (shader->key.part.gs.es->info.stage == MESA_SHADER_VERTEX &&
2147 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
2148 &shader->key.part.gs.vs_prolog))
2149 return false;
2150
2151 shader->previous_stage = es_main_part;
2152 }
2153
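   /* The triangle-strip adjacency fix is currently the only state that requires
    * a separate GS prolog. */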
2154 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
2155 return true;
2156
2157 union si_shader_part_key prolog_key;
2158 memset(&prolog_key, 0, sizeof(prolog_key));
2159 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
2160 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
2161
2162 shader->prolog2 =
2163 si_get_shader_part(sscreen, &sscreen->gs_prologs, MESA_SHADER_GEOMETRY, true, &prolog_key,
2164 compiler, debug, si_llvm_build_gs_prolog, "Geometry Shader Prolog");
2165 return shader->prolog2 != NULL;
2166 }
2167
2168 /**
2169 * Compute the PS prolog key, which contains all the information needed to
2170 * build the PS prolog function, and set related bits in shader->config.
2171 */
2172 void si_get_ps_prolog_key(struct si_shader *shader, union si_shader_part_key *key,
2173 bool separate_prolog)
2174 {
2175 struct si_shader_info *info = &shader->selector->info;
2176
2177 memset(key, 0, sizeof(*key));
2178 key->ps_prolog.states = shader->key.part.ps.prolog;
2179 key->ps_prolog.colors_read = info->colors_read;
2180 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
2181 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
2182 key->ps_prolog.wqm =
2183 info->base.fs.needs_helper_invocations &&
2184 (key->ps_prolog.colors_read || key->ps_prolog.states.force_persp_sample_interp ||
2185 key->ps_prolog.states.force_linear_sample_interp ||
2186 key->ps_prolog.states.force_persp_center_interp ||
2187 key->ps_prolog.states.force_linear_center_interp ||
2188 key->ps_prolog.states.bc_optimize_for_persp || key->ps_prolog.states.bc_optimize_for_linear);
2189 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
2190
2191 if (info->colors_read) {
2192 ubyte *color = shader->selector->color_attr_index;
2193
2194 if (shader->key.part.ps.prolog.color_two_side) {
2195 /* BCOLORs are stored after the last input. */
2196 key->ps_prolog.num_interp_inputs = info->num_inputs;
2197 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
2198 if (separate_prolog)
2199 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
2200 }
2201
2202 for (unsigned i = 0; i < 2; i++) {
2203 unsigned interp = info->color_interpolate[i];
2204 unsigned location = info->color_interpolate_loc[i];
2205
2206 if (!(info->colors_read & (0xf << i * 4)))
2207 continue;
2208
2209 key->ps_prolog.color_attr_index[i] = color[i];
2210
2211 if (shader->key.part.ps.prolog.flatshade_colors && interp == INTERP_MODE_COLOR)
2212 interp = INTERP_MODE_FLAT;
2213
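         /* color_interp_vgpr_index selects the first VGPR of the (i, j) barycentric
          * pair to use: the perspective sample/center/centroid pairs start at VGPRs
          * 0/2/4, and the linear pairs follow (their exact indices depend on whether
          * PERSP_PULL_MODEL is counted, hence the separate_prolog variants below). */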
2214 switch (interp) {
2215 case INTERP_MODE_FLAT:
2216 key->ps_prolog.color_interp_vgpr_index[i] = -1;
2217 break;
2218 case INTERP_MODE_SMOOTH:
2219 case INTERP_MODE_COLOR:
2220 /* Force the interpolation location for colors here. */
2221 if (shader->key.part.ps.prolog.force_persp_sample_interp)
2222 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2223 if (shader->key.part.ps.prolog.force_persp_center_interp)
2224 location = TGSI_INTERPOLATE_LOC_CENTER;
2225
2226 switch (location) {
2227 case TGSI_INTERPOLATE_LOC_SAMPLE:
2228 key->ps_prolog.color_interp_vgpr_index[i] = 0;
2229 if (separate_prolog) {
2230 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
2231 }
2232 break;
2233 case TGSI_INTERPOLATE_LOC_CENTER:
2234 key->ps_prolog.color_interp_vgpr_index[i] = 2;
2235 if (separate_prolog) {
2236 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2237 }
2238 break;
2239 case TGSI_INTERPOLATE_LOC_CENTROID:
2240 key->ps_prolog.color_interp_vgpr_index[i] = 4;
2241 if (separate_prolog) {
2242 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTROID_ENA(1);
2243 }
2244 break;
2245 default:
2246 assert(0);
2247 }
2248 break;
2249 case INTERP_MODE_NOPERSPECTIVE:
2250 /* Force the interpolation location for colors here. */
2251 if (shader->key.part.ps.prolog.force_linear_sample_interp)
2252 location = TGSI_INTERPOLATE_LOC_SAMPLE;
2253 if (shader->key.part.ps.prolog.force_linear_center_interp)
2254 location = TGSI_INTERPOLATE_LOC_CENTER;
2255
2256 /* The VGPR assignment for non-monolithic shaders
2257 * works because InitialPSInputAddr is set on the
2258 * main shader and PERSP_PULL_MODEL is never used.
2259 */
2260 switch (location) {
2261 case TGSI_INTERPOLATE_LOC_SAMPLE:
2262 key->ps_prolog.color_interp_vgpr_index[i] = separate_prolog ? 6 : 9;
2263 if (separate_prolog) {
2264 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
2265 }
2266 break;
2267 case TGSI_INTERPOLATE_LOC_CENTER:
2268 key->ps_prolog.color_interp_vgpr_index[i] = separate_prolog ? 8 : 11;
2269 if (separate_prolog) {
2270 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2271 }
2272 break;
2273 case TGSI_INTERPOLATE_LOC_CENTROID:
2274 key->ps_prolog.color_interp_vgpr_index[i] = separate_prolog ? 10 : 13;
2275 if (separate_prolog) {
2276 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTROID_ENA(1);
2277 }
2278 break;
2279 default:
2280 assert(0);
2281 }
2282 break;
2283 default:
2284 assert(0);
2285 }
2286 }
2287 }
2288 }
2289
2290 /**
2291 * Check whether a PS prolog is required based on the key.
2292 */
2293 bool si_need_ps_prolog(const union si_shader_part_key *key)
2294 {
2295 return key->ps_prolog.colors_read || key->ps_prolog.states.force_persp_sample_interp ||
2296 key->ps_prolog.states.force_linear_sample_interp ||
2297 key->ps_prolog.states.force_persp_center_interp ||
2298 key->ps_prolog.states.force_linear_center_interp ||
2299 key->ps_prolog.states.bc_optimize_for_persp ||
2300 key->ps_prolog.states.bc_optimize_for_linear || key->ps_prolog.states.poly_stipple ||
2301 key->ps_prolog.states.samplemask_log_ps_iter;
2302 }
2303
2304 /**
2305 * Compute the PS epilog key, which contains all the information needed to
2306 * build the PS epilog function.
2307 */
2308 void si_get_ps_epilog_key(struct si_shader *shader, union si_shader_part_key *key)
2309 {
2310 struct si_shader_info *info = &shader->selector->info;
2311 memset(key, 0, sizeof(*key));
2312 key->ps_epilog.colors_written = info->colors_written;
2313 key->ps_epilog.color_types = info->output_color_types;
2314 key->ps_epilog.writes_z = info->writes_z;
2315 key->ps_epilog.writes_stencil = info->writes_stencil;
2316 key->ps_epilog.writes_samplemask = info->writes_samplemask;
2317 key->ps_epilog.states = shader->key.part.ps.epilog;
2318 }
2319
2320 /**
2321 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
2322 */
2323 static bool si_shader_select_ps_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2324 struct si_shader *shader, struct pipe_debug_callback *debug)
2325 {
2326 union si_shader_part_key prolog_key;
2327 union si_shader_part_key epilog_key;
2328
2329 /* Get the prolog. */
2330 si_get_ps_prolog_key(shader, &prolog_key, true);
2331
2332 /* The prolog is a no-op if these aren't set. */
2333 if (si_need_ps_prolog(&prolog_key)) {
2334 shader->prolog =
2335 si_get_shader_part(sscreen, &sscreen->ps_prologs, MESA_SHADER_FRAGMENT, true, &prolog_key,
2336 compiler, debug, si_llvm_build_ps_prolog, "Fragment Shader Prolog");
2337 if (!shader->prolog)
2338 return false;
2339 }
2340
2341 /* Get the epilog. */
2342 si_get_ps_epilog_key(shader, &epilog_key);
2343
2344 shader->epilog =
2345 si_get_shader_part(sscreen, &sscreen->ps_epilogs, MESA_SHADER_FRAGMENT, false, &epilog_key,
2346 compiler, debug, si_llvm_build_ps_epilog, "Fragment Shader Epilog");
2347 if (!shader->epilog)
2348 return false;
2349
2350 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
2351 if (shader->key.part.ps.prolog.poly_stipple) {
2352 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
2353 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
2354 }
2355
2356 /* Set up the enable bits for per-sample shading if needed. */
2357 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
2358 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2359 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2360 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
2361 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2362 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
2363 }
2364 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
2365 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
2366 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2367 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
2368 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2369 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
2370 }
2371 if (shader->key.part.ps.prolog.force_persp_center_interp &&
2372 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2373 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2374 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
2375 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
2376 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2377 }
2378 if (shader->key.part.ps.prolog.force_linear_center_interp &&
2379 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
2380 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
2381 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
2382 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
2383 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2384 }
2385
2386    /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
2387 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
2388 !(shader->config.spi_ps_input_ena & 0xf)) {
2389 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
2390 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
2391 }
2392
2393 /* At least one pair of interpolation weights must be enabled. */
2394 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
2395 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
2396 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
2397 }
2398
2399 /* Samplemask fixup requires the sample ID. */
2400 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
2401 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
2402 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
2403 }
2404
2405 /* The sample mask input is always enabled, because the API shader always
2406 * passes it through to the epilog. Disable it here if it's unused.
2407 */
2408 if (!shader->key.part.ps.epilog.poly_line_smoothing && !shader->selector->info.reads_samplemask)
2409 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
2410
2411 return true;
2412 }
2413
2414 void si_multiwave_lds_size_workaround(struct si_screen *sscreen, unsigned *lds_size)
2415 {
2416 /* If tessellation is all offchip and on-chip GS isn't used, this
2417 * workaround is not needed.
2418 */
2419 return;
2420
2421 /* SPI barrier management bug:
2422 * Make sure we have at least 4k of LDS in use to avoid the bug.
2423 * It applies to workgroup sizes of more than one wavefront.
2424 */
2425 if (sscreen->info.family == CHIP_BONAIRE || sscreen->info.family == CHIP_KABINI)
2426 *lds_size = MAX2(*lds_size, 8);
2427 }
2428
2429 void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader)
2430 {
2431 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
2432
2433 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
2434
2435 if (shader->selector->info.stage == MESA_SHADER_COMPUTE &&
2436 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
2437 si_multiwave_lds_size_workaround(sscreen, &shader->config.lds_size);
2438 }
2439 }
2440
2441 bool si_create_shader_variant(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
2442 struct si_shader *shader, struct pipe_debug_callback *debug)
2443 {
2444 struct si_shader_selector *sel = shader->selector;
2445 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
2446
2447 /* LS, ES, VS are compiled on demand if the main part hasn't been
2448 * compiled for that stage.
2449 *
2450     * GS is compiled on demand if the main part hasn't been compiled
2451 * for the chosen NGG-ness.
2452 *
2453 * Vertex shaders are compiled on demand when a vertex fetch
2454 * workaround must be applied.
2455 */
2456 if (shader->is_monolithic) {
2457 /* Monolithic shader (compiled as a whole, has many variants,
2458 * may take a long time to compile).
2459 */
2460 if (!si_compile_shader(sscreen, compiler, shader, debug))
2461 return false;
2462 } else {
2463 /* The shader consists of several parts:
2464 *
2465 * - the middle part is the user shader, it has 1 variant only
2466 * and it was compiled during the creation of the shader
2467 * selector
2468 * - the prolog part is inserted at the beginning
2469 * - the epilog part is inserted at the end
2470 *
2471 * The prolog and epilog have many (but simple) variants.
2472 *
2473 * Starting with gfx9, geometry and tessellation control
2474 * shaders also contain the prolog and user shader parts of
2475 * the previous shader stage.
2476 */
2477
2478 if (!mainp)
2479 return false;
2480
2481 /* Copy the compiled shader data over. */
2482 shader->is_binary_shared = true;
2483 shader->binary = mainp->binary;
2484 shader->config = mainp->config;
2485 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
2486 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
2487 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
2488 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
2489 memcpy(shader->info.vs_output_param_offset, mainp->info.vs_output_param_offset,
2490 sizeof(mainp->info.vs_output_param_offset));
2491 shader->info.uses_instanceid = mainp->info.uses_instanceid;
2492 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
2493 shader->info.nr_param_exports = mainp->info.nr_param_exports;
2494
2495 /* Select prologs and/or epilogs. */
2496 switch (sel->info.stage) {
2497 case MESA_SHADER_VERTEX:
2498 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
2499 return false;
2500 break;
2501 case MESA_SHADER_TESS_CTRL:
2502 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
2503 return false;
2504 break;
2505 case MESA_SHADER_TESS_EVAL:
2506 break;
2507 case MESA_SHADER_GEOMETRY:
2508 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
2509 return false;
2510 break;
2511 case MESA_SHADER_FRAGMENT:
2512 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
2513 return false;
2514
2515 /* Make sure we have at least as many VGPRs as there
2516 * are allocated inputs.
2517 */
2518 shader->config.num_vgprs = MAX2(shader->config.num_vgprs, shader->info.num_input_vgprs);
2519 break;
2520 default:;
2521 }
2522
2523 /* Update SGPR and VGPR counts. */
2524 if (shader->prolog) {
2525 shader->config.num_sgprs =
2526 MAX2(shader->config.num_sgprs, shader->prolog->config.num_sgprs);
2527 shader->config.num_vgprs =
2528 MAX2(shader->config.num_vgprs, shader->prolog->config.num_vgprs);
2529 }
2530 if (shader->previous_stage) {
2531 shader->config.num_sgprs =
2532 MAX2(shader->config.num_sgprs, shader->previous_stage->config.num_sgprs);
2533 shader->config.num_vgprs =
2534 MAX2(shader->config.num_vgprs, shader->previous_stage->config.num_vgprs);
2535 shader->config.spilled_sgprs =
2536 MAX2(shader->config.spilled_sgprs, shader->previous_stage->config.spilled_sgprs);
2537 shader->config.spilled_vgprs =
2538 MAX2(shader->config.spilled_vgprs, shader->previous_stage->config.spilled_vgprs);
2539 shader->info.private_mem_vgprs =
2540 MAX2(shader->info.private_mem_vgprs, shader->previous_stage->info.private_mem_vgprs);
2541 shader->config.scratch_bytes_per_wave =
2542 MAX2(shader->config.scratch_bytes_per_wave,
2543 shader->previous_stage->config.scratch_bytes_per_wave);
2544 shader->info.uses_instanceid |= shader->previous_stage->info.uses_instanceid;
2545 }
2546 if (shader->prolog2) {
2547 shader->config.num_sgprs =
2548 MAX2(shader->config.num_sgprs, shader->prolog2->config.num_sgprs);
2549 shader->config.num_vgprs =
2550 MAX2(shader->config.num_vgprs, shader->prolog2->config.num_vgprs);
2551 }
2552 if (shader->epilog) {
2553 shader->config.num_sgprs =
2554 MAX2(shader->config.num_sgprs, shader->epilog->config.num_sgprs);
2555 shader->config.num_vgprs =
2556 MAX2(shader->config.num_vgprs, shader->epilog->config.num_vgprs);
2557 }
2558 si_calculate_max_simd_waves(shader);
2559 }
2560
2561 if (shader->key.as_ngg) {
2562 assert(!shader->key.as_es && !shader->key.as_ls);
2563 if (!gfx10_ngg_calculate_subgroup_info(shader)) {
2564 fprintf(stderr, "Failed to compute subgroup info\n");
2565 return false;
2566 }
2567 } else if (sscreen->info.chip_class >= GFX9 && sel->info.stage == MESA_SHADER_GEOMETRY) {
2568 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
2569 }
2570
2571 si_fix_resource_usage(sscreen, shader);
2572 si_shader_dump(sscreen, shader, debug, stderr, true);
2573
2574 /* Upload. */
2575 if (!si_shader_binary_upload(sscreen, shader, 0)) {
2576 fprintf(stderr, "LLVM failed to upload shader\n");
2577 return false;
2578 }
2579
2580 return true;
2581 }
2582
2583 void si_shader_binary_clean(struct si_shader_binary *binary)
2584 {
2585 free((void *)binary->elf_buffer);
2586 binary->elf_buffer = NULL;
2587
2588 free(binary->llvm_ir_string);
2589 binary->llvm_ir_string = NULL;
2590 }
2591
2592 void si_shader_destroy(struct si_shader *shader)
2593 {
2594 if (shader->scratch_bo)
2595 si_resource_reference(&shader->scratch_bo, NULL);
2596
2597 si_resource_reference(&shader->bo, NULL);
2598
2599 if (!shader->is_binary_shared)
2600 si_shader_binary_clean(&shader->binary);
2601
2602 free(shader->shader_log);
2603 }
2604