1 /*
2  * Copyright © 2016 Red Hat.
3  * Copyright © 2016 Bas Nieuwenhuizen
4  *
5  * based in part on anv driver which is:
6  * Copyright © 2015 Intel Corporation
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25  * IN THE SOFTWARE.
26  */
27 
28 #include "util/memstream.h"
29 #include "util/mesa-sha1.h"
30 #include "util/u_atomic.h"
31 #include "radv_debug.h"
32 #include "radv_private.h"
33 #include "radv_shader.h"
34 #include "radv_shader_helper.h"
35 #include "radv_shader_args.h"
36 #include "nir/nir.h"
37 #include "nir/nir_builder.h"
38 #include "spirv/nir_spirv.h"
39 
40 #include "sid.h"
41 #include "ac_binary.h"
42 #include "ac_llvm_util.h"
43 #include "ac_nir_to_llvm.h"
44 #include "ac_rtld.h"
45 #include "vk_format.h"
46 #include "util/debug.h"
47 #include "ac_exp_param.h"
48 
49 static const struct nir_shader_compiler_options nir_options = {
50 	.vertex_id_zero_based = true,
51 	.lower_scmp = true,
52 	.lower_flrp16 = true,
53 	.lower_flrp32 = true,
54 	.lower_flrp64 = true,
55 	.lower_device_index_to_zero = true,
56 	.lower_fdiv = true,
57 	.lower_fmod = true,
58 	.lower_bitfield_insert_to_bitfield_select = true,
59 	.lower_bitfield_extract = true,
60 	.lower_pack_snorm_2x16 = true,
61 	.lower_pack_snorm_4x8 = true,
62 	.lower_pack_unorm_2x16 = true,
63 	.lower_pack_unorm_4x8 = true,
64 	.lower_pack_half_2x16 = true,
65 	.lower_pack_64_2x32 = true,
66 	.lower_pack_64_4x16 = true,
67 	.lower_pack_32_2x16 = true,
68 	.lower_unpack_snorm_2x16 = true,
69 	.lower_unpack_snorm_4x8 = true,
70 	.lower_unpack_unorm_2x16 = true,
71 	.lower_unpack_unorm_4x8 = true,
72 	.lower_unpack_half_2x16 = true,
73 	.lower_extract_byte = true,
74 	.lower_extract_word = true,
75 	.lower_ffma16 = true,
76 	.lower_ffma32 = true,
77 	.lower_ffma64 = true,
78 	.lower_fpow = true,
79 	.lower_mul_2x32_64 = true,
80 	.lower_rotate = true,
81 	.use_scoped_barrier = true,
82 	.max_unroll_iterations = 32,
83 	.use_interpolated_input_intrinsics = true,
84 	/* nir_lower_int64() isn't actually called for the LLVM backend, but
85 	 * this helps the loop unrolling heuristics. */
86 	.lower_int64_options = nir_lower_imul64 |
87                                nir_lower_imul_high64 |
88                                nir_lower_imul_2x32_64 |
89                                nir_lower_divmod64 |
90                                nir_lower_minmax64 |
91                                nir_lower_iabs64,
92 	.lower_doubles_options = nir_lower_drcp |
93 				 nir_lower_dsqrt |
94 				 nir_lower_drsq |
95 				 nir_lower_ddiv,
96 	.divergence_analysis_options = nir_divergence_view_index_uniform,
97 };
98 
99 bool
100 radv_can_dump_shader(struct radv_device *device,
101 		     struct radv_shader_module *module,
102 		     bool is_gs_copy_shader)
103 {
104 	if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS))
105 		return false;
106 	if (module)
107 		return !module->nir ||
108 			(device->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS);
109 
110 	return is_gs_copy_shader;
111 }
112 
113 bool
114 radv_can_dump_shader_stats(struct radv_device *device,
115 			   struct radv_shader_module *module)
116 {
117 	/* Only dump non-meta shader stats. */
118 	return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS &&
119 	       module && !module->nir;
120 }
121 
122 VkResult radv_CreateShaderModule(
123 	VkDevice                                    _device,
124 	const VkShaderModuleCreateInfo*             pCreateInfo,
125 	const VkAllocationCallbacks*                pAllocator,
126 	VkShaderModule*                             pShaderModule)
127 {
128 	RADV_FROM_HANDLE(radv_device, device, _device);
129 	struct radv_shader_module *module;
130 
131 	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
132 	assert(pCreateInfo->flags == 0);
133 
134 	module = vk_alloc2(&device->vk.alloc, pAllocator,
135 			     sizeof(*module) + pCreateInfo->codeSize, 8,
136 			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
137 	if (module == NULL)
138 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
139 
140 	vk_object_base_init(&device->vk, &module->base,
141 			    VK_OBJECT_TYPE_SHADER_MODULE);
142 
143 	module->nir = NULL;
144 	module->size = pCreateInfo->codeSize;
145 	memcpy(module->data, pCreateInfo->pCode, module->size);
146 
147 	_mesa_sha1_compute(module->data, module->size, module->sha1);
148 
149 	*pShaderModule = radv_shader_module_to_handle(module);
150 
151 	return VK_SUCCESS;
152 }
153 
154 void radv_DestroyShaderModule(
155 	VkDevice                                    _device,
156 	VkShaderModule                              _module,
157 	const VkAllocationCallbacks*                pAllocator)
158 {
159 	RADV_FROM_HANDLE(radv_device, device, _device);
160 	RADV_FROM_HANDLE(radv_shader_module, module, _module);
161 
162 	if (!module)
163 		return;
164 
165 	vk_object_base_finish(&module->base);
166 	vk_free2(&device->vk.alloc, pAllocator, module);
167 }
168 
169 void
170 radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
171                   bool allow_copies)
172 {
173         bool progress;
174         unsigned lower_flrp =
175                 (shader->options->lower_flrp16 ? 16 : 0) |
176                 (shader->options->lower_flrp32 ? 32 : 0) |
177                 (shader->options->lower_flrp64 ? 64 : 0);
178 
179         do {
180                 progress = false;
181 
182 		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
183 		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);
184 
185                 NIR_PASS_V(shader, nir_lower_vars_to_ssa);
186 
187 		if (allow_copies) {
188 			/* Only run this pass in the first call to
189 			 * radv_optimize_nir.  Later calls assume that we've
190 			 * lowered away any copy_deref instructions and we
191 			 * don't want to introduce any more.
192 			 */
193 			NIR_PASS(progress, shader, nir_opt_find_array_copies);
194 		}
195 
196 		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
197 		NIR_PASS(progress, shader, nir_opt_dead_write_vars);
198 		NIR_PASS(progress, shader, nir_remove_dead_variables,
199 			 nir_var_function_temp | nir_var_shader_in | nir_var_shader_out,
200 			 NULL);
201 
202                 NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
203                 NIR_PASS_V(shader, nir_lower_phis_to_scalar);
204 
205                 NIR_PASS(progress, shader, nir_copy_prop);
206                 NIR_PASS(progress, shader, nir_opt_remove_phis);
207                 NIR_PASS(progress, shader, nir_opt_dce);
208                 if (nir_opt_trivial_continues(shader)) {
209                         progress = true;
210                         NIR_PASS(progress, shader, nir_copy_prop);
211 			NIR_PASS(progress, shader, nir_opt_remove_phis);
212                         NIR_PASS(progress, shader, nir_opt_dce);
213                 }
214                 NIR_PASS(progress, shader, nir_opt_if, true);
215                 NIR_PASS(progress, shader, nir_opt_dead_cf);
216                 NIR_PASS(progress, shader, nir_opt_cse);
217                 NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
218                 NIR_PASS(progress, shader, nir_opt_constant_folding);
219                 NIR_PASS(progress, shader, nir_opt_algebraic);
220 
221                 if (lower_flrp != 0) {
222                         bool lower_flrp_progress = false;
223                         NIR_PASS(lower_flrp_progress,
224                                  shader,
225                                  nir_lower_flrp,
226                                  lower_flrp,
227                                  false /* always_precise */);
228                         if (lower_flrp_progress) {
229                                 NIR_PASS(progress, shader,
230                                          nir_opt_constant_folding);
231                                 progress = true;
232                         }
233 
234                         /* Nothing should rematerialize any flrps, so we only
235                          * need to do this lowering once.
236                          */
237                         lower_flrp = 0;
238                 }
239 
240                 NIR_PASS(progress, shader, nir_opt_undef);
241                 NIR_PASS(progress, shader, nir_opt_shrink_vectors);
242                 if (shader->options->max_unroll_iterations) {
243                         NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
244                 }
245         } while (progress && !optimize_conservatively);
246 
247 	NIR_PASS(progress, shader, nir_opt_conditional_discard);
248         NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
249 }
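/* Note on the loop above: the pass list is rerun until a full iteration makes
 * no progress, or only once when optimize_conservatively is set.
 * radv_shader_compile_to_nir() below does the first, copy-allowing round as
 * radv_optimize_nir(nir, false, true); later rounds pass allow_copies=false
 * so that no new copy_deref instructions are introduced.
 */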
250 
251 static void
252 shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
253 {
254 	assert(glsl_type_is_vector_or_scalar(type));
255 
256 	uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
257 	unsigned length = glsl_get_vector_elements(type);
258 	*size = comp_size * length;
259 	*align = comp_size;
260 }
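/* Worked example for shared_var_info(): a vec3 of 32-bit floats gives
 * comp_size = 4 and length = 3, so *size = 12 and *align = 4; booleans are
 * widened to 4 bytes per component, so a bvec2 gives *size = 8, *align = 4.
 */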
261 
262 struct radv_shader_debug_data {
263 	struct radv_device *device;
264 	const struct radv_shader_module *module;
265 };
266 
267 static void radv_spirv_nir_debug(void *private_data,
268 				 enum nir_spirv_debug_level level,
269 				 size_t spirv_offset,
270 				 const char *message)
271 {
272 	struct radv_shader_debug_data *debug_data = private_data;
273 	struct radv_instance *instance = debug_data->device->instance;
274 
275 	static const VkDebugReportFlagsEXT vk_flags[] = {
276 		[NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
277 		[NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
278 		[NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
279 	};
280 	char buffer[256];
281 
282 	snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s",
283 		 (unsigned long)spirv_offset, message);
284 
285 	vk_debug_report(&instance->debug_report_callbacks,
286 			vk_flags[level],
287 			VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
288 			(uint64_t)(uintptr_t)debug_data->module,
289 			0, 0, "radv", buffer);
290 }
291 
292 static void radv_compiler_debug(void *private_data,
293 				enum radv_compiler_debug_level level,
294 				const char *message)
295 {
296 	struct radv_shader_debug_data *debug_data = private_data;
297 	struct radv_instance *instance = debug_data->device->instance;
298 
299 	static const VkDebugReportFlagsEXT vk_flags[] = {
300 		[RADV_COMPILER_DEBUG_LEVEL_PERFWARN] = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
301 		[RADV_COMPILER_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
302 	};
303 
304 	/* VK_DEBUG_REPORT_DEBUG_BIT_EXT specifies diagnostic information
305 	 * from the implementation and layers.
306 	 */
307 	vk_debug_report(&instance->debug_report_callbacks,
308 			vk_flags[level] | VK_DEBUG_REPORT_DEBUG_BIT_EXT,
309 			VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
310 			(uint64_t)(uintptr_t)debug_data->module,
311 			0, 0, "radv", message);
312 }
313 
314 static void
315 mark_geom_invariant(nir_shader *nir)
316 {
317 	nir_foreach_shader_out_variable(var, nir) {
318 		switch (var->data.location) {
319 		case VARYING_SLOT_POS:
320 		case VARYING_SLOT_PSIZ:
321 		case VARYING_SLOT_CLIP_DIST0:
322 		case VARYING_SLOT_CLIP_DIST1:
323 		case VARYING_SLOT_CULL_DIST0:
324 		case VARYING_SLOT_CULL_DIST1:
325 		case VARYING_SLOT_TESS_LEVEL_OUTER:
326 		case VARYING_SLOT_TESS_LEVEL_INNER:
327 			var->data.invariant = true;
328 			break;
329 		default:
330 			break;
331 		}
332 	}
333 }
334 
335 static bool
336 lower_load_vulkan_descriptor(nir_shader *nir)
337 {
338 	nir_function_impl *entry = nir_shader_get_entrypoint(nir);
339 	bool progress = false;
340 	nir_builder b;
341 
342 	nir_builder_init(&b, entry);
343 
344 	nir_foreach_block(block, entry) {
345 		nir_foreach_instr_safe(instr, block) {
346 			if (instr->type != nir_instr_type_intrinsic)
347 				continue;
348 
349 			nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
350 			if (intrin->intrinsic != nir_intrinsic_load_vulkan_descriptor)
351 				continue;
352 
353 			b.cursor = nir_before_instr(&intrin->instr);
354 
355 			nir_ssa_def *def = nir_vec2(&b,
356 						    nir_channel(&b, intrin->src[0].ssa, 0),
357 						    nir_imm_int(&b, 0));
358 			nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
359 						 nir_src_for_ssa(def));
360 
361 			nir_instr_remove(instr);
362 			progress = true;
363 		}
364 	}
365 
366 	return progress;
367 }
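/* The pass above runs after UBO/SSBO accesses have been lowered to the
 * 32-bit (index, offset) address format: each load_vulkan_descriptor result
 * is replaced by a vec2 of (descriptor index taken from src[0], 0), so only
 * the index survives and the offset component is forced to zero.
 */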
368 
369 nir_shader *
370 radv_shader_compile_to_nir(struct radv_device *device,
371 			   struct radv_shader_module *module,
372 			   const char *entrypoint_name,
373 			   gl_shader_stage stage,
374 			   const VkSpecializationInfo *spec_info,
375 			   const VkPipelineCreateFlags flags,
376 			   const struct radv_pipeline_layout *layout,
377 			   unsigned subgroup_size, unsigned ballot_bit_size)
378 {
379 	nir_shader *nir;
380 
381 	if (module->nir) {
382 		/* Some things such as our meta clear/blit code will give us a NIR
383 		 * shader directly.  In that case, we ignore the SPIR-V entirely and
384 		 * just use the NIR shader. */
385 		nir = module->nir;
386 		nir->options = &nir_options;
387 		nir_validate_shader(nir, "in internal shader");
388 
389 		assert(exec_list_length(&nir->functions) == 1);
390 	} else {
391 		uint32_t *spirv = (uint32_t *) module->data;
392 		assert(module->size % 4 == 0);
393 
394 		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
395 			radv_print_spirv(module->data, module->size, stderr);
396 
397 		uint32_t num_spec_entries = 0;
398 		struct nir_spirv_specialization *spec_entries = NULL;
399 		if (spec_info && spec_info->mapEntryCount > 0) {
400 			num_spec_entries = spec_info->mapEntryCount;
401 			spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
402 			for (uint32_t i = 0; i < num_spec_entries; i++) {
403 				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
404 				const void *data = spec_info->pData + entry.offset;
405 				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
406 
407 				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
408 				switch (entry.size) {
409 				case 8:
410 					memcpy(&spec_entries[i].value.u64, data, sizeof(uint64_t));
411 					break;
412 				case 4:
413 					memcpy(&spec_entries[i].value.u32, data, sizeof(uint32_t));
414 					break;
415 				case 2:
416 					memcpy(&spec_entries[i].value.u16, data, sizeof(uint16_t));
417 					break;
418 				case 1:
419 					memcpy(&spec_entries[i].value.u8, data, sizeof(uint8_t));
420 					break;
421 				default:
422 					assert(!"Invalid spec constant size");
423 					break;
424 				}
425 			}
426 		}
427 
428 		struct radv_shader_debug_data spirv_debug_data = {
429 			.device = device,
430 			.module = module,
431 		};
432 		const struct spirv_to_nir_options spirv_options = {
433 			.caps = {
434 				.amd_fragment_mask = true,
435 				.amd_gcn_shader = true,
436 				.amd_image_gather_bias_lod = true,
437 				.amd_image_read_write_lod = true,
438 				.amd_shader_ballot = true,
439 				.amd_shader_explicit_vertex_parameter = true,
440 				.amd_trinary_minmax = true,
441 				.demote_to_helper_invocation = true,
442 				.derivative_group = true,
443 				.descriptor_array_dynamic_indexing = true,
444 				.descriptor_array_non_uniform_indexing = true,
445 				.descriptor_indexing = true,
446 				.device_group = true,
447 				.draw_parameters = true,
448 				.float_controls = true,
449 				.float16 = device->physical_device->rad_info.has_packed_math_16bit,
450 				.float32_atomic_add = true,
451 				.float64 = true,
452 				.geometry_streams = true,
453 				.image_atomic_int64 = true,
454 				.image_ms_array = true,
455 				.image_read_without_format = true,
456 				.image_write_without_format = true,
457 				.int8 = true,
458 				.int16 = true,
459 				.int64 = true,
460 				.int64_atomics = true,
461 				.min_lod = true,
462 				.multiview = true,
463 				.physical_storage_buffer_address = true,
464 				.post_depth_coverage = true,
465 				.runtime_descriptor_array = true,
466 				.shader_clock = true,
467 				.shader_viewport_index_layer = true,
468 				.stencil_export = true,
469 				.storage_8bit = true,
470 				.storage_16bit = true,
471 				.storage_image_ms = true,
472 				.subgroup_arithmetic = true,
473 				.subgroup_ballot = true,
474 				.subgroup_basic = true,
475 				.subgroup_quad = true,
476 				.subgroup_shuffle = true,
477 				.subgroup_vote = true,
478 				.tessellation = true,
479 				.transform_feedback = true,
480 				.variable_pointers = true,
481 				.vk_memory_model = true,
482 				.vk_memory_model_device_scope = true,
483 			},
484 			.ubo_addr_format = nir_address_format_32bit_index_offset,
485 			.ssbo_addr_format = nir_address_format_32bit_index_offset,
486 			.phys_ssbo_addr_format = nir_address_format_64bit_global,
487 			.push_const_addr_format = nir_address_format_logical,
488 			.shared_addr_format = nir_address_format_32bit_offset,
489 			.frag_coord_is_sysval = true,
490 			.debug = {
491 				.func = radv_spirv_nir_debug,
492 				.private_data = &spirv_debug_data,
493 			},
494 		};
495 		nir = spirv_to_nir(spirv, module->size / 4,
496 				   spec_entries, num_spec_entries,
497 				   stage, entrypoint_name,
498 				   &spirv_options, &nir_options);
499 		assert(nir->info.stage == stage);
500 		nir_validate_shader(nir, "after spirv_to_nir");
501 
502 		free(spec_entries);
503 
504 		/* We have to lower away local constant initializers right before we
505 		 * inline functions.  That way they get properly initialized at the top
506 		 * of the function and not at the top of its caller.
507 		 */
508 		NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
509 		NIR_PASS_V(nir, nir_lower_returns);
510 		NIR_PASS_V(nir, nir_inline_functions);
511 		NIR_PASS_V(nir, nir_copy_prop);
512 		NIR_PASS_V(nir, nir_opt_deref);
513 
514 		/* Pick off the single entrypoint that we want */
515 		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
516 			if (func->is_entrypoint)
517 				func->name = ralloc_strdup(func, "main");
518 			else
519 				exec_node_remove(&func->node);
520 		}
521 		assert(exec_list_length(&nir->functions) == 1);
522 
523 		/* Make sure we lower constant initializers on output variables so that
524 		 * nir_remove_dead_variables below sees the corresponding stores
525 		 */
526 		NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_shader_out);
527 
528 		/* Now that we've deleted all but the main function, we can go ahead and
529 		 * lower the rest of the constant initializers.
530 		 */
531 		NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
532 
533 		/* Split member structs.  We do this before lower_io_to_temporaries so that
534 		 * it doesn't lower system values to temporaries by accident.
535 		 */
536 		NIR_PASS_V(nir, nir_split_var_copies);
537 		NIR_PASS_V(nir, nir_split_per_member_structs);
538 
539 		if (nir->info.stage == MESA_SHADER_FRAGMENT)
540                         NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
541 		if (nir->info.stage == MESA_SHADER_FRAGMENT)
542 			NIR_PASS_V(nir, nir_lower_input_attachments,
543 				   &(nir_input_attachment_options) {
544 					.use_fragcoord_sysval = true,
545 					.use_layer_id_sysval = false,
546 				   });
547 
548 		NIR_PASS_V(nir, nir_remove_dead_variables,
549 		           nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared,
550 			   NULL);
551 
552 		if (device->instance->debug_flags & RADV_DEBUG_INVARIANT_GEOM &&
553 		    stage != MESA_SHADER_FRAGMENT) {
554 			mark_geom_invariant(nir);
555 		}
556 
557 		NIR_PASS_V(nir, nir_propagate_invariant);
558 
559 		NIR_PASS_V(nir, nir_lower_system_values);
560 		NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);
561 
562 		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
563 
564 		if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
565 			NIR_PASS_V(nir, nir_lower_discard_to_demote);
566 
567 		nir_lower_doubles_options lower_doubles =
568 			nir->options->lower_doubles_options;
569 
570 		if (device->physical_device->rad_info.chip_class == GFX6) {
571 			/* GFX6 doesn't support v_floor_f64 and the precision
572 			 * of v_fract_f64 which is used to implement 64-bit
573 			 * floor is less than what Vulkan requires.
574 			 */
575 			lower_doubles |= nir_lower_dfloor;
576 		}
577 
578 		NIR_PASS_V(nir, nir_lower_doubles, NULL, lower_doubles);
579 	}
580 
581 	/* Vulkan uses the separate-shader linking model */
582 	nir->info.separate_shader = true;
583 
584 	nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
585 
586 	if (nir->info.stage == MESA_SHADER_GEOMETRY) {
587 		unsigned nir_gs_flags = nir_lower_gs_intrinsics_per_stream;
588 
589 		if (device->physical_device->use_ngg && !radv_use_llvm_for_stage(device, stage)) {
590 			/* ACO needs NIR to do some of the hard lifting */
591 			nir_gs_flags |= nir_lower_gs_intrinsics_count_primitives |
592 			                nir_lower_gs_intrinsics_count_vertices_per_primitive |
593 			                nir_lower_gs_intrinsics_overwrite_incomplete;
594 		}
595 
596 		nir_lower_gs_intrinsics(nir, nir_gs_flags);
597 	}
598 
599 	static const nir_lower_tex_options tex_options = {
600 	  .lower_txp = ~0,
601 	  .lower_tg4_offsets = true,
602 	};
603 
604 	nir_lower_tex(nir, &tex_options);
605 
606 	nir_lower_vars_to_ssa(nir);
607 
608 	if (nir->info.stage == MESA_SHADER_VERTEX ||
609 	    nir->info.stage == MESA_SHADER_GEOMETRY ||
610 	    nir->info.stage == MESA_SHADER_FRAGMENT) {
611 		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
612 			   nir_shader_get_entrypoint(nir), true, true);
613 	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
614 		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
615 			   nir_shader_get_entrypoint(nir), true, false);
616 	}
617 
618 	nir_split_var_copies(nir);
619 
620 	nir_lower_global_vars_to_local(nir);
621 	nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
622 	bool gfx7minus = device->physical_device->rad_info.chip_class <= GFX7;
623 	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
624 			.subgroup_size = subgroup_size,
625 			.ballot_bit_size = ballot_bit_size,
626 			.lower_to_scalar = 1,
627 			.lower_subgroup_masks = 1,
628 			.lower_shuffle = 1,
629 			.lower_shuffle_to_32bit = 1,
630 			.lower_vote_eq_to_ballot = 1,
631 			.lower_quad_broadcast_dynamic = 1,
632 			.lower_quad_broadcast_dynamic_to_const = gfx7minus,
633 			.lower_shuffle_to_swizzle_amd = 1,
634 			.lower_elect = radv_use_llvm_for_stage(device, stage),
635 		});
636 
637 	nir_lower_load_const_to_scalar(nir);
638 
639 	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
640 		radv_optimize_nir(nir, false, true);
641 
642 	/* Call radv_nir_lower_ycbcr_textures() late, as there might still be
643 	 * tex with an undef texture/sampler before the first optimization. */
644 	NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
645 
646 	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
647 	 * to remove any copies introduced by nir_opt_find_array_copies().
648 	 */
649 	nir_lower_var_copies(nir);
650 
651 	NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
652 		   nir_address_format_32bit_offset);
653 
654 	NIR_PASS_V(nir, nir_lower_explicit_io,
655 		   nir_var_mem_ubo | nir_var_mem_ssbo,
656 		   nir_address_format_32bit_index_offset);
657 
658 	NIR_PASS_V(nir, lower_load_vulkan_descriptor);
659 
660 	/* Lower deref operations for compute shared memory. */
661 	if (nir->info.stage == MESA_SHADER_COMPUTE) {
662 		NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
663 			   nir_var_mem_shared, shared_var_info);
664 		NIR_PASS_V(nir, nir_lower_explicit_io,
665 			   nir_var_mem_shared, nir_address_format_32bit_offset);
666 	}
667 
668 	nir_lower_explicit_io(nir, nir_var_mem_global,
669 			      nir_address_format_64bit_global);
670 
671 	/* Lower large variables that are always constant with load_constant
672 	 * intrinsics, which get turned into PC-relative loads from a data
673 	 * section next to the shader.
674 	 */
675 	NIR_PASS_V(nir, nir_opt_large_constants,
676 		   glsl_get_natural_size_align_bytes, 16);
677 
678 	/* Indirect lowering must be called after the radv_optimize_nir() loop
679 	 * has been called at least once. Otherwise indirect lowering can
680 	 * bloat the instruction count of the loop and cause it to be
681 	 * considered too large for unrolling.
682 	 */
683 	if (ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class) &&
684 	    !(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT) &&
685 	    nir->info.stage != MESA_SHADER_COMPUTE) {
686 		/* Optimize the lowered code before the linking optimizations. */
687 		radv_optimize_nir(nir, false, false);
688 	}
689 
690 	return nir;
691 }
692 
693 static int
694 type_size_vec4(const struct glsl_type *type, bool bindless)
695 {
696 	return glsl_count_attribute_slots(type, false);
697 }
698 
699 static nir_variable *
700 find_layer_in_var(nir_shader *nir)
701 {
702 	nir_variable *var =
703 		nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_LAYER);
704 	if (var != NULL)
705 		return var;
706 
707 	var = nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
708 	var->data.location = VARYING_SLOT_LAYER;
709 	var->data.interpolation = INTERP_MODE_FLAT;
710 	return var;
711 }
712 
713 /* We use layered rendering to implement multiview, which means we need to map
714  * view_index to gl_Layer. The code generates a load from the layer_id sysval,
715  * but since we don't have a way to get at this information from the fragment
716  * shader, we also need to lower this to the gl_Layer varying.  This pass
717  * lowers both to a varying load from the LAYER slot, before lowering io, so
718  * that nir_assign_var_locations() will give the LAYER varying the correct
719  * driver_location.
720  */
721 
722 static bool
723 lower_view_index(nir_shader *nir)
724 {
725 	bool progress = false;
726 	nir_function_impl *entry = nir_shader_get_entrypoint(nir);
727 	nir_builder b;
728 	nir_builder_init(&b, entry);
729 
730 	nir_variable *layer = NULL;
731 	nir_foreach_block(block, entry) {
732 		nir_foreach_instr_safe(instr, block) {
733 			if (instr->type != nir_instr_type_intrinsic)
734 				continue;
735 
736 			nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
737 			if (load->intrinsic != nir_intrinsic_load_view_index)
738 				continue;
739 
740 			if (!layer)
741 				layer = find_layer_in_var(nir);
742 
743 			b.cursor = nir_before_instr(instr);
744 			nir_ssa_def *def = nir_load_var(&b, layer);
745 			nir_ssa_def_rewrite_uses(&load->dest.ssa,
746 						 nir_src_for_ssa(def));
747 
748 			nir_instr_remove(instr);
749 			progress = true;
750 		}
751 	}
752 
753 	return progress;
754 }
755 
756 void
757 radv_lower_io(struct radv_device *device, nir_shader *nir)
758 {
759 	if (nir->info.stage == MESA_SHADER_COMPUTE)
760 		return;
761 
762 	if (nir->info.stage == MESA_SHADER_FRAGMENT) {
763 		NIR_PASS_V(nir, lower_view_index);
764 		nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs,
765 					    MESA_SHADER_FRAGMENT);
766 	}
767 
768 	/* The RADV/LLVM backend expects 64-bit IO to be lowered. */
769 	nir_lower_io_options options =
770 		radv_use_llvm_for_stage(device, nir->info.stage) ? nir_lower_io_lower_64bit_to_32 : 0;
771 
772 	NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
773 		   type_size_vec4, options);
774 
775 	/* This pass needs actual constants */
776 	nir_opt_constant_folding(nir);
777 
778 	NIR_PASS_V(nir, nir_io_add_const_offset_to_base,
779 		   nir_var_shader_in | nir_var_shader_out);
780 }
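/* Summary of radv_lower_io(): compute shaders keep their IO untouched;
 * fragment shaders first get view_index rewritten to the LAYER input and
 * their input variable locations assigned; the remaining stages then have
 * per-variable IO turned into load/store intrinsics, with 64-bit IO split
 * into 32-bit only when the LLVM backend is used for this stage.
 */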
781 
782 
783 static void *
784 radv_alloc_shader_memory(struct radv_device *device,
785 			 struct radv_shader_variant *shader)
786 {
787 	mtx_lock(&device->shader_slab_mutex);
788 	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
789 		uint64_t offset = 0;
790 		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
791 			if (s->bo_offset - offset >= shader->code_size) {
792 				shader->bo = slab->bo;
793 				shader->bo_offset = offset;
794 				list_addtail(&shader->slab_list, &s->slab_list);
795 				mtx_unlock(&device->shader_slab_mutex);
796 				return slab->ptr + offset;
797 			}
798 			offset = align_u64(s->bo_offset + s->code_size, 256);
799 		}
800 		if (offset <= slab->size && slab->size - offset >= shader->code_size) {
801 			shader->bo = slab->bo;
802 			shader->bo_offset = offset;
803 			list_addtail(&shader->slab_list, &slab->shaders);
804 			mtx_unlock(&device->shader_slab_mutex);
805 			return slab->ptr + offset;
806 		}
807 	}
808 
809 	mtx_unlock(&device->shader_slab_mutex);
810 	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));
811 
812 	slab->size = MAX2(256 * 1024, shader->code_size);
813 	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
814 	                                     RADEON_DOMAIN_VRAM,
815 					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
816 					     (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
817 					             0 : RADEON_FLAG_READ_ONLY),
818 					     RADV_BO_PRIORITY_SHADER);
819 	if (!slab->bo) {
820 		free(slab);
821 		return NULL;
822 	}
823 
824 	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
825 	if (!slab->ptr) {
826 		device->ws->buffer_destroy(slab->bo);
827 		free(slab);
828 		return NULL;
829 	}
830 
831 	list_inithead(&slab->shaders);
832 
833 	mtx_lock(&device->shader_slab_mutex);
834 	list_add(&slab->slabs, &device->shader_slabs);
835 
836 	shader->bo = slab->bo;
837 	shader->bo_offset = 0;
838 	list_add(&shader->slab_list, &slab->shaders);
839 	mtx_unlock(&device->shader_slab_mutex);
840 	return slab->ptr;
841 }
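/* The allocator above is a first-fit scan: it walks each existing slab and
 * looks for a gap between already-placed shaders (offsets rounded up to 256
 * bytes) that is big enough for the new code. If no slab has room, a new
 * slab of at least 256 KiB is created, CPU-mapped, and the shader is placed
 * at offset 0.
 */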
842 
843 void
844 radv_destroy_shader_slabs(struct radv_device *device)
845 {
846 	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
847 		device->ws->buffer_destroy(slab->bo);
848 		free(slab);
849 	}
850 	mtx_destroy(&device->shader_slab_mutex);
851 }
852 
853 /* For the UMR disassembler. */
854 #define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
855 #define DEBUGGER_NUM_MARKERS           5
856 
857 static unsigned
858 radv_get_shader_binary_size(size_t code_size)
859 {
860 	return code_size + DEBUGGER_NUM_MARKERS * 4;
861 }
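/* Example: with DEBUGGER_NUM_MARKERS == 5, a 1024-byte shader reserves
 * 1024 + 5 * 4 = 1044 bytes, leaving room for the end-of-code marker words
 * written at upload time in radv_shader_variant_create().
 */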
862 
863 static void radv_postprocess_config(const struct radv_device *device,
864 				    const struct ac_shader_config *config_in,
865 				    const struct radv_shader_info *info,
866 				    gl_shader_stage stage,
867 				    struct ac_shader_config *config_out)
868 {
869 	const struct radv_physical_device *pdevice = device->physical_device;
870 	bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
871 	bool trap_enabled = !!device->trap_handler_shader;
872 	unsigned vgpr_comp_cnt = 0;
873 	unsigned num_input_vgprs = info->num_input_vgprs;
874 
875 	if (stage == MESA_SHADER_FRAGMENT) {
876 		num_input_vgprs = ac_get_fs_input_vgpr_cnt(config_in, NULL, NULL);
877 	}
878 
879 	unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
880 	/* +3 for scratch wave offset and VCC */
881 	unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);
882 	unsigned num_shared_vgprs = config_in->num_shared_vgprs;
883 	/* shared VGPRs are introduced in Navi and are allocated in blocks of 8 (RDNA ref 3.6.5) */
884 	assert((pdevice->rad_info.chip_class >= GFX10 && num_shared_vgprs % 8 == 0)
885 	       || (pdevice->rad_info.chip_class < GFX10 && num_shared_vgprs == 0));
886 	unsigned num_shared_vgpr_blocks = num_shared_vgprs / 8;
887 	unsigned excp_en = 0;
888 
889 	*config_out = *config_in;
890 	config_out->num_vgprs = num_vgprs;
891 	config_out->num_sgprs = num_sgprs;
892 	config_out->num_shared_vgprs = num_shared_vgprs;
893 
894 	config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
895 			    S_00B12C_SCRATCH_EN(scratch_enabled) |
896 			    S_00B12C_TRAP_PRESENT(trap_enabled);
897 
898 	if (trap_enabled) {
899 		/* Configure the shader exceptions like memory violation, etc.
900 		 * TODO: Enable (and validate) more exceptions.
901 		 */
902 		excp_en = 1 << 8; /* mem_viol */
903 	}
904 
905 	if (!pdevice->use_ngg_streamout) {
906 		config_out->rsrc2 |= S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
907 				     S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
908 				     S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
909 				     S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
910 				     S_00B12C_SO_EN(!!info->so.num_outputs);
911 	}
912 
913 	config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
914 					   (info->wave_size == 32 ? 8 : 4)) |
915 			    S_00B848_DX10_CLAMP(1) |
916 			    S_00B848_FLOAT_MODE(config_out->float_mode);
917 
918 	if (pdevice->rad_info.chip_class >= GFX10) {
919 		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
920 	} else {
921 		config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
922 		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5);
923 	}
924 
925 	switch (stage) {
926 	case MESA_SHADER_TESS_EVAL:
927 		if (info->is_ngg) {
928 			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
929 			config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1) |
930 					     S_00B22C_EXCP_EN(excp_en);
931 		} else if (info->tes.as_es) {
932 			assert(pdevice->rad_info.chip_class <= GFX8);
933 			vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
934 
935 			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1) |
936 					     S_00B12C_EXCP_EN(excp_en);
937 		} else {
938 			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
939 			vgpr_comp_cnt = enable_prim_id ? 3 : 2;
940 
941 			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
942 			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1) |
943 					     S_00B12C_EXCP_EN(excp_en);
944 		}
945 		config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
946 		break;
947 	case MESA_SHADER_TESS_CTRL:
948 		if (pdevice->rad_info.chip_class >= GFX9) {
949 			/* We need at least 2 components for LS.
950 			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
951 			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
952 			 */
953 			if (pdevice->rad_info.chip_class >= GFX10) {
954 				vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
955 				config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX10(info->tcs.num_lds_blocks) |
956 						     S_00B42C_EXCP_EN_GFX6(excp_en);
957 			} else {
958 				vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
959 				config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX9(info->tcs.num_lds_blocks) |
960 						     S_00B42C_EXCP_EN_GFX9(excp_en);
961 			}
962 		} else {
963 			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1) |
964 					     S_00B12C_EXCP_EN(excp_en);
965 		}
966 		config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
967 				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
968 		config_out->rsrc2 |= S_00B42C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
969 		break;
970 	case MESA_SHADER_VERTEX:
971 		if (info->is_ngg) {
972 			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
973 		} else if (info->vs.as_ls) {
974 			assert(pdevice->rad_info.chip_class <= GFX8);
975 			/* We need at least 2 components for LS.
976 			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
977 			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
978 			 */
979 			vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
980 		} else if (info->vs.as_es) {
981 			assert(pdevice->rad_info.chip_class <= GFX8);
982 			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
983 			vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
984 		} else {
985 			/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
986 			 * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
987 			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
988 			 */
989 			if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
990 				vgpr_comp_cnt = 3;
991 			} else if (info->vs.export_prim_id) {
992 				vgpr_comp_cnt = 2;
993 			} else if (info->vs.needs_instance_id) {
994 				vgpr_comp_cnt = 1;
995 			} else {
996 				vgpr_comp_cnt = 0;
997 			}
998 
999 			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
1000 		}
1001 		config_out->rsrc2 |= S_00B12C_SHARED_VGPR_CNT(num_shared_vgpr_blocks) |
1002 				     S_00B12C_EXCP_EN(excp_en);
1003 		break;
1004 	case MESA_SHADER_FRAGMENT:
1005 		config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
1006 		config_out->rsrc2 |= S_00B02C_SHARED_VGPR_CNT(num_shared_vgpr_blocks) |
1007 				     S_00B02C_TRAP_PRESENT(1) |
1008 				     S_00B02C_EXCP_EN(excp_en);
1009 		break;
1010 	case MESA_SHADER_GEOMETRY:
1011 		config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
1012 				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
1013 		config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks) |
1014 				     S_00B22C_EXCP_EN(excp_en);
1015 		break;
1016 	case MESA_SHADER_COMPUTE:
1017 		config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
1018 				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
1019 		config_out->rsrc2 |=
1020 			S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
1021 			S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
1022 			S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
1023 			S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
1024 						info->cs.uses_thread_id[1] ? 1 : 0) |
1025 			S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
1026 			S_00B84C_LDS_SIZE(config_in->lds_size) |
1027 			S_00B84C_EXCP_EN(excp_en);
1028 		config_out->rsrc3 |= S_00B8A0_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
1029 
1030 		break;
1031 	default:
1032 		unreachable("unsupported shader type");
1033 		break;
1034 	}
1035 
1036 	if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
1037 	    (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
1038 		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
1039 		gl_shader_stage es_stage = stage;
1040 		if (stage == MESA_SHADER_GEOMETRY)
1041 			es_stage = info->gs.es_type;
1042 
1043 		/* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
1044 		if (es_stage == MESA_SHADER_VERTEX) {
1045 			es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
1046 		} else if (es_stage == MESA_SHADER_TESS_EVAL) {
1047 			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
1048 			es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
1049 		} else
1050 			unreachable("Unexpected ES shader stage");
1051 
1052 		bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
1053 			info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
1054 		if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
1055 			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
1056 		} else if (info->uses_prim_id) {
1057 			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
1058 		} else if (info->gs.vertices_in >= 3 || tes_triangles) {
1059 			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
1060 		} else {
1061 			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
1062 		}
1063 
1064 		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
1065 				     S_00B228_WGP_MODE(1);
1066 		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
1067 				     S_00B22C_LDS_SIZE(config_in->lds_size) |
1068 				     S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
1069 	} else if (pdevice->rad_info.chip_class >= GFX9 &&
1070 		   stage == MESA_SHADER_GEOMETRY) {
1071 		unsigned es_type = info->gs.es_type;
1072 		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
1073 
1074 		if (es_type == MESA_SHADER_VERTEX) {
1075 			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
1076 			if (info->vs.needs_instance_id) {
1077 				es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
1078 			} else {
1079 				es_vgpr_comp_cnt = 0;
1080 			}
1081 		} else if (es_type == MESA_SHADER_TESS_EVAL) {
1082 			es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
1083 		} else {
1084 			unreachable("invalid shader ES type");
1085 		}
1086 
1087 		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
1088 		 * VGPR[0:4] are always loaded.
1089 		 */
1090 		if (info->uses_invocation_id) {
1091 			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
1092 		} else if (info->uses_prim_id) {
1093 			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
1094 		} else if (info->gs.vertices_in >= 3) {
1095 			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
1096 		} else {
1097 			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
1098 		}
1099 
1100 		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
1101 		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
1102 		                         S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
1103 	} else if (pdevice->rad_info.chip_class >= GFX9 &&
1104 		   stage == MESA_SHADER_TESS_CTRL) {
1105 		config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
1106 	} else {
1107 		config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
1108 	}
1109 }
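/* Rough illustration of the VGPR encoding above: with wave_size == 64 and
 * num_vgprs == 43, S_00B848_VGPRS gets (43 - 1) / 4 = 10; the divisor means
 * VGPRs are effectively counted in granules of 4 for wave64 (8 for wave32),
 * so such a request is rounded up to 44 registers.
 */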
1110 
1111 struct radv_shader_variant *
1112 radv_shader_variant_create(struct radv_device *device,
1113 			   const struct radv_shader_binary *binary,
1114 			   bool keep_shader_info)
1115 {
1116 	struct ac_shader_config config = {0};
1117 	struct ac_rtld_binary rtld_binary = {0};
1118 	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
1119 	if (!variant)
1120 		return NULL;
1121 
1122 	variant->ref_count = 1;
1123 
1124 	if (binary->type == RADV_BINARY_TYPE_RTLD) {
1125 		struct ac_rtld_symbol lds_symbols[2];
1126 		unsigned num_lds_symbols = 0;
1127 		const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
1128 		size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;
1129 
1130 		if (device->physical_device->rad_info.chip_class >= GFX9 &&
1131 		    (binary->stage == MESA_SHADER_GEOMETRY || binary->info.is_ngg) &&
1132 		    !binary->is_gs_copy_shader) {
1133 			/* We add this symbol even on LLVM <= 8 to ensure that
1134 			 * shader->config.lds_size is set correctly below.
1135 			 */
1136 			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
1137 			sym->name = "esgs_ring";
1138 			sym->size = binary->info.ngg_info.esgs_ring_size;
1139 			sym->align = 64 * 1024;
1140 		}
1141 
1142 		if (binary->info.is_ngg &&
1143 		    binary->stage == MESA_SHADER_GEOMETRY) {
1144 			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
1145 			sym->name = "ngg_emit";
1146 			sym->size = binary->info.ngg_info.ngg_emit_size * 4;
1147 			sym->align = 4;
1148 		}
1149 
1150 		struct ac_rtld_open_info open_info = {
1151 			.info = &device->physical_device->rad_info,
1152 			.shader_type = binary->stage,
1153 			.wave_size = binary->info.wave_size,
1154 			.num_parts = 1,
1155 			.elf_ptrs = &elf_data,
1156 			.elf_sizes = &elf_size,
1157 			.num_shared_lds_symbols = num_lds_symbols,
1158 			.shared_lds_symbols = lds_symbols,
1159 		};
1160 
1161 		if (!ac_rtld_open(&rtld_binary, open_info)) {
1162 			free(variant);
1163 			return NULL;
1164 		}
1165 
1166 		if (!ac_rtld_read_config(&device->physical_device->rad_info,
1167 					 &rtld_binary, &config)) {
1168 			ac_rtld_close(&rtld_binary);
1169 			free(variant);
1170 			return NULL;
1171 		}
1172 
1173 		if (rtld_binary.lds_size > 0) {
1174 			unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
1175 			config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
1176 		}
1177 
1178 		variant->code_size = rtld_binary.rx_size;
1179 		variant->exec_size = rtld_binary.exec_size;
1180 	} else {
1181 		assert(binary->type == RADV_BINARY_TYPE_LEGACY);
1182 		config = ((struct radv_shader_binary_legacy *)binary)->config;
1183 		variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
1184 		variant->exec_size = ((struct radv_shader_binary_legacy *)binary)->exec_size;
1185 	}
1186 
1187 	variant->info = binary->info;
1188 	radv_postprocess_config(device, &config, &binary->info,
1189 				binary->stage, &variant->config);
1190 
1191 	void *dest_ptr = radv_alloc_shader_memory(device, variant);
1192 	if (!dest_ptr) {
1193 		if (binary->type == RADV_BINARY_TYPE_RTLD)
1194 			ac_rtld_close(&rtld_binary);
1195 		free(variant);
1196 		return NULL;
1197 	}
1198 
1199 	if (binary->type == RADV_BINARY_TYPE_RTLD) {
1200 		struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
1201 		struct ac_rtld_upload_info info = {
1202 			.binary = &rtld_binary,
1203 			.rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
1204 			.rx_ptr = dest_ptr,
1205 		};
1206 
1207 		if (!ac_rtld_upload(&info)) {
1208 			radv_shader_variant_destroy(device, variant);
1209 			ac_rtld_close(&rtld_binary);
1210 			return NULL;
1211 		}
1212 
1213 		if (keep_shader_info ||
1214 		    (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
1215 			const char *disasm_data;
1216 			size_t disasm_size;
1217 			if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
1218 				radv_shader_variant_destroy(device, variant);
1219 				ac_rtld_close(&rtld_binary);
1220 				return NULL;
1221 			}
1222 
1223 			variant->ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
1224 			variant->disasm_string = malloc(disasm_size + 1);
1225 			memcpy(variant->disasm_string, disasm_data, disasm_size);
1226 			variant->disasm_string[disasm_size] = 0;
1227 		}
1228 
1229 		ac_rtld_close(&rtld_binary);
1230 	} else {
1231 		struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
1232 		memcpy(dest_ptr, bin->data + bin->stats_size, bin->code_size);
1233 
1234 		/* Add end-of-code markers for the UMR disassembler. */
1235 		uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
1236 		for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
1237 			ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
1238 
1239 		variant->ir_string = bin->ir_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size)) : NULL;
1240 		variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size + bin->ir_size)) : NULL;
1241 
1242 		if (bin->stats_size) {
1243 			variant->statistics = calloc(bin->stats_size, 1);
1244 			memcpy(variant->statistics, bin->data, bin->stats_size);
1245 		}
1246 	}
1247 	return variant;
1248 }
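/* Both binary types above end up resident in a shader slab: RTLD binaries
 * are relocatable ELF objects uploaded through ac_rtld_upload() (with the
 * disassembly pulled from the .AMDGPU.disasm section when requested), while
 * legacy binaries are raw machine code copied in directly and terminated
 * with DEBUGGER_END_OF_CODE_MARKER words for the UMR disassembler.
 */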
1249 
1250 static char *
1251 radv_dump_nir_shaders(struct nir_shader * const *shaders,
1252                       int shader_count)
1253 {
1254 	char *data = NULL;
1255 	char *ret = NULL;
1256 	size_t size = 0;
1257 	struct u_memstream mem;
1258 	if (u_memstream_open(&mem, &data, &size)) {
1259 		FILE *const memf = u_memstream_get(&mem);
1260 		for (int i = 0; i < shader_count; ++i)
1261 			nir_print_shader(shaders[i], memf);
1262 		u_memstream_close(&mem);
1263 	}
1264 
1265 	ret = malloc(size + 1);
1266 	if (ret) {
1267 		memcpy(ret, data, size);
1268 		ret[size] = 0;
1269 	}
1270 	free(data);
1271 	return ret;
1272 }
1273 
1274 static struct radv_shader_variant *
1275 shader_variant_compile(struct radv_device *device,
1276 		       struct radv_shader_module *module,
1277 		       struct nir_shader * const *shaders,
1278 		       int shader_count,
1279 		       gl_shader_stage stage,
1280 		       struct radv_shader_info *info,
1281 		       struct radv_nir_compiler_options *options,
1282 		       bool gs_copy_shader,
1283 		       bool trap_handler_shader,
1284 		       bool keep_shader_info,
1285 		       bool keep_statistic_info,
1286 		       struct radv_shader_binary **binary_out)
1287 {
1288 	enum radeon_family chip_family = device->physical_device->rad_info.family;
1289 	struct radv_shader_binary *binary = NULL;
1290 
1291 	struct radv_shader_debug_data debug_data = {
1292 		.device = device,
1293                 .module = module,
1294         };
1295 
1296 	options->family = chip_family;
1297 	options->chip_class = device->physical_device->rad_info.chip_class;
1298 	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
1299 	options->dump_preoptir = options->dump_shader &&
1300 				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
1301 	options->record_ir = keep_shader_info;
1302 	options->record_stats = keep_statistic_info;
1303 	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
1304 	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
1305 	options->address32_hi = device->physical_device->rad_info.address32_hi;
1306 	options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
1307 	options->use_ngg_streamout = device->physical_device->use_ngg_streamout;
1308 	options->enable_mrt_output_nan_fixup = device->instance->enable_mrt_output_nan_fixup;
1309 	options->debug.func = radv_compiler_debug;
1310 	options->debug.private_data = &debug_data;
1311 
1312 	struct radv_shader_args args = {0};
1313 	args.options = options;
1314 	args.shader_info = info;
1315 	args.is_gs_copy_shader = gs_copy_shader;
1316 	args.is_trap_handler_shader = trap_handler_shader;
1317 
1318 	radv_declare_shader_args(&args,
1319 				 gs_copy_shader ? MESA_SHADER_VERTEX
1320 						: shaders[shader_count - 1]->info.stage,
1321 				 shader_count >= 2,
1322 				 shader_count >= 2 ? shaders[shader_count - 2]->info.stage
1323 						   : MESA_SHADER_VERTEX);
1324 
1325 	if (radv_use_llvm_for_stage(device, stage) ||
1326 	    options->dump_shader || options->record_ir)
1327 		ac_init_llvm_once();
1328 
1329 	if (radv_use_llvm_for_stage(device, stage)) {
1330 		llvm_compile_shader(device, shader_count, shaders, &binary, &args);
1331 	} else {
1332 		aco_compile_shader(shader_count, shaders, &binary, &args);
1333 	}
1334 
1335 	binary->info = *info;
1336 
1337 	struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
1338 									 keep_shader_info);
1339 	if (!variant) {
1340 		free(binary);
1341 		return NULL;
1342 	}
1343 
1344 	if (options->dump_shader) {
1345 		fprintf(stderr, "%s", radv_get_shader_name(info, shaders[0]->info.stage));
1346 		for (int i = 1; i < shader_count; ++i)
1347 			fprintf(stderr, " + %s", radv_get_shader_name(info, shaders[i]->info.stage));
1348 
1349 		fprintf(stderr, "\ndisasm:\n%s\n", variant->disasm_string);
1350 	}
1351 
1352 
1353 	if (keep_shader_info) {
1354 		variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
1355 		if (!gs_copy_shader && !trap_handler_shader && !module->nir) {
1356 			variant->spirv = malloc(module->size);
1357 			if (!variant->spirv) {
1358 				free(variant);
1359 				free(binary);
1360 				return NULL;
1361 			}
1362 
1363 			memcpy(variant->spirv, module->data, module->size);
1364 			variant->spirv_size = module->size;
1365 		}
1366 	}
1367 
1368 	if (binary_out)
1369 		*binary_out = binary;
1370 	else
1371 		free(binary);
1372 
1373 	return variant;
1374 }
1375 
1376 struct radv_shader_variant *
1377 radv_shader_variant_compile(struct radv_device *device,
1378 			   struct radv_shader_module *module,
1379 			   struct nir_shader *const *shaders,
1380 			   int shader_count,
1381 			   struct radv_pipeline_layout *layout,
1382 			   const struct radv_shader_variant_key *key,
1383 			   struct radv_shader_info *info,
1384 			   bool keep_shader_info, bool keep_statistic_info,
1385 			   bool disable_optimizations,
1386 			   struct radv_shader_binary **binary_out)
1387 {
1388 	gl_shader_stage stage =  shaders[shader_count - 1]->info.stage;
1389 	struct radv_nir_compiler_options options = {0};
1390 
1391 	options.layout = layout;
1392 	if (key)
1393 		options.key = *key;
1394 
1395 	options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
1396 	options.robust_buffer_access = device->robust_buffer_access;
1397 	options.disable_optimizations = disable_optimizations;
1398 
1399 	return shader_variant_compile(device, module, shaders, shader_count, stage, info,
1400 				      &options, false, false,
1401 				      keep_shader_info, keep_statistic_info, binary_out);
1402 }
1403 
1404 struct radv_shader_variant *
1405 radv_create_gs_copy_shader(struct radv_device *device,
1406 			   struct nir_shader *shader,
1407 			   struct radv_shader_info *info,
1408 			   struct radv_shader_binary **binary_out,
1409 			   bool keep_shader_info, bool keep_statistic_info,
1410 			   bool multiview, bool disable_optimizations)
1411 {
1412 	struct radv_nir_compiler_options options = {0};
1413 	gl_shader_stage stage = MESA_SHADER_VERTEX;
1414 
1415 	options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
1416 	options.key.has_multiview_view_index = multiview;
1417 	options.disable_optimizations = disable_optimizations;
1418 
1419 	return shader_variant_compile(device, NULL, &shader, 1, stage,
1420 				      info, &options, true, false,
1421 				      keep_shader_info, keep_statistic_info, binary_out);
1422 }
1423 
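/* Build a minimal compute shader that serves as the trap handler used for
 * shader hang debugging; only the compiled binary matters, not its contents. */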
1424 struct radv_shader_variant *
1425 radv_create_trap_handler_shader(struct radv_device *device)
1426 {
1427 	struct radv_nir_compiler_options options = {0};
1428 	struct radv_shader_variant *shader = NULL;
1429 	struct radv_shader_binary *binary = NULL;
1430 	struct radv_shader_info info = {0};
1431 
1432 	nir_builder b;
1433 	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL);
1434 	b.shader->info.name = ralloc_strdup(b.shader, "meta_trap_handler");
1435 
1436 	options.explicit_scratch_args = true;
1437 	info.wave_size = 64;
1438 
1439 	shader = shader_variant_compile(device, NULL, &b.shader, 1,
1440 					MESA_SHADER_COMPUTE, &info, &options,
1441 					false, true, true, false, &binary);
1442 
1443 	ralloc_free(b.shader);
1444 	free(binary);
1445 
1446 	return shader;
1447 }
1448 
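/* Drop one reference to the variant; it is only unlinked from the shader slab
 * and freed once the last reference is gone. */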
1449 void
1450 radv_shader_variant_destroy(struct radv_device *device,
1451 			    struct radv_shader_variant *variant)
1452 {
1453 	if (!p_atomic_dec_zero(&variant->ref_count))
1454 		return;
1455 
1456 	mtx_lock(&device->shader_slab_mutex);
1457 	list_del(&variant->slab_list);
1458 	mtx_unlock(&device->shader_slab_mutex);
1459 
1460 	free(variant->spirv);
1461 	free(variant->nir_string);
1462 	free(variant->disasm_string);
1463 	free(variant->ir_string);
1464 	free(variant->statistics);
1465 	free(variant);
1466 }
1467 
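/* Human-readable stage name for debug output, taking the merged/NGG hardware
 * stage configurations (LS/ES/ESGS) into account. */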
1468 const char *
1469 radv_get_shader_name(struct radv_shader_info *info,
1470 		     gl_shader_stage stage)
1471 {
1472 	switch (stage) {
1473 	case MESA_SHADER_VERTEX:
1474 		if (info->vs.as_ls)
1475 			return "Vertex Shader as LS";
1476 		else if (info->vs.as_es)
1477 			return "Vertex Shader as ES";
1478 		else if (info->is_ngg)
1479 			return "Vertex Shader as ESGS";
1480 		else
1481 			return "Vertex Shader as VS";
1482 	case MESA_SHADER_TESS_CTRL:
1483 		return "Tessellation Control Shader";
1484 	case MESA_SHADER_TESS_EVAL:
1485 		if (info->tes.as_es)
1486 			return "Tessellation Evaluation Shader as ES";
1487 		else if (info->is_ngg)
1488 			return "Tessellation Evaluation Shader as ESGS";
1489 		else
1490 			return "Tessellation Evaluation Shader as VS";
1491 	case MESA_SHADER_GEOMETRY:
1492 		return "Geometry Shader";
1493 	case MESA_SHADER_FRAGMENT:
1494 		return "Pixel Shader";
1495 	case MESA_SHADER_COMPUTE:
1496 		return "Compute Shader";
1497 	default:
1498 		return "Unknown shader";
1499 	}
1500 }
1501 
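/* Maximum number of threads per workgroup for a stage: fixed hardware limits
 * for tess control and geometry, the declared local size for compute, and 0
 * for stages without a workgroup. */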
1502 unsigned
1503 radv_get_max_workgroup_size(enum chip_class chip_class,
1504                             gl_shader_stage stage,
1505                             const unsigned *sizes)
1506 {
1507 	switch (stage) {
1508 	case MESA_SHADER_TESS_CTRL:
1509 		return chip_class >= GFX7 ? 128 : 64;
1510 	case MESA_SHADER_GEOMETRY:
1511 		return chip_class >= GFX9 ? 128 : 64;
1512 	case MESA_SHADER_COMPUTE:
1513 		break;
1514 	default:
1515 		return 0;
1516 	}
1517 
1518 	unsigned max_workgroup_size = sizes[0] * sizes[1] * sizes[2];
1519 	return max_workgroup_size;
1520 }
1521 
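/* Estimate how many waves of this shader can be resident per SIMD (its
 * occupancy), limited in turn by the SGPR, VGPR and LDS budgets. */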
1522 unsigned
1523 radv_get_max_waves(struct radv_device *device,
1524                    struct radv_shader_variant *variant,
1525                    gl_shader_stage stage)
1526 {
1527 	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
1528 	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
1529 	uint8_t wave_size = variant->info.wave_size;
1530 	struct ac_shader_config *conf = &variant->config;
1531 	unsigned max_simd_waves;
1532 	unsigned lds_per_wave = 0;
1533 
1534 	max_simd_waves = device->physical_device->rad_info.max_wave64_per_simd;
1535 
1536 	if (stage == MESA_SHADER_FRAGMENT) {
1537 		lds_per_wave = conf->lds_size * lds_increment +
1538 			       align(variant->info.ps.num_interp * 48,
1539 				     lds_increment);
1540 	} else if (stage == MESA_SHADER_COMPUTE) {
1541 		unsigned max_workgroup_size =
1542 			radv_get_max_workgroup_size(chip_class, stage, variant->info.cs.block_size);
1543 		lds_per_wave = (conf->lds_size * lds_increment) /
1544 			       DIV_ROUND_UP(max_workgroup_size, wave_size);
1545 	}
1546 
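	/* SGPRs are allocated in blocks of 16 (GFX8+) or 8, so round up before
	 * dividing the physical per-SIMD SGPR file. */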
1547 	if (conf->num_sgprs) {
1548 		unsigned sgprs = align(conf->num_sgprs, chip_class >= GFX8 ? 16 : 8);
1549 		max_simd_waves =
1550 			MIN2(max_simd_waves,
1551 			     device->physical_device->rad_info.num_physical_sgprs_per_simd /
1552 			     sgprs);
1553 	}
1554 
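	/* VGPR allocation granularity is coarser for wave32 than for wave64. */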
1555 	if (conf->num_vgprs) {
1556 		unsigned vgprs = align(conf->num_vgprs, wave_size == 32 ? 8 : 4);
1557 		max_simd_waves =
1558 			MIN2(max_simd_waves,
1559 			     device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd / vgprs);
1560 	}
1561 
1562 	unsigned max_lds_per_simd = device->physical_device->rad_info.lds_size_per_workgroup / device->physical_device->rad_info.num_simd_per_compute_unit;
1563 	if (lds_per_wave)
1564 		max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
1565 
1566 	return max_simd_waves;
1567 }
1568 
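/* Implementation of VK_AMD_shader_info: returns either statistics or a
 * textual disassembly dump for one stage of an existing pipeline. */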
1569 VkResult
1570 radv_GetShaderInfoAMD(VkDevice _device,
1571 		      VkPipeline _pipeline,
1572 		      VkShaderStageFlagBits shaderStage,
1573 		      VkShaderInfoTypeAMD infoType,
1574 		      size_t* pInfoSize,
1575 		      void* pInfo)
1576 {
1577 	RADV_FROM_HANDLE(radv_device, device, _device);
1578 	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
1579 	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
1580 	struct radv_shader_variant *variant = pipeline->shaders[stage];
1581 	VkResult result = VK_SUCCESS;
1582 
1583 	/* Spec doesn't indicate what to do if the stage is invalid, so just
1584 	 * return no info for this. */
1585 	if (!variant)
1586 		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
1587 
1588 	switch (infoType) {
1589 	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
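		/* Standard Vulkan two-call idiom: a NULL pInfo only queries the size. */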
1590 		if (!pInfo) {
1591 			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
1592 		} else {
1593 			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
1594 			struct ac_shader_config *conf = &variant->config;
1595 
1596 			VkShaderStatisticsInfoAMD statistics = {0};
1597 			statistics.shaderStageMask = shaderStage;
1598 			statistics.numPhysicalVgprs = device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd;
1599 			statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
1600 			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;
1601 
1602 			if (stage == MESA_SHADER_COMPUTE) {
1603 				unsigned *local_size = variant->info.cs.block_size;
1604 				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];
1605 
1606 				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
1607 							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);
1608 
1609 				statistics.computeWorkGroupSize[0] = local_size[0];
1610 				statistics.computeWorkGroupSize[1] = local_size[1];
1611 				statistics.computeWorkGroupSize[2] = local_size[2];
1612 			} else {
1613 				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
1614 			}
1615 
1616 			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
1617 			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
1618 			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
1619 			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
1620 			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;
1621 
1622 			size_t size = *pInfoSize;
1623 			*pInfoSize = sizeof(statistics);
1624 
1625 			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));
1626 
1627 			if (size < *pInfoSize)
1628 				result = VK_INCOMPLETE;
1629 		}
1630 
1631 		break;
1632 	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD: {
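		/* Assemble the shader name, IR and disassembly into an in-memory
		 * stream so the total size is known before copying it to the caller. */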
1633 		char *out;
1634 		size_t outsize;
1635 		struct u_memstream mem;
1636 		u_memstream_open(&mem, &out, &outsize);
1637 		FILE *const memf = u_memstream_get(&mem);
1638 
1639 		fprintf(memf, "%s:\n", radv_get_shader_name(&variant->info, stage));
1640 		fprintf(memf, "%s\n\n", variant->ir_string);
1641 		fprintf(memf, "%s\n\n", variant->disasm_string);
1642 		radv_dump_shader_stats(device, pipeline, stage, memf);
1643 		u_memstream_close(&mem);
1644 
1645 		/* Need to include the null terminator. */
1646 		size_t length = outsize + 1;
1647 
1648 		if (!pInfo) {
1649 			*pInfoSize = length;
1650 		} else {
1651 			size_t size = *pInfoSize;
1652 			*pInfoSize = length;
1653 
1654 			memcpy(pInfo, out, MIN2(size, length));
1655 
1656 			if (size < length)
1657 				result = VK_INCOMPLETE;
1658 		}
1659 
1660 		free(out);
1661 		break;
1662 	}
1663 	default:
1664 		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
1665 		result = VK_ERROR_FEATURE_NOT_PRESENT;
1666 		break;
1667 	}
1668 
1669 	return result;
1670 }
1671 
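/* Write the VK_KHR_pipeline_executable_properties statistics for the given
 * stage to 'output'; used e.g. by the disassembly path above. */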
1672 VkResult
1673 radv_dump_shader_stats(struct radv_device *device,
1674 		       struct radv_pipeline *pipeline,
1675 		       gl_shader_stage stage, FILE *output)
1676 {
1677 	struct radv_shader_variant *shader = pipeline->shaders[stage];
1678 	VkPipelineExecutablePropertiesKHR *props = NULL;
1679 	uint32_t prop_count = 0;
1680 	VkResult result;
1681 
1682 	VkPipelineInfoKHR pipeline_info = {0};
1683 	pipeline_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR;
1684 	pipeline_info.pipeline = radv_pipeline_to_handle(pipeline);
1685 
1686 	result = radv_GetPipelineExecutablePropertiesKHR(radv_device_to_handle(device),
1687 							 &pipeline_info,
1688 							 &prop_count, NULL);
1689 	if (result != VK_SUCCESS)
1690 		return result;
1691 
1692 	props = calloc(prop_count, sizeof(*props));
1693 	if (!props)
1694 		return VK_ERROR_OUT_OF_HOST_MEMORY;
1695 
1696 	result = radv_GetPipelineExecutablePropertiesKHR(radv_device_to_handle(device),
1697 							 &pipeline_info,
1698 							 &prop_count, props);
1699 	if (result != VK_SUCCESS)
1700 		goto fail;
1701 
1702 	for (unsigned i = 0; i < prop_count; i++) {
1703 		if (!(props[i].stages & mesa_to_vk_shader_stage(stage)))
1704 			continue;
1705 
1706 		VkPipelineExecutableStatisticKHR *stats = NULL;
1707 		uint32_t stat_count = 0;
1708 		/* Use the outer 'result' (no local shadow) so failures below propagate to the caller via the fail path. */
1709 
1710 		VkPipelineExecutableInfoKHR exec_info = {0};
1711 		exec_info.pipeline = radv_pipeline_to_handle(pipeline);
1712 		exec_info.executableIndex = i;
1713 
1714 		result = radv_GetPipelineExecutableStatisticsKHR(radv_device_to_handle(device),
1715 								 &exec_info,
1716 								 &stat_count, NULL);
1717 		if (result != VK_SUCCESS)
1718 			goto fail;
1719 
1720 		stats = calloc(stat_count, sizeof(*stats));
1721 		if (!stats) {
1722 			result = VK_ERROR_OUT_OF_HOST_MEMORY;
1723 			goto fail;
1724 		}
1725 
1726 		result = radv_GetPipelineExecutableStatisticsKHR(radv_device_to_handle(device),
1727 								 &exec_info,
1728 								 &stat_count, stats);
1729 		if (result != VK_SUCCESS) {
1730 			free(stats);
1731 			goto fail;
1732 		}
1733 
1734 		fprintf(output, "\n%s:\n",
1735 			radv_get_shader_name(&shader->info, stage));
1736 		fprintf(output, "*** SHADER STATS ***\n");
1737 
1738 		for (unsigned i = 0; i < stat_count; i++) {
1739 			fprintf(output, "%s: ", stats[i].name);
1740 			switch (stats[i].format) {
1741 			case VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR:
1742 				fprintf(output, "%s", stats[i].value.b32 == VK_TRUE ? "true" : "false");
1743 				break;
1744 			case VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR:
1745 				fprintf(output, "%"PRIi64, stats[i].value.i64);
1746 				break;
1747 			case VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR:
1748 				fprintf(output, "%"PRIu64, stats[i].value.u64);
1749 				break;
1750 			case VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR:
1751 				fprintf(output, "%f", stats[i].value.f64);
1752 				break;
1753 			default:
1754 				unreachable("Invalid pipeline statistic format");
1755 			}
1756 			fprintf(output, "\n");
1757 		}
1758 
1759 		fprintf(output, "********************\n\n\n");
1760 
1761 		free(stats);
1762 	}
1763 
1764 fail:
1765 	free(props);
1766 	return result;
1767 }
1768