1 /*
2 * Copyright © 2021 Google
3 *
4 * SPDX-License-Identifier: MIT
5 */
6
7 #include "nir/nir.h"
8 #include "nir/nir_builder.h"
9 #include "nir/nir_serialize.h"
10
11 #include "vk_shader_module.h"
12
13 #include "nir/radv_nir.h"
14 #include "radv_debug.h"
15 #include "radv_descriptor_set.h"
16 #include "radv_entrypoints.h"
17 #include "radv_pipeline_binary.h"
18 #include "radv_pipeline_cache.h"
19 #include "radv_pipeline_rt.h"
20 #include "radv_rmv.h"
21 #include "radv_shader.h"
22 #include "ac_nir.h"
23
24 struct rt_handle_hash_entry {
25 uint32_t key;
26 char hash[20];
27 };
28
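/*
 * Map a shader's SHA1 to a stable 32-bit group handle.
 *
 * The handle starts from the first four bytes of the hash; bit 31 is always set (the lower half of
 * the handle space is left for resume shaders etc.) and bit 30 selects the capture/replay
 * namespace. Collisions in the device-wide rt_handles table are resolved by linear probing, so
 * identical shaders map to the same handle while distinct shaders never share one. Roughly:
 *
 *    bit 31   bit 30   bits 29..0
 *   +--------+--------+------------------------------------------+
 *   |   1    | replay | derived from SHA1, bumped on collision   |
 *   +--------+--------+------------------------------------------+
 */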
29 static uint32_t
handle_from_stages(struct radv_device *device, const unsigned char *shader_sha1, bool replay_namespace)
31 {
32 uint32_t ret;
33
34 memcpy(&ret, shader_sha1, sizeof(ret));
35
36 /* Leave the low half for resume shaders etc. */
37 ret |= 1u << 31;
38
39 /* Ensure we have dedicated space for replayable shaders */
40 ret &= ~(1u << 30);
41 ret |= replay_namespace << 30;
42
43 simple_mtx_lock(&device->rt_handles_mtx);
44
45 struct hash_entry *he = NULL;
46 for (;;) {
47 he = _mesa_hash_table_search(device->rt_handles, &ret);
48 if (!he)
49 break;
50
51 if (memcmp(he->data, shader_sha1, SHA1_DIGEST_LENGTH) == 0)
52 break;
53
54 ++ret;
55 }
56
57 if (!he) {
58 struct rt_handle_hash_entry *e = ralloc(device->rt_handles, struct rt_handle_hash_entry);
59 e->key = ret;
60 memcpy(e->hash, shader_sha1, SHA1_DIGEST_LENGTH);
61 _mesa_hash_table_insert(device->rt_handles, &e->key, &e->hash);
62 }
63
64 simple_mtx_unlock(&device->rt_handles_mtx);
65
66 return ret;
67 }
68
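/* Build the per-stage shader keys for this pipeline. Keys come from the stage create infos and the
 * pipeline create flags; robustness settings of imported libraries are merged into the
 * MESA_SHADER_INTERSECTION key because library shaders end up inlined into the traversal shader.
 */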
69 static void
radv_generate_rt_shaders_key(const struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                             struct radv_shader_stage_key *stage_keys)
72 {
73 VkPipelineCreateFlags2 create_flags = vk_rt_pipeline_create_flags(pCreateInfo);
74
75 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
76 const VkPipelineShaderStageCreateInfo *stage = &pCreateInfo->pStages[i];
77 gl_shader_stage s = vk_to_mesa_shader_stage(stage->stage);
78
79 stage_keys[s] = radv_pipeline_get_shader_key(device, stage, create_flags, pCreateInfo->pNext);
80 }
81
82 if (pCreateInfo->pLibraryInfo) {
83 for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
84 VK_FROM_HANDLE(radv_pipeline, pipeline_lib, pCreateInfo->pLibraryInfo->pLibraries[i]);
85 struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline_lib);
86 /* apply shader robustness from merged shaders */
87 if (library_pipeline->traversal_storage_robustness2)
88 stage_keys[MESA_SHADER_INTERSECTION].storage_robustness2 = true;
89
90 if (library_pipeline->traversal_uniform_robustness2)
91 stage_keys[MESA_SHADER_INTERSECTION].uniform_robustness2 = true;
92 }
93 }
94 }
95
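/* Assign group handles for every shader group in the create info. General and triangle hit groups
 * use the handle of the referenced stage directly; procedural hit groups hash the intersection and
 * (optional) any-hit SHA1s together so the pair gets its own handle. If the application provides
 * capture/replay handles, the non-recursive index must match what we computed, otherwise replay
 * cannot work and VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS is returned.
 */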
96 static VkResult
radv_create_group_handles(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                          const struct radv_ray_tracing_stage *stages, struct radv_ray_tracing_group *groups)
99 {
100 VkPipelineCreateFlags2 create_flags = vk_rt_pipeline_create_flags(pCreateInfo);
101 bool capture_replay = create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
102 for (unsigned i = 0; i < pCreateInfo->groupCount; ++i) {
103 const VkRayTracingShaderGroupCreateInfoKHR *group_info = &pCreateInfo->pGroups[i];
104 switch (group_info->type) {
105 case VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR:
106 if (group_info->generalShader != VK_SHADER_UNUSED_KHR) {
107 const struct radv_ray_tracing_stage *stage = &stages[group_info->generalShader];
108 groups[i].handle.general_index = handle_from_stages(device, stage->sha1, capture_replay);
109 }
110 break;
111 case VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR:
112 if (group_info->closestHitShader != VK_SHADER_UNUSED_KHR) {
113 const struct radv_ray_tracing_stage *stage = &stages[group_info->closestHitShader];
114 groups[i].handle.closest_hit_index = handle_from_stages(device, stage->sha1, capture_replay);
115 }
116
117 if (group_info->intersectionShader != VK_SHADER_UNUSED_KHR) {
118 unsigned char sha1[SHA1_DIGEST_LENGTH];
119 struct mesa_sha1 ctx;
120
121 _mesa_sha1_init(&ctx);
122 _mesa_sha1_update(&ctx, stages[group_info->intersectionShader].sha1, SHA1_DIGEST_LENGTH);
123 if (group_info->anyHitShader != VK_SHADER_UNUSED_KHR)
124 _mesa_sha1_update(&ctx, stages[group_info->anyHitShader].sha1, SHA1_DIGEST_LENGTH);
125 _mesa_sha1_final(&ctx, sha1);
126
127 groups[i].handle.intersection_index = handle_from_stages(device, sha1, capture_replay);
128 }
129 break;
130 case VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR:
131 if (group_info->closestHitShader != VK_SHADER_UNUSED_KHR) {
132 const struct radv_ray_tracing_stage *stage = &stages[group_info->closestHitShader];
133 groups[i].handle.closest_hit_index = handle_from_stages(device, stage->sha1, capture_replay);
134 }
135
136 if (group_info->anyHitShader != VK_SHADER_UNUSED_KHR) {
137 const struct radv_ray_tracing_stage *stage = &stages[group_info->anyHitShader];
138 groups[i].handle.any_hit_index = handle_from_stages(device, stage->sha1, capture_replay);
139 }
140 break;
141 case VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR:
142 unreachable("VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR");
143 }
144
145 if (group_info->pShaderGroupCaptureReplayHandle) {
146 const struct radv_rt_capture_replay_handle *handle = group_info->pShaderGroupCaptureReplayHandle;
147 if (memcmp(&handle->non_recursive_idx, &groups[i].handle.any_hit_index, sizeof(uint32_t)) != 0) {
148 return VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS;
149 }
150 }
151 }
152
153 return VK_SUCCESS;
154 }
155
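/* For capture/replay, recursive shaders must live at the same virtual address as when they were
 * captured. Shaders that are compiled as part of this pipeline only record the serialized arena
 * block to allocate from later; shaders imported from libraries are relocated here: a replay arena
 * block is created at the captured VA and the shader is re-uploaded into it.
 */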
156 static VkResult
radv_rt_init_capture_replay(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                            const struct radv_ray_tracing_stage *stages, const struct radv_ray_tracing_group *groups,
                            struct radv_serialized_shader_arena_block *capture_replay_blocks)
160 {
161 VkResult result = VK_SUCCESS;
162 uint32_t idx;
163
164 for (idx = 0; idx < pCreateInfo->groupCount; idx++) {
165 if (!pCreateInfo->pGroups[idx].pShaderGroupCaptureReplayHandle)
166 continue;
167
168 const struct radv_rt_capture_replay_handle *handle =
169 (const struct radv_rt_capture_replay_handle *)pCreateInfo->pGroups[idx].pShaderGroupCaptureReplayHandle;
170
171 if (groups[idx].recursive_shader < pCreateInfo->stageCount) {
172 capture_replay_blocks[groups[idx].recursive_shader] = handle->recursive_shader_alloc;
173 } else if (groups[idx].recursive_shader != VK_SHADER_UNUSED_KHR) {
174 struct radv_shader *library_shader = stages[groups[idx].recursive_shader].shader;
175 simple_mtx_lock(&library_shader->replay_mtx);
176 /* If arena_va is 0, the pipeline is monolithic and the shader was inlined into raygen */
177 if (!library_shader->has_replay_alloc && handle->recursive_shader_alloc.arena_va) {
178 union radv_shader_arena_block *new_block =
179 radv_replay_shader_arena_block(device, &handle->recursive_shader_alloc, library_shader);
180 if (!new_block) {
181 result = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS;
182 goto reloc_out;
183 }
184
185 radv_shader_wait_for_upload(device, library_shader->upload_seq);
186 radv_free_shader_memory(device, library_shader->alloc);
187
188 library_shader->alloc = new_block;
189 library_shader->has_replay_alloc = true;
190
191 library_shader->bo = library_shader->alloc->arena->bo;
192 library_shader->va = radv_buffer_get_va(library_shader->bo) + library_shader->alloc->offset;
193
194 if (!radv_shader_reupload(device, library_shader)) {
195 result = VK_ERROR_UNKNOWN;
196 goto reloc_out;
197 }
198 }
199
200 reloc_out:
201 simple_mtx_unlock(&library_shader->replay_mtx);
202 if (result != VK_SUCCESS)
203 return result;
204 }
205 }
206
207 return result;
208 }
209
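/* Fill the radv_ray_tracing_group array from the create info and append the groups of imported
 * libraries. Library shader indices are rebased past this pipeline's own stages, and the recursive
 * shader VA in imported handles is cleared because handles feed into the pipeline hash.
 */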
210 static VkResult
radv_rt_fill_group_info(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                        const struct radv_ray_tracing_stage *stages, struct radv_ray_tracing_group *groups)
213 {
214 VkResult result = radv_create_group_handles(device, pCreateInfo, stages, groups);
215
216 uint32_t idx;
217 for (idx = 0; idx < pCreateInfo->groupCount; idx++) {
218 groups[idx].type = pCreateInfo->pGroups[idx].type;
219 if (groups[idx].type == VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR)
220 groups[idx].recursive_shader = pCreateInfo->pGroups[idx].generalShader;
221 else
222 groups[idx].recursive_shader = pCreateInfo->pGroups[idx].closestHitShader;
223 groups[idx].any_hit_shader = pCreateInfo->pGroups[idx].anyHitShader;
224 groups[idx].intersection_shader = pCreateInfo->pGroups[idx].intersectionShader;
225 }
226
227 /* copy and adjust library groups (incl. handles) */
228 if (pCreateInfo->pLibraryInfo) {
229 unsigned stage_count = pCreateInfo->stageCount;
230 for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
231 VK_FROM_HANDLE(radv_pipeline, pipeline_lib, pCreateInfo->pLibraryInfo->pLibraries[i]);
232 struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline_lib);
233
234 for (unsigned j = 0; j < library_pipeline->group_count; ++j) {
235 struct radv_ray_tracing_group *dst = &groups[idx + j];
236 *dst = library_pipeline->groups[j];
237 if (dst->recursive_shader != VK_SHADER_UNUSED_KHR)
238 dst->recursive_shader += stage_count;
239 if (dst->any_hit_shader != VK_SHADER_UNUSED_KHR)
240 dst->any_hit_shader += stage_count;
241 if (dst->intersection_shader != VK_SHADER_UNUSED_KHR)
242 dst->intersection_shader += stage_count;
243 /* Don't set the shader VA since the handles are part of the pipeline hash */
244 dst->handle.recursive_shader_ptr = 0;
245 }
246 idx += library_pipeline->group_count;
247 stage_count += library_pipeline->stage_count;
248 }
249 }
250
251 return result;
252 }
253
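/* Record the shader stage for each entry in pStages and append the stages of imported libraries,
 * taking references on any NIR handles and compiled shaders the libraries already own.
 */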
254 static void
radv_rt_fill_stage_info(const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, struct radv_ray_tracing_stage *stages)
256 {
257 uint32_t idx;
258 for (idx = 0; idx < pCreateInfo->stageCount; idx++)
259 stages[idx].stage = vk_to_mesa_shader_stage(pCreateInfo->pStages[idx].stage);
260
261 if (pCreateInfo->pLibraryInfo) {
262 for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
263 VK_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
264 struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline);
265 for (unsigned j = 0; j < library_pipeline->stage_count; ++j) {
266 if (library_pipeline->stages[j].nir)
267 stages[idx].nir = vk_pipeline_cache_object_ref(library_pipeline->stages[j].nir);
268 if (library_pipeline->stages[j].shader)
269 stages[idx].shader = radv_shader_ref(library_pipeline->stages[j].shader);
270
271 stages[idx].stage = library_pipeline->stages[j].stage;
272 stages[idx].stack_size = library_pipeline->stages[j].stack_size;
273 stages[idx].info = library_pipeline->stages[j].info;
274 memcpy(stages[idx].sha1, library_pipeline->stages[j].sha1, SHA1_DIGEST_LENGTH);
275 idx++;
276 }
277 }
278 }
279 }
280
281 static void
radv_init_rt_stage_hashes(const struct radv_device *device, VkPipelineCreateFlags2 pipeline_flags,
                          const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, struct radv_ray_tracing_stage *stages,
                          const struct radv_shader_stage_key *stage_keys)
285 {
286 const VkPipelineBinaryInfoKHR *binary_info = vk_find_struct_const(pCreateInfo->pNext, PIPELINE_BINARY_INFO_KHR);
287 if (binary_info && binary_info->binaryCount > 0) {
288 for (uint32_t i = 0; i < binary_info->binaryCount; i++) {
289 VK_FROM_HANDLE(radv_pipeline_binary, pipeline_binary, binary_info->pPipelineBinaries[i]);
290 struct blob_reader blob;
291
292 blob_reader_init(&blob, pipeline_binary->data, pipeline_binary->size);
293
294 const struct radv_ray_tracing_binary_header *header =
295 (const struct radv_ray_tracing_binary_header *)blob_read_bytes(&blob, sizeof(*header));
296
297 if (header->is_traversal_shader)
298 continue;
299
300 memcpy(stages[i].sha1, header->stage_sha1, SHA1_DIGEST_LENGTH);
301 }
302 } else {
303 for (uint32_t idx = 0; idx < pCreateInfo->stageCount; idx++) {
304 const VkPipelineShaderStageCreateInfo *sinfo = &pCreateInfo->pStages[idx];
305 gl_shader_stage s = vk_to_mesa_shader_stage(sinfo->stage);
306 struct mesa_sha1 ctx;
307
308 _mesa_sha1_init(&ctx);
309 radv_pipeline_hash_shader_stage(pipeline_flags, sinfo, &stage_keys[s], &ctx);
310 _mesa_sha1_final(&ctx, stages[idx].sha1);
311 }
312 }
313 }
314
315 static bool
should_move_rt_instruction(nir_intrinsic_instr *instr)
317 {
318 switch (instr->intrinsic) {
319 case nir_intrinsic_load_hit_attrib_amd:
320 return nir_intrinsic_base(instr) < RADV_MAX_HIT_ATTRIB_DWORDS;
321 case nir_intrinsic_load_rt_arg_scratch_offset_amd:
322 case nir_intrinsic_load_ray_flags:
323 case nir_intrinsic_load_ray_object_origin:
324 case nir_intrinsic_load_ray_world_origin:
325 case nir_intrinsic_load_ray_t_min:
326 case nir_intrinsic_load_ray_object_direction:
327 case nir_intrinsic_load_ray_world_direction:
328 case nir_intrinsic_load_ray_t_max:
329 return true;
330 default:
331 return false;
332 }
333 }
334
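/* Hoist the RT system value loads listed above to the start of the entry block so that values set
 * up by one rt_trace_ray call are read before a later rt_trace_ray call can overwrite them.
 */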
335 static void
move_rt_instructions(nir_shader *shader)
337 {
338 nir_cursor target = nir_before_impl(nir_shader_get_entrypoint(shader));
339
340 nir_foreach_block (block, nir_shader_get_entrypoint(shader)) {
341 nir_foreach_instr_safe (instr, block) {
342 if (instr->type != nir_instr_type_intrinsic)
343 continue;
344
345 nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
346
347 if (!should_move_rt_instruction(intrinsic))
348 continue;
349
350 nir_instr_move(target, instr);
351 }
352 }
353
354 nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_all & (~nir_metadata_instr_index));
355 }
356
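/* Compile one ray tracing stage (or the traversal shader) from NIR to an AMD shader.
 *
 * The NIR is lowered for RT I/O, shader info and arguments are gathered, and for recursive,
 * non-monolithic stages nir_lower_shader_calls() splits the shader at trace/call sites into a main
 * part plus resume shaders. Every resulting shader part is then run through the RT ABI lowering and
 * the common backend passes before being compiled. Shaders with capture/replay requirements bypass
 * the shader cache and get an uncached allocation so their VA can be pinned.
 */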
357 static VkResult
radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
                   const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, struct radv_ray_tracing_pipeline *pipeline,
                   bool monolithic, struct radv_shader_stage *stage, uint32_t *stack_size,
                   struct radv_ray_tracing_stage_info *stage_info,
                   const struct radv_ray_tracing_stage_info *traversal_stage_info,
                   struct radv_serialized_shader_arena_block *replay_block, bool skip_shaders_cache,
                   struct radv_shader **out_shader)
365 {
366 struct radv_physical_device *pdev = radv_device_physical(device);
367 struct radv_instance *instance = radv_physical_device_instance(pdev);
368
369 struct radv_shader_binary *binary;
370 bool keep_executable_info = radv_pipeline_capture_shaders(device, pipeline->base.base.create_flags);
371 bool keep_statistic_info = radv_pipeline_capture_shader_stats(device, pipeline->base.base.create_flags);
372
373 radv_nir_lower_rt_io(stage->nir, monolithic, 0);
374
375 /* Gather shader info. */
376 nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir));
377 radv_nir_shader_info_init(stage->stage, MESA_SHADER_NONE, &stage->info);
378 radv_nir_shader_info_pass(device, stage->nir, &stage->layout, &stage->key, NULL, RADV_PIPELINE_RAY_TRACING, false,
379 &stage->info);
380
381 /* Declare shader arguments. */
382 radv_declare_shader_args(device, NULL, &stage->info, stage->stage, MESA_SHADER_NONE, &stage->args);
383
384 stage->info.user_sgprs_locs = stage->args.user_sgprs_locs;
385 stage->info.inline_push_constant_mask = stage->args.ac.inline_push_const_mask;
386
   /* Move the ray tracing system values that are set by rt_trace_ray to the top of the shader
    * to prevent them from being overwritten by subsequent rt_trace_ray calls.
    */
390 NIR_PASS_V(stage->nir, move_rt_instructions);
391
392 uint32_t num_resume_shaders = 0;
393 nir_shader **resume_shaders = NULL;
394
395 if (stage->stage != MESA_SHADER_INTERSECTION && !monolithic) {
396 nir_builder b = nir_builder_at(nir_after_impl(nir_shader_get_entrypoint(stage->nir)));
397 nir_rt_return_amd(&b);
398
399 const nir_lower_shader_calls_options opts = {
400 .address_format = nir_address_format_32bit_offset,
401 .stack_alignment = 16,
402 .localized_loads = true,
403 .vectorizer_callback = ac_nir_mem_vectorize_callback,
404 .vectorizer_data = &(struct ac_nir_config){pdev->info.gfx_level, !radv_use_llvm_for_stage(pdev, stage->stage)},
405 };
406 nir_lower_shader_calls(stage->nir, &opts, &resume_shaders, &num_resume_shaders, stage->nir);
407 }
408
409 unsigned num_shaders = num_resume_shaders + 1;
410 nir_shader **shaders = ralloc_array(stage->nir, nir_shader *, num_shaders);
411 if (!shaders)
412 return VK_ERROR_OUT_OF_HOST_MEMORY;
413
414 shaders[0] = stage->nir;
415 for (uint32_t i = 0; i < num_resume_shaders; i++)
416 shaders[i + 1] = resume_shaders[i];
417
418 if (stage_info)
419 memset(stage_info->unused_args, 0xFF, sizeof(stage_info->unused_args));
420
421 /* Postprocess shader parts. */
422 for (uint32_t i = 0; i < num_shaders; i++) {
423 struct radv_shader_stage temp_stage = *stage;
424 temp_stage.nir = shaders[i];
425 radv_nir_lower_rt_abi(temp_stage.nir, pCreateInfo, &temp_stage.args, &stage->info, stack_size, i > 0, device,
426 pipeline, monolithic, traversal_stage_info);
427
428 /* Info might be out-of-date after inlining in radv_nir_lower_rt_abi(). */
429 nir_shader_gather_info(temp_stage.nir, nir_shader_get_entrypoint(temp_stage.nir));
430
431 radv_optimize_nir(temp_stage.nir, stage->key.optimisations_disabled);
432 radv_postprocess_nir(device, NULL, &temp_stage);
433
434 if (stage_info)
435 radv_gather_unused_args(stage_info, shaders[i]);
436 }
437
438 bool dump_shader = radv_can_dump_shader(device, shaders[0]);
439 bool dump_nir = dump_shader && (instance->debug_flags & RADV_DEBUG_DUMP_NIR);
440 bool replayable =
441 pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
442
443 if (dump_shader) {
444 simple_mtx_lock(&instance->shader_dump_mtx);
445
446 if (dump_nir) {
447 for (uint32_t i = 0; i < num_shaders; i++)
448 nir_print_shader(shaders[i], stderr);
449 }
450 }
451
452 char *nir_string = NULL;
453 if (keep_executable_info || dump_shader)
454 nir_string = radv_dump_nir_shaders(instance, shaders, num_shaders);
455
456 /* Compile NIR shader to AMD assembly. */
457 binary =
458 radv_shader_nir_to_asm(device, stage, shaders, num_shaders, NULL, keep_executable_info, keep_statistic_info);
459 struct radv_shader *shader;
460 if (replay_block || replayable) {
461 VkResult result = radv_shader_create_uncached(device, binary, replayable, replay_block, &shader);
462 if (result != VK_SUCCESS) {
463 if (dump_shader)
464 simple_mtx_unlock(&instance->shader_dump_mtx);
465
466 free(binary);
467 return result;
468 }
469 } else
470 shader = radv_shader_create(device, cache, binary, skip_shaders_cache || dump_shader);
471
472 if (shader) {
473 shader->nir_string = nir_string;
474
475 radv_shader_dump_debug_info(device, dump_shader, binary, shader, shaders, num_shaders, &stage->info);
476
477 if (shader && keep_executable_info && stage->spirv.size) {
478 shader->spirv = malloc(stage->spirv.size);
479 memcpy(shader->spirv, stage->spirv.data, stage->spirv.size);
480 shader->spirv_size = stage->spirv.size;
481 }
482 }
483
484 if (dump_shader)
485 simple_mtx_unlock(&instance->shader_dump_mtx);
486
487 free(binary);
488
489 *out_shader = shader;
490
491 if (radv_can_dump_shader_stats(device, stage->nir))
492 radv_dump_shader_stats(device, &pipeline->base.base, shader, stage->nir->info.stage, stderr);
493
494 return shader ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
495 }
496
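/* Track whether a trace_ray argument is the same constant at every call site:
 * UNINITIALIZED -> VALID on the first value seen, VALID -> INVALID once a differing (or
 * non-constant) value shows up. Arguments that stay VALID can be treated as compile-time
 * constants when building the traversal shader.
 */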
497 static void
radv_update_const_info(enum radv_rt_const_arg_state *state, bool equal)
499 {
500 if (*state == RADV_RT_CONST_ARG_STATE_UNINITIALIZED)
501 *state = RADV_RT_CONST_ARG_STATE_VALID;
502 else if (*state == RADV_RT_CONST_ARG_STATE_VALID && !equal)
503 *state = RADV_RT_CONST_ARG_STATE_INVALID;
504 }
505
506 static void
radv_gather_trace_ray_src(struct radv_rt_const_arg_info *info, nir_src src)
508 {
509 if (nir_src_is_const(src)) {
510 radv_update_const_info(&info->state, info->value == nir_src_as_uint(src));
511 info->value = nir_src_as_uint(src);
512 } else {
513 info->state = RADV_RT_CONST_ARG_STATE_INVALID;
514 }
515 }
516
517 static void
radv_rt_const_arg_info_combine(struct radv_rt_const_arg_info *dst, const struct radv_rt_const_arg_info *src)
519 {
520 if (src->state != RADV_RT_CONST_ARG_STATE_UNINITIALIZED) {
521 radv_update_const_info(&dst->state, dst->value == src->value);
522 if (src->state == RADV_RT_CONST_ARG_STATE_INVALID)
523 dst->state = RADV_RT_CONST_ARG_STATE_INVALID;
524 dst->value = src->value;
525 }
526 }
527
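/* Scan a stage for trace_ray calls to decide whether it can be inlined into the raygen shader and
 * to collect per-argument constant info (tmin/tmax, SBT offset/stride, miss index) as well as the
 * ray flag bits that are set or unset at every call site. Raygen, any-hit and intersection shaders
 * are always considered inlinable; callable shaders never are.
 */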
528 static struct radv_ray_tracing_stage_info
radv_gather_ray_tracing_stage_info(nir_shader *nir)
530 {
531 struct radv_ray_tracing_stage_info info = {
532 .can_inline = true,
533 .set_flags = 0xFFFFFFFF,
534 .unset_flags = 0xFFFFFFFF,
535 };
536
537 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
538 nir_foreach_block (block, impl) {
539 nir_foreach_instr (instr, block) {
540 if (instr->type != nir_instr_type_intrinsic)
541 continue;
542
543 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
544 if (intr->intrinsic != nir_intrinsic_trace_ray)
545 continue;
546
547 info.can_inline = false;
548
549 radv_gather_trace_ray_src(&info.tmin, intr->src[7]);
550 radv_gather_trace_ray_src(&info.tmax, intr->src[9]);
551 radv_gather_trace_ray_src(&info.sbt_offset, intr->src[3]);
552 radv_gather_trace_ray_src(&info.sbt_stride, intr->src[4]);
553 radv_gather_trace_ray_src(&info.miss_index, intr->src[5]);
554
555 nir_src flags = intr->src[1];
556 if (nir_src_is_const(flags)) {
557 info.set_flags &= nir_src_as_uint(flags);
558 info.unset_flags &= ~nir_src_as_uint(flags);
559 } else {
560 info.set_flags = 0;
561 info.unset_flags = 0;
562 }
563 }
564 }
565
566 if (nir->info.stage == MESA_SHADER_RAYGEN || nir->info.stage == MESA_SHADER_ANY_HIT ||
567 nir->info.stage == MESA_SHADER_INTERSECTION)
568 info.can_inline = true;
569 else if (nir->info.stage == MESA_SHADER_CALLABLE)
570 info.can_inline = false;
571
572 return info;
573 }
574
575 static inline bool
radv_ray_tracing_stage_is_always_inlined(struct radv_ray_tracing_stage *stage)
577 {
578 return stage->stage == MESA_SHADER_ANY_HIT || stage->stage == MESA_SHADER_INTERSECTION;
579 }
580
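/* Compile every stage of the pipeline. The work is split into phases: first all SPIR-V is turned
 * into NIR so the inlinability of the whole pipeline (monolithic vs. separate compilation) is
 * known, then NIR is retained for stages that may later be inlined, then recursive stages are
 * compiled to ISA, and finally a traversal shader is built and compiled unless every raygen shader
 * is monolithic.
 */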
581 static VkResult
radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *cache,
                        const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                        const VkPipelineCreationFeedbackCreateInfo *creation_feedback,
                        const struct radv_shader_stage_key *stage_keys, struct radv_ray_tracing_pipeline *pipeline,
                        struct radv_serialized_shader_arena_block *capture_replay_handles, bool skip_shaders_cache)
587 {
588 VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
589
590 if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
591 return VK_PIPELINE_COMPILE_REQUIRED;
592 VkResult result = VK_SUCCESS;
593
594 struct radv_ray_tracing_stage *rt_stages = pipeline->stages;
595
596 struct radv_shader_stage *stages = calloc(pCreateInfo->stageCount, sizeof(struct radv_shader_stage));
597 if (!stages)
598 return VK_ERROR_OUT_OF_HOST_MEMORY;
599
600 bool library = pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR;
601
602 bool monolithic = !library;
603 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
604 if (rt_stages[i].shader || rt_stages[i].nir)
605 continue;
606
607 int64_t stage_start = os_time_get_nano();
608
609 struct radv_shader_stage *stage = &stages[i];
610 gl_shader_stage s = vk_to_mesa_shader_stage(pCreateInfo->pStages[i].stage);
611 radv_pipeline_stage_init(pipeline->base.base.create_flags, &pCreateInfo->pStages[i],
612 pipeline_layout, &stage_keys[s], stage);
613
614 /* precompile the shader */
615 stage->nir = radv_shader_spirv_to_nir(device, stage, NULL, false);
616
617 NIR_PASS(_, stage->nir, radv_nir_lower_hit_attrib_derefs);
618
619 rt_stages[i].info = radv_gather_ray_tracing_stage_info(stage->nir);
620
621 stage->feedback.duration = os_time_get_nano() - stage_start;
622 }
623
624 bool has_callable = false;
625 /* TODO: Recompile recursive raygen shaders instead. */
626 bool raygen_imported = false;
627 for (uint32_t i = 0; i < pipeline->stage_count; i++) {
628 has_callable |= rt_stages[i].stage == MESA_SHADER_CALLABLE;
629 monolithic &= rt_stages[i].info.can_inline;
630
631 if (i >= pCreateInfo->stageCount)
632 raygen_imported |= rt_stages[i].stage == MESA_SHADER_RAYGEN;
633 }
634
635 for (uint32_t idx = 0; idx < pCreateInfo->stageCount; idx++) {
636 if (rt_stages[idx].shader || rt_stages[idx].nir)
637 continue;
638
639 int64_t stage_start = os_time_get_nano();
640
641 struct radv_shader_stage *stage = &stages[idx];
642
643 /* Cases in which we need to keep around the NIR:
644 * - pipeline library: The final pipeline might be monolithic in which case it will need every NIR shader.
645 * If there is a callable shader, we can be sure that the final pipeline won't be
646 * monolithic.
647 * - non-recursive: Non-recursive shaders are inlined into the traversal shader.
648 * - monolithic: Callable shaders (chit/miss) are inlined into the raygen shader.
649 */
650 bool always_inlined = radv_ray_tracing_stage_is_always_inlined(&rt_stages[idx]);
651 bool nir_needed =
652 (library && !has_callable) || always_inlined || (monolithic && rt_stages[idx].stage != MESA_SHADER_RAYGEN);
653 nir_needed &= !rt_stages[idx].nir;
654 if (nir_needed) {
655 const bool cached = !stage->key.optimisations_disabled &&
656 !(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR);
657 rt_stages[idx].stack_size = stage->nir->scratch_size;
658 rt_stages[idx].nir = radv_pipeline_cache_nir_to_handle(device, cache, stage->nir, rt_stages[idx].sha1, cached);
659 }
660
661 stage->feedback.duration += os_time_get_nano() - stage_start;
662 }
663
664 for (uint32_t idx = 0; idx < pCreateInfo->stageCount; idx++) {
665 int64_t stage_start = os_time_get_nano();
666 struct radv_shader_stage *stage = &stages[idx];
667
      /* Cases in which we need to compile the shader (raygen/callable/chit/miss):
       * TODO: - monolithic: Extend the loop to cover imported stages and force compilation of imported raygen
       *                     shaders since pipeline library shaders use separate compilation.
       *       - separate: Compile any recursive stage if it wasn't compiled yet.
       */
673 bool shader_needed = !radv_ray_tracing_stage_is_always_inlined(&rt_stages[idx]) && !rt_stages[idx].shader;
674 if (rt_stages[idx].stage == MESA_SHADER_CLOSEST_HIT || rt_stages[idx].stage == MESA_SHADER_MISS)
675 shader_needed &= !monolithic || raygen_imported;
676
677 if (shader_needed) {
678 uint32_t stack_size = 0;
679 struct radv_serialized_shader_arena_block *replay_block =
680 capture_replay_handles[idx].arena_va ? &capture_replay_handles[idx] : NULL;
681
682 bool monolithic_raygen = monolithic && stage->stage == MESA_SHADER_RAYGEN;
683
684 result =
685 radv_rt_nir_to_asm(device, cache, pCreateInfo, pipeline, monolithic_raygen, stage, &stack_size,
686 &rt_stages[idx].info, NULL, replay_block, skip_shaders_cache, &rt_stages[idx].shader);
687 if (result != VK_SUCCESS)
688 goto cleanup;
689
690 assert(rt_stages[idx].stack_size <= stack_size);
691 rt_stages[idx].stack_size = stack_size;
692 }
693
694 if (creation_feedback && creation_feedback->pipelineStageCreationFeedbackCount) {
695 assert(idx < creation_feedback->pipelineStageCreationFeedbackCount);
696 stage->feedback.duration += os_time_get_nano() - stage_start;
697 creation_feedback->pPipelineStageCreationFeedbacks[idx] = stage->feedback;
698 }
699 }
700
701 /* Monolithic raygen shaders do not need a traversal shader. Skip compiling one if there are only monolithic raygen
702 * shaders.
703 */
704 bool traversal_needed = !library && (!monolithic || raygen_imported);
705 if (!traversal_needed) {
706 result = VK_SUCCESS;
707 goto cleanup;
708 }
709
710 struct radv_ray_tracing_stage_info traversal_info = {
711 .set_flags = 0xFFFFFFFF,
712 .unset_flags = 0xFFFFFFFF,
713 };
714
715 memset(traversal_info.unused_args, 0xFF, sizeof(traversal_info.unused_args));
716
717 for (uint32_t i = 0; i < pipeline->stage_count; i++) {
718 if (!pipeline->stages[i].shader)
719 continue;
720
721 struct radv_ray_tracing_stage_info *info = &pipeline->stages[i].info;
722
723 BITSET_AND(traversal_info.unused_args, traversal_info.unused_args, info->unused_args);
724
725 radv_rt_const_arg_info_combine(&traversal_info.tmin, &info->tmin);
726 radv_rt_const_arg_info_combine(&traversal_info.tmax, &info->tmax);
727 radv_rt_const_arg_info_combine(&traversal_info.sbt_offset, &info->sbt_offset);
728 radv_rt_const_arg_info_combine(&traversal_info.sbt_stride, &info->sbt_stride);
729 radv_rt_const_arg_info_combine(&traversal_info.miss_index, &info->miss_index);
730
731 traversal_info.set_flags &= info->set_flags;
732 traversal_info.unset_flags &= info->unset_flags;
733 }
734
735 /* create traversal shader */
736 nir_shader *traversal_nir = radv_build_traversal_shader(device, pipeline, pCreateInfo, &traversal_info);
737 struct radv_shader_stage traversal_stage = {
738 .stage = MESA_SHADER_INTERSECTION,
739 .nir = traversal_nir,
740 .key = stage_keys[MESA_SHADER_INTERSECTION],
741 };
742 radv_shader_layout_init(pipeline_layout, MESA_SHADER_INTERSECTION, &traversal_stage.layout);
743 result =
744 radv_rt_nir_to_asm(device, cache, pCreateInfo, pipeline, false, &traversal_stage, NULL, NULL, &traversal_info,
745 NULL, skip_shaders_cache, &pipeline->base.base.shaders[MESA_SHADER_INTERSECTION]);
746 ralloc_free(traversal_nir);
747
748 cleanup:
749 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++)
750 ralloc_free(stages[i].nir);
751 free(stages);
752 return result;
753 }
754
755 static bool
radv_rt_pipeline_has_dynamic_stack_size(const VkRayTracingPipelineCreateInfoKHR *pCreateInfo)
757 {
758 if (!pCreateInfo->pDynamicState)
759 return false;
760
761 for (unsigned i = 0; i < pCreateInfo->pDynamicState->dynamicStateCount; ++i) {
762 if (pCreateInfo->pDynamicState->pDynamicStates[i] == VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR)
763 return true;
764 }
765
766 return false;
767 }
768
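/* Compute the default pipeline stack size, roughly following the default stack size guidance in
 * the Vulkan spec: the deepest raygen stack, plus one level of hit/miss processing (where
 * intersection and any-hit run on top of the scheduled closest-hit/miss), plus the remaining
 * recursion levels of closest-hit/miss, plus two levels of callable shaders. For example, with
 * maxPipelineRayRecursionDepth = 2 this evaluates to
 *    raygen + max(chit_miss, isec + ahit) + 1 * chit_miss + 2 * callable.
 * A dynamic stack size (VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR) is marked with -1.
 */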
769 static void
compute_rt_stack_size(const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, struct radv_ray_tracing_pipeline *pipeline)
771 {
772 if (radv_rt_pipeline_has_dynamic_stack_size(pCreateInfo)) {
773 pipeline->stack_size = -1u;
774 return;
775 }
776
777 unsigned raygen_size = 0;
778 unsigned callable_size = 0;
779 unsigned chit_miss_size = 0;
780 unsigned intersection_size = 0;
781 unsigned any_hit_size = 0;
782
783 for (unsigned i = 0; i < pipeline->stage_count; ++i) {
784 uint32_t size = pipeline->stages[i].stack_size;
785 switch (pipeline->stages[i].stage) {
786 case MESA_SHADER_RAYGEN:
787 raygen_size = MAX2(raygen_size, size);
788 break;
789 case MESA_SHADER_CLOSEST_HIT:
790 case MESA_SHADER_MISS:
791 chit_miss_size = MAX2(chit_miss_size, size);
792 break;
793 case MESA_SHADER_CALLABLE:
794 callable_size = MAX2(callable_size, size);
795 break;
796 case MESA_SHADER_INTERSECTION:
797 intersection_size = MAX2(intersection_size, size);
798 break;
799 case MESA_SHADER_ANY_HIT:
800 any_hit_size = MAX2(any_hit_size, size);
801 break;
802 default:
803 unreachable("Invalid stage type in RT shader");
804 }
805 }
806 pipeline->stack_size =
807 raygen_size +
808 MIN2(pCreateInfo->maxPipelineRayRecursionDepth, 1) * MAX2(chit_miss_size, intersection_size + any_hit_size) +
809 MAX2(0, (int)(pCreateInfo->maxPipelineRayRecursionDepth) - 1) * chit_miss_size + 2 * callable_size;
810 }
811
812 static void
combine_config(struct ac_shader_config *config, struct ac_shader_config *other)
814 {
815 config->num_sgprs = MAX2(config->num_sgprs, other->num_sgprs);
816 config->num_vgprs = MAX2(config->num_vgprs, other->num_vgprs);
817 config->num_shared_vgprs = MAX2(config->num_shared_vgprs, other->num_shared_vgprs);
818 config->spilled_sgprs = MAX2(config->spilled_sgprs, other->spilled_sgprs);
819 config->spilled_vgprs = MAX2(config->spilled_vgprs, other->spilled_vgprs);
820 config->lds_size = MAX2(config->lds_size, other->lds_size);
821 config->scratch_bytes_per_wave = MAX2(config->scratch_bytes_per_wave, other->scratch_bytes_per_wave);
822
823 assert(config->float_mode == other->float_mode);
824 }
825
826 static void
postprocess_rt_config(struct ac_shader_config *config, enum amd_gfx_level gfx_level, unsigned wave_size)
828 {
829 config->rsrc1 =
830 (config->rsrc1 & C_00B848_VGPRS) | S_00B848_VGPRS((config->num_vgprs - 1) / (wave_size == 32 ? 8 : 4));
831 if (gfx_level < GFX10)
832 config->rsrc1 = (config->rsrc1 & C_00B848_SGPRS) | S_00B848_SGPRS((config->num_sgprs - 1) / 8);
833
834 config->rsrc2 = (config->rsrc2 & C_00B84C_LDS_SIZE) | S_00B84C_LDS_SIZE(config->lds_size);
835 config->rsrc3 = (config->rsrc3 & C_00B8A0_SHARED_VGPR_CNT) | S_00B8A0_SHARED_VGPR_CNT(config->num_shared_vgprs / 8);
836 }
837
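/* Build the RT prolog shader and give it a register/LDS/scratch configuration that is the maximum
 * over all shaders in the pipeline, so the prolog can dispatch into any of them (including the
 * traversal shader) without running out of resources.
 */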
838 static void
compile_rt_prolog(struct radv_device *device, struct radv_ray_tracing_pipeline *pipeline)
840 {
841 const struct radv_physical_device *pdev = radv_device_physical(device);
842
843 pipeline->prolog = radv_create_rt_prolog(device);
844
845 /* create combined config */
846 struct ac_shader_config *config = &pipeline->prolog->config;
847 for (unsigned i = 0; i < pipeline->stage_count; i++)
848 if (pipeline->stages[i].shader)
849 combine_config(config, &pipeline->stages[i].shader->config);
850
851 if (pipeline->base.base.shaders[MESA_SHADER_INTERSECTION])
852 combine_config(config, &pipeline->base.base.shaders[MESA_SHADER_INTERSECTION]->config);
853
854 postprocess_rt_config(config, pdev->info.gfx_level, pdev->rt_wave_size);
855
856 pipeline->prolog->max_waves = radv_get_max_waves(device, config, &pipeline->prolog->info);
857 }
858
859 void
radv_ray_tracing_pipeline_hash(const struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                               const struct radv_ray_tracing_state_key *rt_state, unsigned char *hash)
862 {
863 VK_FROM_HANDLE(radv_pipeline_layout, layout, pCreateInfo->layout);
864 struct mesa_sha1 ctx;
865
866 _mesa_sha1_init(&ctx);
867 radv_pipeline_hash(device, layout, &ctx);
868
869 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
870 _mesa_sha1_update(&ctx, rt_state->stages[i].sha1, sizeof(rt_state->stages[i].sha1));
871 }
872
873 for (uint32_t i = 0; i < pCreateInfo->groupCount; i++) {
874 _mesa_sha1_update(&ctx, &pCreateInfo->pGroups[i].type, sizeof(pCreateInfo->pGroups[i].type));
875 _mesa_sha1_update(&ctx, &pCreateInfo->pGroups[i].generalShader, sizeof(pCreateInfo->pGroups[i].generalShader));
876 _mesa_sha1_update(&ctx, &pCreateInfo->pGroups[i].anyHitShader, sizeof(pCreateInfo->pGroups[i].anyHitShader));
877 _mesa_sha1_update(&ctx, &pCreateInfo->pGroups[i].closestHitShader,
878 sizeof(pCreateInfo->pGroups[i].closestHitShader));
879 _mesa_sha1_update(&ctx, &pCreateInfo->pGroups[i].intersectionShader,
880 sizeof(pCreateInfo->pGroups[i].intersectionShader));
881 _mesa_sha1_update(&ctx, &rt_state->groups[i].handle, sizeof(struct radv_pipeline_group_handle));
882 }
883
884 if (pCreateInfo->pLibraryInfo) {
885 for (uint32_t i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
886 VK_FROM_HANDLE(radv_pipeline, lib_pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
887 struct radv_ray_tracing_pipeline *lib = radv_pipeline_to_ray_tracing(lib_pipeline);
888 _mesa_sha1_update(&ctx, lib->base.base.sha1, SHA1_DIGEST_LENGTH);
889 }
890 }
891
892 const uint64_t pipeline_flags =
893 vk_rt_pipeline_create_flags(pCreateInfo) &
894 (VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR | VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR |
895 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR |
896 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR |
897 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR |
898 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR | VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR);
899 _mesa_sha1_update(&ctx, &pipeline_flags, sizeof(pipeline_flags));
900
901 _mesa_sha1_final(&ctx, hash);
902 }
903
904 static VkResult
radv_rt_pipeline_compile(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                         struct radv_ray_tracing_pipeline *pipeline, struct vk_pipeline_cache *cache,
                         const struct radv_ray_tracing_state_key *rt_state,
                         struct radv_serialized_shader_arena_block *capture_replay_blocks,
                         const VkPipelineCreationFeedbackCreateInfo *creation_feedback)
910 {
911 bool skip_shaders_cache = radv_pipeline_skip_shaders_cache(device, &pipeline->base.base);
912 const bool emit_ray_history = !!device->rra_trace.ray_history_buffer;
913 VkPipelineCreationFeedback pipeline_feedback = {
914 .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT,
915 };
916 VkResult result = VK_SUCCESS;
917
918 int64_t pipeline_start = os_time_get_nano();
919
920 radv_ray_tracing_pipeline_hash(device, pCreateInfo, rt_state, pipeline->base.base.sha1);
921 pipeline->base.base.pipeline_hash = *(uint64_t *)pipeline->base.base.sha1;
922
923 /* Skip the shaders cache when any of the below are true:
924 * - ray history is enabled
925 * - group handles are saved and reused on a subsequent run (ie. capture/replay)
926 */
927 if (emit_ray_history || (pipeline->base.base.create_flags &
928 VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) {
929 skip_shaders_cache = true;
930 }
931
932 bool found_in_application_cache = true;
933 if (!skip_shaders_cache &&
934 radv_ray_tracing_pipeline_cache_search(device, cache, pipeline, &found_in_application_cache)) {
935 if (found_in_application_cache)
936 pipeline_feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT;
937 result = VK_SUCCESS;
938 goto done;
939 }
940
941 result = radv_rt_compile_shaders(device, cache, pCreateInfo, creation_feedback, rt_state->stage_keys, pipeline,
942 capture_replay_blocks, skip_shaders_cache);
943
944 if (result != VK_SUCCESS)
945 return result;
946
947 if (!skip_shaders_cache)
948 radv_ray_tracing_pipeline_cache_insert(device, cache, pipeline, pCreateInfo->stageCount);
949
950 done:
951 pipeline_feedback.duration = os_time_get_nano() - pipeline_start;
952
953 if (creation_feedback)
954 *creation_feedback->pPipelineCreationFeedback = pipeline_feedback;
955
956 return result;
957 }
958
959 void
radv_ray_tracing_state_key_finish(struct radv_ray_tracing_state_key *rt_state)
961 {
962 free(rt_state->stages);
963 free(rt_state->groups);
964 }
965
966 VkResult
radv_generate_ray_tracing_state_key(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                                    struct radv_ray_tracing_state_key *rt_state)
969 {
970 VkResult result;
971
972 memset(rt_state, 0, sizeof(*rt_state));
973
974 /* Count the total number of stages/groups. */
975 rt_state->stage_count = pCreateInfo->stageCount;
976 rt_state->group_count = pCreateInfo->groupCount;
977
978 if (pCreateInfo->pLibraryInfo) {
979 for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
980 VK_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
981 struct radv_ray_tracing_pipeline *library_pipeline = radv_pipeline_to_ray_tracing(pipeline);
982
983 rt_state->stage_count += library_pipeline->stage_count;
984 rt_state->group_count += library_pipeline->group_count;
985 }
986 }
987
988 rt_state->stages = calloc(rt_state->stage_count, sizeof(*rt_state->stages));
989 if (!rt_state->stages)
990 return VK_ERROR_OUT_OF_HOST_MEMORY;
991
992 rt_state->groups = calloc(rt_state->group_count, sizeof(*rt_state->groups));
993 if (!rt_state->groups) {
994 result = VK_ERROR_OUT_OF_HOST_MEMORY;
995 goto fail;
996 }
997
998 /* Initialize stages/stage_keys/groups info. */
999 radv_rt_fill_stage_info(pCreateInfo, rt_state->stages);
1000
1001 radv_generate_rt_shaders_key(device, pCreateInfo, rt_state->stage_keys);
1002
1003 VkPipelineCreateFlags2 create_flags = vk_rt_pipeline_create_flags(pCreateInfo);
1004 radv_init_rt_stage_hashes(device, create_flags, pCreateInfo, rt_state->stages, rt_state->stage_keys);
1005
1006 result = radv_rt_fill_group_info(device, pCreateInfo, rt_state->stages, rt_state->groups);
1007 if (result != VK_SUCCESS)
1008 goto fail;
1009
1010 return VK_SUCCESS;
1011
1012 fail:
1013 radv_ray_tracing_state_key_finish(rt_state);
1014 return result;
1015 }
1016
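/* Reconstruct the pipeline from VK_KHR_pipeline_binary blobs instead of compiling: each binary
 * carries a header plus an optional serialized shader and/or NIR for one stage (or for the
 * traversal shader), and the pipeline hash is recomputed as a BLAKE3 over the imported shader keys.
 */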
1017 static VkResult
radv_ray_tracing_pipeline_import_binary(struct radv_device *device, struct radv_ray_tracing_pipeline *pipeline,
                                        const VkPipelineBinaryInfoKHR *binary_info)
1020 {
1021 blake3_hash pipeline_hash;
1022 struct mesa_blake3 ctx;
1023
1024 _mesa_blake3_init(&ctx);
1025
1026 for (uint32_t i = 0; i < binary_info->binaryCount; i++) {
1027 VK_FROM_HANDLE(radv_pipeline_binary, pipeline_binary, binary_info->pPipelineBinaries[i]);
1028 struct radv_shader *shader;
1029 struct blob_reader blob;
1030
1031 blob_reader_init(&blob, pipeline_binary->data, pipeline_binary->size);
1032
1033 const struct radv_ray_tracing_binary_header *header =
1034 (const struct radv_ray_tracing_binary_header *)blob_read_bytes(&blob, sizeof(*header));
1035
1036 if (header->is_traversal_shader) {
1037 shader = radv_shader_deserialize(device, pipeline_binary->key, sizeof(pipeline_binary->key), &blob);
1038 if (!shader)
1039 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1040
1041 pipeline->base.base.shaders[MESA_SHADER_INTERSECTION] = shader;
1042
1043 _mesa_blake3_update(&ctx, pipeline_binary->key, sizeof(pipeline_binary->key));
1044 continue;
1045 }
1046
1047 memcpy(&pipeline->stages[i].info, &header->stage_info, sizeof(pipeline->stages[i].info));
1048 pipeline->stages[i].stack_size = header->stack_size;
1049
1050 if (header->has_shader) {
1051 shader = radv_shader_deserialize(device, pipeline_binary->key, sizeof(pipeline_binary->key), &blob);
1052 if (!shader)
1053 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1054
1055 pipeline->stages[i].shader = shader;
1056
1057 _mesa_blake3_update(&ctx, pipeline_binary->key, sizeof(pipeline_binary->key));
1058 }
1059
1060 if (header->has_nir) {
1061 nir_shader *nir = nir_deserialize(NULL, NULL, &blob);
1062
1063 pipeline->stages[i].nir = radv_pipeline_cache_nir_to_handle(device, NULL, nir, header->stage_sha1, false);
1064 ralloc_free(nir);
1065
1066 if (!pipeline->stages[i].nir)
1067 return VK_ERROR_OUT_OF_HOST_MEMORY;
1068 }
1069 }
1070
1071 _mesa_blake3_final(&ctx, pipeline_hash);
1072
1073 pipeline->base.base.pipeline_hash = *(uint64_t *)pipeline_hash;
1074
1075 return VK_SUCCESS;
1076 }
1077
1078 static VkResult
radv_rt_pipeline_create(VkDevice _device, VkPipelineCache _cache, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline)
1081 {
1082 VK_FROM_HANDLE(radv_device, device, _device);
1083 VK_FROM_HANDLE(vk_pipeline_cache, cache, _cache);
1084 VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
1085 struct radv_ray_tracing_state_key rt_state;
1086 VkResult result;
1087 const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
1088 vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
1089
1090 result = radv_generate_ray_tracing_state_key(device, pCreateInfo, &rt_state);
1091 if (result != VK_SUCCESS)
1092 return result;
1093
1094 VK_MULTIALLOC(ma);
1095 VK_MULTIALLOC_DECL(&ma, struct radv_ray_tracing_pipeline, pipeline, 1);
1096 VK_MULTIALLOC_DECL(&ma, struct radv_ray_tracing_stage, stages, rt_state.stage_count);
1097 VK_MULTIALLOC_DECL(&ma, struct radv_ray_tracing_group, groups, rt_state.group_count);
1098 VK_MULTIALLOC_DECL(&ma, struct radv_serialized_shader_arena_block, capture_replay_blocks, pCreateInfo->stageCount);
1099 if (!vk_multialloc_zalloc2(&ma, &device->vk.alloc, pAllocator, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT)) {
1100 radv_ray_tracing_state_key_finish(&rt_state);
1101 return VK_ERROR_OUT_OF_HOST_MEMORY;
1102 }
1103
1104 radv_pipeline_init(device, &pipeline->base.base, RADV_PIPELINE_RAY_TRACING);
1105 pipeline->base.base.create_flags = vk_rt_pipeline_create_flags(pCreateInfo);
1106 pipeline->stage_count = rt_state.stage_count;
1107 pipeline->non_imported_stage_count = pCreateInfo->stageCount;
1108 pipeline->group_count = rt_state.group_count;
1109 pipeline->stages = stages;
1110 pipeline->groups = groups;
1111
1112 memcpy(pipeline->stages, rt_state.stages, rt_state.stage_count * sizeof(struct radv_ray_tracing_stage));
1113 memcpy(pipeline->groups, rt_state.groups, rt_state.group_count * sizeof(struct radv_ray_tracing_group));
1114
1115 /* cache robustness state for making merged shaders */
1116 if (rt_state.stage_keys[MESA_SHADER_INTERSECTION].storage_robustness2)
1117 pipeline->traversal_storage_robustness2 = true;
1118
1119 if (rt_state.stage_keys[MESA_SHADER_INTERSECTION].uniform_robustness2)
1120 pipeline->traversal_uniform_robustness2 = true;
1121
1122 result = radv_rt_init_capture_replay(device, pCreateInfo, stages, pipeline->groups, capture_replay_blocks);
1123 if (result != VK_SUCCESS)
1124 goto fail;
1125
1126 const VkPipelineBinaryInfoKHR *binary_info = vk_find_struct_const(pCreateInfo->pNext, PIPELINE_BINARY_INFO_KHR);
1127
1128 if (binary_info && binary_info->binaryCount > 0) {
1129 result = radv_ray_tracing_pipeline_import_binary(device, pipeline, binary_info);
1130 } else {
1131 result = radv_rt_pipeline_compile(device, pCreateInfo, pipeline, cache, &rt_state, capture_replay_blocks,
1132 creation_feedback);
1133 if (result != VK_SUCCESS)
1134 goto fail;
1135 }
1136
1137 if (!(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)) {
1138 compute_rt_stack_size(pCreateInfo, pipeline);
1139 compile_rt_prolog(device, pipeline);
1140
1141 radv_compute_pipeline_init(&pipeline->base, pipeline_layout, pipeline->prolog);
1142 }
1143
1144 /* write shader VAs into group handles */
1145 for (unsigned i = 0; i < pipeline->group_count; i++) {
1146 if (pipeline->groups[i].recursive_shader != VK_SHADER_UNUSED_KHR) {
1147 struct radv_shader *shader = pipeline->stages[pipeline->groups[i].recursive_shader].shader;
1148 if (shader)
1149 pipeline->groups[i].handle.recursive_shader_ptr = shader->va | radv_get_rt_priority(shader->info.stage);
1150 }
1151 }
1152
1153 *pPipeline = radv_pipeline_to_handle(&pipeline->base.base);
1154 radv_rmv_log_rt_pipeline_create(device, pipeline);
1155
1156 radv_ray_tracing_state_key_finish(&rt_state);
1157 return result;
1158
1159 fail:
1160 radv_ray_tracing_state_key_finish(&rt_state);
1161 radv_pipeline_destroy(device, &pipeline->base.base, pAllocator);
1162 return result;
1163 }
1164
1165 void
radv_destroy_ray_tracing_pipeline(struct radv_device *device, struct radv_ray_tracing_pipeline *pipeline)
1167 {
1168 for (unsigned i = 0; i < pipeline->stage_count; i++) {
1169 if (pipeline->stages[i].nir)
1170 vk_pipeline_cache_object_unref(&device->vk, pipeline->stages[i].nir);
1171 if (pipeline->stages[i].shader)
1172 radv_shader_unref(device, pipeline->stages[i].shader);
1173 }
1174
1175 if (pipeline->prolog)
1176 radv_shader_unref(device, pipeline->prolog);
1177 if (pipeline->base.base.shaders[MESA_SHADER_INTERSECTION])
1178 radv_shader_unref(device, pipeline->base.base.shaders[MESA_SHADER_INTERSECTION]);
1179 }
1180
1181 VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateRayTracingPipelinesKHR(VkDevice _device, VkDeferredOperationKHR deferredOperation,
                                  VkPipelineCache pipelineCache, uint32_t count,
                                  const VkRayTracingPipelineCreateInfoKHR *pCreateInfos,
                                  const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines)
1186 {
1187 VkResult result = VK_SUCCESS;
1188
1189 unsigned i = 0;
1190 for (; i < count; i++) {
1191 VkResult r;
1192 r = radv_rt_pipeline_create(_device, pipelineCache, &pCreateInfos[i], pAllocator, &pPipelines[i]);
1193 if (r != VK_SUCCESS) {
1194 result = r;
1195 pPipelines[i] = VK_NULL_HANDLE;
1196
1197 const VkPipelineCreateFlagBits2 create_flags = vk_rt_pipeline_create_flags(&pCreateInfos[i]);
1198 if (create_flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT)
1199 break;
1200 }
1201 }
1202
1203 for (; i < count; ++i)
1204 pPipelines[i] = VK_NULL_HANDLE;
1205
1206 if (result != VK_SUCCESS)
1207 return result;
1208
1209 /* Work around Portal RTX not handling VK_OPERATION_NOT_DEFERRED_KHR correctly. */
1210 if (deferredOperation != VK_NULL_HANDLE)
1211 return VK_OPERATION_DEFERRED_KHR;
1212
1213 return result;
1214 }
1215
1216 VKAPI_ATTR VkResult VKAPI_CALL
radv_GetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline _pipeline, uint32_t firstGroup,
                                        uint32_t groupCount, size_t dataSize, void *pData)
1219 {
1220 VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
1221 struct radv_ray_tracing_group *groups = radv_pipeline_to_ray_tracing(pipeline)->groups;
1222 char *data = pData;
1223
1224 STATIC_ASSERT(sizeof(struct radv_pipeline_group_handle) <= RADV_RT_HANDLE_SIZE);
1225
1226 memset(data, 0, groupCount * RADV_RT_HANDLE_SIZE);
1227
1228 for (uint32_t i = 0; i < groupCount; ++i) {
1229 memcpy(data + i * RADV_RT_HANDLE_SIZE, &groups[firstGroup + i].handle, sizeof(struct radv_pipeline_group_handle));
1230 }
1231
1232 return VK_SUCCESS;
1233 }
1234
1235 VKAPI_ATTR VkDeviceSize VKAPI_CALL
radv_GetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline _pipeline, uint32_t group,
                                          VkShaderGroupShaderKHR groupShader)
1238 {
1239 VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
1240 struct radv_ray_tracing_pipeline *rt_pipeline = radv_pipeline_to_ray_tracing(pipeline);
1241 struct radv_ray_tracing_group *rt_group = &rt_pipeline->groups[group];
1242 switch (groupShader) {
1243 case VK_SHADER_GROUP_SHADER_GENERAL_KHR:
1244 case VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR:
1245 return rt_pipeline->stages[rt_group->recursive_shader].stack_size;
1246 case VK_SHADER_GROUP_SHADER_ANY_HIT_KHR:
1247 return rt_pipeline->stages[rt_group->any_hit_shader].stack_size;
1248 case VK_SHADER_GROUP_SHADER_INTERSECTION_KHR:
1249 return rt_pipeline->stages[rt_group->intersection_shader].stack_size;
1250 default:
1251 return 0;
1252 }
1253 }
1254
1255 VKAPI_ATTR VkResult VKAPI_CALL
radv_GetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline _pipeline, uint32_t firstGroup,
                                                     uint32_t groupCount, size_t dataSize, void *pData)
1258 {
1259 VK_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
1260 struct radv_ray_tracing_pipeline *rt_pipeline = radv_pipeline_to_ray_tracing(pipeline);
1261 struct radv_rt_capture_replay_handle *data = pData;
1262
1263 memset(data, 0, groupCount * sizeof(struct radv_rt_capture_replay_handle));
1264
1265 for (uint32_t i = 0; i < groupCount; ++i) {
1266 uint32_t recursive_shader = rt_pipeline->groups[firstGroup + i].recursive_shader;
1267 if (recursive_shader != VK_SHADER_UNUSED_KHR) {
1268 struct radv_shader *shader = rt_pipeline->stages[recursive_shader].shader;
1269 if (shader) {
1270 data[i].recursive_shader_alloc.offset = shader->alloc->offset;
1271 data[i].recursive_shader_alloc.size = shader->alloc->size;
1272 data[i].recursive_shader_alloc.arena_va = shader->alloc->arena->bo->va;
1273 data[i].recursive_shader_alloc.arena_size = shader->alloc->arena->size;
1274 }
1275 }
1276 data[i].non_recursive_idx = rt_pipeline->groups[firstGroup + i].handle.any_hit_index;
1277 }
1278
1279 return VK_SUCCESS;
1280 }
1281