/*
 * Copyright © 2024 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

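/* Implementation of VK_EXT_shader_object for RADV: shader objects are either
 * compiled from SPIR-V (with per-next-stage variants for graphics stages) or
 * recreated from a binary blob previously queried via
 * vkGetShaderBinaryDataEXT.
 */
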
#include "radv_private.h"

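/* Releases one shader variant. The binary is only owned (and thus freed)
 * when the object was compiled from SPIR-V; for VK_SHADER_CODE_TYPE_BINARY_EXT
 * the binary pointer aliases the application-provided blob and must not be
 * freed here.
 */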
static void
radv_shader_object_destroy_variant(struct radv_device *device, VkShaderCodeTypeEXT code_type,
                                   struct radv_shader *shader, struct radv_shader_binary *binary)
{
   if (shader)
      radv_shader_unref(device, shader);

   if (code_type == VK_SHADER_CODE_TYPE_SPIRV_EXT)
      free(binary);
}

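/* Destroys every variant owned by the shader object (the LS/ES variants for
 * merged stages and the GS copy shader) before freeing the object itself.
 */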
static void
radv_shader_object_destroy(struct radv_device *device, struct radv_shader_object *shader_obj,
                           const VkAllocationCallbacks *pAllocator)
{
   radv_shader_object_destroy_variant(device, shader_obj->code_type, shader_obj->as_ls.shader,
                                      shader_obj->as_ls.binary);
   radv_shader_object_destroy_variant(device, shader_obj->code_type, shader_obj->as_es.shader,
                                      shader_obj->as_es.binary);
   radv_shader_object_destroy_variant(device, shader_obj->code_type, shader_obj->gs.copy_shader,
                                      shader_obj->gs.copy_binary);
   radv_shader_object_destroy_variant(device, shader_obj->code_type, shader_obj->shader, shader_obj->binary);

   vk_object_base_finish(&shader_obj->base);
   vk_free2(&device->vk.alloc, pAllocator, shader_obj);
}

VKAPI_ATTR void VKAPI_CALL
radv_DestroyShaderEXT(VkDevice _device, VkShaderEXT shader, const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_shader_object, shader_obj, shader);

   if (!shader)
      return;

   radv_shader_object_destroy(device, shader_obj, pAllocator);
}

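/* Translates a VkShaderCreateInfoEXT into the internal radv_shader_stage
 * description, including the descriptor set/push constant layout and the
 * per-stage compile key.
 */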
static void
radv_shader_stage_init(const VkShaderCreateInfoEXT *sinfo, struct radv_shader_stage *out_stage)
{
   uint16_t dynamic_shader_stages = 0;

   memset(out_stage, 0, sizeof(*out_stage));

   out_stage->stage = vk_to_mesa_shader_stage(sinfo->stage);
   out_stage->next_stage = MESA_SHADER_NONE;
   out_stage->entrypoint = sinfo->pName;
   out_stage->spec_info = sinfo->pSpecializationInfo;
   out_stage->feedback.flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT;
   out_stage->spirv.data = (const char *)sinfo->pCode;
   out_stage->spirv.size = sinfo->codeSize;

   for (uint32_t i = 0; i < sinfo->setLayoutCount; i++) {
      RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, sinfo->pSetLayouts[i]);

      if (set_layout == NULL)
         continue;

      out_stage->layout.num_sets = MAX2(i + 1, out_stage->layout.num_sets);
      out_stage->layout.set[i].layout = set_layout;

      out_stage->layout.set[i].dynamic_offset_start = out_stage->layout.dynamic_offset_count;
      out_stage->layout.dynamic_offset_count += set_layout->dynamic_offset_count;

      dynamic_shader_stages |= set_layout->dynamic_shader_stages;
   }

   if (out_stage->layout.dynamic_offset_count && (dynamic_shader_stages & sinfo->stage)) {
      out_stage->layout.use_dynamic_descriptors = true;
   }

   for (unsigned i = 0; i < sinfo->pushConstantRangeCount; ++i) {
      const VkPushConstantRange *range = sinfo->pPushConstantRanges + i;
      out_stage->layout.push_constant_size = MAX2(out_stage->layout.push_constant_size, range->offset + range->size);
   }

   out_stage->layout.push_constant_size = align(out_stage->layout.push_constant_size, 16);

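   /* Only wave32 and wave64 execution is supported, so any other required
    * subgroup size cannot be honored.
    */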
   const VkShaderRequiredSubgroupSizeCreateInfoEXT *const subgroup_size =
      vk_find_struct_const(sinfo->pNext, SHADER_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);

   if (subgroup_size) {
      if (subgroup_size->requiredSubgroupSize == 32)
         out_stage->key.subgroup_required_size = RADV_REQUIRED_WAVE32;
      else if (subgroup_size->requiredSubgroupSize == 64)
         out_stage->key.subgroup_required_size = RADV_REQUIRED_WAVE64;
      else
         unreachable("Unsupported required subgroup size.");
   }

   if (sinfo->flags & VK_SHADER_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT) {
      out_stage->key.subgroup_require_full = 1;
   }

   if (out_stage->stage == MESA_SHADER_MESH) {
      out_stage->key.has_task_shader = !(sinfo->flags & VK_SHADER_CREATE_NO_TASK_SHADER_BIT_EXT);
   }
}

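/* Compiles a graphics shader object from SPIR-V. Because the pipeline state
 * is unknown at this point, everything that can be dynamic is compiled as
 * dynamic, and one variant is built per possible next stage.
 */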
static VkResult
radv_shader_object_init_graphics(struct radv_shader_object *shader_obj, struct radv_device *device,
                                 const VkShaderCreateInfoEXT *pCreateInfo)
{
   gl_shader_stage stage = vk_to_mesa_shader_stage(pCreateInfo->stage);
   struct radv_shader_stage stages[MESA_VULKAN_SHADER_STAGES];

   for (unsigned i = 0; i < MESA_VULKAN_SHADER_STAGES; i++) {
      stages[i].entrypoint = NULL;
      stages[i].nir = NULL;
      stages[i].spirv.size = 0;
      stages[i].next_stage = MESA_SHADER_NONE;
   }

   radv_shader_stage_init(pCreateInfo, &stages[stage]);

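   /* All of this state is unknown at compile time for shader objects, so
    * mark it as dynamic and rely on prologs/epilogs for the VS inputs and
    * PS exports.
    */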
   struct radv_graphics_state_key gfx_state = {0};

   gfx_state.vs.has_prolog = true;
   gfx_state.ps.has_epilog = true;
   gfx_state.dynamic_rasterization_samples = true;
   gfx_state.unknown_rast_prim = true;
   gfx_state.dynamic_provoking_vtx_mode = true;
   gfx_state.dynamic_line_rast_mode = true;

   if (device->physical_device->rad_info.gfx_level >= GFX11)
      gfx_state.ms.alpha_to_coverage_via_mrtz = true;

   struct radv_shader *shader = NULL;
   struct radv_shader_binary *binary = NULL;

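   /* If no next stage is specified, a single variant suffices; otherwise
    * compile one variant for each possible next stage.
    */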
   if (!pCreateInfo->nextStage) {
      struct radv_shader *shaders[MESA_VULKAN_SHADER_STAGES] = {NULL};
      struct radv_shader_binary *binaries[MESA_VULKAN_SHADER_STAGES] = {NULL};

      radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, true, false, false, NULL, false, shaders,
                                    binaries, &shader_obj->gs.copy_shader, &shader_obj->gs.copy_binary);

      shader = shaders[stage];
      binary = binaries[stage];

      ralloc_free(stages[stage].nir);

      shader_obj->shader = shader;
      shader_obj->binary = binary;
   } else {
      radv_foreach_stage(next_stage, pCreateInfo->nextStage)
      {
         struct radv_shader *shaders[MESA_VULKAN_SHADER_STAGES] = {NULL};
         struct radv_shader_binary *binaries[MESA_VULKAN_SHADER_STAGES] = {NULL};

         radv_shader_stage_init(pCreateInfo, &stages[stage]);
         stages[stage].next_stage = next_stage;

         radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, true, false, false, NULL, false, shaders,
                                       binaries, &shader_obj->gs.copy_shader, &shader_obj->gs.copy_binary);

         shader = shaders[stage];
         binary = binaries[stage];

         ralloc_free(stages[stage].nir);

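         /* Keep the variant in the slot matching the hardware stage it will
          * execute as: VS before tessellation runs as LS, and VS/TES before
          * GS run as ES.
          */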
         if (stage == MESA_SHADER_VERTEX) {
            if (next_stage == MESA_SHADER_TESS_CTRL) {
               shader_obj->as_ls.shader = shader;
               shader_obj->as_ls.binary = binary;
            } else if (next_stage == MESA_SHADER_GEOMETRY) {
               shader_obj->as_es.shader = shader;
               shader_obj->as_es.binary = binary;
            } else {
               shader_obj->shader = shader;
               shader_obj->binary = binary;
            }
         } else if (stage == MESA_SHADER_TESS_EVAL) {
            if (next_stage == MESA_SHADER_GEOMETRY) {
               shader_obj->as_es.shader = shader;
               shader_obj->as_es.binary = binary;
            } else {
               shader_obj->shader = shader;
               shader_obj->binary = binary;
            }
         } else {
            shader_obj->shader = shader;
            shader_obj->binary = binary;
         }
      }
   }

   return VK_SUCCESS;
}

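/* Compiles a compute shader object from SPIR-V. Compute shaders have no
 * next stage, so a single variant suffices.
 */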
static VkResult
radv_shader_object_init_compute(struct radv_shader_object *shader_obj, struct radv_device *device,
                                const VkShaderCreateInfoEXT *pCreateInfo)
{
   struct radv_shader_binary *cs_binary;
   struct radv_shader_stage stage = {0};

   assert(pCreateInfo->flags == 0);

   radv_shader_stage_init(pCreateInfo, &stage);

   struct radv_shader *cs_shader = radv_compile_cs(device, NULL, &stage, true, false, false, &cs_binary);

   ralloc_free(stage.nir);

   shader_obj->shader = cs_shader;
   shader_obj->binary = cs_binary;

   return VK_SUCCESS;
}

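/* Builds the shader layout (descriptor sets, dynamic offsets and push
 * constant range) directly from the create info, since shader objects have
 * no pipeline layout to provide it.
 */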
static void
radv_get_shader_layout(const VkShaderCreateInfoEXT *pCreateInfo, struct radv_shader_layout *layout)
{
   uint16_t dynamic_shader_stages = 0;

   memset(layout, 0, sizeof(*layout));

   layout->dynamic_offset_count = 0;

   for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++) {
      RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[i]);

      if (set_layout == NULL)
         continue;

      layout->num_sets = MAX2(i + 1, layout->num_sets);

      layout->set[i].layout = set_layout;
      layout->set[i].dynamic_offset_start = layout->dynamic_offset_count;

      layout->dynamic_offset_count += set_layout->dynamic_offset_count;
      dynamic_shader_stages |= set_layout->dynamic_shader_stages;
   }

   if (layout->dynamic_offset_count && (dynamic_shader_stages & pCreateInfo->stage)) {
      layout->use_dynamic_descriptors = true;
   }

   layout->push_constant_size = 0;

   for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
      const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
      layout->push_constant_size = MAX2(layout->push_constant_size, range->offset + range->size);
   }

   layout->push_constant_size = align(layout->push_constant_size, 16);
}

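/* Reads one serialized shader variant (SHA-1 + size + radv_shader_binary)
 * from the blob and recreates the shader, rejecting blobs whose checksum
 * does not match.
 */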
static VkResult
radv_shader_object_init_binary(struct radv_device *device, struct blob_reader *blob, struct radv_shader **shader_out,
                               struct radv_shader_binary **binary_out)
{
   const char *binary_sha1 = blob_read_bytes(blob, SHA1_DIGEST_LENGTH);
   const uint32_t binary_size = blob_read_uint32(blob);
   const struct radv_shader_binary *binary = blob_read_bytes(blob, binary_size);
   unsigned char sha1[SHA1_DIGEST_LENGTH];

   _mesa_sha1_compute(binary, binary->total_size, sha1);
   if (memcmp(sha1, binary_sha1, SHA1_DIGEST_LENGTH))
      return VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT;

   *shader_out = radv_shader_create(device, NULL, binary, true);
   *binary_out = (struct radv_shader_binary *)binary;

   return VK_SUCCESS;
}

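/* Common initialization: either deserializes a binary blob or compiles the
 * shader from SPIR-V.
 */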
static VkResult
radv_shader_object_init(struct radv_shader_object *shader_obj, struct radv_device *device,
                        const VkShaderCreateInfoEXT *pCreateInfo)
{
   struct radv_shader_layout layout;
   VkResult result;

   radv_get_shader_layout(pCreateInfo, &layout);

   shader_obj->stage = vk_to_mesa_shader_stage(pCreateInfo->stage);
   shader_obj->code_type = pCreateInfo->codeType;
   shader_obj->push_constant_size = layout.push_constant_size;
   shader_obj->dynamic_offset_count = layout.dynamic_offset_count;

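   /* Binary shader objects use the layout produced by
    * radv_GetShaderBinaryDataEXT: the cache UUID, then a has_binary flag
    * followed by the serialized binary for the main shader and for each
    * stage-specific variant.
    */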
   if (pCreateInfo->codeType == VK_SHADER_CODE_TYPE_BINARY_EXT) {
      if (pCreateInfo->codeSize < VK_UUID_SIZE + sizeof(uint32_t)) {
         return VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT;
      }

      struct blob_reader blob;
      blob_reader_init(&blob, pCreateInfo->pCode, pCreateInfo->codeSize);

      const uint8_t *cache_uuid = blob_read_bytes(&blob, VK_UUID_SIZE);

      if (memcmp(cache_uuid, device->physical_device->cache_uuid, VK_UUID_SIZE))
         return VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT;

      const bool has_main_binary = blob_read_uint32(&blob);

      if (has_main_binary) {
         result = radv_shader_object_init_binary(device, &blob, &shader_obj->shader, &shader_obj->binary);
         if (result != VK_SUCCESS)
            return result;
      }

      if (shader_obj->stage == MESA_SHADER_VERTEX) {
         const bool has_es_binary = blob_read_uint32(&blob);
         if (has_es_binary) {
            result =
               radv_shader_object_init_binary(device, &blob, &shader_obj->as_es.shader, &shader_obj->as_es.binary);
            if (result != VK_SUCCESS)
               return result;
         }

         const bool has_ls_binary = blob_read_uint32(&blob);
         if (has_ls_binary) {
            result =
               radv_shader_object_init_binary(device, &blob, &shader_obj->as_ls.shader, &shader_obj->as_ls.binary);
            if (result != VK_SUCCESS)
               return result;
         }
      } else if (shader_obj->stage == MESA_SHADER_TESS_EVAL) {
         const bool has_es_binary = blob_read_uint32(&blob);
         if (has_es_binary) {
            result =
               radv_shader_object_init_binary(device, &blob, &shader_obj->as_es.shader, &shader_obj->as_es.binary);
            if (result != VK_SUCCESS)
               return result;
         }
      } else if (shader_obj->stage == MESA_SHADER_GEOMETRY) {
         const bool has_gs_copy_binary = blob_read_uint32(&blob);
         if (has_gs_copy_binary) {
            result =
               radv_shader_object_init_binary(device, &blob, &shader_obj->gs.copy_shader, &shader_obj->gs.copy_binary);
            if (result != VK_SUCCESS)
               return result;
         }
      }
   } else {
      assert(pCreateInfo->codeType == VK_SHADER_CODE_TYPE_SPIRV_EXT);

      if (pCreateInfo->stage == VK_SHADER_STAGE_COMPUTE_BIT) {
         result = radv_shader_object_init_compute(shader_obj, device, pCreateInfo);
      } else {
         result = radv_shader_object_init_graphics(shader_obj, device, pCreateInfo);
      }

      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

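/* Allocates and initializes a single, unlinked shader object. */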
static VkResult
radv_shader_object_create(VkDevice _device, const VkShaderCreateInfoEXT *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator, VkShaderEXT *pShader)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_shader_object *shader_obj;
   VkResult result;

   shader_obj = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*shader_obj), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (shader_obj == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &shader_obj->base, VK_OBJECT_TYPE_SHADER_EXT);

   result = radv_shader_object_init(shader_obj, device, pCreateInfo);
   if (result != VK_SUCCESS) {
      radv_shader_object_destroy(device, shader_obj, pAllocator);
      return result;
   }

   *pShader = radv_shader_object_to_handle(shader_obj);

   return VK_SUCCESS;
}

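/* Creates shader objects for a set of stages requested with
 * VK_SHADER_CREATE_LINK_STAGE_BIT_EXT: all stages are compiled together so
 * cross-stage linking applies, then the resulting shaders are distributed
 * to one shader object per create info.
 */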
static VkResult
radv_shader_object_create_linked(VkDevice _device, uint32_t createInfoCount, const VkShaderCreateInfoEXT *pCreateInfos,
                                 const VkAllocationCallbacks *pAllocator, VkShaderEXT *pShaders)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_shader_stage stages[MESA_VULKAN_SHADER_STAGES];

   for (unsigned i = 0; i < MESA_VULKAN_SHADER_STAGES; i++) {
      stages[i].entrypoint = NULL;
      stages[i].nir = NULL;
      stages[i].spirv.size = 0;
      stages[i].next_stage = MESA_SHADER_NONE;
   }

   struct radv_graphics_state_key gfx_state = {0};

   gfx_state.vs.has_prolog = true;
   gfx_state.ps.has_epilog = true;
   gfx_state.dynamic_rasterization_samples = true;
   gfx_state.unknown_rast_prim = true;
   gfx_state.dynamic_provoking_vtx_mode = true;
   gfx_state.dynamic_line_rast_mode = true;

   if (device->physical_device->rad_info.gfx_level >= GFX11)
      gfx_state.ms.alpha_to_coverage_via_mrtz = true;

   for (unsigned i = 0; i < createInfoCount; i++) {
      const VkShaderCreateInfoEXT *pCreateInfo = &pCreateInfos[i];
      gl_shader_stage s = vk_to_mesa_shader_stage(pCreateInfo->stage);

      radv_shader_stage_init(pCreateInfo, &stages[s]);
   }

   /* Determine next stage. */
   for (unsigned i = 0; i < MESA_VULKAN_SHADER_STAGES; i++) {
      if (!stages[i].entrypoint)
         continue;

      switch (stages[i].stage) {
      case MESA_SHADER_VERTEX:
         if (stages[MESA_SHADER_TESS_CTRL].entrypoint) {
            stages[i].next_stage = MESA_SHADER_TESS_CTRL;
         } else if (stages[MESA_SHADER_GEOMETRY].entrypoint) {
            stages[i].next_stage = MESA_SHADER_GEOMETRY;
         } else if (stages[MESA_SHADER_FRAGMENT].entrypoint) {
            stages[i].next_stage = MESA_SHADER_FRAGMENT;
         }
         break;
      case MESA_SHADER_TESS_CTRL:
         stages[i].next_stage = MESA_SHADER_TESS_EVAL;
         break;
      case MESA_SHADER_TESS_EVAL:
         if (stages[MESA_SHADER_GEOMETRY].entrypoint) {
            stages[i].next_stage = MESA_SHADER_GEOMETRY;
         } else if (stages[MESA_SHADER_FRAGMENT].entrypoint) {
            stages[i].next_stage = MESA_SHADER_FRAGMENT;
         }
         break;
      case MESA_SHADER_GEOMETRY:
      case MESA_SHADER_MESH:
         if (stages[MESA_SHADER_FRAGMENT].entrypoint) {
            stages[i].next_stage = MESA_SHADER_FRAGMENT;
         }
         break;
      case MESA_SHADER_FRAGMENT:
         stages[i].next_stage = MESA_SHADER_NONE;
         break;
      case MESA_SHADER_TASK:
         stages[i].next_stage = MESA_SHADER_MESH;
         break;
      default:
         assert(0);
      }
   }

   struct radv_shader *shaders[MESA_VULKAN_SHADER_STAGES] = {NULL};
   struct radv_shader_binary *binaries[MESA_VULKAN_SHADER_STAGES] = {NULL};
   struct radv_shader *gs_copy_shader = NULL;
   struct radv_shader_binary *gs_copy_binary = NULL;

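   /* Compile all stages at once so the compiler can link across stages. */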
   radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, true, false, false, NULL, false, shaders, binaries,
                                 &gs_copy_shader, &gs_copy_binary);

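   /* Wrap each compiled shader in a shader object, placing LS/ES variants
    * according to the next stage determined above.
    */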
   for (unsigned i = 0; i < createInfoCount; i++) {
      const VkShaderCreateInfoEXT *pCreateInfo = &pCreateInfos[i];
      gl_shader_stage s = vk_to_mesa_shader_stage(pCreateInfo->stage);
      struct radv_shader_object *shader_obj;

      shader_obj = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*shader_obj), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (shader_obj == NULL)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

      vk_object_base_init(&device->vk, &shader_obj->base, VK_OBJECT_TYPE_SHADER_EXT);

      shader_obj->stage = s;
      shader_obj->code_type = pCreateInfo->codeType;
      shader_obj->push_constant_size = stages[s].layout.push_constant_size;
      shader_obj->dynamic_offset_count = stages[s].layout.dynamic_offset_count;

      if (s == MESA_SHADER_VERTEX) {
         if (stages[s].next_stage == MESA_SHADER_TESS_CTRL) {
            shader_obj->as_ls.shader = shaders[s];
            shader_obj->as_ls.binary = binaries[s];
         } else if (stages[s].next_stage == MESA_SHADER_GEOMETRY) {
            shader_obj->as_es.shader = shaders[s];
            shader_obj->as_es.binary = binaries[s];
         } else {
            shader_obj->shader = shaders[s];
            shader_obj->binary = binaries[s];
         }
      } else if (s == MESA_SHADER_TESS_EVAL) {
         if (stages[s].next_stage == MESA_SHADER_GEOMETRY) {
            shader_obj->as_es.shader = shaders[s];
            shader_obj->as_es.binary = binaries[s];
         } else {
            shader_obj->shader = shaders[s];
            shader_obj->binary = binaries[s];
         }
      } else {
         shader_obj->shader = shaders[s];
         shader_obj->binary = binaries[s];
      }

      if (s == MESA_SHADER_GEOMETRY) {
         shader_obj->gs.copy_shader = gs_copy_shader;
         shader_obj->gs.copy_binary = gs_copy_binary;
      }

      ralloc_free(stages[s].nir);

      pShaders[i] = radv_shader_object_to_handle(shader_obj);
   }

   return VK_SUCCESS;
}

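/* Entry point: takes the linked path when the application requests it for
 * SPIR-V shaders, and otherwise creates each shader object independently.
 */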
VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateShadersEXT(VkDevice _device, uint32_t createInfoCount, const VkShaderCreateInfoEXT *pCreateInfos,
                      const VkAllocationCallbacks *pAllocator, VkShaderEXT *pShaders)
{
   VkResult result = VK_SUCCESS;
   unsigned i = 0;

   if (createInfoCount > 1 && !!(pCreateInfos[0].flags & VK_SHADER_CREATE_LINK_STAGE_BIT_EXT) &&
       pCreateInfos[0].codeType == VK_SHADER_CODE_TYPE_SPIRV_EXT) {
      for (unsigned j = 0; j < createInfoCount; j++) {
         assert(pCreateInfos[j].flags & VK_SHADER_CREATE_LINK_STAGE_BIT_EXT);
      }

      return radv_shader_object_create_linked(_device, createInfoCount, pCreateInfos, pAllocator, pShaders);
   }

   for (; i < createInfoCount; i++) {
      VkResult r;

      r = radv_shader_object_create(_device, &pCreateInfos[i], pAllocator, &pShaders[i]);
      if (r != VK_SUCCESS) {
         result = r;
         pShaders[i] = VK_NULL_HANDLE;
      }
   }

   for (; i < createInfoCount; ++i)
      pShaders[i] = VK_NULL_HANDLE;

   return result;
}

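/* Size of one serialized variant: a has_binary flag, plus SHA-1, size and
 * padded binary data when the variant exists.
 */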
static size_t
radv_get_shader_binary_size(const struct radv_shader_binary *binary)
{
   size_t size = sizeof(uint32_t); /* has_binary */

   if (binary)
      size += SHA1_DIGEST_LENGTH + 4 + ALIGN(binary->total_size, 4);

   return size;
}

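/* Total serialized size: the cache UUID followed by the main binary and any
 * stage-specific variants.
 */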
static size_t
radv_get_shader_object_size(const struct radv_shader_object *shader_obj)
{
   size_t size = VK_UUID_SIZE;

   size += radv_get_shader_binary_size(shader_obj->binary);

   if (shader_obj->stage == MESA_SHADER_VERTEX) {
      size += radv_get_shader_binary_size(shader_obj->as_es.binary);
      size += radv_get_shader_binary_size(shader_obj->as_ls.binary);
   } else if (shader_obj->stage == MESA_SHADER_TESS_EVAL) {
      size += radv_get_shader_binary_size(shader_obj->as_es.binary);
   } else if (shader_obj->stage == MESA_SHADER_GEOMETRY) {
      size += radv_get_shader_binary_size(shader_obj->gs.copy_binary);
   }

   return size;
}

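/* Serializes one variant; the SHA-1 written alongside the binary is
 * validated again on deserialization.
 */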
static void
radv_write_shader_binary(struct blob *blob, const struct radv_shader_binary *binary)
{
   unsigned char binary_sha1[SHA1_DIGEST_LENGTH];

   blob_write_uint32(blob, !!binary);

   if (binary) {
      _mesa_sha1_compute(binary, binary->total_size, binary_sha1);

      blob_write_bytes(blob, binary_sha1, sizeof(binary_sha1));
      blob_write_uint32(blob, binary->total_size);
      blob_write_bytes(blob, binary, binary->total_size);
   }
}

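/* Standard two-call idiom: with pData == NULL only the required size is
 * returned, otherwise the serialized shader object is written to pData.
 */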
VKAPI_ATTR VkResult VKAPI_CALL
radv_GetShaderBinaryDataEXT(VkDevice _device, VkShaderEXT shader, size_t *pDataSize, void *pData)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_shader_object, shader_obj, shader);
   const size_t size = radv_get_shader_object_size(shader_obj);

   if (!pData) {
      *pDataSize = size;
      return VK_SUCCESS;
   }

   if (*pDataSize < size) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   struct blob blob;
   blob_init_fixed(&blob, pData, *pDataSize);
   blob_write_bytes(&blob, device->physical_device->cache_uuid, VK_UUID_SIZE);

   radv_write_shader_binary(&blob, shader_obj->binary);

   if (shader_obj->stage == MESA_SHADER_VERTEX) {
      radv_write_shader_binary(&blob, shader_obj->as_es.binary);
      radv_write_shader_binary(&blob, shader_obj->as_ls.binary);
   } else if (shader_obj->stage == MESA_SHADER_TESS_EVAL) {
      radv_write_shader_binary(&blob, shader_obj->as_es.binary);
   } else if (shader_obj->stage == MESA_SHADER_GEOMETRY) {
      radv_write_shader_binary(&blob, shader_obj->gs.copy_binary);
   }

   assert(!blob.out_of_memory);

   return VK_SUCCESS;
}