1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24 #include <assert.h>
25 #include <fcntl.h>
26 #include <stdbool.h>
27 #include <string.h>
28
29 #include "util/mesa-sha1.h"
30 #include "radv_private.h"
31 #include "sid.h"
32 #include "vk_acceleration_structure.h"
33 #include "vk_descriptors.h"
34 #include "vk_format.h"
35 #include "vk_util.h"
36
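/* Number of winsys BO pointers a descriptor of the given type occupies in the set's buffer
 * list: image-like descriptors reserve three entries (presumably to cover multi-plane/FMASK
 * bindings), plain buffer descriptors reserve one, and samplers, inline uniform blocks and
 * acceleration structures reserve none.
 */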
37 static unsigned
radv_descriptor_type_buffer_count(VkDescriptorType type)
39 {
40 switch (type) {
41 case VK_DESCRIPTOR_TYPE_SAMPLER:
42 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
43 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
44 return 0;
45 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
46 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
47 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
48 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
49 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
50 return 3;
51 default:
52 return 1;
53 }
54 }
55
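/* Returns true if every immutable sampler in the binding has the same 16-byte hardware sampler
 * state as samplers[0]; the layout code uses this to drop the per-element sampler words later.
 */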
56 static bool
has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
58 {
59 if (!samplers)
60 return false;
61 for (uint32_t i = 1; i < count; ++i) {
62 if (memcmp(radv_sampler_from_handle(samplers[0])->state, radv_sampler_from_handle(samplers[i])->state, 16)) {
63 return false;
64 }
65 }
66 return true;
67 }
68
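/* Required alignment of a descriptor of the given type within the set: 16 bytes for
 * buffer/sampler-sized descriptors, 32 bytes for image descriptors.
 */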
69 static uint32_t
radv_descriptor_alignment(VkDescriptorType type)
71 {
72 switch (type) {
73 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
74 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
75 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
76 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
77 case VK_DESCRIPTOR_TYPE_SAMPLER:
78 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
79 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
80 return 16;
81 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
82 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
83 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
84 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
85 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
86 return 32;
87 default:
88 return 1;
89 }
90 }
91
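/* Compute the worst-case size/alignment over all types a VK_DESCRIPTOR_TYPE_MUTABLE_EXT binding
 * may hold. Returns false if the list contains a type that is not supported as mutable.
 */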
92 static bool
radv_mutable_descriptor_type_size_alignment(const VkMutableDescriptorTypeListEXT *list, uint64_t *out_size,
94 uint64_t *out_align)
95 {
96 uint32_t max_size = 0;
97 uint32_t max_align = 0;
98
99 for (uint32_t i = 0; i < list->descriptorTypeCount; i++) {
100 uint32_t size = 0;
101 uint32_t align = radv_descriptor_alignment(list->pDescriptorTypes[i]);
102
103 switch (list->pDescriptorTypes[i]) {
104 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
105 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
106 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
107 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
108 case VK_DESCRIPTOR_TYPE_SAMPLER:
109 size = 16;
110 break;
111 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
112 size = 32;
113 break;
114 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
115 size = 64;
116 break;
117 default:
118 return false;
119 }
120
121 max_size = MAX2(max_size, size);
122 max_align = MAX2(max_align, align);
123 }
124
125 *out_size = max_size;
126 *out_align = max_align;
127 return true;
128 }
129
130 VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
132 const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout)
133 {
134 RADV_FROM_HANDLE(radv_device, device, _device);
135 struct radv_descriptor_set_layout *set_layout;
136
137 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
138 const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
139 vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
140 const VkMutableDescriptorTypeCreateInfoEXT *mutable_info =
141 vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);
142
143 uint32_t num_bindings = 0;
144 uint32_t immutable_sampler_count = 0;
145 uint32_t ycbcr_sampler_count = 0;
146 for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
147 num_bindings = MAX2(num_bindings, pCreateInfo->pBindings[j].binding + 1);
148 if ((pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
149 pCreateInfo->pBindings[j].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
150 pCreateInfo->pBindings[j].pImmutableSamplers) {
151 immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
152
153 bool has_ycbcr_sampler = false;
154 for (unsigned i = 0; i < pCreateInfo->pBindings[j].descriptorCount; ++i) {
155 if (radv_sampler_from_handle(pCreateInfo->pBindings[j].pImmutableSamplers[i])->vk.ycbcr_conversion)
156 has_ycbcr_sampler = true;
157 }
158
159 if (has_ycbcr_sampler)
160 ycbcr_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
161 }
162 }
163
164 uint32_t samplers_offset = offsetof(struct radv_descriptor_set_layout, binding[num_bindings]);
165 size_t size = samplers_offset + immutable_sampler_count * 4 * sizeof(uint32_t);
166 if (ycbcr_sampler_count > 0) {
167 /* Store block of offsets first, followed by the conversion descriptors (padded to the struct
168 * alignment) */
169 size += num_bindings * sizeof(uint32_t);
170 size = align_uintptr(size, alignof(struct vk_ycbcr_conversion_state));
171 size += ycbcr_sampler_count * sizeof(struct vk_ycbcr_conversion_state);
172 }
173
174 /* We need to allocate descriptor set layouts off the device allocator with DEVICE scope because
175 * they are reference counted and may not be destroyed when vkDestroyDescriptorSetLayout is
176 * called.
177 */
178 set_layout = vk_descriptor_set_layout_zalloc(&device->vk, size);
179 if (!set_layout)
180 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
181
182 set_layout->flags = pCreateInfo->flags;
183
184 /* We just allocate all the samplers at the end of the struct */
185 uint32_t *samplers = (uint32_t *)&set_layout->binding[num_bindings];
186 struct vk_ycbcr_conversion_state *ycbcr_samplers = NULL;
187 uint32_t *ycbcr_sampler_offsets = NULL;
188
189 if (ycbcr_sampler_count > 0) {
190 ycbcr_sampler_offsets = samplers + 4 * immutable_sampler_count;
191 set_layout->ycbcr_sampler_offsets_offset = (char *)ycbcr_sampler_offsets - (char *)set_layout;
192
193 uintptr_t first_ycbcr_sampler_offset = (uintptr_t)ycbcr_sampler_offsets + sizeof(uint32_t) * num_bindings;
194 first_ycbcr_sampler_offset = align_uintptr(first_ycbcr_sampler_offset, alignof(struct vk_ycbcr_conversion_state));
195 ycbcr_samplers = (struct vk_ycbcr_conversion_state *)first_ycbcr_sampler_offset;
196 } else
197 set_layout->ycbcr_sampler_offsets_offset = 0;
198
199 VkDescriptorSetLayoutBinding *bindings = NULL;
200 VkResult result = vk_create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount, &bindings);
201 if (result != VK_SUCCESS) {
202 vk_descriptor_set_layout_unref(&device->vk, &set_layout->vk);
203 return vk_error(device, result);
204 }
205
206 set_layout->binding_count = num_bindings;
207 set_layout->shader_stages = 0;
208 set_layout->dynamic_shader_stages = 0;
209 set_layout->has_immutable_samplers = false;
210 set_layout->size = 0;
211
212 uint32_t buffer_count = 0;
213 uint32_t dynamic_offset_count = 0;
214
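   /* Bindings are laid out in two passes, grouped by descriptor alignment, so that at most one
    * padding gap is introduced per set. first_alignment is the opposite of the last binding's
    * alignment so that the group containing the last binding is emitted in the second pass.
    */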
215 uint32_t first_alignment = 32;
216 if (pCreateInfo->bindingCount > 0) {
217 uint32_t last_alignment = radv_descriptor_alignment(bindings[pCreateInfo->bindingCount - 1].descriptorType);
218 if (bindings[pCreateInfo->bindingCount - 1].descriptorType == VK_DESCRIPTOR_TYPE_MUTABLE_EXT) {
219 uint64_t mutable_size = 0, mutable_align = 0;
220 radv_mutable_descriptor_type_size_alignment(
221 &mutable_info->pMutableDescriptorTypeLists[pCreateInfo->bindingCount - 1], &mutable_size, &mutable_align);
222 last_alignment = mutable_align;
223 }
224
225 first_alignment = last_alignment == 32 ? 16 : 32;
226 }
227
228 for (unsigned pass = 0; pass < 2; ++pass) {
229 for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
230 const VkDescriptorSetLayoutBinding *binding = bindings + j;
231 uint32_t b = binding->binding;
232 uint32_t alignment = radv_descriptor_alignment(binding->descriptorType);
233 unsigned binding_buffer_count = radv_descriptor_type_buffer_count(binding->descriptorType);
234 uint32_t descriptor_count = binding->descriptorCount;
235 bool has_ycbcr_sampler = false;
236
237 /* main image + fmask */
238 uint32_t max_sampled_image_descriptors = 2;
239
240 if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && binding->pImmutableSamplers) {
241 for (unsigned i = 0; i < binding->descriptorCount; ++i) {
242 struct vk_ycbcr_conversion *conversion =
243 radv_sampler_from_handle(binding->pImmutableSamplers[i])->vk.ycbcr_conversion;
244
245 if (conversion) {
246 has_ycbcr_sampler = true;
247 max_sampled_image_descriptors =
248 MAX2(max_sampled_image_descriptors, vk_format_get_plane_count(conversion->state.format));
249 }
250 }
251 }
252
253 switch (binding->descriptorType) {
254 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
255 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
256 assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
257 set_layout->binding[b].dynamic_offset_count = 1;
258 set_layout->dynamic_shader_stages |= binding->stageFlags;
259 if (binding->stageFlags & RADV_RT_STAGE_BITS)
260 set_layout->dynamic_shader_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
261 set_layout->binding[b].size = 0;
262 break;
263 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
264 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
265 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
266 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
267 set_layout->binding[b].size = 16;
268 break;
269 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
270 set_layout->binding[b].size = 32;
271 break;
272 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
273 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
274 /* main descriptor + fmask descriptor */
275 set_layout->binding[b].size = 64;
276 break;
277 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
278 /* main descriptor + fmask descriptor + sampler */
279 set_layout->binding[b].size = 96;
280 break;
281 case VK_DESCRIPTOR_TYPE_SAMPLER:
282 set_layout->binding[b].size = 16;
283 break;
284 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT: {
285 uint64_t mutable_size = 0, mutable_align = 0;
286 radv_mutable_descriptor_type_size_alignment(&mutable_info->pMutableDescriptorTypeLists[j], &mutable_size,
287 &mutable_align);
288 assert(mutable_size && mutable_align);
289 set_layout->binding[b].size = mutable_size;
290 alignment = mutable_align;
291 break;
292 }
293 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
294 set_layout->binding[b].size = descriptor_count;
295 descriptor_count = 1;
296 break;
297 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
298 set_layout->binding[b].size = 16;
299 break;
300 default:
301 break;
302 }
303
304 if ((pass == 0 && alignment != first_alignment) || (pass == 1 && alignment == first_alignment))
305 continue;
306
307 set_layout->size = align(set_layout->size, alignment);
308 set_layout->binding[b].type = binding->descriptorType;
309 set_layout->binding[b].array_size = descriptor_count;
310 set_layout->binding[b].offset = set_layout->size;
311 set_layout->binding[b].buffer_offset = buffer_count;
312 set_layout->binding[b].dynamic_offset_offset = dynamic_offset_count;
313
314 if (variable_flags && binding->binding < variable_flags->bindingCount &&
315 (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)) {
316 assert(!binding->pImmutableSamplers); /* Terribly ill defined how many samplers are valid */
317 assert(binding->binding == num_bindings - 1);
318
319 set_layout->has_variable_descriptors = true;
320 }
321
322 if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
323 binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
324 binding->pImmutableSamplers) {
325 set_layout->binding[b].immutable_samplers_offset = samplers_offset;
326 set_layout->has_immutable_samplers = true;
327
328 /* Do not optimize space for descriptor buffers and embedded samplers, otherwise the set
329 * layout size/offset are incorrect.
330 */
331 if (!(pCreateInfo->flags & (VK_DESCRIPTOR_SET_LAYOUT_CREATE_DESCRIPTOR_BUFFER_BIT_EXT |
332 VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT))) {
333 set_layout->binding[b].immutable_samplers_equal =
334 has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
335 }
336
337 for (uint32_t i = 0; i < binding->descriptorCount; i++)
338 memcpy(samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);
339
340 /* Don't reserve space for the samplers if they're not accessed. */
341 if (set_layout->binding[b].immutable_samplers_equal) {
342 if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
343 max_sampled_image_descriptors <= 2)
344 set_layout->binding[b].size -= 32;
345 else if (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
346 set_layout->binding[b].size -= 16;
347 }
348 samplers += 4 * binding->descriptorCount;
349 samplers_offset += 4 * sizeof(uint32_t) * binding->descriptorCount;
350
351 if (has_ycbcr_sampler) {
352 ycbcr_sampler_offsets[b] = (const char *)ycbcr_samplers - (const char *)set_layout;
353 for (uint32_t i = 0; i < binding->descriptorCount; i++) {
354 if (radv_sampler_from_handle(binding->pImmutableSamplers[i])->vk.ycbcr_conversion)
355 ycbcr_samplers[i] =
356 radv_sampler_from_handle(binding->pImmutableSamplers[i])->vk.ycbcr_conversion->state;
357 else
358 ycbcr_samplers[i].format = VK_FORMAT_UNDEFINED;
359 }
360 ycbcr_samplers += binding->descriptorCount;
361 }
362 }
363
364 set_layout->size += descriptor_count * set_layout->binding[b].size;
365 buffer_count += descriptor_count * binding_buffer_count;
366 dynamic_offset_count += descriptor_count * set_layout->binding[b].dynamic_offset_count;
367 set_layout->shader_stages |= binding->stageFlags;
368 }
369 }
370
371 free(bindings);
372
373 set_layout->buffer_count = buffer_count;
374 set_layout->dynamic_offset_count = dynamic_offset_count;
375
376 /* Hash the entire set layout except vk_descriptor_set_layout. The rest of the set layout is
377 * carefully constructed to not have pointers so a full hash instead of a per-field hash
378 * should be ok.
379 */
380 uint32_t hash_offset = offsetof(struct radv_descriptor_set_layout, hash) + sizeof(set_layout->hash);
381 _mesa_sha1_compute((const char *)set_layout + hash_offset, size - hash_offset, set_layout->hash);
382
383 *pSetLayout = radv_descriptor_set_layout_to_handle(set_layout);
384
385 return VK_SUCCESS;
386 }
387
388 VKAPI_ATTR void VKAPI_CALL
radv_GetDescriptorSetLayoutSupport(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
390 VkDescriptorSetLayoutSupport *pSupport)
391 {
392 VkDescriptorSetLayoutBinding *bindings = NULL;
393 VkResult result = vk_create_sorted_bindings(pCreateInfo->pBindings, pCreateInfo->bindingCount, &bindings);
394 if (result != VK_SUCCESS) {
395 pSupport->supported = false;
396 return;
397 }
398
399 const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
400 vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
401 VkDescriptorSetVariableDescriptorCountLayoutSupport *variable_count =
402 vk_find_struct(pSupport->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT);
403 const VkMutableDescriptorTypeCreateInfoEXT *mutable_info =
404 vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);
405 if (variable_count) {
406 variable_count->maxVariableDescriptorCount = 0;
407 }
408
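   /* Mirror the two-pass, alignment-grouped packing used by radv_CreateDescriptorSetLayout so the
    * size computed here matches the layout that would actually be created.
    */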
409 uint32_t first_alignment = 32;
410 if (pCreateInfo->bindingCount > 0) {
411 uint32_t last_alignment = radv_descriptor_alignment(bindings[pCreateInfo->bindingCount - 1].descriptorType);
412 if (bindings[pCreateInfo->bindingCount - 1].descriptorType == VK_DESCRIPTOR_TYPE_MUTABLE_EXT) {
413 uint64_t mutable_size = 0, mutable_align = 0;
414 radv_mutable_descriptor_type_size_alignment(
415 &mutable_info->pMutableDescriptorTypeLists[pCreateInfo->bindingCount - 1], &mutable_size, &mutable_align);
416 last_alignment = mutable_align;
417 }
418
419 first_alignment = last_alignment == 32 ? 16 : 32;
420 }
421
422 bool supported = true;
423 uint64_t size = 0;
424 for (unsigned pass = 0; pass < 2; ++pass) {
425 for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
426 const VkDescriptorSetLayoutBinding *binding = bindings + i;
427
428 uint64_t descriptor_size = 0;
429 uint64_t descriptor_alignment = radv_descriptor_alignment(binding->descriptorType);
430 uint32_t descriptor_count = binding->descriptorCount;
431 switch (binding->descriptorType) {
432 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
433 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
434 break;
435 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
436 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
437 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
438 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
439 descriptor_size = 16;
440 break;
441 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
442 descriptor_size = 32;
443 break;
444 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
445 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
446 descriptor_size = 64;
447 break;
448 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
449 if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
450 descriptor_size = 64;
451 } else {
452 descriptor_size = 96;
453 }
454 break;
455 case VK_DESCRIPTOR_TYPE_SAMPLER:
456 if (!has_equal_immutable_samplers(binding->pImmutableSamplers, descriptor_count)) {
457 descriptor_size = 16;
458 }
459 break;
460 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
461 descriptor_size = descriptor_count;
462 descriptor_count = 1;
463 break;
464 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
465 if (!radv_mutable_descriptor_type_size_alignment(&mutable_info->pMutableDescriptorTypeLists[i],
466 &descriptor_size, &descriptor_alignment)) {
467 supported = false;
468 }
469 break;
470 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
471 descriptor_size = 16;
472 break;
473 default:
474 break;
475 }
476
477 if ((pass == 0 && descriptor_alignment != first_alignment) ||
478 (pass == 1 && descriptor_alignment == first_alignment))
479 continue;
480
481 if (size && !align_u64(size, descriptor_alignment)) {
482 supported = false;
483 }
484 size = align_u64(size, descriptor_alignment);
485
486 uint64_t max_count = INT32_MAX;
487 if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
488 max_count = INT32_MAX - size;
489 else if (descriptor_size)
490 max_count = (INT32_MAX - size) / descriptor_size;
491
492 if (max_count < descriptor_count) {
493 supported = false;
494 }
495 if (variable_flags && binding->binding < variable_flags->bindingCount && variable_count &&
496 (variable_flags->pBindingFlags[binding->binding] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT)) {
497 variable_count->maxVariableDescriptorCount = MIN2(UINT32_MAX, max_count);
498 }
499 size += descriptor_count * descriptor_size;
500 }
501 }
502
503 free(bindings);
504
505 pSupport->supported = supported;
506 }
507
508 /*
509 * Pipeline layouts. These have nothing to do with the pipeline. They are
510 * just multiple descriptor set layouts pasted together.
511 */
512 void
radv_pipeline_layout_init(struct radv_device *device, struct radv_pipeline_layout *layout, bool independent_sets)
514 {
515 memset(layout, 0, sizeof(*layout));
516
517 vk_object_base_init(&device->vk, &layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT);
518
519 layout->independent_sets = independent_sets;
520 }
521
522 void
radv_pipeline_layout_add_set(struct radv_pipeline_layout *layout, uint32_t set_idx,
524 struct radv_descriptor_set_layout *set_layout)
525 {
526 if (layout->set[set_idx].layout)
527 return;
528
529 layout->num_sets = MAX2(set_idx + 1, layout->num_sets);
530
531 layout->set[set_idx].layout = set_layout;
532 vk_descriptor_set_layout_ref(&set_layout->vk);
533
534 layout->set[set_idx].dynamic_offset_start = layout->dynamic_offset_count;
535
536 layout->dynamic_offset_count += set_layout->dynamic_offset_count;
537 layout->dynamic_shader_stages |= set_layout->dynamic_shader_stages;
538 }
539
540 void
radv_pipeline_layout_hash(struct radv_pipeline_layout *layout)
542 {
543 struct mesa_sha1 ctx;
544
545 _mesa_sha1_init(&ctx);
546 for (uint32_t i = 0; i < layout->num_sets; i++) {
547 struct radv_descriptor_set_layout *set_layout = layout->set[i].layout;
548
549 if (!set_layout)
550 continue;
551
552 _mesa_sha1_update(&ctx, set_layout->hash, sizeof(set_layout->hash));
553 }
554 _mesa_sha1_update(&ctx, &layout->push_constant_size, sizeof(layout->push_constant_size));
555 _mesa_sha1_final(&ctx, layout->sha1);
556 }
557
558 void
radv_pipeline_layout_finish(struct radv_device *device, struct radv_pipeline_layout *layout)
560 {
561 for (uint32_t i = 0; i < layout->num_sets; i++) {
562 if (!layout->set[i].layout)
563 continue;
564
565 vk_descriptor_set_layout_unref(&device->vk, &layout->set[i].layout->vk);
566 }
567
568 vk_object_base_finish(&layout->base);
569 }
570
571 VKAPI_ATTR VkResult VKAPI_CALL
radv_CreatePipelineLayout(VkDevice _device, const VkPipelineLayoutCreateInfo *pCreateInfo,
573 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout)
574 {
575 RADV_FROM_HANDLE(radv_device, device, _device);
576 struct radv_pipeline_layout *layout;
577
578 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
579
580 layout = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*layout), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
581 if (layout == NULL)
582 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
583
584 radv_pipeline_layout_init(device, layout, pCreateInfo->flags & VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT);
585
586 layout->num_sets = pCreateInfo->setLayoutCount;
587
588 for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
589 RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->pSetLayouts[set]);
590
591 if (set_layout == NULL) {
592 layout->set[set].layout = NULL;
593 continue;
594 }
595
596 radv_pipeline_layout_add_set(layout, set, set_layout);
597 }
598
599 layout->push_constant_size = 0;
600
601 for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
602 const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
603 layout->push_constant_size = MAX2(layout->push_constant_size, range->offset + range->size);
604 }
605
606 layout->push_constant_size = align(layout->push_constant_size, 16);
607
608 radv_pipeline_layout_hash(layout);
609
610 *pPipelineLayout = radv_pipeline_layout_to_handle(layout);
611
612 return VK_SUCCESS;
613 }
614
615 VKAPI_ATTR void VKAPI_CALL
radv_DestroyPipelineLayout(VkDevice _device, VkPipelineLayout _pipelineLayout, const VkAllocationCallbacks *pAllocator)
617 {
618 RADV_FROM_HANDLE(radv_device, device, _device);
619 RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, _pipelineLayout);
620
621 if (!pipeline_layout)
622 return;
623
624 radv_pipeline_layout_finish(device, pipeline_layout);
625
626 vk_free2(&device->vk.alloc, pAllocator, pipeline_layout);
627 }
628
629 static VkResult
radv_descriptor_set_create(struct radv_device *device, struct radv_descriptor_pool *pool,
631 struct radv_descriptor_set_layout *layout, const uint32_t *variable_count,
632 struct radv_descriptor_set **out_set)
633 {
634 if (pool->entry_count == pool->max_entry_count)
635 return VK_ERROR_OUT_OF_POOL_MEMORY;
636
637 struct radv_descriptor_set *set;
638 uint32_t buffer_count = layout->buffer_count;
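   /* With VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT the last binding's descriptor count
    * comes from the allocation, so recompute how many buffer-list entries this set really needs.
    */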
639 if (variable_count) {
640 unsigned stride = radv_descriptor_type_buffer_count(layout->binding[layout->binding_count - 1].type);
641 buffer_count = layout->binding[layout->binding_count - 1].buffer_offset + *variable_count * stride;
642 }
643 unsigned range_offset = sizeof(struct radv_descriptor_set_header) + sizeof(struct radeon_winsys_bo *) * buffer_count;
644 const unsigned dynamic_offset_count = layout->dynamic_offset_count;
645 unsigned mem_size = range_offset + sizeof(struct radv_descriptor_range) * dynamic_offset_count;
646
647 if (pool->host_memory_base) {
648 if (pool->host_memory_end - pool->host_memory_ptr < mem_size)
649 return VK_ERROR_OUT_OF_POOL_MEMORY;
650
651 set = (struct radv_descriptor_set *)pool->host_memory_ptr;
652 pool->host_memory_ptr += mem_size;
653 } else {
654 set = vk_alloc2(&device->vk.alloc, NULL, mem_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
655
656 if (!set)
657 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
658 }
659
660 memset(set, 0, mem_size);
661
662 vk_object_base_init(&device->vk, &set->header.base, VK_OBJECT_TYPE_DESCRIPTOR_SET);
663
664 if (dynamic_offset_count) {
665 set->header.dynamic_descriptors = (struct radv_descriptor_range *)((uint8_t *)set + range_offset);
666 }
667
668 set->header.layout = layout;
669 set->header.buffer_count = buffer_count;
670 uint32_t layout_size = layout->size;
671 if (variable_count) {
672 uint32_t stride = layout->binding[layout->binding_count - 1].size;
673 if (layout->binding[layout->binding_count - 1].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
674 stride = 1;
675
676 layout_size = layout->binding[layout->binding_count - 1].offset + *variable_count * stride;
677 }
678 layout_size = align_u32(layout_size, 32);
679 set->header.size = layout_size;
680
681 /* try to allocate linearly first, so that we don't spend
682 * time looking for gaps if the app only allocates &
683 * resets via the pool. */
684 if (pool->current_offset + layout_size <= pool->size) {
685 set->header.bo = pool->bo;
686 set->header.mapped_ptr = (uint32_t *)(pool->mapped_ptr + pool->current_offset);
687 set->header.va = pool->bo ? (radv_buffer_get_va(set->header.bo) + pool->current_offset) : 0;
688
689 if (!pool->host_memory_base) {
690 pool->entries[pool->entry_count].offset = pool->current_offset;
691 pool->entries[pool->entry_count].size = layout_size;
692 pool->entries[pool->entry_count].set = set;
693 } else {
694 pool->sets[pool->entry_count] = set;
695 }
696
697 pool->current_offset += layout_size;
698 } else if (!pool->host_memory_base) {
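      /* The linear allocation did not fit: do a first-fit search through the entry list (kept
       * sorted by offset) for a gap that is large enough.
       */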
699 uint64_t offset = 0;
700 int index;
701
702 for (index = 0; index < pool->entry_count; ++index) {
703 if (pool->entries[index].offset - offset >= layout_size)
704 break;
705 offset = pool->entries[index].offset + pool->entries[index].size;
706 }
707
708 if (pool->size - offset < layout_size) {
709 vk_free2(&device->vk.alloc, NULL, set);
710 return VK_ERROR_OUT_OF_POOL_MEMORY;
711 }
712 set->header.bo = pool->bo;
713 set->header.mapped_ptr = (uint32_t *)(pool->mapped_ptr + offset);
714 set->header.va = pool->bo ? (radv_buffer_get_va(set->header.bo) + offset) : 0;
715 memmove(&pool->entries[index + 1], &pool->entries[index], sizeof(pool->entries[0]) * (pool->entry_count - index));
716 pool->entries[index].offset = offset;
717 pool->entries[index].size = layout_size;
718 pool->entries[index].set = set;
719 } else
720 return VK_ERROR_OUT_OF_POOL_MEMORY;
721
722 if (layout->has_immutable_samplers) {
723 for (unsigned i = 0; i < layout->binding_count; ++i) {
724 if (!layout->binding[i].immutable_samplers_offset || layout->binding[i].immutable_samplers_equal)
725 continue;
726
727 unsigned offset = layout->binding[i].offset / 4;
728 if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
729 offset += radv_combined_image_descriptor_sampler_offset(layout->binding + i) / 4;
730
731 const uint32_t *samplers =
732 (const uint32_t *)((const char *)layout + layout->binding[i].immutable_samplers_offset);
733 for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
734 memcpy(set->header.mapped_ptr + offset, samplers + 4 * j, 16);
735 offset += layout->binding[i].size / 4;
736 }
737 }
738 }
739
740 pool->entry_count++;
741 vk_descriptor_set_layout_ref(&layout->vk);
742 *out_set = set;
743 return VK_SUCCESS;
744 }
745
746 static void
radv_descriptor_set_destroy(struct radv_device *device, struct radv_descriptor_pool *pool,
748 struct radv_descriptor_set *set, bool free_bo)
749 {
750 assert(!pool->host_memory_base);
751
752 vk_descriptor_set_layout_unref(&device->vk, &set->header.layout->vk);
753
754 if (free_bo && !pool->host_memory_base) {
755 for (int i = 0; i < pool->entry_count; ++i) {
756 if (pool->entries[i].set == set) {
757 memmove(&pool->entries[i], &pool->entries[i + 1], sizeof(pool->entries[i]) * (pool->entry_count - i - 1));
758 --pool->entry_count;
759 break;
760 }
761 }
762 }
763 vk_object_base_finish(&set->header.base);
764 vk_free2(&device->vk.alloc, NULL, set);
765 }
766
767 static void
radv_destroy_descriptor_pool(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
769 struct radv_descriptor_pool *pool)
770 {
771
772 if (!pool->host_memory_base) {
773 for (uint32_t i = 0; i < pool->entry_count; ++i) {
774 radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
775 }
776 } else {
777 for (uint32_t i = 0; i < pool->entry_count; ++i) {
778 vk_descriptor_set_layout_unref(&device->vk, &pool->sets[i]->header.layout->vk);
779 vk_object_base_finish(&pool->sets[i]->header.base);
780 }
781 }
782
783 if (pool->bo) {
784 radv_rmv_log_bo_destroy(device, pool->bo);
785 device->ws->buffer_destroy(device->ws, pool->bo);
786 }
787 if (pool->host_bo)
788 vk_free2(&device->vk.alloc, pAllocator, pool->host_bo);
789
790 radv_rmv_log_resource_destroy(device, (uint64_t)radv_descriptor_pool_to_handle(pool));
791 vk_object_base_finish(&pool->base);
792 vk_free2(&device->vk.alloc, pAllocator, pool);
793 }
794
795 VkResult
radv_create_descriptor_pool(struct radv_device *device, const VkDescriptorPoolCreateInfo *pCreateInfo,
797 const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool,
798 bool is_internal)
799 {
800 struct radv_descriptor_pool *pool;
801 uint64_t size = sizeof(struct radv_descriptor_pool);
802 uint64_t bo_size = 0, bo_count = 0, range_count = 0;
803
804 const VkMutableDescriptorTypeCreateInfoEXT *mutable_info =
805 vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);
806
807 vk_foreach_struct_const (ext, pCreateInfo->pNext) {
808 switch (ext->sType) {
809 case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO: {
810 const VkDescriptorPoolInlineUniformBlockCreateInfo *info =
811 (const VkDescriptorPoolInlineUniformBlockCreateInfo *)ext;
         /* Inline uniform block sizes are 4-byte aligned, and we may need to align them to at
          * most 32 bytes, which costs at most 28 extra bytes per binding. */
815 bo_size += 28llu * info->maxInlineUniformBlockBindings;
816 break;
817 }
818 default:
819 break;
820 }
821 }
822
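   /* Conservatively size the backing BO from the pool sizes: each descriptor contributes the
    * worst-case GPU size for its type (16/32/64/96 bytes), and mutable descriptors are sized from
    * their type list when one is provided.
    */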
823 uint64_t num_16byte_descriptors = 0;
824 for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
825 bo_count += radv_descriptor_type_buffer_count(pCreateInfo->pPoolSizes[i].type) *
826 pCreateInfo->pPoolSizes[i].descriptorCount;
827
828 switch (pCreateInfo->pPoolSizes[i].type) {
829 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
830 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
831 range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
832 break;
833 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
834 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
835 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
836 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
837 case VK_DESCRIPTOR_TYPE_SAMPLER:
838 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
839 bo_size += 16 * pCreateInfo->pPoolSizes[i].descriptorCount;
840 num_16byte_descriptors += pCreateInfo->pPoolSizes[i].descriptorCount;
841 break;
842 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
843 bo_size += 32 * pCreateInfo->pPoolSizes[i].descriptorCount;
844 break;
845 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
846 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
847 bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
848 break;
849 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
850 /* Per spec, if a mutable descriptor type list is provided for the pool entry, we
851 * allocate enough memory to hold any subset of that list.
852 * If there is no mutable descriptor type list available,
853 * we must allocate enough for any supported mutable descriptor type, i.e. 64 bytes. */
854 if (mutable_info && i < mutable_info->mutableDescriptorTypeListCount) {
855 uint64_t mutable_size, mutable_alignment;
856 if (radv_mutable_descriptor_type_size_alignment(&mutable_info->pMutableDescriptorTypeLists[i],
857 &mutable_size, &mutable_alignment)) {
858 /* 32 as we may need to align for images */
859 mutable_size = align(mutable_size, 32);
860 bo_size += mutable_size * pCreateInfo->pPoolSizes[i].descriptorCount;
861 if (mutable_size < 32)
862 num_16byte_descriptors += pCreateInfo->pPoolSizes[i].descriptorCount;
863 }
864 } else {
865 bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
866 }
867 break;
868 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
869 bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
870 break;
871 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
872 bo_size += pCreateInfo->pPoolSizes[i].descriptorCount;
873 break;
874 default:
875 break;
876 }
877 }
878
879 if (num_16byte_descriptors) {
880 /* Reserve space to align before image descriptors. Our layout code ensures at most one gap
881 * per set. */
882 bo_size += 16 * MIN2(num_16byte_descriptors, pCreateInfo->maxSets);
883 }
884
885 uint64_t sets_size = 0;
886
887 if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
888 size += pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
889 size += sizeof(struct radeon_winsys_bo *) * bo_count;
890 size += sizeof(struct radv_descriptor_range) * range_count;
891
892 sets_size = sizeof(struct radv_descriptor_set *) * pCreateInfo->maxSets;
893 size += sets_size;
894 } else {
895 size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
896 }
897
898 pool = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
899 if (!pool)
900 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
901
902 memset(pool, 0, sizeof(*pool));
903
904 vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL);
905
906 if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
907 pool->host_memory_base = (uint8_t *)pool + sizeof(struct radv_descriptor_pool) + sets_size;
908 pool->host_memory_ptr = pool->host_memory_base;
909 pool->host_memory_end = (uint8_t *)pool + size;
910 }
911
912 if (bo_size) {
913 if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT)) {
914 enum radeon_bo_flag flags = RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT;
915
916 if (device->instance->drirc.zero_vram)
917 flags |= RADEON_FLAG_ZERO_VRAM;
918
919 VkResult result = device->ws->buffer_create(device->ws, bo_size, 32, RADEON_DOMAIN_VRAM, flags,
920 RADV_BO_PRIORITY_DESCRIPTOR, 0, &pool->bo);
921 if (result != VK_SUCCESS) {
922 radv_destroy_descriptor_pool(device, pAllocator, pool);
923 return vk_error(device, result);
924 }
925 pool->mapped_ptr = (uint8_t *)device->ws->buffer_map(pool->bo);
926 if (!pool->mapped_ptr) {
927 radv_destroy_descriptor_pool(device, pAllocator, pool);
928 return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
929 }
930 } else {
931 pool->host_bo = vk_alloc2(&device->vk.alloc, pAllocator, bo_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
932 if (!pool->host_bo) {
933 radv_destroy_descriptor_pool(device, pAllocator, pool);
934 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
935 }
936 pool->mapped_ptr = pool->host_bo;
937 }
938 }
939 pool->size = bo_size;
940 pool->max_entry_count = pCreateInfo->maxSets;
941
942 *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
943 radv_rmv_log_descriptor_pool_create(device, pCreateInfo, *pDescriptorPool, is_internal);
944 return VK_SUCCESS;
945 }
946
947 VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateDescriptorPool(VkDevice _device, const VkDescriptorPoolCreateInfo *pCreateInfo,
949 const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool)
950 {
951 RADV_FROM_HANDLE(radv_device, device, _device);
952 return radv_create_descriptor_pool(device, pCreateInfo, pAllocator, pDescriptorPool, false);
953 }
954
955 VKAPI_ATTR void VKAPI_CALL
radv_DestroyDescriptorPool(VkDevice _device, VkDescriptorPool _pool, const VkAllocationCallbacks *pAllocator)
957 {
958 RADV_FROM_HANDLE(radv_device, device, _device);
959 RADV_FROM_HANDLE(radv_descriptor_pool, pool, _pool);
960
961 if (!pool)
962 return;
963
964 radv_destroy_descriptor_pool(device, pAllocator, pool);
965 }
966
967 VKAPI_ATTR VkResult VKAPI_CALL
radv_ResetDescriptorPool(VkDevice _device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
969 {
970 RADV_FROM_HANDLE(radv_device, device, _device);
971 RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);
972
973 if (!pool->host_memory_base) {
974 for (uint32_t i = 0; i < pool->entry_count; ++i) {
975 radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
976 }
977 } else {
978 for (uint32_t i = 0; i < pool->entry_count; ++i) {
979 vk_descriptor_set_layout_unref(&device->vk, &pool->sets[i]->header.layout->vk);
980 vk_object_base_finish(&pool->sets[i]->header.base);
981 }
982 }
983
984 pool->entry_count = 0;
985
986 pool->current_offset = 0;
987 pool->host_memory_ptr = pool->host_memory_base;
988
989 return VK_SUCCESS;
990 }
991
992 VKAPI_ATTR VkResult VKAPI_CALL
radv_AllocateDescriptorSets(VkDevice _device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
994 VkDescriptorSet *pDescriptorSets)
995 {
996 RADV_FROM_HANDLE(radv_device, device, _device);
997 RADV_FROM_HANDLE(radv_descriptor_pool, pool, pAllocateInfo->descriptorPool);
998
999 VkResult result = VK_SUCCESS;
1000 uint32_t i;
1001 struct radv_descriptor_set *set = NULL;
1002
1003 const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_counts =
1004 vk_find_struct_const(pAllocateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
1005 const uint32_t zero = 0;
1006
1007 /* allocate a set of buffers for each shader to contain descriptors */
1008 for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
1009 RADV_FROM_HANDLE(radv_descriptor_set_layout, layout, pAllocateInfo->pSetLayouts[i]);
1010
1011 const uint32_t *variable_count = NULL;
1012 if (layout->has_variable_descriptors && variable_counts) {
1013 if (i < variable_counts->descriptorSetCount)
1014 variable_count = variable_counts->pDescriptorCounts + i;
1015 else
1016 variable_count = &zero;
1017 }
1018
1019 assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
1020
1021 result = radv_descriptor_set_create(device, pool, layout, variable_count, &set);
1022 if (result != VK_SUCCESS)
1023 break;
1024
1025 pDescriptorSets[i] = radv_descriptor_set_to_handle(set);
1026 }
1027
1028 if (result != VK_SUCCESS) {
1029 radv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool, i, pDescriptorSets);
1030 for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
1031 pDescriptorSets[i] = VK_NULL_HANDLE;
1032 }
1033 }
1034 return result;
1035 }
1036
1037 VKAPI_ATTR VkResult VKAPI_CALL
radv_FreeDescriptorSets(VkDevice _device, VkDescriptorPool descriptorPool, uint32_t count,
1039 const VkDescriptorSet *pDescriptorSets)
1040 {
1041 RADV_FROM_HANDLE(radv_device, device, _device);
1042 RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);
1043
1044 for (uint32_t i = 0; i < count; i++) {
1045 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
1046
1047 if (set && !pool->host_memory_base)
1048 radv_descriptor_set_destroy(device, pool, set, true);
1049 }
1050 return VK_SUCCESS;
1051 }
1052
1053 static ALWAYS_INLINE void
write_texel_buffer_descriptor(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer, unsigned *dst,
1055 struct radeon_winsys_bo **buffer_list, const VkBufferView _buffer_view)
1056 {
1057 RADV_FROM_HANDLE(radv_buffer_view, buffer_view, _buffer_view);
1058
1059 if (!buffer_view) {
1060 memset(dst, 0, 4 * 4);
1061 if (!cmd_buffer)
1062 *buffer_list = NULL;
1063 return;
1064 }
1065
1066 memcpy(dst, buffer_view->state, 4 * 4);
1067
1068 if (device->use_global_bo_list)
1069 return;
1070
1071 if (cmd_buffer)
1072 radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo);
1073 else
1074 *buffer_list = buffer_view->bo;
1075 }
1076
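/* Write a 4-dword buffer resource descriptor for the given VA/range. The encoding of the last
 * dword differs per generation (GFX11/GFX10/older); a zero VA writes a null descriptor.
 */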
1077 static ALWAYS_INLINE void
write_buffer_descriptor(struct radv_device *device, unsigned *dst, uint64_t va, uint64_t range)
1079 {
1080 if (!va) {
1081 memset(dst, 0, 4 * 4);
1082 return;
1083 }
1084
1085 uint32_t rsrc_word3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1086 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
1087
1088 if (device->physical_device->rad_info.gfx_level >= GFX11) {
1089 rsrc_word3 |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW);
1090 } else if (device->physical_device->rad_info.gfx_level >= GFX10) {
1091 rsrc_word3 |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) | S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
1092 S_008F0C_RESOURCE_LEVEL(1);
1093 } else {
1094 rsrc_word3 |=
1095 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1096 }
1097
1098 dst[0] = va;
1099 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1100 /* robustBufferAccess is relaxed enough to allow this (in combination with the alignment/size
1101 * we return from vkGetBufferMemoryRequirements) and this allows the shader compiler to create
1102 * more efficient 8/16-bit buffer accesses.
1103 */
1104 dst[2] = align(range, 4);
1105 dst[3] = rsrc_word3;
1106 }
1107
1108 static ALWAYS_INLINE void
write_buffer_descriptor_impl(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer, unsigned *dst,
1110 struct radeon_winsys_bo **buffer_list, const VkDescriptorBufferInfo *buffer_info)
1111 {
1112 RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
1113 uint64_t va = 0, range = 0;
1114
1115 if (buffer) {
1116 va = radv_buffer_get_va(buffer->bo) + buffer_info->offset + buffer->offset;
1117
1118 range = vk_buffer_range(&buffer->vk, buffer_info->offset, buffer_info->range);
1119 assert(buffer->vk.size > 0 && range > 0);
1120 }
1121
1122 write_buffer_descriptor(device, dst, va, range);
1123
1124 if (device->use_global_bo_list)
1125 return;
1126
1127 if (!buffer) {
1128 if (!cmd_buffer)
1129 *buffer_list = NULL;
1130 return;
1131 }
1132
1133 if (cmd_buffer)
1134 radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo);
1135 else
1136 *buffer_list = buffer->bo;
1137 }
1138
1139 static ALWAYS_INLINE void
write_block_descriptor(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer, void *dst,
1141 const VkWriteDescriptorSet *writeset)
1142 {
1143 const VkWriteDescriptorSetInlineUniformBlock *inline_ub =
1144 vk_find_struct_const(writeset->pNext, WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK);
1145
1146 memcpy(dst, inline_ub->pData, inline_ub->dataSize);
1147 }
1148
1149 static ALWAYS_INLINE void
write_dynamic_buffer_descriptor(struct radv_device *device, struct radv_descriptor_range *range,
1151 struct radeon_winsys_bo **buffer_list, const VkDescriptorBufferInfo *buffer_info)
1152 {
1153 RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
1154 uint64_t va;
1155 unsigned size;
1156
1157 if (!buffer) {
1158 range->va = 0;
1159 *buffer_list = NULL;
1160 return;
1161 }
1162
1163 va = radv_buffer_get_va(buffer->bo);
1164
1165 size = vk_buffer_range(&buffer->vk, buffer_info->offset, buffer_info->range);
1166 assert(buffer->vk.size > 0 && size > 0);
1167
1168 /* robustBufferAccess is relaxed enough to allow this (in combination
1169 * with the alignment/size we return from vkGetBufferMemoryRequirements)
1170 * and this allows the shader compiler to create more efficient 8/16-bit
1171 * buffer accesses. */
1172 size = align(size, 4);
1173
1174 va += buffer_info->offset + buffer->offset;
1175 range->va = va;
1176 range->size = size;
1177
1178 *buffer_list = buffer->bo;
1179 }
1180
1181 static ALWAYS_INLINE void
write_image_descriptor(unsigned *dst, unsigned size, VkDescriptorType descriptor_type,
1183 const VkDescriptorImageInfo *image_info)
1184 {
1185 struct radv_image_view *iview = NULL;
1186 union radv_descriptor *descriptor;
1187
1188 if (image_info)
1189 iview = radv_image_view_from_handle(image_info->imageView);
1190
1191 if (!iview) {
1192 memset(dst, 0, size);
1193 return;
1194 }
1195
1196 if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
1197 descriptor = &iview->storage_descriptor;
1198 } else {
1199 descriptor = &iview->descriptor;
1200 }
1201 assert(size > 0);
1202
1203 memcpy(dst, descriptor, size);
1204 }
1205
1206 static ALWAYS_INLINE void
write_image_descriptor_impl(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer, unsigned size,
1208 unsigned *dst, struct radeon_winsys_bo **buffer_list, VkDescriptorType descriptor_type,
1209 const VkDescriptorImageInfo *image_info)
1210 {
1211 RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
1212
1213 write_image_descriptor(dst, size, descriptor_type, image_info);
1214
1215 if (device->use_global_bo_list)
1216 return;
1217
1218 if (!iview) {
1219 if (!cmd_buffer)
1220 *buffer_list = NULL;
1221 return;
1222 }
1223
1224 const uint32_t max_bindings = sizeof(iview->image->bindings) / sizeof(iview->image->bindings[0]);
1225 for (uint32_t b = 0; b < max_bindings; b++) {
1226 if (cmd_buffer) {
1227 if (iview->image->bindings[b].bo)
1228 radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->image->bindings[b].bo);
1229 } else {
1230 *buffer_list = iview->image->bindings[b].bo;
1231 buffer_list++;
1232 }
1233 }
1234 }
1235
1236 static ALWAYS_INLINE void
write_combined_image_sampler_descriptor(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
1238 unsigned sampler_offset, unsigned *dst, struct radeon_winsys_bo **buffer_list,
1239 VkDescriptorType descriptor_type, const VkDescriptorImageInfo *image_info,
1240 bool has_sampler)
1241 {
1242 write_image_descriptor_impl(device, cmd_buffer, sampler_offset, dst, buffer_list, descriptor_type, image_info);
1243 /* copy over sampler state */
1244 if (has_sampler) {
1245 RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);
1246 memcpy(dst + sampler_offset / sizeof(*dst), sampler->state, 16);
1247 }
1248 }
1249
1250 static ALWAYS_INLINE void
write_sampler_descriptor(unsigned *dst, VkSampler _sampler)
1252 {
1253 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
1254 memcpy(dst, sampler->state, 16);
1255 }
1256
1257 static ALWAYS_INLINE void
write_accel_struct(struct radv_device *device, void *ptr, VkDeviceAddress va)
1259 {
1260 if (!va) {
1261 RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct,
1262 device->meta_state.accel_struct_build.null.accel_struct);
1263 va = vk_acceleration_structure_get_va(accel_struct);
1264 }
1265
1266 memcpy(ptr, &va, sizeof(va));
1267 }
1268
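/* Shared implementation for vkUpdateDescriptorSets and push descriptors: when cmd_buffer is
 * non-NULL (the push descriptor path), referenced BOs are added to the command stream instead of
 * the set's buffer list.
 */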
1269 static ALWAYS_INLINE void
radv_update_descriptor_sets_impl(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
1271 VkDescriptorSet dstSetOverride, uint32_t descriptorWriteCount,
1272 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
1273 const VkCopyDescriptorSet *pDescriptorCopies)
1274 {
1275 uint32_t i, j;
1276 for (i = 0; i < descriptorWriteCount; i++) {
1277 const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
1278 RADV_FROM_HANDLE(radv_descriptor_set, set, dstSetOverride ? dstSetOverride : writeset->dstSet);
1279 const struct radv_descriptor_set_binding_layout *binding_layout =
1280 set->header.layout->binding + writeset->dstBinding;
1281 uint32_t *ptr = set->header.mapped_ptr;
1282 struct radeon_winsys_bo **buffer_list = set->descriptors;
1283 /* Immutable samplers are not copied into push descriptors when they are
1284 * allocated, so if we are writing push descriptors we have to copy the
1285 * immutable samplers into them now.
1286 */
1287 const bool copy_immutable_samplers =
1288 cmd_buffer && binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal;
1289 const uint32_t *samplers = radv_immutable_samplers(set->header.layout, binding_layout);
1290 const VkWriteDescriptorSetAccelerationStructureKHR *accel_structs = NULL;
1291
1292 ptr += binding_layout->offset / 4;
1293
1294 if (writeset->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
1295 write_block_descriptor(device, cmd_buffer, (uint8_t *)ptr + writeset->dstArrayElement, writeset);
1296 continue;
1297 } else if (writeset->descriptorType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
1298 accel_structs = vk_find_struct_const(writeset->pNext, WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR);
1299 }
1300
1301 ptr += binding_layout->size * writeset->dstArrayElement / 4;
1302 buffer_list += binding_layout->buffer_offset;
1303 buffer_list += writeset->dstArrayElement;
1304 for (j = 0; j < writeset->descriptorCount; ++j) {
1305 switch (writeset->descriptorType) {
1306 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1307 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
1308 unsigned idx = writeset->dstArrayElement + j;
1309 idx += binding_layout->dynamic_offset_offset;
1310 assert(!(set->header.layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
1311 write_dynamic_buffer_descriptor(device, set->header.dynamic_descriptors + idx, buffer_list,
1312 writeset->pBufferInfo + j);
1313 break;
1314 }
1315 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1316 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1317 write_buffer_descriptor_impl(device, cmd_buffer, ptr, buffer_list, writeset->pBufferInfo + j);
1318 break;
1319 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1320 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1321 write_texel_buffer_descriptor(device, cmd_buffer, ptr, buffer_list, writeset->pTexelBufferView[j]);
1322 break;
1323 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1324 write_image_descriptor_impl(device, cmd_buffer, 32, ptr, buffer_list, writeset->descriptorType,
1325 writeset->pImageInfo + j);
1326 break;
1327 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1328 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1329 write_image_descriptor_impl(device, cmd_buffer, 64, ptr, buffer_list, writeset->descriptorType,
1330 writeset->pImageInfo + j);
1331 break;
1332 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
1333 unsigned sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout);
1334 write_combined_image_sampler_descriptor(device, cmd_buffer, sampler_offset, ptr, buffer_list,
1335 writeset->descriptorType, writeset->pImageInfo + j,
1336 !binding_layout->immutable_samplers_offset);
1337 if (copy_immutable_samplers) {
1338 const unsigned idx = writeset->dstArrayElement + j;
1339 memcpy((char *)ptr + sampler_offset, samplers + 4 * idx, 16);
1340 }
1341 break;
1342 }
1343 case VK_DESCRIPTOR_TYPE_SAMPLER:
1344 if (!binding_layout->immutable_samplers_offset) {
1345 const VkDescriptorImageInfo *pImageInfo = writeset->pImageInfo + j;
1346 write_sampler_descriptor(ptr, pImageInfo->sampler);
1347 } else if (copy_immutable_samplers) {
1348 unsigned idx = writeset->dstArrayElement + j;
1349 memcpy(ptr, samplers + 4 * idx, 16);
1350 }
1351 break;
1352 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
1353 RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, accel_structs->pAccelerationStructures[j]);
1354
1355 write_accel_struct(device, ptr, accel_struct ? vk_acceleration_structure_get_va(accel_struct) : 0);
1356 break;
1357 }
1358 default:
1359 break;
1360 }
1361 ptr += binding_layout->size / 4;
1362 ++buffer_list;
1363 }
1364 }
1365
1366 for (i = 0; i < descriptorCopyCount; i++) {
1367 const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
1368 RADV_FROM_HANDLE(radv_descriptor_set, src_set, copyset->srcSet);
1369 RADV_FROM_HANDLE(radv_descriptor_set, dst_set, copyset->dstSet);
1370 const struct radv_descriptor_set_binding_layout *src_binding_layout =
1371 src_set->header.layout->binding + copyset->srcBinding;
1372 const struct radv_descriptor_set_binding_layout *dst_binding_layout =
1373 dst_set->header.layout->binding + copyset->dstBinding;
1374 uint32_t *src_ptr = src_set->header.mapped_ptr;
1375 uint32_t *dst_ptr = dst_set->header.mapped_ptr;
1376 struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
1377 struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;
1378
1379 src_ptr += src_binding_layout->offset / 4;
1380 dst_ptr += dst_binding_layout->offset / 4;
1381
1382 if (src_binding_layout->type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
1383 src_ptr += copyset->srcArrayElement / 4;
1384 dst_ptr += copyset->dstArrayElement / 4;
1385
1386 memcpy(dst_ptr, src_ptr, copyset->descriptorCount);
1387 continue;
1388 }
1389
1390 src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
1391 dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;
1392
1393 src_buffer_list += src_binding_layout->buffer_offset;
1394 src_buffer_list += copyset->srcArrayElement;
1395
1396 dst_buffer_list += dst_binding_layout->buffer_offset;
1397 dst_buffer_list += copyset->dstArrayElement;
1398
1399 /* Copies between mutable and non-mutable descriptor types may have
1400 * different per-element sizes, so only the smaller of the two is copied. */
1401 size_t copy_size = MIN2(src_binding_layout->size, dst_binding_layout->size);
1402
1403 for (j = 0; j < copyset->descriptorCount; ++j) {
1404 switch (src_binding_layout->type) {
1405 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1406 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
1407 unsigned src_idx = copyset->srcArrayElement + j;
1408 unsigned dst_idx = copyset->dstArrayElement + j;
1409 struct radv_descriptor_range *src_range, *dst_range;
1410 src_idx += src_binding_layout->dynamic_offset_offset;
1411 dst_idx += dst_binding_layout->dynamic_offset_offset;
1412
1413 src_range = src_set->header.dynamic_descriptors + src_idx;
1414 dst_range = dst_set->header.dynamic_descriptors + dst_idx;
1415 *dst_range = *src_range;
1416 break;
1417 }
1418 default:
1419 memcpy(dst_ptr, src_ptr, copy_size);
1420 }
1421 src_ptr += src_binding_layout->size / 4;
1422 dst_ptr += dst_binding_layout->size / 4;
1423
1424 unsigned src_buffer_count = radv_descriptor_type_buffer_count(src_binding_layout->type);
1425 unsigned dst_buffer_count = radv_descriptor_type_buffer_count(dst_binding_layout->type);
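/* The per-descriptor BO references are copied along with the descriptor payload;
 * when the source type tracks fewer BOs than the destination (e.g. copying into a
 * mutable binding), the remaining slots are cleared so the destination set does
 * not keep stale references. */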
1426 for (unsigned k = 0; k < dst_buffer_count; k++) {
1427 if (k < src_buffer_count)
1428 dst_buffer_list[k] = src_buffer_list[k];
1429 else
1430 dst_buffer_list[k] = NULL;
1431 }
1432
1433 dst_buffer_list += dst_buffer_count;
1434 src_buffer_list += src_buffer_count;
1435 }
1436 }
1437 }
1438
1439 VKAPI_ATTR void VKAPI_CALL
1440 radv_UpdateDescriptorSets(VkDevice _device, uint32_t descriptorWriteCount,
1441 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
1442 const VkCopyDescriptorSet *pDescriptorCopies)
1443 {
1444 RADV_FROM_HANDLE(radv_device, device, _device);
1445
1446 radv_update_descriptor_sets_impl(device, NULL, VK_NULL_HANDLE, descriptorWriteCount, pDescriptorWrites,
1447 descriptorCopyCount, pDescriptorCopies);
1448 }
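/* For reference, a minimal sketch of the API-side call this entry point services
 * (hypothetical application code, not part of the driver); it assumes a set
 * allocated with a single VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER binding:
 *
 *    VkDescriptorBufferInfo info = {
 *       .buffer = my_buffer,              // app-owned VkBuffer (assumption)
 *       .offset = 0,
 *       .range = VK_WHOLE_SIZE,
 *    };
 *    VkWriteDescriptorSet write = {
 *       .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
 *       .dstSet = my_set,                 // app-owned VkDescriptorSet (assumption)
 *       .dstBinding = 0,
 *       .dstArrayElement = 0,
 *       .descriptorCount = 1,
 *       .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
 *       .pBufferInfo = &info,
 *    };
 *    vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
 */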
1449
1450 void
1451 radv_cmd_update_descriptor_sets(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
1452 VkDescriptorSet dstSetOverride, uint32_t descriptorWriteCount,
1453 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
1454 const VkCopyDescriptorSet *pDescriptorCopies)
1455 {
1456 /* Assume cmd_buffer != NULL to optimize out cmd_buffer checks in generic code above. */
1457 assume(cmd_buffer != NULL);
1458 radv_update_descriptor_sets_impl(device, cmd_buffer, dstSetOverride, descriptorWriteCount, pDescriptorWrites,
1459 descriptorCopyCount, pDescriptorCopies);
1460 }
1461
1462 VKAPI_ATTR VkResult VKAPI_CALL
1463 radv_CreateDescriptorUpdateTemplate(VkDevice _device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
1464 const VkAllocationCallbacks *pAllocator,
1465 VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
1466 {
1467 RADV_FROM_HANDLE(radv_device, device, _device);
1468 const uint32_t entry_count = pCreateInfo->descriptorUpdateEntryCount;
1469 const size_t size = sizeof(struct radv_descriptor_update_template) +
1470 sizeof(struct radv_descriptor_update_template_entry) * entry_count;
1471 struct radv_descriptor_set_layout *set_layout = NULL;
1472 struct radv_descriptor_update_template *templ;
1473 uint32_t i;
1474
1475 templ = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1476 if (!templ)
1477 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1478
1479 vk_object_base_init(&device->vk, &templ->base, VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE);
1480
1481 templ->entry_count = entry_count;
1482
1483 if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
1484 RADV_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->pipelineLayout);
1485
1486 /* For push descriptor templates, descriptorSetLayout must be ignored;
1487 * the set layout comes from pipelineLayout and set instead.
1488 */
1489 assert(pCreateInfo->set < MAX_SETS);
1490 set_layout = pipeline_layout->set[pCreateInfo->set].layout;
1491
1492 templ->bind_point = pCreateInfo->pipelineBindPoint;
1493 } else {
1494 assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET);
1495 set_layout = radv_descriptor_set_layout_from_handle(pCreateInfo->descriptorSetLayout);
1496 }
1497
1498 for (i = 0; i < entry_count; i++) {
1499 const VkDescriptorUpdateTemplateEntry *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
1500 const struct radv_descriptor_set_binding_layout *binding_layout = set_layout->binding + entry->dstBinding;
1501 const uint32_t buffer_offset = binding_layout->buffer_offset + entry->dstArrayElement;
1502 const uint32_t *immutable_samplers = NULL;
1503 uint32_t dst_offset;
1504 uint32_t dst_stride;
1505
1506 /* dst_offset is an offset into dynamic_descriptors when the descriptor
1507 * is dynamic, and a dword offset into mapped_ptr otherwise. */
1508 switch (entry->descriptorType) {
1509 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1510 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1511 assert(pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET);
1512 dst_offset = binding_layout->dynamic_offset_offset + entry->dstArrayElement;
1513 dst_stride = 0; /* Not used */
1514 break;
1515 default:
1516 switch (entry->descriptorType) {
1517 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1518 case VK_DESCRIPTOR_TYPE_SAMPLER:
1519 /* Immutable samplers are copied into push descriptors when they are pushed */
1520 if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR &&
1521 binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal) {
1522 immutable_samplers = radv_immutable_samplers(set_layout, binding_layout) + entry->dstArrayElement * 4;
1523 }
1524 break;
1525 default:
1526 break;
1527 }
1528 dst_offset = binding_layout->offset / 4;
1529 if (entry->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
1530 dst_offset += entry->dstArrayElement / 4;
1531 else
1532 dst_offset += binding_layout->size * entry->dstArrayElement / 4;
1533
1534 dst_stride = binding_layout->size / 4;
1535 break;
1536 }
1537
1538 templ->entry[i] = (struct radv_descriptor_update_template_entry){
1539 .descriptor_type = entry->descriptorType,
1540 .descriptor_count = entry->descriptorCount,
1541 .src_offset = entry->offset,
1542 .src_stride = entry->stride,
1543 .dst_offset = dst_offset,
1544 .dst_stride = dst_stride,
1545 .buffer_offset = buffer_offset,
1546 .has_sampler = !binding_layout->immutable_samplers_offset,
1547 .sampler_offset = radv_combined_image_descriptor_sampler_offset(binding_layout),
1548 .immutable_samplers = immutable_samplers};
1549 }
1550
1551 *pDescriptorUpdateTemplate = radv_descriptor_update_template_to_handle(templ);
1552 return VK_SUCCESS;
1553 }
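/* A minimal sketch of how an application typically drives this path (hypothetical
 * app code, not part of the driver): one template entry that writes a single
 * uniform-buffer descriptor from a packed host struct.
 *
 *    struct my_data { VkDescriptorBufferInfo buf; };   // hypothetical host layout
 *
 *    VkDescriptorUpdateTemplateEntry entry = {
 *       .dstBinding = 0,
 *       .dstArrayElement = 0,
 *       .descriptorCount = 1,
 *       .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
 *       .offset = offsetof(struct my_data, buf),
 *       .stride = sizeof(VkDescriptorBufferInfo),
 *    };
 *    VkDescriptorUpdateTemplateCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,
 *       .descriptorUpdateEntryCount = 1,
 *       .pDescriptorUpdateEntries = &entry,
 *       .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
 *       .descriptorSetLayout = my_set_layout,           // assumption
 *    };
 *    vkCreateDescriptorUpdateTemplate(device, &info, NULL, &templ);
 *    vkUpdateDescriptorSetWithTemplate(device, my_set, templ, &my_data_instance);
 */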
1554
1555 VKAPI_ATTR void VKAPI_CALL
1556 radv_DestroyDescriptorUpdateTemplate(VkDevice _device, VkDescriptorUpdateTemplate descriptorUpdateTemplate,
1557 const VkAllocationCallbacks *pAllocator)
1558 {
1559 RADV_FROM_HANDLE(radv_device, device, _device);
1560 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
1561
1562 if (!templ)
1563 return;
1564
1565 vk_object_base_finish(&templ->base);
1566 vk_free2(&device->vk.alloc, pAllocator, templ);
1567 }
1568
1569 static ALWAYS_INLINE void
1570 radv_update_descriptor_set_with_template_impl(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
1571 struct radv_descriptor_set *set,
1572 VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData)
1573 {
1574 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
1575 uint32_t i;
1576
1577 for (i = 0; i < templ->entry_count; ++i) {
1578 struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
1579 uint32_t *pDst = set->header.mapped_ptr + templ->entry[i].dst_offset;
1580 const uint8_t *pSrc = ((const uint8_t *)pData) + templ->entry[i].src_offset;
1581 uint32_t j;
1582
1583 if (templ->entry[i].descriptor_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
1584 memcpy((uint8_t *)pDst, pSrc, templ->entry[i].descriptor_count);
1585 continue;
1586 }
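/* As in the non-template path, descriptor_count for inline uniform blocks is a
 * byte count, so the block payload is copied with a single memcpy above. */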
1587
1588 for (j = 0; j < templ->entry[i].descriptor_count; ++j) {
1589 switch (templ->entry[i].descriptor_type) {
1590 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1591 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
1592 const unsigned idx = templ->entry[i].dst_offset + j;
1593 assert(!(set->header.layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
1594 write_dynamic_buffer_descriptor(device, set->header.dynamic_descriptors + idx, buffer_list,
1595 (struct VkDescriptorBufferInfo *)pSrc);
1596 break;
1597 }
1598 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1599 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1600 write_buffer_descriptor_impl(device, cmd_buffer, pDst, buffer_list, (struct VkDescriptorBufferInfo *)pSrc);
1601 break;
1602 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1603 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1604 write_texel_buffer_descriptor(device, cmd_buffer, pDst, buffer_list, *(VkBufferView *)pSrc);
1605 break;
1606 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1607 write_image_descriptor_impl(device, cmd_buffer, 32, pDst, buffer_list, templ->entry[i].descriptor_type,
1608 (struct VkDescriptorImageInfo *)pSrc);
1609 break;
1610 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1611 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1612 write_image_descriptor_impl(device, cmd_buffer, 64, pDst, buffer_list, templ->entry[i].descriptor_type,
1613 (struct VkDescriptorImageInfo *)pSrc);
1614 break;
1615 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1616 write_combined_image_sampler_descriptor(device, cmd_buffer, templ->entry[i].sampler_offset, pDst,
1617 buffer_list, templ->entry[i].descriptor_type,
1618 (struct VkDescriptorImageInfo *)pSrc, templ->entry[i].has_sampler);
1619 if (cmd_buffer && templ->entry[i].immutable_samplers) {
1620 memcpy((char *)pDst + templ->entry[i].sampler_offset, templ->entry[i].immutable_samplers + 4 * j, 16);
1621 }
1622 break;
1623 case VK_DESCRIPTOR_TYPE_SAMPLER:
1624 if (templ->entry[i].has_sampler) {
1625 const VkDescriptorImageInfo *pImageInfo = (struct VkDescriptorImageInfo *)pSrc;
1626 write_sampler_descriptor(pDst, pImageInfo->sampler);
1627 } else if (cmd_buffer && templ->entry[i].immutable_samplers)
1628 memcpy(pDst, templ->entry[i].immutable_samplers + 4 * j, 16);
1629 break;
1630 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
1631 RADV_FROM_HANDLE(vk_acceleration_structure, accel_struct, *(const VkAccelerationStructureKHR *)pSrc);
1632 write_accel_struct(device, pDst, accel_struct ? vk_acceleration_structure_get_va(accel_struct) : 0);
1633 break;
1634 }
1635 default:
1636 break;
1637 }
1638 pSrc += templ->entry[i].src_stride;
1639 pDst += templ->entry[i].dst_stride;
1640 ++buffer_list;
1641 }
1642 }
1643 }
1644
1645 void
1646 radv_cmd_update_descriptor_set_with_template(struct radv_device *device, struct radv_cmd_buffer *cmd_buffer,
1647 struct radv_descriptor_set *set,
1648 VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData)
1649 {
1650 /* Assume cmd_buffer != NULL to optimize out cmd_buffer checks in generic code above. */
1651 assume(cmd_buffer != NULL);
1652 radv_update_descriptor_set_with_template_impl(device, cmd_buffer, set, descriptorUpdateTemplate, pData);
1653 }
1654
1655 VKAPI_ATTR void VKAPI_CALL
1656 radv_UpdateDescriptorSetWithTemplate(VkDevice _device, VkDescriptorSet descriptorSet,
1657 VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData)
1658 {
1659 RADV_FROM_HANDLE(radv_device, device, _device);
1660 RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);
1661
1662 radv_update_descriptor_set_with_template_impl(device, NULL, set, descriptorUpdateTemplate, pData);
1663 }
1664
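/* VK_VALVE_descriptor_set_host_mapping: lets the application obtain the CPU
 * mapping of a descriptor set plus per-binding offset/size, so it can write
 * descriptor payloads directly instead of going through vkUpdateDescriptorSets. */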
1665 VKAPI_ATTR void VKAPI_CALL
1666 radv_GetDescriptorSetLayoutHostMappingInfoVALVE(VkDevice _device,
1667 const VkDescriptorSetBindingReferenceVALVE *pBindingReference,
1668 VkDescriptorSetLayoutHostMappingInfoVALVE *pHostMapping)
1669 {
1670 struct radv_descriptor_set_layout *set_layout =
1671 radv_descriptor_set_layout_from_handle(pBindingReference->descriptorSetLayout);
1672
1673 const struct radv_descriptor_set_binding_layout *binding_layout = set_layout->binding + pBindingReference->binding;
1674
1675 pHostMapping->descriptorOffset = binding_layout->offset;
1676 pHostMapping->descriptorSize = binding_layout->size;
1677 }
1678
1679 VKAPI_ATTR void VKAPI_CALL
1680 radv_GetDescriptorSetHostMappingVALVE(VkDevice _device, VkDescriptorSet descriptorSet, void **ppData)
1681 {
1682 RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);
1683 *ppData = set->header.mapped_ptr;
1684 }
1685
1686 /* VK_EXT_descriptor_buffer */
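/* With VK_EXT_descriptor_buffer the application allocates descriptor memory
 * itself: it queries the layout size and per-binding offsets below, then asks the
 * driver to write raw descriptor payloads into that memory via vkGetDescriptorEXT. */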
1687 VKAPI_ATTR void VKAPI_CALL
1688 radv_GetDescriptorSetLayoutSizeEXT(VkDevice device, VkDescriptorSetLayout layout, VkDeviceSize *pLayoutSizeInBytes)
1689 {
1690 RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, layout);
1691 *pLayoutSizeInBytes = set_layout->size;
1692 }
1693
1694 VKAPI_ATTR void VKAPI_CALL
1695 radv_GetDescriptorSetLayoutBindingOffsetEXT(VkDevice device, VkDescriptorSetLayout layout, uint32_t binding,
1696 VkDeviceSize *pOffset)
1697 {
1698 RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, layout);
1699 *pOffset = set_layout->binding[binding].offset;
1700 }
1701
1702 VKAPI_ATTR void VKAPI_CALL
1703 radv_GetDescriptorEXT(VkDevice _device, const VkDescriptorGetInfoEXT *pDescriptorInfo, size_t dataSize,
1704 void *pDescriptor)
1705 {
1706 RADV_FROM_HANDLE(radv_device, device, _device);
1707
1708 switch (pDescriptorInfo->type) {
1709 case VK_DESCRIPTOR_TYPE_SAMPLER:
1710 write_sampler_descriptor(pDescriptor, *pDescriptorInfo->data.pSampler);
1711 break;
1712 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1713 write_image_descriptor(pDescriptor, 64, pDescriptorInfo->type, pDescriptorInfo->data.pCombinedImageSampler);
1714 if (pDescriptorInfo->data.pCombinedImageSampler) {
1715 write_sampler_descriptor((uint32_t *)pDescriptor + 20, pDescriptorInfo->data.pCombinedImageSampler->sampler);
1716 }
1717 break;
1718 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1719 write_image_descriptor(pDescriptor, 64, pDescriptorInfo->type, pDescriptorInfo->data.pInputAttachmentImage);
1720 break;
1721 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1722 write_image_descriptor(pDescriptor, 64, pDescriptorInfo->type, pDescriptorInfo->data.pSampledImage);
1723 break;
1724 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1725 write_image_descriptor(pDescriptor, 32, pDescriptorInfo->type, pDescriptorInfo->data.pStorageImage);
1726 break;
1727 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
1728 const VkDescriptorAddressInfoEXT *addr_info = pDescriptorInfo->data.pUniformBuffer;
1729
1730 write_buffer_descriptor(device, pDescriptor, addr_info ? addr_info->address : 0,
1731 addr_info ? addr_info->range : 0);
1732 break;
1733 }
1734 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
1735 const VkDescriptorAddressInfoEXT *addr_info = pDescriptorInfo->data.pStorageBuffer;
1736
1737 write_buffer_descriptor(device, pDescriptor, addr_info ? addr_info->address : 0,
1738 addr_info ? addr_info->range : 0);
1739 break;
1740 }
1741 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
1742 const VkDescriptorAddressInfoEXT *addr_info = pDescriptorInfo->data.pUniformTexelBuffer;
1743
1744 if (addr_info && addr_info->address) {
1745 radv_make_texel_buffer_descriptor(device, addr_info->address, addr_info->format, 0, addr_info->range,
1746 pDescriptor);
1747 } else {
1748 memset(pDescriptor, 0, 4 * 4);
1749 }
1750 break;
1751 }
1752 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
1753 const VkDescriptorAddressInfoEXT *addr_info = pDescriptorInfo->data.pStorageTexelBuffer;
1754
1755 if (addr_info && addr_info->address) {
1756 radv_make_texel_buffer_descriptor(device, addr_info->address, addr_info->format, 0, addr_info->range,
1757 pDescriptor);
1758 } else {
1759 memset(pDescriptor, 0, 4 * 4);
1760 }
1761 break;
1762 }
1763 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
1764 write_accel_struct(device, pDescriptor, pDescriptorInfo->data.accelerationStructure);
1765 break;
1766
1767 default:
1768 unreachable("invalid descriptor type");
1769 }
1770 }
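/* A minimal sketch of the corresponding application-side call (hypothetical app
 * code, not part of the driver), writing a storage-buffer descriptor into
 * app-owned memory sized via vkGetDescriptorSetLayoutSizeEXT:
 *
 *    VkDescriptorAddressInfoEXT addr = {
 *       .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_ADDRESS_INFO_EXT,
 *       .address = buffer_device_address,   // from vkGetBufferDeviceAddress (assumption)
 *       .range = size,
 *    };
 *    VkDescriptorGetInfoEXT get_info = {
 *       .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_GET_INFO_EXT,
 *       .type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
 *       .data.pStorageBuffer = &addr,
 *    };
 *    vkGetDescriptorEXT(device, &get_info, storage_buffer_descriptor_size, dst);
 */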
1771