/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "lvp_private.h"
#include "vk_util.h"
#include "u_math.h"

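/* Descriptor set layouts, pipeline layouts, descriptor pools and descriptor
 * sets for lavapipe. Everything here lives in host memory: descriptor set
 * "allocation" is plain heap allocation, and a pool only tracks the list of
 * sets it handed out so reset/destroy can release them.
 */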
VkResult lvp_CreateDescriptorSetLayout(
   VkDevice                                    _device,
   const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkDescriptorSetLayout*                      pSetLayout)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
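
   /* First pass over the bindings: find the highest binding number and the
    * total immutable sampler count so the layout, its binding array and the
    * immutable sampler pointers can be carved out of a single allocation.
    */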
   uint32_t max_binding = 0;
   uint32_t immutable_sampler_count = 0;
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
      if (pCreateInfo->pBindings[j].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
   }

   size_t size = sizeof(struct lvp_descriptor_set_layout) +
                 (max_binding + 1) * sizeof(set_layout->binding[0]) +
                 immutable_sampler_count * sizeof(struct lvp_sampler *);

   set_layout = vk_zalloc2(&device->vk.alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!set_layout)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &set_layout->base,
                       VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);

   /* We just allocate all the samplers at the end of the struct */
   struct lvp_sampler **samplers =
      (struct lvp_sampler **)&set_layout->binding[max_binding + 1];

   set_layout->binding_count = max_binding + 1;
   set_layout->shader_stages = 0;
   set_layout->size = 0;

   uint32_t dynamic_offset_count = 0;

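   /* Second pass: record each binding's type and array size, and assign,
    * per shader stage, the indices of the gallium slots (constant buffers,
    * shader buffers, samplers, sampler views, images) that the descriptor
    * type consumes. Unused slot indices stay at -1.
    */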
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
      uint32_t b = binding->binding;

      set_layout->binding[b].array_size = binding->descriptorCount;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->binding[b].type = binding->descriptorType;
      set_layout->binding[b].valid = true;
      set_layout->size += binding->descriptorCount;

      for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
         set_layout->binding[b].stage[stage].const_buffer_index = -1;
         set_layout->binding[b].stage[stage].shader_buffer_index = -1;
         set_layout->binding[b].stage[stage].sampler_index = -1;
         set_layout->binding[b].stage[stage].sampler_view_index = -1;
         set_layout->binding[b].stage[stage].image_index = -1;
      }

      if (binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
          binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
         set_layout->binding[b].dynamic_index = dynamic_offset_count;
         dynamic_offset_count += binding->descriptorCount;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         lvp_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].sampler_index = set_layout->stage[s].sampler_count;
            set_layout->stage[s].sampler_count += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         lvp_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].const_buffer_index = set_layout->stage[s].const_buffer_count;
            set_layout->stage[s].const_buffer_count += binding->descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         lvp_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].shader_buffer_index = set_layout->stage[s].shader_buffer_count;
            set_layout->stage[s].shader_buffer_count += binding->descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         lvp_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].image_index = set_layout->stage[s].image_count;
            set_layout->stage[s].image_count += binding->descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         lvp_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].sampler_view_index = set_layout->stage[s].sampler_view_count;
            set_layout->stage[s].sampler_view_count += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      if (binding->pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += binding->descriptorCount;

         for (uint32_t i = 0; i < binding->descriptorCount; i++)
            set_layout->binding[b].immutable_samplers[i] =
               lvp_sampler_from_handle(binding->pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= binding->stageFlags;
   }

   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = lvp_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void lvp_DestroyDescriptorSetLayout(
   VkDevice                                    _device,
   VkDescriptorSetLayout                       _set_layout,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout, _set_layout);

   if (!_set_layout)
      return;
   vk_object_base_finish(&set_layout->base);
   vk_free2(&device->vk.alloc, pAllocator, set_layout);
}

VkResult lvp_CreatePipelineLayout(
   VkDevice                                    _device,
   const VkPipelineLayoutCreateInfo*           pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkPipelineLayout*                           pPipelineLayout)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*layout), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &layout->base,
                       VK_OBJECT_TYPE_PIPELINE_LAYOUT);
   layout->num_sets = pCreateInfo->setLayoutCount;

   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;
   }

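   /* Size the push constant block to cover the furthest extent of all
    * declared ranges, rounded up to a 16-byte (vec4) boundary.
    */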
   layout->push_constant_size = 0;
   for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
      const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
      layout->push_constant_size = MAX2(layout->push_constant_size,
                                        range->offset + range->size);
   }
   layout->push_constant_size = align(layout->push_constant_size, 16);
   *pPipelineLayout = lvp_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

void lvp_DestroyPipelineLayout(
   VkDevice                                    _device,
   VkPipelineLayout                            _pipelineLayout,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_pipeline_layout, pipeline_layout, _pipelineLayout);

   if (!_pipelineLayout)
      return;
   vk_object_base_finish(&pipeline_layout->base);
   vk_free2(&device->vk.alloc, pAllocator, pipeline_layout);
}

VkResult
lvp_descriptor_set_create(struct lvp_device *device,
                          const struct lvp_descriptor_set_layout *layout,
                          struct lvp_descriptor_set **out_set)
{
   struct lvp_descriptor_set *set;
   size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);

   set = vk_alloc(&device->vk.alloc /* XXX: Use the pool */, size, 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!set)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A descriptor set may not be 100% filled. Clear the set so we can
    * later detect holes in it.
    */
   memset(set, 0, size);

   vk_object_base_init(&device->vk, &set->base,
                       VK_OBJECT_TYPE_DESCRIPTOR_SET);
   set->layout = layout;

   /* Go through and fill out immutable samplers if we have any */
   struct lvp_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
            desc[i].sampler = layout->binding[b].immutable_samplers[i];
      }
      desc += layout->binding[b].array_size;
   }

   *out_set = set;

   return VK_SUCCESS;
}

void
lvp_descriptor_set_destroy(struct lvp_device *device,
                           struct lvp_descriptor_set *set)
{
   vk_object_base_finish(&set->base);
   vk_free(&device->vk.alloc, set);
}

VkResult lvp_AllocateDescriptorSets(
   VkDevice                                    _device,
   const VkDescriptorSetAllocateInfo*          pAllocateInfo,
   VkDescriptorSet*                            pDescriptorSets)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_descriptor_pool, pool, pAllocateInfo->descriptorPool);
   VkResult result = VK_SUCCESS;
   struct lvp_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      LVP_FROM_HANDLE(lvp_descriptor_set_layout, layout,
                      pAllocateInfo->pSetLayouts[i]);

      result = lvp_descriptor_set_create(device, layout, &set);
      if (result != VK_SUCCESS)
         break;

      list_addtail(&set->link, &pool->sets);
      pDescriptorSets[i] = lvp_descriptor_set_to_handle(set);
   }

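   /* On failure, free the i sets that were successfully created above. */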
   if (result != VK_SUCCESS)
      lvp_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                             i, pDescriptorSets);

   return result;
}

VkResult lvp_FreeDescriptorSets(
   VkDevice                                    _device,
   VkDescriptorPool                            descriptorPool,
   uint32_t                                    count,
   const VkDescriptorSet*                      pDescriptorSets)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);

   for (uint32_t i = 0; i < count; i++) {
      LVP_FROM_HANDLE(lvp_descriptor_set, set, pDescriptorSets[i]);

      if (!set)
         continue;

      list_del(&set->link);
      lvp_descriptor_set_destroy(device, set);
   }
   return VK_SUCCESS;
}

void lvp_UpdateDescriptorSets(
   VkDevice                                    _device,
   uint32_t                                    descriptorWriteCount,
   const VkWriteDescriptorSet*                 pDescriptorWrites,
   uint32_t                                    descriptorCopyCount,
   const VkCopyDescriptorSet*                  pDescriptorCopies)
{
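   /* Process the writes: each write updates descriptorCount consecutive
    * descriptors starting at dstBinding / dstArrayElement of the target set.
    */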
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      LVP_FROM_HANDLE(lvp_descriptor_set, set, write->dstSet);
      const struct lvp_descriptor_set_binding_layout *bind_layout =
         &set->layout->binding[write->dstBinding];
      struct lvp_descriptor *desc =
         &set->descriptors[bind_layout->descriptor_index];
      desc += write->dstArrayElement;

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            LVP_FROM_HANDLE(lvp_sampler, sampler,
                            write->pImageInfo[j].sampler);

            desc[j] = (struct lvp_descriptor) {
               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = sampler,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            LVP_FROM_HANDLE(lvp_image_view, iview,
                            write->pImageInfo[j].imageView);
            LVP_FROM_HANDLE(lvp_sampler, sampler,
                            write->pImageInfo[j].sampler);

            desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            desc[j].image_view = iview;

            /* If this descriptor has an immutable sampler, we don't want
             * to stomp on it.
             */
            if (sampler)
               desc[j].sampler = sampler;
         }
         break;

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            LVP_FROM_HANDLE(lvp_image_view, iview,
                            write->pImageInfo[j].imageView);

            desc[j] = (struct lvp_descriptor) {
               .type = write->descriptorType,
               .image_view = iview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            LVP_FROM_HANDLE(lvp_buffer_view, bview,
                            write->pTexelBufferView[j]);

            desc[j] = (struct lvp_descriptor) {
               .type = write->descriptorType,
               .buffer_view = bview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            LVP_FROM_HANDLE(lvp_buffer, buffer, write->pBufferInfo[j].buffer);
            assert(buffer);

            desc[j] = (struct lvp_descriptor) {
               .type = write->descriptorType,
               .buf.offset = write->pBufferInfo[j].offset,
               .buf.buffer = buffer,
               .buf.range = write->pBufferInfo[j].range,
            };
         }
         break;

      default:
         break;
      }
   }

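   /* Process the copies: duplicate descriptorCount consecutive descriptors
    * from the source binding/element into the destination binding/element.
    */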
   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      LVP_FROM_HANDLE(lvp_descriptor_set, src, copy->srcSet);
      LVP_FROM_HANDLE(lvp_descriptor_set, dst, copy->dstSet);

      const struct lvp_descriptor_set_binding_layout *src_layout =
         &src->layout->binding[copy->srcBinding];
      struct lvp_descriptor *src_desc =
         &src->descriptors[src_layout->descriptor_index];
      src_desc += copy->srcArrayElement;

      const struct lvp_descriptor_set_binding_layout *dst_layout =
         &dst->layout->binding[copy->dstBinding];
      struct lvp_descriptor *dst_desc =
         &dst->descriptors[dst_layout->descriptor_index];
      dst_desc += copy->dstArrayElement;

      for (uint32_t j = 0; j < copy->descriptorCount; j++)
         dst_desc[j] = src_desc[j];
   }
}

VkResult lvp_CreateDescriptorPool(
   VkDevice                                    _device,
   const VkDescriptorPoolCreateInfo*           pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkDescriptorPool*                           pDescriptorPool)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_descriptor_pool *pool;
   size_t size = sizeof(struct lvp_descriptor_pool);

   pool = vk_zalloc2(&device->vk.alloc, pAllocator, size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pool->base,
                       VK_OBJECT_TYPE_DESCRIPTOR_POOL);
   pool->flags = pCreateInfo->flags;
   list_inithead(&pool->sets);
   *pDescriptorPool = lvp_descriptor_pool_to_handle(pool);
   return VK_SUCCESS;
}

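/* Destroy every set still live in the pool; shared by pool reset and pool
 * destruction.
 */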
static void lvp_reset_descriptor_pool(struct lvp_device *device,
                                      struct lvp_descriptor_pool *pool)
{
   struct lvp_descriptor_set *set, *tmp;
   LIST_FOR_EACH_ENTRY_SAFE(set, tmp, &pool->sets, link) {
      list_del(&set->link);
      lvp_descriptor_set_destroy(device, set);
   }
}

void lvp_DestroyDescriptorPool(
   VkDevice                                    _device,
   VkDescriptorPool                            _pool,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);

   if (!_pool)
      return;

   lvp_reset_descriptor_pool(device, pool);
   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}

VkResult lvp_ResetDescriptorPool(
   VkDevice                                    _device,
   VkDescriptorPool                            _pool,
   VkDescriptorPoolResetFlags                  flags)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);

   lvp_reset_descriptor_pool(device, pool);
   return VK_SUCCESS;
}

void lvp_GetDescriptorSetLayoutSupport(VkDevice device,
                                       const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
                                       VkDescriptorSetLayoutSupport* pSupport)
{
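   /* XXX: stub - no layout support information is reported yet; pSupport is
    * left untouched.
    */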
}