/*
 * Copyright © 2019 Red Hat.
 * Copyright © 2022 Collabora, LTD
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_alloc.h"
#include "vk_cmd_enqueue_entrypoints.h"
#include "vk_command_buffer.h"
#include "vk_descriptor_update_template.h"
#include "vk_device.h"
#include "vk_pipeline_layout.h"
#include "vk_util.h"

static inline unsigned
vk_descriptor_type_update_size(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
      unreachable("handled in caller");

   case VK_DESCRIPTOR_TYPE_SAMPLER:
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
      return sizeof(VkDescriptorImageInfo);

   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      return sizeof(VkBufferView);

   case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
      return sizeof(VkAccelerationStructureKHR);

   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
   default:
      return sizeof(VkDescriptorBufferInfo);
   }
}
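
/* Hedged size reference for the table above, assuming a typical LP64 build:
 * VkDescriptorImageInfo is 24 bytes (sampler, image view, layout),
 * VkBufferView is an 8-byte handle, and VkDescriptorBufferInfo is 24 bytes
 * (buffer, offset, range). Only these leading bytes of each array element
 * are read; padding implied by a larger template stride is never touched.
 */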

static void
vk_cmd_push_descriptor_set_with_template2_free(
   struct vk_cmd_queue *queue, struct vk_cmd_queue_entry *cmd)
{
   struct vk_command_buffer *cmd_buffer =
      container_of(queue, struct vk_command_buffer, cmd_queue);
   struct vk_device *device = cmd_buffer->base.device;

   struct vk_cmd_push_descriptor_set_with_template2 *info_ =
      &cmd->u.push_descriptor_set_with_template2;

   VkPushDescriptorSetWithTemplateInfoKHR *info =
      info_->push_descriptor_set_with_template_info;

   VK_FROM_HANDLE(vk_descriptor_update_template, templ,
                  info->descriptorUpdateTemplate);
   VK_FROM_HANDLE(vk_pipeline_layout, layout, info->layout);

   vk_descriptor_update_template_unref(device, templ);
   vk_pipeline_layout_unref(device, layout);

   if (info->pNext) {
      VkPipelineLayoutCreateInfo *pnext = (void *)info->pNext;

      vk_free(queue->alloc, (void *)pnext->pSetLayouts);
      vk_free(queue->alloc, (void *)pnext->pPushConstantRanges);
      vk_free(queue->alloc, pnext);
   }
}
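
/* Note for the callback above: vk_free() is NULL-safe, so tearing down a
 * pNext chain whose inner arrays were never allocated is fine. The info
 * struct itself and the pData copy are expected to be released by the
 * generic vk_cmd_queue teardown; this callback only handles what the
 * generated code can't (the references and the pNext payload).
 */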

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdPushDescriptorSetWithTemplate2(
   VkCommandBuffer commandBuffer,
   const VkPushDescriptorSetWithTemplateInfoKHR *pPushDescriptorSetWithTemplateInfo)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue *queue = &cmd_buffer->cmd_queue;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE2;
   cmd->driver_free_cb = vk_cmd_push_descriptor_set_with_template2_free;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   VkPushDescriptorSetWithTemplateInfoKHR *info =
      vk_zalloc(cmd_buffer->cmd_queue.alloc,
                sizeof(VkPushDescriptorSetWithTemplateInfoKHR), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!info) {
      /* The free callback cannot run without an info struct, so drop the
       * entry from the queue again. */
      list_del(&cmd->cmd_link);
      vk_free(queue->alloc, cmd);
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   cmd->u.push_descriptor_set_with_template2
      .push_descriptor_set_with_template_info = info;

   /* From the application's perspective, the vk_cmd_queue_entry can outlive
    * the template. Therefore, we take a reference here and release it when
    * the vk_cmd_queue_entry is freed, tying the two lifetimes together.
    */
   info->descriptorUpdateTemplate =
      pPushDescriptorSetWithTemplateInfo->descriptorUpdateTemplate;

   VK_FROM_HANDLE(vk_descriptor_update_template, templ,
                  info->descriptorUpdateTemplate);
   vk_descriptor_update_template_ref(templ);

   info->set = pPushDescriptorSetWithTemplateInfo->set;
   info->sType = pPushDescriptorSetWithTemplateInfo->sType;

   /* The same lifetime concern applies to the pipeline layout. */
   info->layout = pPushDescriptorSetWithTemplateInfo->layout;

   VK_FROM_HANDLE(vk_pipeline_layout, layout, info->layout);
   vk_pipeline_layout_ref(layout);

   /* What makes this tricky is that the size of pData is implicit. We
    * determine it by walking the template and computing the ranges the
    * driver will read.
    */
   size_t data_size = 0;
   for (unsigned i = 0; i < templ->entry_count; ++i) {
      struct vk_descriptor_template_entry entry = templ->entries[i];
      unsigned end = 0;

      /* From the spec:
       *
       *    If descriptorType is VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK then
       *    the value of stride is ignored and the stride is assumed to be 1,
       *    i.e. the descriptor update information for them is always
       *    specified as a contiguous range.
       */
      if (entry.type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
         end = entry.offset + entry.array_count;
      } else if (entry.array_count > 0) {
         end = entry.offset + ((entry.array_count - 1) * entry.stride) +
               vk_descriptor_type_update_size(entry.type);
      }

      data_size = MAX2(data_size, end);
   }
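
   /* Worked example with hypothetical numbers: an entry with offset 16,
    * stride 32, array_count 3 and a buffer-type descriptor (24-byte update
    * size on LP64) yields end = 16 + (3 - 1) * 32 + 24 = 104, even though
    * the gap between consecutive elements is never read. data_size is the
    * maximum such extent over all entries.
    */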

   uint8_t *out_pData = vk_zalloc(cmd_buffer->cmd_queue.alloc, data_size, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (data_size > 0 && out_pData == NULL)
      goto err;

   const uint8_t *pData = pPushDescriptorSetWithTemplateInfo->pData;

   /* Now walk the template again, copying what we actually need */
   for (unsigned i = 0; i < templ->entry_count; ++i) {
      struct vk_descriptor_template_entry entry = templ->entries[i];
      unsigned size = 0;

      if (entry.type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
         size = entry.array_count;
      } else if (entry.array_count > 0) {
         size = ((entry.array_count - 1) * entry.stride) +
                vk_descriptor_type_update_size(entry.type);
      }

      memcpy(out_pData + entry.offset, pData + entry.offset, size);
   }

   info->pData = out_pData;

   const VkBaseInStructure *pnext = pPushDescriptorSetWithTemplateInfo->pNext;

   if (pnext) {
      switch ((int32_t)pnext->sType) {
      /* TODO: The set layouts below would need to be reference counted.
       * Punting until there's a cmd_enqueue-based driver implementing
       * VK_NV_per_stage_descriptor_set.
       */
#if 0
      case VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO:
         info->pNext =
            vk_zalloc(queue->alloc, sizeof(VkPipelineLayoutCreateInfo), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (info->pNext == NULL)
            goto err;

         memcpy((void *)info->pNext, pnext,
                sizeof(VkPipelineLayoutCreateInfo));

         VkPipelineLayoutCreateInfo *tmp_dst2 = (void *)info->pNext;
         VkPipelineLayoutCreateInfo *tmp_src2 = (void *)pnext;

         if (tmp_src2->pSetLayouts) {
            tmp_dst2->pSetLayouts = vk_zalloc(
               queue->alloc,
               sizeof(*tmp_dst2->pSetLayouts) * tmp_dst2->setLayoutCount, 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            if (tmp_dst2->pSetLayouts == NULL)
               goto err;

            memcpy(
               (void *)tmp_dst2->pSetLayouts, tmp_src2->pSetLayouts,
               sizeof(*tmp_dst2->pSetLayouts) * tmp_dst2->setLayoutCount);
         }

         if (tmp_src2->pPushConstantRanges) {
            tmp_dst2->pPushConstantRanges =
               vk_zalloc(queue->alloc,
                         sizeof(*tmp_dst2->pPushConstantRanges) *
                            tmp_dst2->pushConstantRangeCount,
                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            if (tmp_dst2->pPushConstantRanges == NULL)
               goto err;

            memcpy((void *)tmp_dst2->pPushConstantRanges,
                   tmp_src2->pPushConstantRanges,
                   sizeof(*tmp_dst2->pPushConstantRanges) *
                      tmp_dst2->pushConstantRangeCount);
         }
         break;
#endif

      default:
         goto err;
      }
   }

   return;

err:
   if (cmd) {
      vk_cmd_push_descriptor_set_with_template2_free(queue, cmd);
      /* The entry remains in the queue; clear the callback so the generic
       * teardown doesn't release the references a second time. */
      cmd->driver_free_cb = NULL;
   }

   vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
}
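
/* A minimal replay sketch, under stated assumptions: drivers normally get
 * this dispatch from the code generated by vk_cmd_queue_gen.py, and
 * driver_CmdPushDescriptorSetWithTemplate2() below is a hypothetical driver
 * entrypoint, not part of this file.
 */
#if 0
static void
example_replay_push_template2(struct vk_cmd_queue *queue,
                              VkCommandBuffer commandBuffer)
{
   list_for_each_entry(struct vk_cmd_queue_entry, cmd, &queue->cmds, cmd_link) {
      if (cmd->type != VK_CMD_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE2)
         continue;

      /* Valid even if the application destroyed the template or layout
       * after recording: the enqueue path holds references and deep-copied
       * pData. */
      driver_CmdPushDescriptorSetWithTemplate2(
         commandBuffer,
         cmd->u.push_descriptor_set_with_template2
            .push_descriptor_set_with_template_info);
   }
}
#endif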

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdPushDescriptorSetWithTemplate(
   VkCommandBuffer commandBuffer,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   VkPipelineLayout layout,
   uint32_t set,
   const void *pData)
{
   const VkPushDescriptorSetWithTemplateInfoKHR two = {
      .sType = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR,
      .descriptorUpdateTemplate = descriptorUpdateTemplate,
      .layout = layout,
      .set = set,
      .pData = pData,
   };

   vk_cmd_enqueue_CmdPushDescriptorSetWithTemplate2(commandBuffer, &two);
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdDrawMultiEXT(VkCommandBuffer commandBuffer,
                               uint32_t drawCount,
                               const VkMultiDrawInfoEXT *pVertexInfo,
                               uint32_t instanceCount,
                               uint32_t firstInstance,
                               uint32_t stride)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   cmd->type = VK_CMD_DRAW_MULTI_EXT;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   cmd->u.draw_multi_ext.draw_count = drawCount;
   if (pVertexInfo) {
      unsigned i = 0;
      cmd->u.draw_multi_ext.vertex_info =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.draw_multi_ext.vertex_info) * drawCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.draw_multi_ext.vertex_info) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      /* The copy discards the application's stride: the destination array
       * is tightly packed. */
      vk_foreach_multi_draw(draw, i, pVertexInfo, drawCount, stride) {
         memcpy(&cmd->u.draw_multi_ext.vertex_info[i], draw,
                sizeof(*cmd->u.draw_multi_ext.vertex_info));
      }
   }
   cmd->u.draw_multi_ext.instance_count = instanceCount;
   cmd->u.draw_multi_ext.first_instance = firstInstance;
   cmd->u.draw_multi_ext.stride = stride;
}
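
/* Hedged usage sketch: an application-style caller whose per-draw data is
 * embedded in a larger struct. This is why the copy loop above must honor
 * the caller's stride rather than sizeof(VkMultiDrawInfoEXT); the struct
 * and function names are hypothetical.
 */
#if 0
struct example_draw {
   VkMultiDrawInfoEXT draw; /* consumed via pVertexInfo + stride */
   uint32_t user_tag;       /* never read by the driver */
};

static void
example_record_multi_draw(VkCommandBuffer cb,
                          const struct example_draw *draws,
                          uint32_t count)
{
   vk_cmd_enqueue_CmdDrawMultiEXT(cb, count, &draws[0].draw,
                                  1 /* instanceCount */,
                                  0 /* firstInstance */,
                                  sizeof(struct example_draw));
}
#endif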

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer,
                                      uint32_t drawCount,
                                      const VkMultiDrawIndexedInfoEXT *pIndexInfo,
                                      uint32_t instanceCount,
                                      uint32_t firstInstance,
                                      uint32_t stride,
                                      const int32_t *pVertexOffset)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   cmd->type = VK_CMD_DRAW_MULTI_INDEXED_EXT;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   cmd->u.draw_multi_indexed_ext.draw_count = drawCount;

   if (pIndexInfo) {
      unsigned i = 0;
      cmd->u.draw_multi_indexed_ext.index_info =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.draw_multi_indexed_ext.index_info) * drawCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.draw_multi_indexed_ext.index_info) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      vk_foreach_multi_draw_indexed(draw, i, pIndexInfo, drawCount, stride) {
         cmd->u.draw_multi_indexed_ext.index_info[i].firstIndex = draw->firstIndex;
         cmd->u.draw_multi_indexed_ext.index_info[i].indexCount = draw->indexCount;
         if (pVertexOffset == NULL)
            cmd->u.draw_multi_indexed_ext.index_info[i].vertexOffset = draw->vertexOffset;
      }
   }

   cmd->u.draw_multi_indexed_ext.instance_count = instanceCount;
   cmd->u.draw_multi_indexed_ext.first_instance = firstInstance;
   cmd->u.draw_multi_indexed_ext.stride = stride;

   if (pVertexOffset) {
      cmd->u.draw_multi_indexed_ext.vertex_offset =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.draw_multi_indexed_ext.vertex_offset), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.draw_multi_indexed_ext.vertex_offset) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy(cmd->u.draw_multi_indexed_ext.vertex_offset, pVertexOffset,
             sizeof(*cmd->u.draw_multi_indexed_ext.vertex_offset));
   }
}
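
/* Note on the indexed path above: when pVertexOffset is non-NULL,
 * VK_EXT_multi_draw ignores the per-draw vertexOffset values, so the copy
 * loop leaves index_info[i].vertexOffset at the zero vk_zalloc wrote and
 * the single shared offset is stored in vertex_offset instead.
 */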

static void
push_descriptors_set_free(struct vk_cmd_queue *queue,
                          struct vk_cmd_queue_entry *cmd)
{
   struct vk_command_buffer *cmd_buffer =
      container_of(queue, struct vk_command_buffer, cmd_queue);
   struct vk_cmd_push_descriptor_set *pds = &cmd->u.push_descriptor_set;

   VK_FROM_HANDLE(vk_pipeline_layout, vk_layout, pds->layout);
   vk_pipeline_layout_unref(cmd_buffer->base.device, vk_layout);

   /* Nothing more to do if the writes array was never allocated. */
   if (pds->descriptor_writes == NULL)
      return;

   for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
      VkWriteDescriptorSet *entry = &pds->descriptor_writes[i];
      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         vk_free(queue->alloc, (void *)entry->pImageInfo);
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         vk_free(queue->alloc, (void *)entry->pTexelBufferView);
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      default:
         vk_free(queue->alloc, (void *)entry->pBufferInfo);
         break;
      }
   }
}
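
/* The switch above must stay in sync with the per-type deep copies made in
 * vk_cmd_enqueue_CmdPushDescriptorSet below: whichever of the union-style
 * pointers (pImageInfo / pTexelBufferView / pBufferInfo) was allocated
 * during the copy is the one that has to be freed here.
 */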

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdPushDescriptorSet(VkCommandBuffer commandBuffer,
                                    VkPipelineBindPoint pipelineBindPoint,
                                    VkPipelineLayout layout,
                                    uint32_t set,
                                    uint32_t descriptorWriteCount,
                                    const VkWriteDescriptorSet *pDescriptorWrites)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_cmd_push_descriptor_set *pds;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   pds = &cmd->u.push_descriptor_set;

   cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET;
   cmd->driver_free_cb = push_descriptors_set_free;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   pds->pipeline_bind_point = pipelineBindPoint;
   pds->set = set;
   pds->descriptor_write_count = descriptorWriteCount;

   /* From the application's perspective, the vk_cmd_queue_entry can outlive
    * the layout. Take a reference.
    */
   VK_FROM_HANDLE(vk_pipeline_layout, vk_layout, layout);
   pds->layout = layout;
   vk_pipeline_layout_ref(vk_layout);

   if (pDescriptorWrites) {
      pds->descriptor_writes =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*pds->descriptor_writes) * descriptorWriteCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!pds->descriptor_writes) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy(pds->descriptor_writes,
             pDescriptorWrites,
             sizeof(*pds->descriptor_writes) * descriptorWriteCount);

      for (unsigned i = 0; i < descriptorWriteCount; i++) {
         switch (pds->descriptor_writes[i].descriptorType) {
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            pds->descriptor_writes[i].pImageInfo =
               vk_zalloc(cmd_buffer->cmd_queue.alloc,
                         sizeof(VkDescriptorImageInfo) * pds->descriptor_writes[i].descriptorCount, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            memcpy((VkDescriptorImageInfo *)pds->descriptor_writes[i].pImageInfo,
                   pDescriptorWrites[i].pImageInfo,
                   sizeof(VkDescriptorImageInfo) * pds->descriptor_writes[i].descriptorCount);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            pds->descriptor_writes[i].pTexelBufferView =
               vk_zalloc(cmd_buffer->cmd_queue.alloc,
                         sizeof(VkBufferView) * pds->descriptor_writes[i].descriptorCount, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            memcpy((VkBufferView *)pds->descriptor_writes[i].pTexelBufferView,
                   pDescriptorWrites[i].pTexelBufferView,
                   sizeof(VkBufferView) * pds->descriptor_writes[i].descriptorCount);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         default:
            pds->descriptor_writes[i].pBufferInfo =
               vk_zalloc(cmd_buffer->cmd_queue.alloc,
                         sizeof(VkDescriptorBufferInfo) * pds->descriptor_writes[i].descriptorCount, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
            memcpy((VkDescriptorBufferInfo *)pds->descriptor_writes[i].pBufferInfo,
                   pDescriptorWrites[i].pBufferInfo,
                   sizeof(VkDescriptorBufferInfo) * pds->descriptor_writes[i].descriptorCount);
            break;
         }
      }
   }
}
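
/* Hedged usage sketch: pushing a single uniform-buffer descriptor through
 * the enqueue path above. The handles are assumed to come from the
 * application; the function name is hypothetical.
 */
#if 0
static void
example_push_ubo(VkCommandBuffer cb, VkPipelineLayout layout, VkBuffer ubo)
{
   const VkDescriptorBufferInfo buffer_info = {
      .buffer = ubo,
      .offset = 0,
      .range = VK_WHOLE_SIZE,
   };
   const VkWriteDescriptorSet write = {
      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
      .dstBinding = 0,
      .descriptorCount = 1,
      .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
      .pBufferInfo = &buffer_info,
   };

   /* The deep copy above means neither write nor buffer_info has to
    * outlive this call. */
   vk_cmd_enqueue_CmdPushDescriptorSet(cb, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                       layout, 0 /* set */, 1, &write);
}
#endif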

static void
unref_pipeline_layout(struct vk_cmd_queue *queue,
                      struct vk_cmd_queue_entry *cmd)
{
   struct vk_command_buffer *cmd_buffer =
      container_of(queue, struct vk_command_buffer, cmd_queue);
   VK_FROM_HANDLE(vk_pipeline_layout, layout,
                  cmd->u.bind_descriptor_sets.layout);

   assert(cmd->type == VK_CMD_BIND_DESCRIPTOR_SETS);

   vk_pipeline_layout_unref(cmd_buffer->base.device, layout);
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                                     VkPipelineBindPoint pipelineBindPoint,
                                     VkPipelineLayout layout,
                                     uint32_t firstSet,
                                     uint32_t descriptorSetCount,
                                     const VkDescriptorSet *pDescriptorSets,
                                     uint32_t dynamicOffsetCount,
                                     const uint32_t *pDynamicOffsets)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(*cmd), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   cmd->type = VK_CMD_BIND_DESCRIPTOR_SETS;
   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);

   /* We need to hold a reference to the pipeline layout as long as this
    * command is in the queue. Otherwise, it may get deleted out from under
    * us before the command is replayed. The VkDescriptorSet handles, by
    * contrast, are copied by value below and are not reference counted.
    */
   vk_pipeline_layout_ref(vk_pipeline_layout_from_handle(layout));
   cmd->u.bind_descriptor_sets.layout = layout;
   cmd->driver_free_cb = unref_pipeline_layout;

   cmd->u.bind_descriptor_sets.pipeline_bind_point = pipelineBindPoint;
   cmd->u.bind_descriptor_sets.first_set = firstSet;
   cmd->u.bind_descriptor_sets.descriptor_set_count = descriptorSetCount;
   if (pDescriptorSets) {
      cmd->u.bind_descriptor_sets.descriptor_sets =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.bind_descriptor_sets.descriptor_sets) * descriptorSetCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.bind_descriptor_sets.descriptor_sets) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy(cmd->u.bind_descriptor_sets.descriptor_sets, pDescriptorSets,
             sizeof(*cmd->u.bind_descriptor_sets.descriptor_sets) * descriptorSetCount);
   }
   cmd->u.bind_descriptor_sets.dynamic_offset_count = dynamicOffsetCount;
   if (pDynamicOffsets) {
      cmd->u.bind_descriptor_sets.dynamic_offsets =
         vk_zalloc(cmd_buffer->cmd_queue.alloc,
                   sizeof(*cmd->u.bind_descriptor_sets.dynamic_offsets) * dynamicOffsetCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.bind_descriptor_sets.dynamic_offsets) {
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy(cmd->u.bind_descriptor_sets.dynamic_offsets, pDynamicOffsets,
             sizeof(*cmd->u.bind_descriptor_sets.dynamic_offsets) * dynamicOffsetCount);
   }
}

#ifdef VK_ENABLE_BETA_EXTENSIONS
static void
dispatch_graph_amdx_free(struct vk_cmd_queue *queue, struct vk_cmd_queue_entry *cmd)
{
   VkDispatchGraphCountInfoAMDX *count_info = cmd->u.dispatch_graph_amdx.count_info;
   if (count_info == NULL)
      return;

   void *infos = (void *)count_info->infos.hostAddress;
   if (infos == NULL)
      return;

   for (uint32_t i = 0; i < count_info->count; i++) {
      VkDispatchGraphInfoAMDX *info = (void *)((const uint8_t *)infos + i * count_info->stride);
      vk_free(queue->alloc, (void *)info->payloads.hostAddress);
   }

   vk_free(queue->alloc, infos);
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdDispatchGraphAMDX(VkCommandBuffer commandBuffer, VkDeviceAddress scratch,
                                    VkDeviceSize scratchSize,
                                    const VkDispatchGraphCountInfoAMDX *pCountInfo)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   if (vk_command_buffer_has_error(cmd_buffer))
      return;

   const VkAllocationCallbacks *alloc = cmd_buffer->cmd_queue.alloc;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(alloc, sizeof(struct vk_cmd_queue_entry), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      goto err;

   cmd->type = VK_CMD_DISPATCH_GRAPH_AMDX;
   cmd->driver_free_cb = dispatch_graph_amdx_free;

   cmd->u.dispatch_graph_amdx.scratch = scratch;
   cmd->u.dispatch_graph_amdx.scratch_size = scratchSize;

   cmd->u.dispatch_graph_amdx.count_info =
      vk_zalloc(alloc, sizeof(VkDispatchGraphCountInfoAMDX), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd->u.dispatch_graph_amdx.count_info == NULL)
      goto err;

   memcpy((void *)cmd->u.dispatch_graph_amdx.count_info, pCountInfo,
          sizeof(VkDispatchGraphCountInfoAMDX));

   uint32_t infos_size = pCountInfo->count * pCountInfo->stride;
   void *infos = vk_zalloc(alloc, infos_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   cmd->u.dispatch_graph_amdx.count_info->infos.hostAddress = infos;
   if (infos == NULL)
      goto err;

   memcpy(infos, pCountInfo->infos.hostAddress, infos_size);

   for (uint32_t i = 0; i < pCountInfo->count; i++) {
      VkDispatchGraphInfoAMDX *info = (void *)((const uint8_t *)infos + i * pCountInfo->stride);

      uint32_t payloads_size = info->payloadCount * info->payloadStride;
      void *dst_payload = vk_zalloc(alloc, payloads_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (dst_payload == NULL) {
         /* Entries at index >= i still point at application memory; clear
          * them so the free callback only touches our own allocations. */
         for (uint32_t j = i; j < pCountInfo->count; j++) {
            VkDispatchGraphInfoAMDX *remaining =
               (void *)((const uint8_t *)infos + j * pCountInfo->stride);
            remaining->payloads.hostAddress = NULL;
         }
         goto err;
      }
      memcpy(dst_payload, info->payloads.hostAddress, payloads_size);
      info->payloads.hostAddress = dst_payload;
   }

   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
   return;

err:
   if (cmd) {
      /* Release the partial deep copies before the entry itself;
       * dispatch_graph_amdx_free tolerates a missing count_info. */
      dispatch_graph_amdx_free(&cmd_buffer->cmd_queue, cmd);
      vk_free(alloc, (void *)cmd->u.dispatch_graph_amdx.count_info);
      vk_free(alloc, cmd);
   }

   vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
}
#endif

static void
vk_cmd_build_acceleration_structures_khr_free(struct vk_cmd_queue *queue,
                                              struct vk_cmd_queue_entry *cmd)
{
   struct vk_cmd_build_acceleration_structures_khr *build =
      &cmd->u.build_acceleration_structures_khr;

   for (uint32_t i = 0; i < build->info_count; i++) {
      if (build->infos)
         vk_free(queue->alloc, (void *)build->infos[i].pGeometries);
      if (build->pp_build_range_infos)
         vk_free(queue->alloc, (void *)build->pp_build_range_infos[i]);
   }
}

VKAPI_ATTR void VKAPI_CALL
vk_cmd_enqueue_CmdBuildAccelerationStructuresKHR(
   VkCommandBuffer commandBuffer, uint32_t infoCount,
   const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
   const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   if (vk_command_buffer_has_error(cmd_buffer))
      return;

   struct vk_cmd_queue *queue = &cmd_buffer->cmd_queue;

   struct vk_cmd_queue_entry *cmd =
      vk_zalloc(queue->alloc, vk_cmd_queue_type_sizes[VK_CMD_BUILD_ACCELERATION_STRUCTURES_KHR], 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd)
      goto err;

   cmd->type = VK_CMD_BUILD_ACCELERATION_STRUCTURES_KHR;
   cmd->driver_free_cb = vk_cmd_build_acceleration_structures_khr_free;

   struct vk_cmd_build_acceleration_structures_khr *build =
      &cmd->u.build_acceleration_structures_khr;

   build->info_count = infoCount;
   if (pInfos) {
      build->infos = vk_zalloc(queue->alloc, sizeof(*build->infos) * infoCount, 8,
                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!build->infos)
         goto err;

      memcpy((VkAccelerationStructureBuildGeometryInfoKHR *)build->infos, pInfos,
             sizeof(*build->infos) * infoCount);

      /* The struct copy brought the application's geometry pointers along;
       * clear them so a partially-completed deep copy never hands
       * application pointers to the free callback. */
      for (uint32_t i = 0; i < infoCount; i++) {
         build->infos[i].pGeometries = NULL;
         build->infos[i].ppGeometries = NULL;
      }

      for (uint32_t i = 0; i < infoCount; i++) {
         uint32_t geometries_size =
            build->infos[i].geometryCount * sizeof(VkAccelerationStructureGeometryKHR);
         VkAccelerationStructureGeometryKHR *geometries =
            vk_zalloc(queue->alloc, geometries_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!geometries)
            goto err;

         /* Normalize the two spec-allowed layouts into one: whether the
          * application passed a contiguous pGeometries array or an array
          * of pointers in ppGeometries, the copy is always stored
          * contiguously in pGeometries. */
         if (pInfos[i].pGeometries) {
            memcpy(geometries, pInfos[i].pGeometries, geometries_size);
         } else {
            for (uint32_t j = 0; j < build->infos[i].geometryCount; j++)
               memcpy(&geometries[j], pInfos[i].ppGeometries[j], sizeof(VkAccelerationStructureGeometryKHR));
         }

         build->infos[i].pGeometries = geometries;
      }
   }
   if (ppBuildRangeInfos) {
      build->pp_build_range_infos =
         vk_zalloc(queue->alloc, sizeof(*build->pp_build_range_infos) * infoCount, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!build->pp_build_range_infos)
         goto err;

      VkAccelerationStructureBuildRangeInfoKHR **pp_build_range_infos =
         (void *)build->pp_build_range_infos;

      for (uint32_t i = 0; i < infoCount; i++) {
         uint32_t build_range_size =
            build->infos[i].geometryCount * sizeof(VkAccelerationStructureBuildRangeInfoKHR);
         VkAccelerationStructureBuildRangeInfoKHR *p_build_range_infos =
            vk_zalloc(queue->alloc, build_range_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!p_build_range_infos)
            goto err;

         memcpy(p_build_range_infos, ppBuildRangeInfos[i], build_range_size);

         pp_build_range_infos[i] = p_build_range_infos;
      }
   }

   list_addtail(&cmd->cmd_link, &queue->cmds);
   return;

err:
   if (cmd) {
      /* The entry never made it into the queue, so everything has to be
       * released here, including the top-level arrays. */
      vk_cmd_build_acceleration_structures_khr_free(queue, cmd);
      vk_free(queue->alloc, (void *)cmd->u.build_acceleration_structures_khr.infos);
      vk_free(queue->alloc, (void *)cmd->u.build_acceleration_structures_khr.pp_build_range_infos);
      vk_free(queue->alloc, cmd);
   }

   vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
}
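
/* Hedged sketch of the two geometry layouts normalized above: both calls
 * below describe the same geometry to the replayed command, because the
 * deep copy always stores a contiguous pGeometries array. The names are
 * hypothetical and the geometry/range initialization is the caller's.
 */
#if 0
static void
example_geometry_layouts(VkCommandBuffer cb,
                         VkAccelerationStructureBuildGeometryInfoKHR info,
                         const VkAccelerationStructureGeometryKHR *geom,
                         const VkAccelerationStructureBuildRangeInfoKHR *range)
{
   const VkAccelerationStructureGeometryKHR *geoms[] = { geom };

   info.geometryCount = 1;
   info.pGeometries = geom;
   info.ppGeometries = NULL;
   vk_cmd_enqueue_CmdBuildAccelerationStructuresKHR(cb, 1, &info, &range);

   info.pGeometries = NULL;
   info.ppGeometries = geoms;
   vk_cmd_enqueue_CmdBuildAccelerationStructuresKHR(cb, 1, &info, &range);
}
#endif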

VKAPI_ATTR void VKAPI_CALL vk_cmd_enqueue_CmdPushConstants2(
   VkCommandBuffer commandBuffer,
   const VkPushConstantsInfoKHR *pPushConstantsInfo)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_cmd_queue *queue = &cmd_buffer->cmd_queue;

   struct vk_cmd_queue_entry *cmd = vk_zalloc(queue->alloc, vk_cmd_queue_type_sizes[VK_CMD_PUSH_CONSTANTS2], 8,
                                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   cmd->type = VK_CMD_PUSH_CONSTANTS2;

   VkPushConstantsInfoKHR *info = vk_zalloc(queue->alloc, sizeof(*info), 8,
                                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   void *pValues = vk_zalloc(queue->alloc, pPushConstantsInfo->size, 8,
                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!info || !pValues) {
      vk_free(queue->alloc, info);
      vk_free(queue->alloc, pValues);
      vk_free(queue->alloc, cmd);
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   memcpy(info, pPushConstantsInfo, sizeof(*info));
   memcpy(pValues, pPushConstantsInfo->pValues, pPushConstantsInfo->size);

   cmd->u.push_constants2.push_constants_info = info;
   info->pValues = pValues;

   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
}

static void
vk_free_cmd_push_descriptor_set2(struct vk_cmd_queue *queue,
                                 struct vk_cmd_queue_entry *cmd)
{
   ralloc_free(cmd->driver_data);
}
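
/* All pNext payloads copied below are parented to cmd->driver_data with
 * ralloc, so the single ralloc_free() above releases the whole tree. The
 * members the command-queue generator knows about (the info struct and the
 * pDescriptorWrites array) are vk_alloc'd and are expected to be freed by
 * the generic teardown instead.
 */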

VKAPI_ATTR void VKAPI_CALL vk_cmd_enqueue_CmdPushDescriptorSet2(
   VkCommandBuffer commandBuffer,
   const VkPushDescriptorSetInfoKHR *pPushDescriptorSetInfo)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_cmd_queue_entry *cmd = vk_zalloc(cmd_buffer->cmd_queue.alloc, vk_cmd_queue_type_sizes[VK_CMD_PUSH_DESCRIPTOR_SET2], 8,
                                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cmd) {
      vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
      return;
   }

   cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET2;
   cmd->driver_free_cb = vk_free_cmd_push_descriptor_set2;

   void *ctx = cmd->driver_data = ralloc_context(NULL);
   if (pPushDescriptorSetInfo) {
      cmd->u.push_descriptor_set2.push_descriptor_set_info =
         vk_zalloc(cmd_buffer->cmd_queue.alloc, sizeof(VkPushDescriptorSetInfoKHR), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!cmd->u.push_descriptor_set2.push_descriptor_set_info) {
         ralloc_free(ctx);
         vk_free(cmd_buffer->cmd_queue.alloc, cmd);
         vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
         return;
      }

      memcpy((void *)cmd->u.push_descriptor_set2.push_descriptor_set_info,
             pPushDescriptorSetInfo, sizeof(VkPushDescriptorSetInfoKHR));

      VkPushDescriptorSetInfoKHR *tmp_dst1 =
         (void *)cmd->u.push_descriptor_set2.push_descriptor_set_info;
      const VkPushDescriptorSetInfoKHR *tmp_src1 = pPushDescriptorSetInfo;

      const VkBaseInStructure *pnext = tmp_dst1->pNext;
      if (pnext) {
         switch ((int32_t)pnext->sType) {
         case VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO: {
            tmp_dst1->pNext = rzalloc(ctx, VkPipelineLayoutCreateInfo);

            memcpy((void *)tmp_dst1->pNext, pnext, sizeof(VkPipelineLayoutCreateInfo));
            VkPipelineLayoutCreateInfo *tmp_dst2 = (void *)tmp_dst1->pNext;
            const VkPipelineLayoutCreateInfo *tmp_src2 = (const void *)pnext;
            if (tmp_src2->pSetLayouts) {
               tmp_dst2->pSetLayouts = rzalloc_array_size(ctx, sizeof(*tmp_dst2->pSetLayouts), tmp_dst2->setLayoutCount);

               memcpy((void *)tmp_dst2->pSetLayouts, tmp_src2->pSetLayouts, sizeof(*tmp_dst2->pSetLayouts) * tmp_dst2->setLayoutCount);
            }
            if (tmp_src2->pPushConstantRanges) {
               tmp_dst2->pPushConstantRanges = rzalloc_array_size(ctx, sizeof(*tmp_dst2->pPushConstantRanges), tmp_dst2->pushConstantRangeCount);

               memcpy((void *)tmp_dst2->pPushConstantRanges, tmp_src2->pPushConstantRanges, sizeof(*tmp_dst2->pPushConstantRanges) * tmp_dst2->pushConstantRangeCount);
            }
            break;
         }

         default:
            /* VkPipelineLayoutCreateInfo is the only structure the spec
             * allows in this pNext chain; don't let an unrecognized
             * application pointer outlive this call. */
            tmp_dst1->pNext = NULL;
            break;
         }
      }
      if (tmp_src1->pDescriptorWrites) {
         tmp_dst1->pDescriptorWrites =
            vk_zalloc(cmd_buffer->cmd_queue.alloc,
                      sizeof(*tmp_dst1->pDescriptorWrites) * tmp_dst1->descriptorWriteCount, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!tmp_dst1->pDescriptorWrites) {
            ralloc_free(ctx);
            vk_free(cmd_buffer->cmd_queue.alloc,
                    (void *)cmd->u.push_descriptor_set2.push_descriptor_set_info);
            vk_free(cmd_buffer->cmd_queue.alloc, cmd);
            vk_command_buffer_set_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
            return;
         }

         memcpy((void *)tmp_dst1->pDescriptorWrites, tmp_src1->pDescriptorWrites,
                sizeof(*tmp_dst1->pDescriptorWrites) * tmp_dst1->descriptorWriteCount);
         for (unsigned i = 0; i < tmp_src1->descriptorWriteCount; i++) {
            VkWriteDescriptorSet *dstwrite = (void *)&tmp_dst1->pDescriptorWrites[i];
            const VkWriteDescriptorSet *write = &tmp_src1->pDescriptorWrites[i];
            switch (write->descriptorType) {
            case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK: {
               const VkWriteDescriptorSetInlineUniformBlock *uniform_data =
                  vk_find_struct_const(write->pNext, WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK);
               assert(uniform_data);
               VkWriteDescriptorSetInlineUniformBlock *dst =
                  rzalloc(ctx, VkWriteDescriptorSetInlineUniformBlock);
               memcpy((void *)dst, uniform_data, sizeof(*uniform_data));
               dst->pData = ralloc_size(ctx, uniform_data->dataSize);
               memcpy((void *)dst->pData, uniform_data->pData, uniform_data->dataSize);
               dstwrite->pNext = dst;
               break;
            }

            case VK_DESCRIPTOR_TYPE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
               dstwrite->pImageInfo = rzalloc_array(ctx, VkDescriptorImageInfo, write->descriptorCount);
               {
                  VkDescriptorImageInfo *arr = (void *)dstwrite->pImageInfo;
                  typed_memcpy(arr, write->pImageInfo, write->descriptorCount);
               }
               break;

            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
               dstwrite->pTexelBufferView = rzalloc_array(ctx, VkBufferView, write->descriptorCount);
               {
                  VkBufferView *arr = (void *)dstwrite->pTexelBufferView;
                  typed_memcpy(arr, write->pTexelBufferView, write->descriptorCount);
               }
               break;

            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
               dstwrite->pBufferInfo = rzalloc_array(ctx, VkDescriptorBufferInfo, write->descriptorCount);
               {
                  VkDescriptorBufferInfo *arr = (void *)dstwrite->pBufferInfo;
                  typed_memcpy(arr, write->pBufferInfo, write->descriptorCount);
               }
               break;

            case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
               const VkWriteDescriptorSetAccelerationStructureKHR *accel_structs =
                  vk_find_struct_const(write->pNext, WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR);

               uint32_t accel_structs_size =
                  sizeof(VkAccelerationStructureKHR) * accel_structs->accelerationStructureCount;
               VkWriteDescriptorSetAccelerationStructureKHR *write_accel_structs =
                  rzalloc_size(ctx, sizeof(VkWriteDescriptorSetAccelerationStructureKHR) + accel_structs_size);

               write_accel_structs->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR;
               write_accel_structs->accelerationStructureCount = accel_structs->accelerationStructureCount;
               write_accel_structs->pAccelerationStructures = (void *)&write_accel_structs[1];
               memcpy((void *)write_accel_structs->pAccelerationStructures,
                      accel_structs->pAccelerationStructures, accel_structs_size);

               dstwrite->pNext = write_accel_structs;
               break;
            }

            default:
               break;
            }
         }
      }
   }

   list_addtail(&cmd->cmd_link, &cmd_buffer->cmd_queue.cmds);
}