/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_synchronization.h"

#include "vk_alloc.h"
#include "vk_command_buffer.h"
#include "vk_common_entrypoints.h"
#include "vk_device.h"
#include "vk_queue.h"
#include "vk_util.h"
#include "../wsi/wsi_common.h"

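/* Expand the catch-all write bits in a source access mask, then keep only
 * the write accesses that the given stages can actually perform.  Only
 * writes are relevant on the source side of a barrier.
 */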
VkAccessFlags2
vk_filter_src_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   const VkAccessFlags2 all_write_access =
      vk_write_access2_for_pipeline_stage_flags2(stages);

   if (access & VK_ACCESS_2_MEMORY_WRITE_BIT)
      access |= all_write_access;

   if (access & VK_ACCESS_2_SHADER_WRITE_BIT)
      access |= VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT;

   /* We only care about write access in src flags */
   return access & all_write_access;
}

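/* Expand the catch-all read bits in a destination access mask, then keep
 * only the read accesses that the given stages can actually perform.  Only
 * reads are relevant on the destination side of a barrier.
 */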
VkAccessFlags2
vk_filter_dst_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   const VkAccessFlags2 all_read_access =
      vk_read_access2_for_pipeline_stage_flags2(stages);

   if (access & VK_ACCESS_2_MEMORY_READ_BIT)
      access |= all_read_access;

   if (access & VK_ACCESS_2_SHADER_READ_BIT)
      access |= VK_ACCESS_2_SHADER_SAMPLED_READ_BIT |
                VK_ACCESS_2_SHADER_STORAGE_READ_BIT |
                VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR;

   /* We only care about read access in dst flags */
   return access & all_read_access;
}

VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWriteTimestamp(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlagBits pipelineStage,
    VkQueryPool queryPool,
    uint32_t query)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdWriteTimestamp2(commandBuffer,
                                             (VkPipelineStageFlags2) pipelineStage,
                                             queryPool,
                                             query);
}

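/* The upgrade_*() helpers translate the Vulkan 1.0 barrier structs into
 * their synchronization2 equivalents.  The 1.0 structs carry no stage
 * masks of their own, so the command-level src/dst stage masks are passed
 * in and applied to every barrier.
 */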
static VkMemoryBarrier2
upgrade_memory_barrier(const VkMemoryBarrier *barrier,
                       VkPipelineStageFlags2 src_stage_mask2,
                       VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
   };
}

static VkBufferMemoryBarrier2
upgrade_buffer_memory_barrier(const VkBufferMemoryBarrier *barrier,
                              VkPipelineStageFlags2 src_stage_mask2,
                              VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkBufferMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
      .srcQueueFamilyIndex = barrier->srcQueueFamilyIndex,
      .dstQueueFamilyIndex = barrier->dstQueueFamilyIndex,
      .buffer = barrier->buffer,
      .offset = barrier->offset,
      .size = barrier->size,
   };
}

static VkImageMemoryBarrier2
upgrade_image_memory_barrier(const VkImageMemoryBarrier *barrier,
                             VkPipelineStageFlags2 src_stage_mask2,
                             VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkImageMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
      .oldLayout = barrier->oldLayout,
      .newLayout = barrier->newLayout,
      .srcQueueFamilyIndex = barrier->srcQueueFamilyIndex,
      .dstQueueFamilyIndex = barrier->dstQueueFamilyIndex,
      .image = barrier->image,
      .subresourceRange = barrier->subresourceRange,
   };
}

VKAPI_ATTR void VKAPI_CALL
vk_common_CmdPipelineBarrier(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags dstStageMask,
    VkDependencyFlags dependencyFlags,
    uint32_t memoryBarrierCount,
    const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount,
    const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   STACK_ARRAY(VkMemoryBarrier2, memory_barriers, memoryBarrierCount);
   STACK_ARRAY(VkBufferMemoryBarrier2, buffer_barriers, bufferMemoryBarrierCount);
   STACK_ARRAY(VkImageMemoryBarrier2, image_barriers, imageMemoryBarrierCount);

   VkPipelineStageFlags2 src_stage_mask2 = (VkPipelineStageFlags2) srcStageMask;
   VkPipelineStageFlags2 dst_stage_mask2 = (VkPipelineStageFlags2) dstStageMask;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      memory_barriers[i] = upgrade_memory_barrier(&pMemoryBarriers[i],
                                                  src_stage_mask2,
                                                  dst_stage_mask2);
   }
   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      buffer_barriers[i] = upgrade_buffer_memory_barrier(&pBufferMemoryBarriers[i],
                                                         src_stage_mask2,
                                                         dst_stage_mask2);
   }
   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      image_barriers[i] = upgrade_image_memory_barrier(&pImageMemoryBarriers[i],
                                                       src_stage_mask2,
                                                       dst_stage_mask2);
   }

   VkDependencyInfo dep_info = {
      .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
      .dependencyFlags = dependencyFlags,
      .memoryBarrierCount = memoryBarrierCount,
      .pMemoryBarriers = memory_barriers,
      .bufferMemoryBarrierCount = bufferMemoryBarrierCount,
      .pBufferMemoryBarriers = buffer_barriers,
      .imageMemoryBarrierCount = imageMemoryBarrierCount,
      .pImageMemoryBarriers = image_barriers,
   };

   device->dispatch_table.CmdPipelineBarrier2(commandBuffer, &dep_info);

   STACK_ARRAY_FINISH(memory_barriers);
   STACK_ARRAY_FINISH(buffer_barriers);
   STACK_ARRAY_FINISH(image_barriers);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_CmdSetEvent(
    VkCommandBuffer commandBuffer,
    VkEvent event,
    VkPipelineStageFlags stageMask)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

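   /* Use stageMask as both the source and destination stage mask so the
    * signal covers exactly the requested stages; vk_common_CmdWaitEvents()
    * waits with the same masks and performs the real src->dst barrier via
    * CmdPipelineBarrier() afterwards.
    */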
   VkMemoryBarrier2 mem_barrier = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .srcStageMask = (VkPipelineStageFlags2) stageMask,
      .dstStageMask = (VkPipelineStageFlags2) stageMask,
   };
   VkDependencyInfo dep_info = {
      .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
      .memoryBarrierCount = 1,
      .pMemoryBarriers = &mem_barrier,
   };

   device->dispatch_table.CmdSetEvent2(commandBuffer, event, &dep_info);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_CmdResetEvent(
    VkCommandBuffer commandBuffer,
    VkEvent event,
    VkPipelineStageFlags stageMask)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdResetEvent2(commandBuffer,
                                         event,
                                         (VkPipelineStageFlags2) stageMask);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWaitEvents(
    VkCommandBuffer commandBuffer,
    uint32_t eventCount,
    const VkEvent* pEvents,
    VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags destStageMask,
    uint32_t memoryBarrierCount,
    const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount,
    const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   STACK_ARRAY(VkDependencyInfo, deps, eventCount);

   /* Note that dstStageMask and srcStageMask in the CmdWaitEvents2() call
    * are the same.  This is to match the CmdSetEvent2() call from
    * vk_common_CmdSetEvent().  The actual src->dst stage barrier will
    * happen as part of the CmdPipelineBarrier() call below.
    */
   VkMemoryBarrier2 stage_barrier = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .srcStageMask = (VkPipelineStageFlags2) srcStageMask,
      .dstStageMask = (VkPipelineStageFlags2) srcStageMask,
   };

   for (uint32_t i = 0; i < eventCount; i++) {
      deps[i] = (VkDependencyInfo) {
         .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
         .memoryBarrierCount = 1,
         .pMemoryBarriers = &stage_barrier,
      };
   }
   device->dispatch_table.CmdWaitEvents2(commandBuffer, eventCount, pEvents, deps);

   STACK_ARRAY_FINISH(deps);

   /* Setting dependencyFlags to 0 because:
    *
    * - For BY_REGION_BIT and VIEW_LOCAL_BIT, events are not allowed inside a
    *   render pass so these don't apply.
    *
    * - For DEVICE_GROUP_BIT, we have the following bit of spec text:
    *
    *      "Semaphore and event dependencies are device-local and only
    *       execute on the one physical device that performs the
    *       dependency."
    */
   const VkDependencyFlags dep_flags = 0;

   device->dispatch_table.CmdPipelineBarrier(commandBuffer,
                                             srcStageMask, destStageMask,
                                             dep_flags,
                                             memoryBarrierCount, pMemoryBarriers,
                                             bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                             imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWriteBufferMarkerAMD(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlagBits pipelineStage,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    uint32_t marker)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdWriteBufferMarker2AMD(commandBuffer,
                                                   (VkPipelineStageFlags2) pipelineStage,
                                                   dstBuffer,
                                                   dstOffset,
                                                   marker);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetQueueCheckpointDataNV(
    VkQueue queue,
    uint32_t* pCheckpointDataCount,
    VkCheckpointDataNV* pCheckpointData)
{
   unreachable("Entrypoint not implemented");
}

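/* Implement vkQueueSubmit() on top of vkQueueSubmit2(): each VkSubmitInfo
 * is lowered to a VkSubmitInfo2, with the wait semaphores, command buffers,
 * and signal semaphores from all submits flattened into three contiguous
 * arrays.
 */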
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_QueueSubmit(
    VkQueue _queue,
    uint32_t submitCount,
    const VkSubmitInfo* pSubmits,
    VkFence fence)
{
   VK_FROM_HANDLE(vk_queue, queue, _queue);
   struct vk_device *device = queue->base.device;

   STACK_ARRAY(VkSubmitInfo2, submit_info_2, submitCount);
   STACK_ARRAY(VkPerformanceQuerySubmitInfoKHR, perf_query_submit_info, submitCount);
   STACK_ARRAY(struct wsi_memory_signal_submit_info, wsi_mem_submit_info, submitCount);

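   /* First pass: count the wait semaphores, command buffers, and signal
    * semaphores across all submits so the flattened arrays can be sized
    * up front.
    */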
   uint32_t n_wait_semaphores = 0;
   uint32_t n_command_buffers = 0;
   uint32_t n_signal_semaphores = 0;
   for (uint32_t s = 0; s < submitCount; s++) {
      n_wait_semaphores += pSubmits[s].waitSemaphoreCount;
      n_command_buffers += pSubmits[s].commandBufferCount;
      n_signal_semaphores += pSubmits[s].signalSemaphoreCount;
   }

   STACK_ARRAY(VkSemaphoreSubmitInfo, wait_semaphores, n_wait_semaphores);
   STACK_ARRAY(VkCommandBufferSubmitInfo, command_buffers, n_command_buffers);
   STACK_ARRAY(VkSemaphoreSubmitInfo, signal_semaphores, n_signal_semaphores);

   n_wait_semaphores = 0;
   n_command_buffers = 0;
   n_signal_semaphores = 0;

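   /* Second pass: fill the flattened arrays and build one VkSubmitInfo2
    * per submit that points into them.
    */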
   for (uint32_t s = 0; s < submitCount; s++) {
      const VkTimelineSemaphoreSubmitInfo *timeline_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              TIMELINE_SEMAPHORE_SUBMIT_INFO);
      const uint64_t *wait_values = NULL;
      const uint64_t *signal_values = NULL;

      if (timeline_info && timeline_info->waitSemaphoreValueCount) {
         /* From the Vulkan 1.3.204 spec:
          *
          *    VUID-VkSubmitInfo-pNext-03240
          *
          *    "If the pNext chain of this structure includes a
          *    VkTimelineSemaphoreSubmitInfo structure and any element of
          *    pWaitSemaphores was created with a VkSemaphoreType of
          *    VK_SEMAPHORE_TYPE_TIMELINE, then its waitSemaphoreValueCount
          *    member must equal waitSemaphoreCount"
          */
         assert(timeline_info->waitSemaphoreValueCount == pSubmits[s].waitSemaphoreCount);
         wait_values = timeline_info->pWaitSemaphoreValues;
      }

      if (timeline_info && timeline_info->signalSemaphoreValueCount) {
         /* From the Vulkan 1.3.204 spec:
          *
          *    VUID-VkSubmitInfo-pNext-03241
          *
          *    "If the pNext chain of this structure includes a
          *    VkTimelineSemaphoreSubmitInfo structure and any element of
          *    pSignalSemaphores was created with a VkSemaphoreType of
          *    VK_SEMAPHORE_TYPE_TIMELINE, then its signalSemaphoreValueCount
          *    member must equal signalSemaphoreCount"
          */
         assert(timeline_info->signalSemaphoreValueCount == pSubmits[s].signalSemaphoreCount);
         signal_values = timeline_info->pSignalSemaphoreValues;
      }

      const VkDeviceGroupSubmitInfo *group_info =
         vk_find_struct_const(pSubmits[s].pNext, DEVICE_GROUP_SUBMIT_INFO);

      for (uint32_t i = 0; i < pSubmits[s].waitSemaphoreCount; i++) {
         wait_semaphores[n_wait_semaphores + i] = (VkSemaphoreSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
            .semaphore = pSubmits[s].pWaitSemaphores[i],
            .value = wait_values ? wait_values[i] : 0,
            .stageMask = pSubmits[s].pWaitDstStageMask[i],
            .deviceIndex = group_info ? group_info->pWaitSemaphoreDeviceIndices[i] : 0,
         };
      }
      for (uint32_t i = 0; i < pSubmits[s].commandBufferCount; i++) {
         command_buffers[n_command_buffers + i] = (VkCommandBufferSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
            .commandBuffer = pSubmits[s].pCommandBuffers[i],
            .deviceMask = group_info ? group_info->pCommandBufferDeviceMasks[i] : 0,
         };
      }
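      /* VkSubmitInfo has no per-signal-semaphore stage mask; with the
       * legacy vkQueueSubmit(), signal operations only happen once all
       * submitted commands have completed, which corresponds to
       * VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT.
       */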
      for (uint32_t i = 0; i < pSubmits[s].signalSemaphoreCount; i++) {
         signal_semaphores[n_signal_semaphores + i] = (VkSemaphoreSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
            .semaphore = pSubmits[s].pSignalSemaphores[i],
            .value = signal_values ? signal_values[i] : 0,
            .stageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
            .deviceIndex = group_info ? group_info->pSignalSemaphoreDeviceIndices[i] : 0,
         };
      }

      const VkProtectedSubmitInfo *protected_info =
         vk_find_struct_const(pSubmits[s].pNext, PROTECTED_SUBMIT_INFO);

      submit_info_2[s] = (VkSubmitInfo2) {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
         .flags = ((protected_info && protected_info->protectedSubmit) ?
                   VK_SUBMIT_PROTECTED_BIT : 0),
         .waitSemaphoreInfoCount = pSubmits[s].waitSemaphoreCount,
         .pWaitSemaphoreInfos = &wait_semaphores[n_wait_semaphores],
         .commandBufferInfoCount = pSubmits[s].commandBufferCount,
         .pCommandBufferInfos = &command_buffers[n_command_buffers],
         .signalSemaphoreInfoCount = pSubmits[s].signalSemaphoreCount,
         .pSignalSemaphoreInfos = &signal_semaphores[n_signal_semaphores],
      };

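      /* Forward pNext structures that vkQueueSubmit2() also understands,
       * copied with their pNext cleared so they can be re-chained onto the
       * new VkSubmitInfo2 without dragging the rest of the legacy chain
       * along.
       */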
      const VkPerformanceQuerySubmitInfoKHR *query_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);
      if (query_info) {
         perf_query_submit_info[s] = *query_info;
         perf_query_submit_info[s].pNext = NULL;
         __vk_append_struct(&submit_info_2[s], &perf_query_submit_info[s]);
      }

      const struct wsi_memory_signal_submit_info *mem_signal_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
      if (mem_signal_info) {
         wsi_mem_submit_info[s] = *mem_signal_info;
         wsi_mem_submit_info[s].pNext = NULL;
         __vk_append_struct(&submit_info_2[s], &wsi_mem_submit_info[s]);
      }

      n_wait_semaphores += pSubmits[s].waitSemaphoreCount;
      n_command_buffers += pSubmits[s].commandBufferCount;
      n_signal_semaphores += pSubmits[s].signalSemaphoreCount;
   }

   VkResult result = device->dispatch_table.QueueSubmit2(_queue,
                                                         submitCount,
                                                         submit_info_2,
                                                         fence);

   STACK_ARRAY_FINISH(wait_semaphores);
   STACK_ARRAY_FINISH(command_buffers);
   STACK_ARRAY_FINISH(signal_semaphores);
   STACK_ARRAY_FINISH(submit_info_2);
   STACK_ARRAY_FINISH(perf_query_submit_info);
   STACK_ARRAY_FINISH(wsi_mem_submit_info);

   return result;
}