/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_synchronization.h"

#include "vk_alloc.h"
#include "vk_command_buffer.h"
#include "vk_common_entrypoints.h"
#include "vk_device.h"
#include "vk_queue.h"
#include "vk_util.h"
#include "../wsi/wsi_common.h"

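/* Expands generic source access flags into the specific write accesses they
 * cover: VK_ACCESS_2_MEMORY_WRITE_BIT becomes every write access valid for
 * the given stages, and VK_ACCESS_2_SHADER_WRITE_BIT also implies
 * VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT.
 */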
VkAccessFlags2
vk_expand_src_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   if (access & VK_ACCESS_2_MEMORY_WRITE_BIT)
      access |= vk_write_access2_for_pipeline_stage_flags2(stages);

   if (access & VK_ACCESS_2_SHADER_WRITE_BIT)
      access |= VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT;

   return access;
}

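/* Expands generic destination access flags into the specific read accesses
 * they cover for the given stages.  VK_ACCESS_2_MEMORY_WRITE_BIT is only
 * expanded to VK_ACCESS_2_HOST_WRITE_BIT, the one write access that matters
 * on the destination side.
 */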
VkAccessFlags2
vk_expand_dst_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   if (access & VK_ACCESS_2_MEMORY_READ_BIT)
      access |= vk_read_access2_for_pipeline_stage_flags2(stages);

   /* expand VK_ACCESS_2_MEMORY_WRITE_BIT for VK_ACCESS_2_HOST_WRITE_BIT */
   if (access & VK_ACCESS_2_MEMORY_WRITE_BIT) {
      access |= vk_write_access2_for_pipeline_stage_flags2(stages) &
                VK_ACCESS_2_HOST_WRITE_BIT;
   }

   if (access & VK_ACCESS_2_SHADER_READ_BIT)
      access |= VK_ACCESS_2_SHADER_SAMPLED_READ_BIT |
                VK_ACCESS_2_SHADER_STORAGE_READ_BIT |
                VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR;

   return access;
}

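/* Restricts source access flags to the write accesses that are valid for the
 * given stages; only writes need to be made available by a barrier source.
 */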
VkAccessFlags2
vk_filter_src_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   const VkAccessFlags2 all_write_access =
      vk_write_access2_for_pipeline_stage_flags2(stages);

   /* We only care about write access in src flags */
   return vk_expand_src_access_flags2(stages, access) & all_write_access;
}

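/* Restricts destination access flags to the read accesses (plus host writes)
 * that are valid for the given stages.
 */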
VkAccessFlags2
vk_filter_dst_access_flags2(VkPipelineStageFlags2 stages,
                            VkAccessFlags2 access)
{
   const VkAccessFlags2 all_read_access =
      vk_read_access2_for_pipeline_stage_flags2(stages);

   /* We only care about read access (plus host write) in dst flags */
   return vk_expand_dst_access_flags2(stages, access) &
          (all_read_access | VK_ACCESS_2_HOST_WRITE_BIT);
}

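/* The vk_common_* entrypoints below implement the original synchronization
 * commands in terms of their VK_KHR_synchronization2 equivalents, so a
 * driver that provides the *2 entrypoints gets the legacy ones for free.
 */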
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWriteTimestamp(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlagBits pipelineStage,
    VkQueryPool queryPool,
    uint32_t query)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdWriteTimestamp2(commandBuffer,
                                             (VkPipelineStageFlags2) pipelineStage,
                                             queryPool,
                                             query);
}

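/* Helpers that translate the legacy barrier structures into their *2
 * counterparts.  The original API only provides per-command stage masks,
 * so the same src/dst stage masks are applied to every barrier.
 */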
static VkMemoryBarrier2
upgrade_memory_barrier(const VkMemoryBarrier *barrier,
                       VkPipelineStageFlags2 src_stage_mask2,
                       VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
   };
}

static VkBufferMemoryBarrier2
upgrade_buffer_memory_barrier(const VkBufferMemoryBarrier *barrier,
                              VkPipelineStageFlags2 src_stage_mask2,
                              VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkBufferMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
      .srcQueueFamilyIndex = barrier->srcQueueFamilyIndex,
      .dstQueueFamilyIndex = barrier->dstQueueFamilyIndex,
      .buffer = barrier->buffer,
      .offset = barrier->offset,
      .size = barrier->size,
   };
}

static VkImageMemoryBarrier2
upgrade_image_memory_barrier(const VkImageMemoryBarrier *barrier,
                             VkPipelineStageFlags2 src_stage_mask2,
                             VkPipelineStageFlags2 dst_stage_mask2)
{
   return (VkImageMemoryBarrier2) {
      .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
      .pNext = barrier->pNext,
      .srcStageMask = src_stage_mask2,
      .srcAccessMask = (VkAccessFlags2) barrier->srcAccessMask,
      .dstStageMask = dst_stage_mask2,
      .dstAccessMask = (VkAccessFlags2) barrier->dstAccessMask,
      .oldLayout = barrier->oldLayout,
      .newLayout = barrier->newLayout,
      .srcQueueFamilyIndex = barrier->srcQueueFamilyIndex,
      .dstQueueFamilyIndex = barrier->dstQueueFamilyIndex,
      .image = barrier->image,
      .subresourceRange = barrier->subresourceRange,
   };
}

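/* Implements vkCmdPipelineBarrier() by upgrading each legacy barrier to its
 * *2 form and forwarding the result to vkCmdPipelineBarrier2().
 */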
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdPipelineBarrier(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags dstStageMask,
    VkDependencyFlags dependencyFlags,
    uint32_t memoryBarrierCount,
    const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount,
    const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   STACK_ARRAY(VkMemoryBarrier2, memory_barriers, memoryBarrierCount);
   STACK_ARRAY(VkBufferMemoryBarrier2, buffer_barriers, bufferMemoryBarrierCount);
   STACK_ARRAY(VkImageMemoryBarrier2, image_barriers, imageMemoryBarrierCount);

   VkPipelineStageFlags2 src_stage_mask2 = (VkPipelineStageFlags2) srcStageMask;
   VkPipelineStageFlags2 dst_stage_mask2 = (VkPipelineStageFlags2) dstStageMask;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      memory_barriers[i] = upgrade_memory_barrier(&pMemoryBarriers[i],
                                                  src_stage_mask2,
                                                  dst_stage_mask2);
   }
   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      buffer_barriers[i] = upgrade_buffer_memory_barrier(&pBufferMemoryBarriers[i],
                                                         src_stage_mask2,
                                                         dst_stage_mask2);
   }
   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      image_barriers[i] = upgrade_image_memory_barrier(&pImageMemoryBarriers[i],
                                                       src_stage_mask2,
                                                       dst_stage_mask2);
   }

   VkDependencyInfo dep_info = {
      .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
      .dependencyFlags = dependencyFlags,
      .memoryBarrierCount = memoryBarrierCount,
      .pMemoryBarriers = memory_barriers,
      .bufferMemoryBarrierCount = bufferMemoryBarrierCount,
      .pBufferMemoryBarriers = buffer_barriers,
      .imageMemoryBarrierCount = imageMemoryBarrierCount,
      .pImageMemoryBarriers = image_barriers,
   };

   device->dispatch_table.CmdPipelineBarrier2(commandBuffer, &dep_info);

   STACK_ARRAY_FINISH(memory_barriers);
   STACK_ARRAY_FINISH(buffer_barriers);
   STACK_ARRAY_FINISH(image_barriers);
}

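/* Implements vkCmdSetEvent() on top of vkCmdSetEvent2().  The original
 * command only provides a single stage mask, so it is used for both
 * srcStageMask and dstStageMask here; the real dst stages are supplied when
 * the event is waited on.
 */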
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdSetEvent(
    VkCommandBuffer commandBuffer,
    VkEvent event,
    VkPipelineStageFlags stageMask)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   VkMemoryBarrier2 mem_barrier = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .srcStageMask = (VkPipelineStageFlags2) stageMask,
      .dstStageMask = (VkPipelineStageFlags2) stageMask,
   };
   VkDependencyInfo dep_info = {
      .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
      .memoryBarrierCount = 1,
      .pMemoryBarriers = &mem_barrier,
   };

   device->dispatch_table.CmdSetEvent2(commandBuffer, event, &dep_info);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_CmdResetEvent(
    VkCommandBuffer commandBuffer,
    VkEvent event,
    VkPipelineStageFlags stageMask)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdResetEvent2(commandBuffer,
                                         event,
                                         (VkPipelineStageFlags2) stageMask);
}

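/* Implements vkCmdWaitEvents() as a vkCmdWaitEvents2() that only covers the
 * events' execution dependency, followed by a vkCmdPipelineBarrier() that
 * carries the actual src->dst stage and memory dependencies.
 */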
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWaitEvents(
    VkCommandBuffer commandBuffer,
    uint32_t eventCount,
    const VkEvent* pEvents,
    VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags destStageMask,
    uint32_t memoryBarrierCount,
    const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount,
    const VkImageMemoryBarrier* pImageMemoryBarriers)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   if (eventCount == 0)
      return;

   STACK_ARRAY(VkDependencyInfo, deps, eventCount);

   /* Note that dstStageMask and srcStageMask in the CmdWaitEvents2() call
    * are the same.  This is to match the CmdSetEvent2() call from
    * vk_common_CmdSetEvent().  The actual src->dst stage barrier will
    * happen as part of the CmdPipelineBarrier() call below.
    */
   VkMemoryBarrier2 stage_barrier = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
      .srcStageMask = srcStageMask,
      .dstStageMask = srcStageMask,
   };

   for (uint32_t i = 0; i < eventCount; i++) {
      deps[i] = (VkDependencyInfo) {
         .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
         .memoryBarrierCount = 1,
         .pMemoryBarriers = &stage_barrier,
      };
   }
   device->dispatch_table.CmdWaitEvents2(commandBuffer, eventCount, pEvents, deps);

   STACK_ARRAY_FINISH(deps);

   /* Setting dependency flags to 0 because:
    *
    * - For BY_REGION_BIT and VIEW_LOCAL_BIT, events are not allowed inside a
    *   render pass so these don't apply.
    *
    * - For DEVICE_GROUP_BIT, we have the following bit of spec text:
    *
    *       "Semaphore and event dependencies are device-local and only
    *       execute on the one physical device that performs the
    *       dependency."
    */
   const VkDependencyFlags dep_flags = 0;

   device->dispatch_table.CmdPipelineBarrier(commandBuffer,
                                             srcStageMask, destStageMask,
                                             dep_flags,
                                             memoryBarrierCount, pMemoryBarriers,
                                             bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                             imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_CmdWriteBufferMarkerAMD(
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlagBits pipelineStage,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    uint32_t marker)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   struct vk_device *device = cmd_buffer->base.device;

   device->dispatch_table.CmdWriteBufferMarker2AMD(commandBuffer,
                                                   (VkPipelineStageFlags2) pipelineStage,
                                                   dstBuffer,
                                                   dstOffset,
                                                   marker);
}

VKAPI_ATTR void VKAPI_CALL
vk_common_GetQueueCheckpointDataNV(
    VkQueue queue,
    uint32_t* pCheckpointDataCount,
    VkCheckpointDataNV* pCheckpointData)
{
   unreachable("Entrypoint not implemented");
}

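/* Implements vkQueueSubmit() by translating each VkSubmitInfo (and the pNext
 * structures handled below) into a VkSubmitInfo2 and forwarding the whole
 * batch to vkQueueSubmit2().
 */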
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_QueueSubmit(
    VkQueue _queue,
    uint32_t submitCount,
    const VkSubmitInfo* pSubmits,
    VkFence fence)
{
   VK_FROM_HANDLE(vk_queue, queue, _queue);
   struct vk_device *device = queue->base.device;

   STACK_ARRAY(VkSubmitInfo2, submit_info_2, submitCount);
   STACK_ARRAY(VkPerformanceQuerySubmitInfoKHR, perf_query_submit_info, submitCount);
   STACK_ARRAY(struct wsi_memory_signal_submit_info, wsi_mem_submit_info, submitCount);

   uint32_t n_wait_semaphores = 0;
   uint32_t n_command_buffers = 0;
   uint32_t n_signal_semaphores = 0;
   for (uint32_t s = 0; s < submitCount; s++) {
      n_wait_semaphores += pSubmits[s].waitSemaphoreCount;
      n_command_buffers += pSubmits[s].commandBufferCount;
      n_signal_semaphores += pSubmits[s].signalSemaphoreCount;
   }

   STACK_ARRAY(VkSemaphoreSubmitInfo, wait_semaphores, n_wait_semaphores);
   STACK_ARRAY(VkCommandBufferSubmitInfo, command_buffers, n_command_buffers);
   STACK_ARRAY(VkSemaphoreSubmitInfo, signal_semaphores, n_signal_semaphores);

   n_wait_semaphores = 0;
   n_command_buffers = 0;
   n_signal_semaphores = 0;

   for (uint32_t s = 0; s < submitCount; s++) {
      const VkTimelineSemaphoreSubmitInfo *timeline_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              TIMELINE_SEMAPHORE_SUBMIT_INFO);
      const uint64_t *wait_values = NULL;
      const uint64_t *signal_values = NULL;

      if (timeline_info && timeline_info->waitSemaphoreValueCount) {
         /* From the Vulkan 1.3.204 spec:
          *
          *    VUID-VkSubmitInfo-pNext-03240
          *
          *    "If the pNext chain of this structure includes a
          *    VkTimelineSemaphoreSubmitInfo structure and any element of
          *    pWaitSemaphores was created with a VkSemaphoreType of
          *    VK_SEMAPHORE_TYPE_TIMELINE, then its waitSemaphoreValueCount
          *    member must equal waitSemaphoreCount"
          */
         assert(timeline_info->waitSemaphoreValueCount == pSubmits[s].waitSemaphoreCount);
         wait_values = timeline_info->pWaitSemaphoreValues;
      }

      if (timeline_info && timeline_info->signalSemaphoreValueCount) {
         /* From the Vulkan 1.3.204 spec:
          *
          *    VUID-VkSubmitInfo-pNext-03241
          *
          *    "If the pNext chain of this structure includes a
          *    VkTimelineSemaphoreSubmitInfo structure and any element of
          *    pSignalSemaphores was created with a VkSemaphoreType of
          *    VK_SEMAPHORE_TYPE_TIMELINE, then its signalSemaphoreValueCount
          *    member must equal signalSemaphoreCount"
          */
         assert(timeline_info->signalSemaphoreValueCount == pSubmits[s].signalSemaphoreCount);
         signal_values = timeline_info->pSignalSemaphoreValues;
      }

      const VkDeviceGroupSubmitInfo *group_info =
         vk_find_struct_const(pSubmits[s].pNext, DEVICE_GROUP_SUBMIT_INFO);

      for (uint32_t i = 0; i < pSubmits[s].waitSemaphoreCount; i++) {
         wait_semaphores[n_wait_semaphores + i] = (VkSemaphoreSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
            .semaphore = pSubmits[s].pWaitSemaphores[i],
            .value = wait_values ? wait_values[i] : 0,
            .stageMask = pSubmits[s].pWaitDstStageMask[i],
            .deviceIndex = group_info ? group_info->pWaitSemaphoreDeviceIndices[i] : 0,
         };
      }
      for (uint32_t i = 0; i < pSubmits[s].commandBufferCount; i++) {
         command_buffers[n_command_buffers + i] = (VkCommandBufferSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
            .commandBuffer = pSubmits[s].pCommandBuffers[i],
            .deviceMask = group_info ? group_info->pCommandBufferDeviceMasks[i] : 0,
         };
      }
      for (uint32_t i = 0; i < pSubmits[s].signalSemaphoreCount; i++) {
         signal_semaphores[n_signal_semaphores + i] = (VkSemaphoreSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
            .semaphore = pSubmits[s].pSignalSemaphores[i],
            .value = signal_values ? signal_values[i] : 0,
            .stageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
            .deviceIndex = group_info ? group_info->pSignalSemaphoreDeviceIndices[i] : 0,
         };
      }

      const VkProtectedSubmitInfo *protected_info =
         vk_find_struct_const(pSubmits[s].pNext, PROTECTED_SUBMIT_INFO);

      submit_info_2[s] = (VkSubmitInfo2) {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
         .flags = ((protected_info && protected_info->protectedSubmit) ?
                   VK_SUBMIT_PROTECTED_BIT : 0),
         .waitSemaphoreInfoCount = pSubmits[s].waitSemaphoreCount,
         .pWaitSemaphoreInfos = &wait_semaphores[n_wait_semaphores],
         .commandBufferInfoCount = pSubmits[s].commandBufferCount,
         .pCommandBufferInfos = &command_buffers[n_command_buffers],
         .signalSemaphoreInfoCount = pSubmits[s].signalSemaphoreCount,
         .pSignalSemaphoreInfos = &signal_semaphores[n_signal_semaphores],
      };

      const VkPerformanceQuerySubmitInfoKHR *query_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);
      if (query_info) {
         perf_query_submit_info[s] = *query_info;
         perf_query_submit_info[s].pNext = NULL;
         __vk_append_struct(&submit_info_2[s], &perf_query_submit_info[s]);
      }

      const struct wsi_memory_signal_submit_info *mem_signal_info =
         vk_find_struct_const(pSubmits[s].pNext,
                              WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
      if (mem_signal_info) {
         wsi_mem_submit_info[s] = *mem_signal_info;
         wsi_mem_submit_info[s].pNext = NULL;
         __vk_append_struct(&submit_info_2[s], &wsi_mem_submit_info[s]);
      }

      n_wait_semaphores += pSubmits[s].waitSemaphoreCount;
      n_command_buffers += pSubmits[s].commandBufferCount;
      n_signal_semaphores += pSubmits[s].signalSemaphoreCount;
   }

   VkResult result = device->dispatch_table.QueueSubmit2(_queue,
                                                         submitCount,
                                                         submit_info_2,
                                                         fence);

   STACK_ARRAY_FINISH(wait_semaphores);
   STACK_ARRAY_FINISH(command_buffers);
   STACK_ARRAY_FINISH(signal_semaphores);
   STACK_ARRAY_FINISH(submit_info_2);
   STACK_ARRAY_FINISH(perf_query_submit_info);
   STACK_ARRAY_FINISH(wsi_mem_submit_info);

   return result;
}