/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * based in part on radv driver which is:
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore.
 */

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <vulkan/vulkan.h>

#include "pvr_job_compute.h"
#include "pvr_job_context.h"
#include "pvr_job_render.h"
#include "pvr_job_transfer.h"
#include "pvr_limits.h"
#include "pvr_private.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "vk_alloc.h"
#include "vk_fence.h"
#include "vk_log.h"
#include "vk_object.h"
#include "vk_queue.h"
#include "vk_semaphore.h"
#include "vk_sync.h"
#include "vk_sync_dummy.h"
#include "vk_util.h"

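/* Initialises a single pvr_queue: sets up the common vk_queue state and
 * creates the transfer, compute and render (gfx) hardware contexts that the
 * queue submits jobs to. On failure, any contexts created so far are
 * destroyed in reverse order.
 */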
static VkResult pvr_queue_init(struct pvr_device *device,
                               struct pvr_queue *queue,
                               const VkDeviceQueueCreateInfo *pCreateInfo,
                               uint32_t index_in_family)
{
   struct pvr_transfer_ctx *transfer_ctx;
   struct pvr_compute_ctx *compute_ctx;
   struct pvr_render_ctx *gfx_ctx;
   VkResult result;

   result =
      vk_queue_init(&queue->vk, &device->vk, pCreateInfo, index_in_family);
   if (result != VK_SUCCESS)
      return result;

   result = pvr_transfer_ctx_create(device,
                                    PVR_WINSYS_CTX_PRIORITY_MEDIUM,
                                    &transfer_ctx);
   if (result != VK_SUCCESS)
      goto err_vk_queue_finish;

   result = pvr_compute_ctx_create(device,
                                   PVR_WINSYS_CTX_PRIORITY_MEDIUM,
                                   &compute_ctx);
   if (result != VK_SUCCESS)
      goto err_transfer_ctx_destroy;

   result =
      pvr_render_ctx_create(device, PVR_WINSYS_CTX_PRIORITY_MEDIUM, &gfx_ctx);
   if (result != VK_SUCCESS)
      goto err_compute_ctx_destroy;

   queue->device = device;
   queue->gfx_ctx = gfx_ctx;
   queue->compute_ctx = compute_ctx;
   queue->transfer_ctx = transfer_ctx;

   for (uint32_t i = 0; i < ARRAY_SIZE(queue->completion); i++)
      queue->completion[i] = NULL;

   return VK_SUCCESS;

err_compute_ctx_destroy:
   pvr_compute_ctx_destroy(compute_ctx);

err_transfer_ctx_destroy:
   pvr_transfer_ctx_destroy(transfer_ctx);

err_vk_queue_finish:
   vk_queue_finish(&queue->vk);

   return result;
}

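/* Creates the device's queues at device creation time. The driver exposes a
 * single queue family, so exactly one VkDeviceQueueCreateInfo is expected;
 * each requested queue is initialised in turn and any partially created
 * queues are torn down if initialisation fails part-way through.
 */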
VkResult pvr_queues_create(struct pvr_device *device,
                           const VkDeviceCreateInfo *pCreateInfo)
{
   VkResult result;

   /* Check requested queue families and queues */
   assert(pCreateInfo->queueCreateInfoCount == 1);
   assert(pCreateInfo->pQueueCreateInfos[0].queueFamilyIndex == 0);
   assert(pCreateInfo->pQueueCreateInfos[0].queueCount <= PVR_MAX_QUEUES);

   const VkDeviceQueueCreateInfo *queue_create =
      &pCreateInfo->pQueueCreateInfos[0];

   device->queues = vk_alloc(&device->vk.alloc,
                             queue_create->queueCount * sizeof(*device->queues),
                             8,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device->queues)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->queue_count = 0;

   for (uint32_t i = 0; i < queue_create->queueCount; i++) {
      result = pvr_queue_init(device, &device->queues[i], queue_create, i);
      if (result != VK_SUCCESS)
         goto err_queues_finish;

      device->queue_count++;
   }

   return VK_SUCCESS;

err_queues_finish:
   pvr_queues_destroy(device);
   return result;
}

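/* Tears down a single queue: destroys any per-job-type completion syncs
 * still held by the queue, then the render, compute and transfer contexts,
 * and finally the common vk_queue state.
 */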
static void pvr_queue_finish(struct pvr_queue *queue)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(queue->completion); i++) {
      if (queue->completion[i])
         vk_sync_destroy(&queue->device->vk, queue->completion[i]);
   }

   pvr_render_ctx_destroy(queue->gfx_ctx);
   pvr_compute_ctx_destroy(queue->compute_ctx);
   pvr_transfer_ctx_destroy(queue->transfer_ctx);

   vk_queue_finish(&queue->vk);
}

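/* Destroys all queues created by pvr_queues_create() and frees the queue
 * array itself.
 */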
void pvr_queues_destroy(struct pvr_device *device)
{
   for (uint32_t q_idx = 0; q_idx < device->queue_count; q_idx++)
      pvr_queue_finish(&device->queues[q_idx]);

   vk_free(&device->vk.alloc, device->queues);
}

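/* vkQueueWaitIdle: waits for the most recent completion sync of each job
 * type to signal. Later submissions replace these syncs with ones for the
 * newest jobs, so waiting on the current set should cover all outstanding
 * work on the queue.
 */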
VkResult pvr_QueueWaitIdle(VkQueue _queue)
{
   PVR_FROM_HANDLE(pvr_queue, queue, _queue);

   for (int i = 0U; i < ARRAY_SIZE(queue->completion); i++) {
      VkResult result;

      if (!queue->completion[i])
         continue;

      result = vk_sync_wait(&queue->device->vk,
                            queue->completion[i],
                            0U,
                            VK_SYNC_WAIT_COMPLETE,
                            UINT64_MAX);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

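/* Submits a graphics sub-command as a render job. One completion sync is
 * created for the geometry phase and one for the fragment phase, any
 * imported BOs referenced by the framebuffer attachments are gathered, and
 * the wait syncs are handed over to pvr_render_job_submit(). On success the
 * new syncs replace the previous geometry/fragment entries in the
 * completions array.
 */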
static VkResult
pvr_process_graphics_cmd(struct pvr_device *device,
                         struct pvr_queue *queue,
                         struct pvr_cmd_buffer *cmd_buffer,
                         struct pvr_sub_cmd_gfx *sub_cmd,
                         struct vk_sync **waits,
                         uint32_t wait_count,
                         uint32_t *stage_flags,
                         struct vk_sync *completions[static PVR_JOB_TYPE_MAX])
{
   const struct pvr_framebuffer *framebuffer = sub_cmd->framebuffer;
   struct vk_sync *sync_geom;
   struct vk_sync *sync_frag;
   uint32_t bo_count = 0;
   VkResult result;

   STACK_ARRAY(struct pvr_winsys_job_bo, bos, framebuffer->attachment_count);
   if (!bos)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = vk_sync_create(&device->vk,
                           &device->pdevice->ws->syncobj_type,
                           0U,
                           0UL,
                           &sync_geom);
   if (result != VK_SUCCESS)
      return result;

   result = vk_sync_create(&device->vk,
                           &device->pdevice->ws->syncobj_type,
                           0U,
                           0UL,
                           &sync_frag);
   if (result != VK_SUCCESS) {
      vk_sync_destroy(&device->vk, sync_geom);
      return result;
   }

   /* FIXME: DoShadowLoadOrStore() */

   /* FIXME: If the framebuffer being rendered to has multiple layers then we
    * need to split submissions that run a fragment job into two.
    */
   if (sub_cmd->job.run_frag && framebuffer->layers > 1)
      pvr_finishme("Split job submission for framebuffers with > 1 layers");

   /* Get any imported buffers used in framebuffer attachments. */
   for (uint32_t i = 0U; i < framebuffer->attachment_count; i++) {
      if (!framebuffer->attachments[i]->image->vma->bo->is_imported)
         continue;

      bos[bo_count].bo = framebuffer->attachments[i]->image->vma->bo;
      bos[bo_count].flags = PVR_WINSYS_JOB_BO_FLAG_WRITE;
      bo_count++;
   }

   /* This passes ownership of the wait fences to pvr_render_job_submit(). */
   result = pvr_render_job_submit(queue->gfx_ctx,
                                  &sub_cmd->job,
                                  bos,
                                  bo_count,
                                  waits,
                                  wait_count,
                                  stage_flags,
                                  sync_geom,
                                  sync_frag);
   STACK_ARRAY_FINISH(bos);
   if (result != VK_SUCCESS) {
      vk_sync_destroy(&device->vk, sync_geom);
      vk_sync_destroy(&device->vk, sync_frag);
      return result;
   }

   /* Replace the completion fences. */
   if (completions[PVR_JOB_TYPE_GEOM])
      vk_sync_destroy(&device->vk, completions[PVR_JOB_TYPE_GEOM]);

   completions[PVR_JOB_TYPE_GEOM] = sync_geom;

   if (completions[PVR_JOB_TYPE_FRAG])
      vk_sync_destroy(&device->vk, completions[PVR_JOB_TYPE_FRAG]);

   completions[PVR_JOB_TYPE_FRAG] = sync_frag;

   /* FIXME: DoShadowLoadOrStore() */

   return result;
}

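/* Submits a compute sub-command as a compute job, creating a single
 * completion sync that replaces the previous compute entry in the
 * completions array on success.
 */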
static VkResult
pvr_process_compute_cmd(struct pvr_device *device,
                        struct pvr_queue *queue,
                        struct pvr_sub_cmd_compute *sub_cmd,
                        struct vk_sync **waits,
                        uint32_t wait_count,
                        uint32_t *stage_flags,
                        struct vk_sync *completions[static PVR_JOB_TYPE_MAX])
{
   struct vk_sync *sync;
   VkResult result;

   result = vk_sync_create(&device->vk,
                           &device->pdevice->ws->syncobj_type,
                           0U,
                           0UL,
                           &sync);
   if (result != VK_SUCCESS)
      return result;

   /* This passes ownership of the wait fences to pvr_compute_job_submit(). */
   result = pvr_compute_job_submit(queue->compute_ctx,
                                   sub_cmd,
                                   waits,
                                   wait_count,
                                   stage_flags,
                                   sync);
   if (result != VK_SUCCESS) {
      vk_sync_destroy(&device->vk, sync);
      return result;
   }

   /* Replace the completion fences. */
   if (completions[PVR_JOB_TYPE_COMPUTE])
      vk_sync_destroy(&device->vk, completions[PVR_JOB_TYPE_COMPUTE]);

   completions[PVR_JOB_TYPE_COMPUTE] = sync;

   return result;
}

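/* Submits a transfer sub-command as a transfer job, creating a single
 * completion sync that replaces the previous transfer entry in the
 * completions array on success.
 */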
static VkResult
pvr_process_transfer_cmds(struct pvr_device *device,
                          struct pvr_queue *queue,
                          struct pvr_sub_cmd_transfer *sub_cmd,
                          struct vk_sync **waits,
                          uint32_t wait_count,
                          uint32_t *stage_flags,
                          struct vk_sync *completions[static PVR_JOB_TYPE_MAX])
{
   struct vk_sync *sync;
   VkResult result;

   result = vk_sync_create(&device->vk,
                           &device->pdevice->ws->syncobj_type,
                           0U,
                           0UL,
                           &sync);
   if (result != VK_SUCCESS)
      return result;

   /* This passes ownership of the wait fences to pvr_transfer_job_submit(). */
   result = pvr_transfer_job_submit(device,
                                    queue->transfer_ctx,
                                    sub_cmd,
                                    waits,
                                    wait_count,
                                    stage_flags,
                                    sync);
   if (result != VK_SUCCESS) {
      vk_sync_destroy(&device->vk, sync);
      return result;
   }

   /* Replace the completion fences. */
   if (completions[PVR_JOB_TYPE_TRANSFER])
      vk_sync_destroy(&device->vk, completions[PVR_JOB_TYPE_TRANSFER]);

   completions[PVR_JOB_TYPE_TRANSFER] = sync;

   return result;
}

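/* Signals the submit's signal semaphores. A null job is submitted that waits
 * on all per-job-type completion syncs and signals a merged sync. For a
 * single signal semaphore the merged payload is moved directly into it; for
 * multiple semaphores the merged sync is exported as a sync file and
 * imported into each semaphore's active sync.
 */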
static VkResult
pvr_set_semaphore_payloads(struct pvr_device *device,
                           struct vk_sync *completions[static PVR_JOB_TYPE_MAX],
                           const VkSemaphore *signals,
                           uint32_t signal_count)
{
   struct vk_sync *sync;
   VkResult result;
   int fd = -1;

   result = vk_sync_create(&device->vk,
                           &device->pdevice->ws->syncobj_type,
                           0U,
                           0UL,
                           &sync);
   if (result != VK_SUCCESS)
      return result;

   result = device->ws->ops->null_job_submit(device->ws,
                                             completions,
                                             PVR_JOB_TYPE_MAX,
                                             sync);
   if (result != VK_SUCCESS)
      goto end_set_semaphore_payloads;

   /* If we have a single signal semaphore, we can simply move the merged
    * sync's payload to the signal semaphore's payload.
    */
   if (signal_count == 1U) {
      VK_FROM_HANDLE(vk_semaphore, sem, signals[0]);
      struct vk_sync *sem_sync = vk_semaphore_get_active_sync(sem);

      result = vk_sync_move(&device->vk, sem_sync, sync);
      goto end_set_semaphore_payloads;
   }

   result = vk_sync_export_sync_file(&device->vk, sync, &fd);
   if (result != VK_SUCCESS)
      goto end_set_semaphore_payloads;

   for (uint32_t i = 0U; i < signal_count; i++) {
      VK_FROM_HANDLE(vk_semaphore, sem, signals[i]);
      struct vk_sync *sem_sync = vk_semaphore_get_active_sync(sem);

      result = vk_sync_import_sync_file(&device->vk, sem_sync, fd);
      if (result != VK_SUCCESS)
         goto end_set_semaphore_payloads;
   }

end_set_semaphore_payloads:
   if (fd != -1)
      close(fd);

   vk_sync_destroy(&device->vk, sync);

   return result;
}

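/* Signals the submit's fence, if any. As with the semaphores, a null job is
 * used to merge all per-job-type completion syncs into one sync, whose
 * payload is then moved into the fence's active sync.
 */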
static VkResult
pvr_set_fence_payload(struct pvr_device *device,
                      struct vk_sync *completions[static PVR_JOB_TYPE_MAX],
                      VkFence _fence)
{
   VK_FROM_HANDLE(vk_fence, fence, _fence);
   struct vk_sync *fence_sync;
   struct vk_sync *sync;
   VkResult result;

   result = vk_sync_create(&device->vk,
                           &device->pdevice->ws->syncobj_type,
                           0U,
                           0UL,
                           &sync);
   if (result != VK_SUCCESS)
      return result;

   result = device->ws->ops->null_job_submit(device->ws,
                                             completions,
                                             PVR_JOB_TYPE_MAX,
                                             sync);
   if (result != VK_SUCCESS) {
      vk_sync_destroy(&device->vk, sync);
      return result;
   }

   fence_sync = vk_fence_get_active_sync(fence);
   result = vk_sync_move(&device->vk, fence_sync, sync);
   vk_sync_destroy(&device->vk, sync);

   return result;
}

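/* Walks a command buffer's recorded sub-commands and submits each one to the
 * appropriate context (graphics, compute or transfer). The completions array
 * accumulates the latest per-job-type completion sync across sub-commands.
 * On any failure the command buffer is marked invalid.
 */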
static VkResult
pvr_process_cmd_buffer(struct pvr_device *device,
                       struct pvr_queue *queue,
                       VkCommandBuffer commandBuffer,
                       struct vk_sync **waits,
                       uint32_t wait_count,
                       uint32_t *stage_flags,
                       struct vk_sync *completions[static PVR_JOB_TYPE_MAX])
{
   PVR_FROM_HANDLE(pvr_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result;

   assert(cmd_buffer->status == PVR_CMD_BUFFER_STATUS_EXECUTABLE);

   list_for_each_entry_safe (struct pvr_sub_cmd,
                             sub_cmd,
                             &cmd_buffer->sub_cmds,
                             link) {
      switch (sub_cmd->type) {
      case PVR_SUB_CMD_TYPE_GRAPHICS:
         result = pvr_process_graphics_cmd(device,
                                           queue,
                                           cmd_buffer,
                                           &sub_cmd->gfx,
                                           waits,
                                           wait_count,
                                           stage_flags,
                                           completions);
         break;

      case PVR_SUB_CMD_TYPE_COMPUTE:
         result = pvr_process_compute_cmd(device,
                                          queue,
                                          &sub_cmd->compute,
                                          waits,
                                          wait_count,
                                          stage_flags,
                                          completions);
         break;

      case PVR_SUB_CMD_TYPE_TRANSFER:
         result = pvr_process_transfer_cmds(device,
                                            queue,
                                            &sub_cmd->transfer,
                                            waits,
                                            wait_count,
                                            stage_flags,
                                            completions);
         break;

      default:
         pvr_finishme("Unsupported sub-command type %d", sub_cmd->type);
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      if (result != VK_SUCCESS) {
         cmd_buffer->status = PVR_CMD_BUFFER_STATUS_INVALID;
         return result;
      }

      p_atomic_inc(&device->global_queue_job_count);
   }

   return VK_SUCCESS;
}

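/* Handles a VkSubmitInfo with no command buffers. For each job type that has
 * at least one wait sync (according to its stage flags), a null job is
 * submitted that waits on those syncs and signals a new completion sync, so
 * wait/signal semantics are preserved even when there is no real work.
 */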
static VkResult
pvr_submit_null_job(struct pvr_device *device,
                    struct vk_sync **waits,
                    uint32_t wait_count,
                    uint32_t *stage_flags,
                    struct vk_sync *completions[static PVR_JOB_TYPE_MAX])
{
   VkResult result;

   STATIC_ASSERT(PVR_JOB_TYPE_MAX >= PVR_NUM_SYNC_PIPELINE_STAGES);
   for (uint32_t i = 0U; i < PVR_JOB_TYPE_MAX; i++) {
      struct vk_sync *per_job_waits[wait_count];
      uint32_t per_job_waits_count = 0;

      /* Get the waits specific to the job type. */
      for (uint32_t j = 0U; j < wait_count; j++) {
         if (stage_flags[j] & (1U << i)) {
            per_job_waits[per_job_waits_count] = waits[j];
            per_job_waits_count++;
         }
      }

      if (per_job_waits_count == 0U)
         continue;

      result = vk_sync_create(&device->vk,
                              &device->pdevice->ws->syncobj_type,
                              0U,
                              0UL,
                              &completions[i]);
      if (result != VK_SUCCESS)
         goto err_destroy_completion_syncs;

      result = device->ws->ops->null_job_submit(device->ws,
                                                per_job_waits,
                                                per_job_waits_count,
                                                completions[i]);
      if (result != VK_SUCCESS)
         goto err_destroy_completion_syncs;
   }

   return VK_SUCCESS;

err_destroy_completion_syncs:
   for (uint32_t i = 0U; i < PVR_JOB_TYPE_MAX; i++) {
      if (completions[i]) {
         vk_sync_destroy(&device->vk, completions[i]);
         completions[i] = NULL;
      }
   }

   return result;
}

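/* Moves the per-job-type syncs from src into dst, destroying any sync that
 * gets replaced. Used to roll each submit's completion syncs into the
 * running set for the whole vkQueueSubmit call and, finally, into the
 * queue's own completion array.
 */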
static void pvr_update_syncobjs(struct pvr_device *device,
                                struct vk_sync *src[static PVR_JOB_TYPE_MAX],
                                struct vk_sync *dst[static PVR_JOB_TYPE_MAX])
{
   for (uint32_t i = 0; i < PVR_JOB_TYPE_MAX; i++) {
      if (src[i]) {
         if (dst[i])
            vk_sync_destroy(&device->vk, dst[i]);

         dst[i] = src[i];
      }
   }
}

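/* vkQueueSubmit: for each VkSubmitInfo, collects the wait semaphores' active
 * syncs and per-stage flags, submits the command buffers (or a null job if
 * there are none), signals any signal semaphores from the per-submit
 * completion syncs, and finally updates the fence (if provided) and the
 * queue's completion syncs used by vkQueueWaitIdle().
 */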
VkResult pvr_QueueSubmit(VkQueue _queue,
                         uint32_t submitCount,
                         const VkSubmitInfo *pSubmits,
                         VkFence fence)
{
   PVR_FROM_HANDLE(pvr_queue, queue, _queue);
   struct vk_sync *completion_syncobjs[PVR_JOB_TYPE_MAX] = {};
   struct pvr_device *device = queue->device;
   VkResult result;

   for (uint32_t i = 0U; i < submitCount; i++) {
      struct vk_sync *per_submit_completion_syncobjs[PVR_JOB_TYPE_MAX] = {};
      const VkSubmitInfo *desc = &pSubmits[i];
      struct vk_sync *waits[desc->waitSemaphoreCount];
      uint32_t stage_flags[desc->waitSemaphoreCount];
      uint32_t wait_count = 0;

      for (uint32_t j = 0U; j < desc->waitSemaphoreCount; j++) {
         VK_FROM_HANDLE(vk_semaphore, semaphore, desc->pWaitSemaphores[j]);
         struct vk_sync *sync = vk_semaphore_get_active_sync(semaphore);

         if (sync->type == &vk_sync_dummy_type)
            continue;

         /* We don't currently support timeline semaphores. */
         assert(!(sync->flags & VK_SYNC_IS_TIMELINE));

         stage_flags[wait_count] =
            pvr_stage_mask_dst(desc->pWaitDstStageMask[j]);
         waits[wait_count] = vk_semaphore_get_active_sync(semaphore);
         wait_count++;
      }

      if (desc->commandBufferCount > 0U) {
         for (uint32_t j = 0U; j < desc->commandBufferCount; j++) {
            result = pvr_process_cmd_buffer(device,
                                            queue,
                                            desc->pCommandBuffers[j],
                                            waits,
                                            wait_count,
                                            stage_flags,
                                            per_submit_completion_syncobjs);
            if (result != VK_SUCCESS)
               return result;
         }
      } else {
         result = pvr_submit_null_job(device,
                                      waits,
                                      wait_count,
                                      stage_flags,
                                      per_submit_completion_syncobjs);
         if (result != VK_SUCCESS)
            return result;
      }

      if (desc->signalSemaphoreCount) {
         result = pvr_set_semaphore_payloads(device,
                                             per_submit_completion_syncobjs,
                                             desc->pSignalSemaphores,
                                             desc->signalSemaphoreCount);
         if (result != VK_SUCCESS)
            return result;
      }

      pvr_update_syncobjs(device,
                          per_submit_completion_syncobjs,
                          completion_syncobjs);
   }

   if (fence) {
      result = pvr_set_fence_payload(device, completion_syncobjs, fence);
      if (result != VK_SUCCESS)
         return result;
   }

   pvr_update_syncobjs(device, completion_syncobjs, queue->completion);

   return VK_SUCCESS;
}