1 /*
2  * Copyright 2019 Google LLC
3  * SPDX-License-Identifier: MIT
4  *
5  * based in part on anv and radv which are:
6  * Copyright © 2015 Intel Corporation
7  * Copyright © 2016 Red Hat.
8  * Copyright © 2016 Bas Nieuwenhuizen
9  */
10 
11 #include "vn_queue.h"
12 
13 #include "util/libsync.h"
14 #include "venus-protocol/vn_protocol_driver_event.h"
15 #include "venus-protocol/vn_protocol_driver_fence.h"
16 #include "venus-protocol/vn_protocol_driver_queue.h"
17 #include "venus-protocol/vn_protocol_driver_semaphore.h"
18 
19 #include "vn_device.h"
20 #include "vn_device_memory.h"
21 #include "vn_renderer.h"
22 #include "vn_wsi.h"
23 
24 /* queue commands */
25 
26 void
27 vn_GetDeviceQueue2(VkDevice device,
28                    const VkDeviceQueueInfo2 *pQueueInfo,
29                    VkQueue *pQueue)
30 {
31    struct vn_device *dev = vn_device_from_handle(device);
32 
33    for (uint32_t i = 0; i < dev->queue_count; i++) {
34       struct vn_queue *queue = &dev->queues[i];
35       if (queue->family == pQueueInfo->queueFamilyIndex &&
36           queue->index == pQueueInfo->queueIndex &&
37           queue->flags == pQueueInfo->flags) {
38          *pQueue = vn_queue_to_handle(queue);
39          return;
40       }
41    }
42    unreachable("bad queue family/index");
43 }
44 
45 static void
46 vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem);
47 
48 struct vn_queue_submission {
49    VkStructureType batch_type;
50    VkQueue queue;
51    uint32_t batch_count;
52    union {
53       const void *batches;
54       const VkSubmitInfo *submit_batches;
55       const VkBindSparseInfo *bind_sparse_batches;
56    };
57    VkFence fence;
58 
59    uint32_t wait_semaphore_count;
60    uint32_t wait_wsi_count;
61 
62    struct {
63       void *storage;
64 
65       union {
66          void *batches;
67          VkSubmitInfo *submit_batches;
68          VkBindSparseInfo *bind_sparse_batches;
69       };
70       VkSemaphore *semaphores;
71    } temp;
72 };
73 
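/* Helper state for vn_QueueSubmit and vn_QueueBindSparse.  A rough usage
 * sketch, mirroring vn_QueueSubmit below (names are illustrative only):
 *
 *    struct vn_queue_submission submit;
 *    VkResult result = vn_queue_submission_prepare_submit(
 *       &submit, queue_handle, batch_count, submit_batches, fence_handle);
 *    ...forward submit.submit_batches and submit.fence to the renderer...
 *    vn_queue_submission_cleanup(&submit);
 *
 * temp.storage is a single allocation holding the copied batches followed by
 * the filtered wait semaphore array; it is only needed when
 * VN_SYNC_TYPE_WSI_SIGNALED wait semaphores must be dropped from the batches.
 */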
74 static void
75 vn_queue_submission_count_batch_semaphores(struct vn_queue_submission *submit,
76                                            uint32_t batch_index)
77 {
78    union {
79       const VkSubmitInfo *submit_batch;
80       const VkBindSparseInfo *bind_sparse_batch;
81    } u;
82    const VkSemaphore *wait_sems;
83    uint32_t wait_count;
84    switch (submit->batch_type) {
85    case VK_STRUCTURE_TYPE_SUBMIT_INFO:
86       u.submit_batch = &submit->submit_batches[batch_index];
87       wait_sems = u.submit_batch->pWaitSemaphores;
88       wait_count = u.submit_batch->waitSemaphoreCount;
89       break;
90    case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
91       u.bind_sparse_batch = &submit->bind_sparse_batches[batch_index];
92       wait_sems = u.bind_sparse_batch->pWaitSemaphores;
93       wait_count = u.bind_sparse_batch->waitSemaphoreCount;
94       break;
95    default:
96       unreachable("unexpected batch type");
97       break;
98    }
99 
100    submit->wait_semaphore_count += wait_count;
101    for (uint32_t i = 0; i < wait_count; i++) {
102       struct vn_semaphore *sem = vn_semaphore_from_handle(wait_sems[i]);
103       const struct vn_sync_payload *payload = sem->payload;
104 
105       if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)
106          submit->wait_wsi_count++;
107    }
108 }
109 
110 static void
111 vn_queue_submission_count_semaphores(struct vn_queue_submission *submit)
112 {
113    submit->wait_semaphore_count = 0;
114    submit->wait_wsi_count = 0;
115 
116    for (uint32_t i = 0; i < submit->batch_count; i++)
117       vn_queue_submission_count_batch_semaphores(submit, i);
118 }
119 
120 static VkResult
121 vn_queue_submission_alloc_storage(struct vn_queue_submission *submit)
122 {
123    struct vn_queue *queue = vn_queue_from_handle(submit->queue);
124    const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;
125    size_t alloc_size = 0;
126    size_t semaphores_offset = 0;
127 
128    /* we want to filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */
129    if (submit->wait_wsi_count) {
130       switch (submit->batch_type) {
131       case VK_STRUCTURE_TYPE_SUBMIT_INFO:
132          alloc_size += sizeof(VkSubmitInfo) * submit->batch_count;
133          break;
134       case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
135          alloc_size += sizeof(VkBindSparseInfo) * submit->batch_count;
136          break;
137       default:
138          unreachable("unexpected batch type");
139          break;
140       }
141 
142       semaphores_offset = alloc_size;
143       alloc_size += sizeof(*submit->temp.semaphores) *
144                     (submit->wait_semaphore_count - submit->wait_wsi_count);
145    }
146 
147    if (!alloc_size) {
148       submit->temp.storage = NULL;
149       return VK_SUCCESS;
150    }
151 
152    submit->temp.storage = vk_alloc(alloc, alloc_size, VN_DEFAULT_ALIGN,
153                                    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
154    if (!submit->temp.storage)
155       return VK_ERROR_OUT_OF_HOST_MEMORY;
156 
157    submit->temp.batches = submit->temp.storage;
158    submit->temp.semaphores = submit->temp.storage + semaphores_offset;
159 
160    return VK_SUCCESS;
161 }
162 
163 static uint32_t
164 vn_queue_submission_filter_batch_wsi_semaphores(
165    struct vn_queue_submission *submit,
166    uint32_t batch_index,
167    uint32_t sem_base)
168 {
169    struct vn_queue *queue = vn_queue_from_handle(submit->queue);
170 
171    union {
172       VkSubmitInfo *submit_batch;
173       VkBindSparseInfo *bind_sparse_batch;
174    } u;
175    const VkSemaphore *src_sems;
176    uint32_t src_count;
177    switch (submit->batch_type) {
178    case VK_STRUCTURE_TYPE_SUBMIT_INFO:
179       u.submit_batch = &submit->temp.submit_batches[batch_index];
180       src_sems = u.submit_batch->pWaitSemaphores;
181       src_count = u.submit_batch->waitSemaphoreCount;
182       break;
183    case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
184       u.bind_sparse_batch = &submit->temp.bind_sparse_batches[batch_index];
185       src_sems = u.bind_sparse_batch->pWaitSemaphores;
186       src_count = u.bind_sparse_batch->waitSemaphoreCount;
187       break;
188    default:
189       unreachable("unexpected batch type");
190       break;
191    }
192 
193    VkSemaphore *dst_sems = &submit->temp.semaphores[sem_base];
194    uint32_t dst_count = 0;
195 
196    /* filter out VN_SYNC_TYPE_WSI_SIGNALED wait semaphores */
197    for (uint32_t i = 0; i < src_count; i++) {
198       struct vn_semaphore *sem = vn_semaphore_from_handle(src_sems[i]);
199       const struct vn_sync_payload *payload = sem->payload;
200 
201       if (payload->type == VN_SYNC_TYPE_WSI_SIGNALED)
202          vn_semaphore_reset_wsi(queue->device, sem);
203       else
204          dst_sems[dst_count++] = src_sems[i];
205    }
206 
207    switch (submit->batch_type) {
208    case VK_STRUCTURE_TYPE_SUBMIT_INFO:
209       u.submit_batch->pWaitSemaphores = dst_sems;
210       u.submit_batch->waitSemaphoreCount = dst_count;
211       break;
212    case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
213       u.bind_sparse_batch->pWaitSemaphores = dst_sems;
214       u.bind_sparse_batch->waitSemaphoreCount = dst_count;
215       break;
216    default:
217       break;
218    }
219 
220    return dst_count;
221 }
222 
223 static void
224 vn_queue_submission_setup_batches(struct vn_queue_submission *submit)
225 {
226    if (!submit->temp.storage)
227       return;
228 
229    /* make a copy because we need to filter out WSI semaphores */
230    if (submit->wait_wsi_count) {
231       switch (submit->batch_type) {
232       case VK_STRUCTURE_TYPE_SUBMIT_INFO:
233          memcpy(submit->temp.submit_batches, submit->submit_batches,
234                 sizeof(submit->submit_batches[0]) * submit->batch_count);
235          submit->submit_batches = submit->temp.submit_batches;
236          break;
237       case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
238          memcpy(submit->temp.bind_sparse_batches, submit->bind_sparse_batches,
239                 sizeof(submit->bind_sparse_batches[0]) * submit->batch_count);
240          submit->bind_sparse_batches = submit->temp.bind_sparse_batches;
241          break;
242       default:
243          unreachable("unexpected batch type");
244          break;
245       }
246    }
247 
248    uint32_t wait_sem_base = 0;
249    for (uint32_t i = 0; i < submit->batch_count; i++) {
250       if (submit->wait_wsi_count) {
251          wait_sem_base += vn_queue_submission_filter_batch_wsi_semaphores(
252             submit, i, wait_sem_base);
253       }
254    }
255 }
256 
257 static VkResult
258 vn_queue_submission_prepare_submit(struct vn_queue_submission *submit,
259                                    VkQueue queue,
260                                    uint32_t batch_count,
261                                    const VkSubmitInfo *submit_batches,
262                                    VkFence fence)
263 {
264    submit->batch_type = VK_STRUCTURE_TYPE_SUBMIT_INFO;
265    submit->queue = queue;
266    submit->batch_count = batch_count;
267    submit->submit_batches = submit_batches;
268    submit->fence = fence;
269 
270    vn_queue_submission_count_semaphores(submit);
271 
272    VkResult result = vn_queue_submission_alloc_storage(submit);
273    if (result != VK_SUCCESS)
274       return result;
275 
276    vn_queue_submission_setup_batches(submit);
277 
278    return VK_SUCCESS;
279 }
280 
281 static VkResult
282 vn_queue_submission_prepare_bind_sparse(
283    struct vn_queue_submission *submit,
284    VkQueue queue,
285    uint32_t batch_count,
286    const VkBindSparseInfo *bind_sparse_batches,
287    VkFence fence)
288 {
289    submit->batch_type = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
290    submit->queue = queue;
291    submit->batch_count = batch_count;
292    submit->bind_sparse_batches = bind_sparse_batches;
293    submit->fence = fence;
294 
295    vn_queue_submission_count_semaphores(submit);
296 
297    VkResult result = vn_queue_submission_alloc_storage(submit);
298    if (result != VK_SUCCESS)
299       return result;
300 
301    vn_queue_submission_setup_batches(submit);
302 
303    return VK_SUCCESS;
304 }
305 
306 static void
307 vn_queue_submission_cleanup(struct vn_queue_submission *submit)
308 {
309    struct vn_queue *queue = vn_queue_from_handle(submit->queue);
310    const VkAllocationCallbacks *alloc = &queue->device->base.base.alloc;
311 
312    vk_free(alloc, submit->temp.storage);
313 }
314 
315 static inline uint32_t
316 vn_queue_family_array_index(struct vn_queue *queue)
317 {
318    for (uint32_t i = 0; i < queue->device->queue_family_count; i++) {
319       if (queue->device->queue_families[i] == queue->family)
320          return i;
321    }
322    unreachable("invalid queue");
323 }
324 
325 static VkResult
326 vn_queue_submit(struct vn_instance *instance,
327                 VkQueue queue_handle,
328                 uint32_t batch_count,
329                 const VkSubmitInfo *batches,
330                 VkFence fence_handle,
331                 bool sync_submit)
332 {
333    /* skip no-op submit */
334    if (!batch_count && fence_handle == VK_NULL_HANDLE)
335       return VK_SUCCESS;
336 
337    if (sync_submit) {
338       return vn_call_vkQueueSubmit(instance, queue_handle, batch_count,
339                                    batches, fence_handle);
340    }
341 
342    vn_async_vkQueueSubmit(instance, queue_handle, batch_count, batches,
343                           fence_handle);
344    return VK_SUCCESS;
345 }
346 
347 VkResult
348 vn_QueueSubmit(VkQueue _queue,
349                uint32_t submitCount,
350                const VkSubmitInfo *pSubmits,
351                VkFence _fence)
352 {
353    VN_TRACE_FUNC();
354    struct vn_queue *queue = vn_queue_from_handle(_queue);
355    struct vn_device *dev = queue->device;
356    struct vn_fence *fence = vn_fence_from_handle(_fence);
357    const bool external_fence = fence && fence->is_external;
358    const bool feedback_fence = fence && fence->feedback.slot;
359    struct vn_queue_submission submit;
360    const struct vn_device_memory *wsi_mem = NULL;
361    bool sync_submit;
362    VkResult result;
363 
364    result = vn_queue_submission_prepare_submit(&submit, _queue, submitCount,
365                                                pSubmits, _fence);
366    if (result != VK_SUCCESS)
367       return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
368 
369    if (submit.batch_count == 1) {
370       const struct wsi_memory_signal_submit_info *info = vk_find_struct_const(
371          submit.submit_batches[0].pNext, WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
372       if (info) {
373          wsi_mem = vn_device_memory_from_handle(info->memory);
374          assert(!wsi_mem->base_memory && wsi_mem->base_bo);
375       }
376    }
377 
378    /* force synchronous submission if any of the below applies:
379     * - struct wsi_memory_signal_submit_info
380     * - fence is an external fence
381     * - NO_ASYNC_QUEUE_SUBMIT perf option enabled
382     */
383    sync_submit = wsi_mem || external_fence || VN_PERF(NO_ASYNC_QUEUE_SUBMIT);
384 
385    /* if the original submission involves a feedback fence:
386     * - defer the feedback fence to another submit to avoid deep copy
387     * - defer the potential sync_submit to the feedback fence submission
388     */
389    result = vn_queue_submit(dev->instance, submit.queue, submit.batch_count,
390                             submit.submit_batches,
391                             feedback_fence ? VK_NULL_HANDLE : submit.fence,
392                             !feedback_fence && sync_submit);
393    if (result != VK_SUCCESS) {
394       vn_queue_submission_cleanup(&submit);
395       return vn_error(dev->instance, result);
396    }
397 
398    /* TODO intercept original submit batches to append the fence feedback cmd
399     * with a per-queue cached submission builder to avoid transient allocs.
400     *
401     * vn_queue_submission bits must be fixed for VkTimelineSemaphoreSubmitInfo
402     * before adding timeline semaphore feedback.
403     */
404    if (feedback_fence) {
405       const uint32_t feedback_cmd_index = vn_queue_family_array_index(queue);
406       const VkSubmitInfo info = {
407          .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
408          .pNext = NULL,
409          .waitSemaphoreCount = 0,
410          .pWaitSemaphores = NULL,
411          .pWaitDstStageMask = NULL,
412          .commandBufferCount = 1,
413          .pCommandBuffers = &fence->feedback.commands[feedback_cmd_index],
414       };
415       result = vn_queue_submit(dev->instance, submit.queue, 1, &info,
416                                submit.fence, sync_submit);
417       if (result != VK_SUCCESS) {
418          vn_queue_submission_cleanup(&submit);
419          return vn_error(dev->instance, result);
420       }
421    }
422 
423    if (wsi_mem) {
424       /* XXX this is always false and kills performance */
425       if (dev->instance->renderer->info.has_implicit_fencing) {
426          vn_renderer_submit(dev->renderer, &(const struct vn_renderer_submit){
427                                               .bos = &wsi_mem->base_bo,
428                                               .bo_count = 1,
429                                            });
430       } else {
431          if (VN_DEBUG(WSI)) {
432             static uint32_t ratelimit;
433             if (ratelimit < 10) {
434                vn_log(dev->instance,
435                       "forcing vkQueueWaitIdle before presenting");
436                ratelimit++;
437             }
438          }
439 
440          vn_QueueWaitIdle(submit.queue);
441       }
442    }
443 
444    vn_queue_submission_cleanup(&submit);
445 
446    return VK_SUCCESS;
447 }
448 
449 VkResult
450 vn_QueueBindSparse(VkQueue _queue,
451                    uint32_t bindInfoCount,
452                    const VkBindSparseInfo *pBindInfo,
453                    VkFence fence)
454 {
455    VN_TRACE_FUNC();
456    struct vn_queue *queue = vn_queue_from_handle(_queue);
457    struct vn_device *dev = queue->device;
458 
459    /* TODO allow sparse resource along with sync feedback */
460    assert(VN_PERF(NO_FENCE_FEEDBACK));
461 
462    struct vn_queue_submission submit;
463    VkResult result = vn_queue_submission_prepare_bind_sparse(
464       &submit, _queue, bindInfoCount, pBindInfo, fence);
465    if (result != VK_SUCCESS)
466       return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
467 
468    result = vn_call_vkQueueBindSparse(
469       dev->instance, submit.queue, submit.batch_count,
470       submit.bind_sparse_batches, submit.fence);
471    if (result != VK_SUCCESS) {
472       vn_queue_submission_cleanup(&submit);
473       return vn_error(dev->instance, result);
474    }
475 
476    vn_queue_submission_cleanup(&submit);
477 
478    return VK_SUCCESS;
479 }
480 
481 VkResult
482 vn_QueueWaitIdle(VkQueue _queue)
483 {
484    VN_TRACE_FUNC();
485    struct vn_queue *queue = vn_queue_from_handle(_queue);
486    VkDevice device = vn_device_to_handle(queue->device);
487 
488    VkResult result = vn_QueueSubmit(_queue, 0, NULL, queue->wait_fence);
489    if (result != VK_SUCCESS)
490       return result;
491 
492    result = vn_WaitForFences(device, 1, &queue->wait_fence, true, UINT64_MAX);
493    vn_ResetFences(device, 1, &queue->wait_fence);
494 
495    return vn_result(queue->device->instance, result);
496 }
497 
498 /* fence commands */
499 
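/* Fences and semaphores carry two payloads: "permanent", which is
 * VN_SYNC_TYPE_DEVICE_ONLY and backed by the renderer, and "temporary",
 * which can be set to VN_SYNC_TYPE_WSI_SIGNALED to mark the object as
 * already signaled on the driver side (by WSI or by sync_fd import).
 * The payload pointer selects whichever of the two is currently active;
 * resetting, exporting, or consuming the object at submit time restores
 * the permanent payload.
 */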
500 static void
501 vn_sync_payload_release(struct vn_device *dev,
502                         struct vn_sync_payload *payload)
503 {
504    payload->type = VN_SYNC_TYPE_INVALID;
505 }
506 
507 static VkResult
508 vn_fence_init_payloads(struct vn_device *dev,
509                        struct vn_fence *fence,
510                        bool signaled,
511                        const VkAllocationCallbacks *alloc)
512 {
513    fence->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;
514    fence->temporary.type = VN_SYNC_TYPE_INVALID;
515    fence->payload = &fence->permanent;
516 
517    return VK_SUCCESS;
518 }
519 
520 void
521 vn_fence_signal_wsi(struct vn_device *dev, struct vn_fence *fence)
522 {
523    struct vn_sync_payload *temp = &fence->temporary;
524 
525    vn_sync_payload_release(dev, temp);
526    temp->type = VN_SYNC_TYPE_WSI_SIGNALED;
527    fence->payload = temp;
528 }
529 
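/* Fence feedback: a feedback slot plus one command buffer per queue family
 * (allocated and recorded via vn_feedback_fence_cmd_alloc) that is expected
 * to write the signaled status into the slot.  vn_QueueSubmit appends that
 * command buffer in a trailing submission, so vn_GetFenceStatus can poll the
 * slot instead of issuing a synchronous vkGetFenceStatus call to the
 * renderer.
 */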
530 static VkResult
531 vn_fence_feedback_init(struct vn_device *dev,
532                        struct vn_fence *fence,
533                        bool signaled,
534                        const VkAllocationCallbacks *alloc)
535 {
536    VkDevice dev_handle = vn_device_to_handle(dev);
537    struct vn_feedback_slot *slot;
538    VkCommandBuffer *cmd_handles;
539    VkResult result;
540 
541    if (fence->is_external)
542       return VK_SUCCESS;
543 
544    /* Fence feedback implementation relies on vkWaitForFences to cover the gap
545     * between feedback slot signaling and the actual fence signal operation.
546     */
547    if (unlikely(!dev->instance->renderer->info.allow_vk_wait_syncs))
548       return VK_SUCCESS;
549 
550    if (VN_PERF(NO_FENCE_FEEDBACK))
551       return VK_SUCCESS;
552 
553    slot = vn_feedback_pool_alloc(&dev->feedback_pool, VN_FEEDBACK_TYPE_FENCE);
554    if (!slot)
555       return VK_ERROR_OUT_OF_HOST_MEMORY;
556 
557    vn_feedback_set_status(slot, signaled ? VK_SUCCESS : VK_NOT_READY);
558 
559    cmd_handles =
560       vk_zalloc(alloc, sizeof(*cmd_handles) * dev->queue_family_count,
561                 VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
562    if (!cmd_handles) {
563       vn_feedback_pool_free(&dev->feedback_pool, slot);
564       return VK_ERROR_OUT_OF_HOST_MEMORY;
565    }
566 
567    for (uint32_t i = 0; i < dev->queue_family_count; i++) {
568       result = vn_feedback_fence_cmd_alloc(dev_handle, &dev->cmd_pools[i],
569                                            slot, &cmd_handles[i]);
570       if (result != VK_SUCCESS) {
571          for (uint32_t j = 0; j < i; j++) {
572             vn_feedback_fence_cmd_free(dev_handle, &dev->cmd_pools[j],
573                                        cmd_handles[j]);
574          }
575          break;
576       }
577    }
578 
579    if (result != VK_SUCCESS) {
580       vk_free(alloc, cmd_handles);
581       vn_feedback_pool_free(&dev->feedback_pool, slot);
582       return result;
583    }
584 
585    fence->feedback.slot = slot;
586    fence->feedback.commands = cmd_handles;
587 
588    return VK_SUCCESS;
589 }
590 
591 static void
592 vn_fence_feedback_fini(struct vn_device *dev,
593                        struct vn_fence *fence,
594                        const VkAllocationCallbacks *alloc)
595 {
596    VkDevice dev_handle = vn_device_to_handle(dev);
597 
598    if (!fence->feedback.slot)
599       return;
600 
601    for (uint32_t i = 0; i < dev->queue_family_count; i++) {
602       vn_feedback_fence_cmd_free(dev_handle, &dev->cmd_pools[i],
603                                  fence->feedback.commands[i]);
604    }
605 
606    vn_feedback_pool_free(&dev->feedback_pool, fence->feedback.slot);
607 
608    vk_free(alloc, fence->feedback.commands);
609 }
610 
611 VkResult
612 vn_CreateFence(VkDevice device,
613                const VkFenceCreateInfo *pCreateInfo,
614                const VkAllocationCallbacks *pAllocator,
615                VkFence *pFence)
616 {
617    VN_TRACE_FUNC();
618    struct vn_device *dev = vn_device_from_handle(device);
619    const VkAllocationCallbacks *alloc =
620       pAllocator ? pAllocator : &dev->base.base.alloc;
621    const bool signaled = pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT;
622    VkResult result;
623 
624    struct vn_fence *fence = vk_zalloc(alloc, sizeof(*fence), VN_DEFAULT_ALIGN,
625                                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
626    if (!fence)
627       return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
628 
629    vn_object_base_init(&fence->base, VK_OBJECT_TYPE_FENCE, &dev->base);
630 
631    const struct VkExportFenceCreateInfo *export_info =
632       vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO);
633    VkFenceCreateInfo local_create_info;
634    if (export_info) {
635       local_create_info = *pCreateInfo;
636       local_create_info.pNext = NULL;
637       pCreateInfo = &local_create_info;
638 
639       fence->is_external = !!export_info->handleTypes;
640    }
641 
642    result = vn_fence_init_payloads(dev, fence, signaled, alloc);
643    if (result != VK_SUCCESS)
644       goto out_object_base_fini;
645 
646    result = vn_fence_feedback_init(dev, fence, signaled, alloc);
647    if (result != VK_SUCCESS)
648       goto out_payloads_fini;
649 
650    *pFence = vn_fence_to_handle(fence);
651    vn_async_vkCreateFence(dev->instance, device, pCreateInfo, NULL, pFence);
652 
653    return VK_SUCCESS;
654 
655 out_payloads_fini:
656    vn_sync_payload_release(dev, &fence->permanent);
657    vn_sync_payload_release(dev, &fence->temporary);
658 
659 out_object_base_fini:
660    vn_object_base_fini(&fence->base);
661    vk_free(alloc, fence);
662    return vn_error(dev->instance, result);
663 }
664 
665 void
666 vn_DestroyFence(VkDevice device,
667                 VkFence _fence,
668                 const VkAllocationCallbacks *pAllocator)
669 {
670    VN_TRACE_FUNC();
671    struct vn_device *dev = vn_device_from_handle(device);
672    struct vn_fence *fence = vn_fence_from_handle(_fence);
673    const VkAllocationCallbacks *alloc =
674       pAllocator ? pAllocator : &dev->base.base.alloc;
675 
676    if (!fence)
677       return;
678 
679    vn_async_vkDestroyFence(dev->instance, device, _fence, NULL);
680 
681    vn_fence_feedback_fini(dev, fence, alloc);
682 
683    vn_sync_payload_release(dev, &fence->permanent);
684    vn_sync_payload_release(dev, &fence->temporary);
685 
686    vn_object_base_fini(&fence->base);
687    vk_free(alloc, fence);
688 }
689 
690 VkResult
691 vn_ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences)
692 {
693    VN_TRACE_FUNC();
694    struct vn_device *dev = vn_device_from_handle(device);
695 
696    /* TODO if the fence is shared-by-ref, this needs to be synchronous */
697    if (false)
698       vn_call_vkResetFences(dev->instance, device, fenceCount, pFences);
699    else
700       vn_async_vkResetFences(dev->instance, device, fenceCount, pFences);
701 
702    for (uint32_t i = 0; i < fenceCount; i++) {
703       struct vn_fence *fence = vn_fence_from_handle(pFences[i]);
704       struct vn_sync_payload *perm = &fence->permanent;
705 
706       vn_sync_payload_release(dev, &fence->temporary);
707 
708       assert(perm->type == VN_SYNC_TYPE_DEVICE_ONLY);
709       fence->payload = perm;
710 
711       if (fence->feedback.slot)
712          vn_feedback_reset_status(fence->feedback.slot);
713    }
714 
715    return VK_SUCCESS;
716 }
717 
718 VkResult
719 vn_GetFenceStatus(VkDevice device, VkFence _fence)
720 {
721    struct vn_device *dev = vn_device_from_handle(device);
722    struct vn_fence *fence = vn_fence_from_handle(_fence);
723    struct vn_sync_payload *payload = fence->payload;
724 
725    VkResult result;
726    switch (payload->type) {
727    case VN_SYNC_TYPE_DEVICE_ONLY:
728       if (fence->feedback.slot) {
729          result = vn_feedback_get_status(fence->feedback.slot);
730          if (result == VK_SUCCESS) {
731             /* When fence feedback slot gets signaled, the real fence
732              * signal operation follows after but the signaling isr can be
733              * deferred or preempted. To avoid theoretical racing, we let
734              * the renderer wait for the fence. This also helps resolve
735              * synchronization validation errors, because the layer no
736              * longer sees any fence status checks and falsely believes the
737              * caller does not sync.
738              */
739             vn_async_vkWaitForFences(dev->instance, device, 1, &_fence,
740                                      VK_TRUE, UINT64_MAX);
741          }
742       } else {
743          result = vn_call_vkGetFenceStatus(dev->instance, device, _fence);
744       }
745       break;
746    case VN_SYNC_TYPE_WSI_SIGNALED:
747       result = VK_SUCCESS;
748       break;
749    default:
750       unreachable("unexpected fence payload type");
751       break;
752    }
753 
754    return vn_result(dev->instance, result);
755 }
756 
757 static VkResult
758 vn_find_first_signaled_fence(VkDevice device,
759                              const VkFence *fences,
760                              uint32_t count)
761 {
762    for (uint32_t i = 0; i < count; i++) {
763       VkResult result = vn_GetFenceStatus(device, fences[i]);
764       if (result == VK_SUCCESS || result < 0)
765          return result;
766    }
767    return VK_NOT_READY;
768 }
769 
770 static VkResult
771 vn_remove_signaled_fences(VkDevice device, VkFence *fences, uint32_t *count)
772 {
773    uint32_t cur = 0;
774    for (uint32_t i = 0; i < *count; i++) {
775       VkResult result = vn_GetFenceStatus(device, fences[i]);
776       if (result != VK_SUCCESS) {
777          if (result < 0)
778             return result;
779          fences[cur++] = fences[i];
780       }
781    }
782 
783    *count = cur;
784    return cur ? VK_NOT_READY : VK_SUCCESS;
785 }
786 
787 static VkResult
788 vn_update_sync_result(VkResult result, int64_t abs_timeout, uint32_t *iter)
789 {
790    switch (result) {
791    case VK_NOT_READY:
792       if (abs_timeout != OS_TIMEOUT_INFINITE &&
793           os_time_get_nano() >= abs_timeout)
794          result = VK_TIMEOUT;
795       else
796          vn_relax(iter, "client");
797       break;
798    default:
799       assert(result == VK_SUCCESS || result < 0);
800       break;
801    }
802 
803    return result;
804 }
805 
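/* vn_WaitForFences and vn_WaitSemaphores wait on the driver side: they
 * repeatedly query the remaining objects and feed VK_NOT_READY through
 * vn_update_sync_result, which either returns VK_TIMEOUT once abs_timeout
 * has passed or backs off via vn_relax before the next poll.
 */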
806 VkResult
807 vn_WaitForFences(VkDevice device,
808                  uint32_t fenceCount,
809                  const VkFence *pFences,
810                  VkBool32 waitAll,
811                  uint64_t timeout)
812 {
813    VN_TRACE_FUNC();
814    struct vn_device *dev = vn_device_from_handle(device);
815    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
816 
817    const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
818    VkResult result = VK_NOT_READY;
819    uint32_t iter = 0;
820    if (fenceCount > 1 && waitAll) {
821       VkFence local_fences[8];
822       VkFence *fences = local_fences;
823       if (fenceCount > ARRAY_SIZE(local_fences)) {
824          fences =
825             vk_alloc(alloc, sizeof(*fences) * fenceCount, VN_DEFAULT_ALIGN,
826                      VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
827          if (!fences)
828             return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
829       }
830       memcpy(fences, pFences, sizeof(*fences) * fenceCount);
831 
832       while (result == VK_NOT_READY) {
833          result = vn_remove_signaled_fences(device, fences, &fenceCount);
834          result = vn_update_sync_result(result, abs_timeout, &iter);
835       }
836 
837       if (fences != local_fences)
838          vk_free(alloc, fences);
839    } else {
840       while (result == VK_NOT_READY) {
841          result = vn_find_first_signaled_fence(device, pFences, fenceCount);
842          result = vn_update_sync_result(result, abs_timeout, &iter);
843       }
844    }
845 
846    return vn_result(dev->instance, result);
847 }
848 
849 static VkResult
850 vn_create_sync_file(struct vn_device *dev, int *out_fd)
851 {
852    struct vn_renderer_sync *sync;
853    VkResult result = vn_renderer_sync_create(dev->renderer, 0,
854                                              VN_RENDERER_SYNC_BINARY, &sync);
855    if (result != VK_SUCCESS)
856       return vn_error(dev->instance, result);
857 
858    const struct vn_renderer_submit submit = {
859       .batches =
860          &(const struct vn_renderer_submit_batch){
861             .syncs = &sync,
862             .sync_values = &(const uint64_t){ 1 },
863             .sync_count = 1,
864          },
865       .batch_count = 1,
866    };
867    result = vn_renderer_submit(dev->renderer, &submit);
868    if (result != VK_SUCCESS) {
869       vn_renderer_sync_destroy(dev->renderer, sync);
870       return vn_error(dev->instance, result);
871    }
872 
873    *out_fd = vn_renderer_sync_export_syncobj(dev->renderer, sync, true);
874    vn_renderer_sync_destroy(dev->renderer, sync);
875 
876    return *out_fd >= 0 ? VK_SUCCESS : VK_ERROR_TOO_MANY_OBJECTS;
877 }
878 
879 VkResult
880 vn_ImportFenceFdKHR(VkDevice device,
881                     const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
882 {
883    VN_TRACE_FUNC();
884    struct vn_device *dev = vn_device_from_handle(device);
885    struct vn_fence *fence = vn_fence_from_handle(pImportFenceFdInfo->fence);
886    ASSERTED const bool sync_file = pImportFenceFdInfo->handleType ==
887                                    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
888    const int fd = pImportFenceFdInfo->fd;
889 
890    /* TODO update fence->is_external after we support opaque fd import */
891    assert(dev->instance->experimental.globalFencing);
892    assert(sync_file);
893    if (fd >= 0) {
894       if (sync_wait(fd, -1))
895          return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
896 
897       close(fd);
898    }
899 
900    /* abuse VN_SYNC_TYPE_WSI_SIGNALED */
901    vn_fence_signal_wsi(dev, fence);
902 
903    return VK_SUCCESS;
904 }
905 
906 VkResult
907 vn_GetFenceFdKHR(VkDevice device,
908                  const VkFenceGetFdInfoKHR *pGetFdInfo,
909                  int *pFd)
910 {
911    VN_TRACE_FUNC();
912    struct vn_device *dev = vn_device_from_handle(device);
913    struct vn_fence *fence = vn_fence_from_handle(pGetFdInfo->fence);
914    const bool sync_file =
915       pGetFdInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
916    struct vn_sync_payload *payload = fence->payload;
917 
918    assert(dev->instance->experimental.globalFencing);
919    assert(sync_file);
920    int fd = -1;
921    if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {
922       VkResult result = vn_create_sync_file(dev, &fd);
923       if (result != VK_SUCCESS)
924          return vn_error(dev->instance, result);
925    }
926 
927    if (sync_file) {
928       vn_sync_payload_release(dev, &fence->temporary);
929       fence->payload = &fence->permanent;
930 
931       /* XXX implies reset operation on the host fence */
932    }
933 
934    *pFd = fd;
935    return VK_SUCCESS;
936 }
937 
938 /* semaphore commands */
939 
940 static VkResult
941 vn_semaphore_init_payloads(struct vn_device *dev,
942                            struct vn_semaphore *sem,
943                            uint64_t initial_val,
944                            const VkAllocationCallbacks *alloc)
945 {
946    sem->permanent.type = VN_SYNC_TYPE_DEVICE_ONLY;
947    sem->temporary.type = VN_SYNC_TYPE_INVALID;
948    sem->payload = &sem->permanent;
949 
950    return VK_SUCCESS;
951 }
952 
953 static void
954 vn_semaphore_reset_wsi(struct vn_device *dev, struct vn_semaphore *sem)
955 {
956    struct vn_sync_payload *perm = &sem->permanent;
957 
958    vn_sync_payload_release(dev, &sem->temporary);
959 
960    sem->payload = perm;
961 }
962 
963 void
964 vn_semaphore_signal_wsi(struct vn_device *dev, struct vn_semaphore *sem)
965 {
966    struct vn_sync_payload *temp = &sem->temporary;
967 
968    vn_sync_payload_release(dev, temp);
969    temp->type = VN_SYNC_TYPE_WSI_SIGNALED;
970    sem->payload = temp;
971 }
972 
973 VkResult
974 vn_CreateSemaphore(VkDevice device,
975                    const VkSemaphoreCreateInfo *pCreateInfo,
976                    const VkAllocationCallbacks *pAllocator,
977                    VkSemaphore *pSemaphore)
978 {
979    VN_TRACE_FUNC();
980    struct vn_device *dev = vn_device_from_handle(device);
981    const VkAllocationCallbacks *alloc =
982       pAllocator ? pAllocator : &dev->base.base.alloc;
983 
984    struct vn_semaphore *sem = vk_zalloc(alloc, sizeof(*sem), VN_DEFAULT_ALIGN,
985                                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
986    if (!sem)
987       return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
988 
989    vn_object_base_init(&sem->base, VK_OBJECT_TYPE_SEMAPHORE, &dev->base);
990 
991    const VkSemaphoreTypeCreateInfo *type_info =
992       vk_find_struct_const(pCreateInfo->pNext, SEMAPHORE_TYPE_CREATE_INFO);
993    uint64_t initial_val = 0;
994    if (type_info && type_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE) {
995       sem->type = VK_SEMAPHORE_TYPE_TIMELINE;
996       initial_val = type_info->initialValue;
997    } else {
998       sem->type = VK_SEMAPHORE_TYPE_BINARY;
999    }
1000 
1001    VkResult result = vn_semaphore_init_payloads(dev, sem, initial_val, alloc);
1002    if (result != VK_SUCCESS) {
1003       vn_object_base_fini(&sem->base);
1004       vk_free(alloc, sem);
1005       return vn_error(dev->instance, result);
1006    }
1007 
1008    VkSemaphore sem_handle = vn_semaphore_to_handle(sem);
1009    vn_async_vkCreateSemaphore(dev->instance, device, pCreateInfo, NULL,
1010                               &sem_handle);
1011 
1012    *pSemaphore = sem_handle;
1013 
1014    return VK_SUCCESS;
1015 }
1016 
1017 void
1018 vn_DestroySemaphore(VkDevice device,
1019                     VkSemaphore semaphore,
1020                     const VkAllocationCallbacks *pAllocator)
1021 {
1022    VN_TRACE_FUNC();
1023    struct vn_device *dev = vn_device_from_handle(device);
1024    struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
1025    const VkAllocationCallbacks *alloc =
1026       pAllocator ? pAllocator : &dev->base.base.alloc;
1027 
1028    if (!sem)
1029       return;
1030 
1031    vn_async_vkDestroySemaphore(dev->instance, device, semaphore, NULL);
1032 
1033    vn_sync_payload_release(dev, &sem->permanent);
1034    vn_sync_payload_release(dev, &sem->temporary);
1035 
1036    vn_object_base_fini(&sem->base);
1037    vk_free(alloc, sem);
1038 }
1039 
1040 VkResult
1041 vn_GetSemaphoreCounterValue(VkDevice device,
1042                             VkSemaphore semaphore,
1043                             uint64_t *pValue)
1044 {
1045    VN_TRACE_FUNC();
1046    struct vn_device *dev = vn_device_from_handle(device);
1047    struct vn_semaphore *sem = vn_semaphore_from_handle(semaphore);
1048    ASSERTED struct vn_sync_payload *payload = sem->payload;
1049 
1050    assert(payload->type == VN_SYNC_TYPE_DEVICE_ONLY);
1051    return vn_call_vkGetSemaphoreCounterValue(dev->instance, device, semaphore,
1052                                              pValue);
1053 }
1054 
1055 VkResult
1056 vn_SignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo)
1057 {
1058    VN_TRACE_FUNC();
1059    struct vn_device *dev = vn_device_from_handle(device);
1060 
1061    /* TODO if the semaphore is shared-by-ref, this needs to be synchronous */
1062    if (false)
1063       vn_call_vkSignalSemaphore(dev->instance, device, pSignalInfo);
1064    else
1065       vn_async_vkSignalSemaphore(dev->instance, device, pSignalInfo);
1066 
1067    return VK_SUCCESS;
1068 }
1069 
1070 static VkResult
1071 vn_find_first_signaled_semaphore(VkDevice device,
1072                                  const VkSemaphore *semaphores,
1073                                  const uint64_t *values,
1074                                  uint32_t count)
1075 {
1076    for (uint32_t i = 0; i < count; i++) {
1077       uint64_t val = 0;
1078       VkResult result =
1079          vn_GetSemaphoreCounterValue(device, semaphores[i], &val);
1080       if (result != VK_SUCCESS || val >= values[i])
1081          return result;
1082    }
1083    return VK_NOT_READY;
1084 }
1085 
1086 static VkResult
1087 vn_remove_signaled_semaphores(VkDevice device,
1088                               VkSemaphore *semaphores,
1089                               uint64_t *values,
1090                               uint32_t *count)
1091 {
1092    uint32_t cur = 0;
1093    for (uint32_t i = 0; i < *count; i++) {
1094       uint64_t val = 0;
1095       VkResult result =
1096          vn_GetSemaphoreCounterValue(device, semaphores[i], &val);
1097       if (result != VK_SUCCESS)
1098          return result;
1099       if (val < values[i])
1100          semaphores[cur++] = semaphores[i];
1101    }
1102 
1103    *count = cur;
1104    return cur ? VK_NOT_READY : VK_SUCCESS;
1105 }
1106 
1107 VkResult
1108 vn_WaitSemaphores(VkDevice device,
1109                   const VkSemaphoreWaitInfo *pWaitInfo,
1110                   uint64_t timeout)
1111 {
1112    VN_TRACE_FUNC();
1113    struct vn_device *dev = vn_device_from_handle(device);
1114    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
1115 
1116    const int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
1117    VkResult result = VK_NOT_READY;
1118    uint32_t iter = 0;
1119    if (pWaitInfo->semaphoreCount > 1 &&
1120        !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT)) {
1121       uint32_t semaphore_count = pWaitInfo->semaphoreCount;
1122       VkSemaphore local_semaphores[8];
1123       uint64_t local_values[8];
1124       VkSemaphore *semaphores = local_semaphores;
1125       uint64_t *values = local_values;
1126       if (semaphore_count > ARRAY_SIZE(local_semaphores)) {
1127          semaphores = vk_alloc(
1128             alloc, (sizeof(*semaphores) + sizeof(*values)) * semaphore_count,
1129             VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1130          if (!semaphores)
1131             return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1132 
1133          values = (uint64_t *)&semaphores[semaphore_count];
1134       }
1135       memcpy(semaphores, pWaitInfo->pSemaphores,
1136              sizeof(*semaphores) * semaphore_count);
1137       memcpy(values, pWaitInfo->pValues, sizeof(*values) * semaphore_count);
1138 
1139       while (result == VK_NOT_READY) {
1140          result = vn_remove_signaled_semaphores(device, semaphores, values,
1141                                                 &semaphore_count);
1142          result = vn_update_sync_result(result, abs_timeout, &iter);
1143       }
1144 
1145       if (semaphores != local_semaphores)
1146          vk_free(alloc, semaphores);
1147    } else {
1148       while (result == VK_NOT_READY) {
1149          result = vn_find_first_signaled_semaphore(
1150             device, pWaitInfo->pSemaphores, pWaitInfo->pValues,
1151             pWaitInfo->semaphoreCount);
1152          result = vn_update_sync_result(result, abs_timeout, &iter);
1153       }
1154    }
1155 
1156    return vn_result(dev->instance, result);
1157 }
1158 
1159 VkResult
1160 vn_ImportSemaphoreFdKHR(
1161    VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
1162 {
1163    VN_TRACE_FUNC();
1164    struct vn_device *dev = vn_device_from_handle(device);
1165    struct vn_semaphore *sem =
1166       vn_semaphore_from_handle(pImportSemaphoreFdInfo->semaphore);
1167    ASSERTED const bool sync_file =
1168       pImportSemaphoreFdInfo->handleType ==
1169       VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1170    const int fd = pImportSemaphoreFdInfo->fd;
1171 
1172    assert(dev->instance->experimental.globalFencing);
1173    assert(sync_file);
1174    if (fd >= 0) {
1175       if (sync_wait(fd, -1))
1176          return vn_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
1177 
1178       close(fd);
1179    }
1180 
1181    /* abuse VN_SYNC_TYPE_WSI_SIGNALED */
1182    vn_semaphore_signal_wsi(dev, sem);
1183 
1184    return VK_SUCCESS;
1185 }
1186 
1187 VkResult
1188 vn_GetSemaphoreFdKHR(VkDevice device,
1189                      const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
1190                      int *pFd)
1191 {
1192    VN_TRACE_FUNC();
1193    struct vn_device *dev = vn_device_from_handle(device);
1194    struct vn_semaphore *sem = vn_semaphore_from_handle(pGetFdInfo->semaphore);
1195    const bool sync_file =
1196       pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1197    struct vn_sync_payload *payload = sem->payload;
1198 
1199    assert(dev->instance->experimental.globalFencing);
1200    assert(sync_file);
1201    int fd = -1;
1202    if (payload->type == VN_SYNC_TYPE_DEVICE_ONLY) {
1203       VkResult result = vn_create_sync_file(dev, &fd);
1204       if (result != VK_SUCCESS)
1205          return vn_error(dev->instance, result);
1206    }
1207 
1208    if (sync_file) {
1209       vn_sync_payload_release(dev, &sem->temporary);
1210       sem->payload = &sem->permanent;
1211 
1212       /* XXX implies wait operation on the host semaphore */
1213    }
1214 
1215    *pFd = fd;
1216    return VK_SUCCESS;
1217 }
1218 
1219 /* event commands */
1220 
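/* Event feedback mirrors the event status in a feedback slot so that
 * vn_GetEventStatus, vn_SetEvent, and vn_ResetEvent can read or update the
 * status locally while the corresponding vkSetEvent/vkResetEvent calls are
 * issued to the renderer asynchronously.
 */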
1221 static VkResult
1222 vn_event_feedback_init(struct vn_device *dev, struct vn_event *ev)
1223 {
1224    struct vn_feedback_slot *slot;
1225 
1226    if (VN_PERF(NO_EVENT_FEEDBACK))
1227       return VK_SUCCESS;
1228 
1229    slot = vn_feedback_pool_alloc(&dev->feedback_pool, VN_FEEDBACK_TYPE_EVENT);
1230    if (!slot)
1231       return VK_ERROR_OUT_OF_HOST_MEMORY;
1232 
1233    /* newly created event object is in the unsignaled state */
1234    vn_feedback_set_status(slot, VK_EVENT_RESET);
1235 
1236    ev->feedback_slot = slot;
1237 
1238    return VK_SUCCESS;
1239 }
1240 
1241 static inline void
1242 vn_event_feedback_fini(struct vn_device *dev, struct vn_event *ev)
1243 {
1244    if (ev->feedback_slot)
1245       vn_feedback_pool_free(&dev->feedback_pool, ev->feedback_slot);
1246 }
1247 
1248 VkResult
1249 vn_CreateEvent(VkDevice device,
1250                const VkEventCreateInfo *pCreateInfo,
1251                const VkAllocationCallbacks *pAllocator,
1252                VkEvent *pEvent)
1253 {
1254    VN_TRACE_FUNC();
1255    struct vn_device *dev = vn_device_from_handle(device);
1256    const VkAllocationCallbacks *alloc =
1257       pAllocator ? pAllocator : &dev->base.base.alloc;
1258 
1259    struct vn_event *ev = vk_zalloc(alloc, sizeof(*ev), VN_DEFAULT_ALIGN,
1260                                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1261    if (!ev)
1262       return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1263 
1264    vn_object_base_init(&ev->base, VK_OBJECT_TYPE_EVENT, &dev->base);
1265 
1266    /* feedback is only needed to speed up host operations */
1267    if (!(pCreateInfo->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT)) {
1268       VkResult result = vn_event_feedback_init(dev, ev);
1269       if (result != VK_SUCCESS)
1270          return vn_error(dev->instance, result);
1271    }
1272 
1273    VkEvent ev_handle = vn_event_to_handle(ev);
1274    vn_async_vkCreateEvent(dev->instance, device, pCreateInfo, NULL,
1275                           &ev_handle);
1276 
1277    *pEvent = ev_handle;
1278 
1279    return VK_SUCCESS;
1280 }
1281 
1282 void
1283 vn_DestroyEvent(VkDevice device,
1284                 VkEvent event,
1285                 const VkAllocationCallbacks *pAllocator)
1286 {
1287    VN_TRACE_FUNC();
1288    struct vn_device *dev = vn_device_from_handle(device);
1289    struct vn_event *ev = vn_event_from_handle(event);
1290    const VkAllocationCallbacks *alloc =
1291       pAllocator ? pAllocator : &dev->base.base.alloc;
1292 
1293    if (!ev)
1294       return;
1295 
1296    vn_async_vkDestroyEvent(dev->instance, device, event, NULL);
1297 
1298    vn_event_feedback_fini(dev, ev);
1299 
1300    vn_object_base_fini(&ev->base);
1301    vk_free(alloc, ev);
1302 }
1303 
1304 VkResult
1305 vn_GetEventStatus(VkDevice device, VkEvent event)
1306 {
1307    VN_TRACE_FUNC();
1308    struct vn_device *dev = vn_device_from_handle(device);
1309    struct vn_event *ev = vn_event_from_handle(event);
1310    VkResult result;
1311 
1312    if (ev->feedback_slot)
1313       result = vn_feedback_get_status(ev->feedback_slot);
1314    else
1315       result = vn_call_vkGetEventStatus(dev->instance, device, event);
1316 
1317    return vn_result(dev->instance, result);
1318 }
1319 
1320 VkResult
1321 vn_SetEvent(VkDevice device, VkEvent event)
1322 {
1323    VN_TRACE_FUNC();
1324    struct vn_device *dev = vn_device_from_handle(device);
1325    struct vn_event *ev = vn_event_from_handle(event);
1326 
1327    if (ev->feedback_slot) {
1328       vn_feedback_set_status(ev->feedback_slot, VK_EVENT_SET);
1329       vn_async_vkSetEvent(dev->instance, device, event);
1330    } else {
1331       VkResult result = vn_call_vkSetEvent(dev->instance, device, event);
1332       if (result != VK_SUCCESS)
1333          return vn_error(dev->instance, result);
1334    }
1335 
1336    return VK_SUCCESS;
1337 }
1338 
1339 VkResult
1340 vn_ResetEvent(VkDevice device, VkEvent event)
1341 {
1342    VN_TRACE_FUNC();
1343    struct vn_device *dev = vn_device_from_handle(device);
1344    struct vn_event *ev = vn_event_from_handle(event);
1345 
1346    if (ev->feedback_slot) {
1347       vn_feedback_reset_status(ev->feedback_slot);
1348       vn_async_vkResetEvent(dev->instance, device, event);
1349    } else {
1350       VkResult result = vn_call_vkResetEvent(dev->instance, device, event);
1351       if (result != VK_SUCCESS)
1352          return vn_error(dev->instance, result);
1353    }
1354 
1355    return VK_SUCCESS;
1356 }
1357