/*
 * Copyright 2020 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VKR_QUEUE_H
#define VKR_QUEUE_H

#include "vkr_common.h"

struct vkr_queue_sync {
   VkFence fence;

   uint32_t flags;
   uint64_t queue_id;
   void *fence_cookie;

   struct list_head head;
};

struct vkr_queue {
   struct vkr_object base;

   struct vkr_context *context;
   struct vkr_device *device;

   VkDeviceQueueCreateFlags flags;
   uint32_t family;
   uint32_t index;
   /* Submitted fences are added to pending_syncs first.  How submitted fences
    * are retired depends on VKR_RENDERER_THREAD_SYNC and
    * VKR_RENDERER_ASYNC_FENCE_CB.
    *
    * When VKR_RENDERER_THREAD_SYNC is not set, the main thread calls
    * vkGetFenceStatus and retires signaled fences in pending_syncs in order.
    *
    * When VKR_RENDERER_THREAD_SYNC is set but VKR_RENDERER_ASYNC_FENCE_CB is
    * not set, the sync thread calls vkWaitForFences and moves signaled fences
    * from pending_syncs to signaled_syncs in order.  The main thread simply
    * retires all fences in signaled_syncs.
    *
    * When VKR_RENDERER_THREAD_SYNC and VKR_RENDERER_ASYNC_FENCE_CB are both
    * set, the sync thread calls vkWaitForFences and retires signaled fences
    * in pending_syncs in order.
    *
    * (An illustrative sketch of the main-thread poll path follows this
    * struct definition.)
    */
   int eventfd;
   thrd_t thread;
   mtx_t mutex;
   cnd_t cond;
   bool join;
   struct list_head pending_syncs;
   struct list_head signaled_syncs;

   struct list_head busy_head;
};
VKR_DEFINE_OBJECT_CAST(queue, VK_OBJECT_TYPE_QUEUE, VkQueue)

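/*
 * Illustrative sketch only, not part of the original header: one way the
 * main thread could drain pending_syncs when VKR_RENDERER_THREAD_SYNC is
 * not set.  The retire callback, the handle-union member, and the uppercase
 * list helpers are assumptions about the surrounding codebase; see
 * vkr_common.h and util/list.h for the real interfaces.
 */
#if 0
static void
vkr_queue_poll_syncs_sketch(struct vkr_queue *queue,
                            void (*retire)(struct vkr_context *ctx,
                                           struct vkr_queue_sync *sync))
{
   struct vkr_queue_sync *sync, *tmp;
   LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->pending_syncs, head) {
      /* fences must be retired in submission order: stop at the first fence
       * that has not signaled yet
       */
      if (vkGetFenceStatus(queue->device->base.handle.device, sync->fence) !=
          VK_SUCCESS)
         break;

      list_del(&sync->head);
      retire(queue->context, sync);
   }
}
#endif
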
struct vkr_fence {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(fence, VK_OBJECT_TYPE_FENCE, VkFence)

struct vkr_semaphore {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(semaphore, VK_OBJECT_TYPE_SEMAPHORE, VkSemaphore)

struct vkr_event {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(event, VK_OBJECT_TYPE_EVENT, VkEvent)
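
/*
 * Illustrative sketch only: VKR_DEFINE_OBJECT_CAST is assumed to generate a
 * typed downcast helper along the lines of vkr_<type>_from_object() that
 * checks obj->type against the given VK_OBJECT_TYPE_* value.  The exact
 * generated name and behavior come from vkr_common.h and may differ.
 */
#if 0
static struct vkr_queue *
example_lookup_queue(struct vkr_object *obj)
{
   /* hypothetical generated helper: yields a struct vkr_queue * when obj is
    * a VK_OBJECT_TYPE_QUEUE object
    */
   return vkr_queue_from_object(obj);
}
#endif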

void
vkr_context_init_queue_dispatch(struct vkr_context *ctx);

void
vkr_context_init_fence_dispatch(struct vkr_context *ctx);

void
vkr_context_init_semaphore_dispatch(struct vkr_context *ctx);

void
vkr_context_init_event_dispatch(struct vkr_context *ctx);

struct vkr_queue_sync *
vkr_device_alloc_queue_sync(struct vkr_device *dev,
                            uint32_t fence_flags,
                            uint64_t queue_id,
                            void *fence_cookie);

void
vkr_device_free_queue_sync(struct vkr_device *dev, struct vkr_queue_sync *sync);
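
/*
 * Illustrative sketch only: a hypothetical submit path pairing
 * vkr_device_alloc_queue_sync() with vkr_device_free_queue_sync().  The
 * handle-union member and the list/threads helpers are assumptions based on
 * vkr_common.h.
 */
#if 0
static VkResult
example_submit_with_fence(struct vkr_device *dev,
                          struct vkr_queue *queue,
                          uint32_t fence_flags,
                          void *fence_cookie)
{
   struct vkr_queue_sync *sync =
      vkr_device_alloc_queue_sync(dev, fence_flags, queue->base.id, fence_cookie);
   if (!sync)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* an empty submit whose only effect is signaling sync->fence */
   VkResult result = vkQueueSubmit(queue->base.handle.queue, 0, NULL, sync->fence);
   if (result != VK_SUCCESS) {
      vkr_device_free_queue_sync(dev, sync);
      return result;
   }

   /* hand the sync to the retirement machinery; with
    * VKR_RENDERER_THREAD_SYNC the lists are protected by queue->mutex
    */
   mtx_lock(&queue->mutex);
   list_addtail(&sync->head, &queue->pending_syncs);
   cnd_signal(&queue->cond);
   mtx_unlock(&queue->mutex);

   return VK_SUCCESS;
}
#endif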

void
vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
                             struct list_head *retired_syncs,
                             bool *queue_empty);
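
/*
 * Illustrative sketch only: draining the syncs handed back by
 * vkr_queue_get_signaled_syncs().  example_retire_cb() is hypothetical; in
 * the real renderer the retirement step is driven by the context.
 */
#if 0
void example_retire_cb(struct vkr_context *ctx, uint64_t queue_id, void *fence_cookie);

static void
example_retire_signaled(struct vkr_context *ctx, struct vkr_queue *queue)
{
   struct list_head retired_syncs;
   bool queue_empty;

   vkr_queue_get_signaled_syncs(queue, &retired_syncs, &queue_empty);

   struct vkr_queue_sync *sync, *tmp;
   LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &retired_syncs, head) {
      /* notify the client, then recycle the sync and its VkFence */
      example_retire_cb(ctx, sync->queue_id, sync->fence_cookie);
      vkr_device_free_queue_sync(queue->device, sync);
   }

   /* queue_empty tells the caller whether more polling is needed */
   (void)queue_empty;
}
#endif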

struct vkr_queue *
vkr_queue_create(struct vkr_context *ctx,
                 struct vkr_device *dev,
                 VkDeviceQueueCreateFlags flags,
                 uint32_t family,
                 uint32_t index,
                 VkQueue handle);

void
vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue);
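
/*
 * Illustrative sketch only: wrapping a VkQueue retrieved with
 * vkGetDeviceQueue() in a vkr_queue.  The handle-union member name is an
 * assumption based on vkr_common.h.
 */
#if 0
static struct vkr_queue *
example_wrap_device_queue(struct vkr_context *ctx,
                          struct vkr_device *dev,
                          uint32_t family,
                          uint32_t index)
{
   VkQueue handle;
   vkGetDeviceQueue(dev->base.handle.device, family, index, &handle);

   /* on failure vkr_queue_create() returns NULL; the wrapper is later torn
    * down with vkr_queue_destroy(ctx, queue)
    */
   return vkr_queue_create(ctx, dev, 0 /* flags */, family, index, handle);
}
#endif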

#endif /* VKR_QUEUE_H */