/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef VK_SYNC_H
#define VK_SYNC_H

#include <stdbool.h>
#include <vulkan/vulkan_core.h>

#include "util/macros.h"

#ifdef __cplusplus
extern "C" {
#endif

struct vk_device;
struct vk_sync;

enum vk_sync_features {
   /** Set if a sync type supports the binary mode of operation
    *
    * In binary mode, a vk_sync has two states: signaled and unsignaled.  If
    * it supports CPU_RESET, it can be changed from signaled to unsignaled on
    * the CPU via vk_sync_reset().  If it supports CPU_SIGNAL, it can be
    * changed from unsignaled to signaled on the CPU via vk_sync_signal().
    *
    * Binary vk_sync types may also support WAIT_PENDING, in which case they
    * have a third, hidden pending state.  Once such a vk_sync has been
    * submitted to the kernel driver for signaling, it is in the pending state
    * and remains there until the work is complete, at which point it enters
    * the signaled state.  This pending state is visible across processes for
    * shared vk_sync types.  This is used by the threaded submit mode to
    * ensure that everything gets submitted to the kernel driver in-order.
    *
    * A vk_sync operates in binary mode if VK_SYNC_IS_TIMELINE is not set
    * in vk_sync::flags.
    */
   VK_SYNC_FEATURE_BINARY              = (1 << 0),

   /** Set if a sync type supports the timeline mode of operation
    *
    * In timeline mode, a vk_sync has a monotonically increasing 64-bit value
    * which represents the most recently signaled time point.  Waits are
    * relative to time points.  Instead of waiting for the vk_sync to enter a
    * signaled state, you wait for its 64-bit value to be at least some wait
    * value.
    *
    * Timeline vk_sync types can also support WAIT_PENDING.  In this case, the
    * wait is not for a pending state, as such, but rather for someone to have
    * submitted a kernel request which will signal a time point with at least
    * that value.  Logically, you can think of this as having two timelines,
    * the real timeline and a pending timeline which runs slightly ahead of
    * the real one.  As with binary vk_sync types, this is used by threaded
    * submit to re-order things so that the kernel requests happen in a valid
    * linear order.
    *
    * A vk_sync operates in timeline mode if VK_SYNC_IS_TIMELINE is set in
    * vk_sync::flags.
    */
   VK_SYNC_FEATURE_TIMELINE            = (1 << 1),

   /** Set if this sync supports GPU waits */
   VK_SYNC_FEATURE_GPU_WAIT            = (1 << 2),

   /** Set if a sync type supports multiple GPU waits on one signal state
    *
    * The Vulkan spec for VkSemaphore requires GPU wait and signal operations
    * to have a one-to-one relationship.  This is formally described by saying
    * that the VkSemaphore gets implicitly reset on wait.  However, it is
    * often useful to have a well-defined multi-wait.  If a binary vk_sync
    * type supports multi-wait, then any number of kernel requests can be
    * submitted which wait on one signal operation.  This also implies that
    * you can signal twice back-to-back (there are 0 waits on the first
    * signal).
    *
    * This feature only applies to binary vk_sync objects.
    */
   VK_SYNC_FEATURE_GPU_MULTI_WAIT      = (1 << 3),

   /** Set if a sync type supports vk_sync_wait() and vk_sync_wait_many() */
   VK_SYNC_FEATURE_CPU_WAIT            = (1 << 4),

   /** Set if a sync type supports vk_sync_reset()
    *
    * This feature only applies to binary vk_sync objects.
    */
   VK_SYNC_FEATURE_CPU_RESET           = (1 << 5),

   /** Set if a sync type supports vk_sync_signal() */
   VK_SYNC_FEATURE_CPU_SIGNAL          = (1 << 6),

   /** Set if sync_type::wait_many supports the VK_SYNC_WAIT_ANY bit
    *
    * vk_sync_wait_many() will support the bit regardless.  If the sync type
    * doesn't support it natively, it will be emulated.
    */
   VK_SYNC_FEATURE_WAIT_ANY            = (1 << 7),

   /** Set if a sync type supports the VK_SYNC_WAIT_PENDING bit
    *
    * See VK_SYNC_FEATURE_BINARY and VK_SYNC_FEATURE_TIMELINE for descriptions
    * of what this does in each case.
    */
   VK_SYNC_FEATURE_WAIT_PENDING        = (1 << 8),

   /** Set if a sync type natively supports wait-before-signal
    *
    * If this is set then the underlying OS primitive supports submitting
    * kernel requests which wait on the vk_sync before submitting a kernel
    * request which would cause that wait to unblock.
    */
   VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL  = (1 << 9),
};
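
/* A hedged, illustrative sketch (not part of this header): before choosing a
 * sync type for a given use, consumers are expected to check its feature
 * bits.  A check for a type usable as a CPU-waitable timeline might look
 * roughly like the following; the helper name and the exact set of required
 * bits are assumptions of this sketch.
 *
 *    static bool
 *    can_back_cpu_waitable_timeline(const struct vk_sync_type *type)
 *    {
 *       const enum vk_sync_features needed = VK_SYNC_FEATURE_TIMELINE |
 *                                            VK_SYNC_FEATURE_GPU_WAIT |
 *                                            VK_SYNC_FEATURE_CPU_WAIT;
 *       return (type->features & needed) == needed;
 *    }
 */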

struct vk_sync_wait;

enum vk_sync_wait_flags {
   /** Placeholder for 0 to make vk_sync_wait() calls more clear */
   VK_SYNC_WAIT_COMPLETE   = 0,

   /** If set, only wait for the vk_sync operation to be pending
    *
    * See VK_SYNC_FEATURE_BINARY and VK_SYNC_FEATURE_TIMELINE for descriptions
    * of what this does in each case.
    */
   VK_SYNC_WAIT_PENDING    = (1 << 0),

   /** If set, wait for any of the vk_sync operations to complete
    *
    * This is as opposed to waiting for all of them.  There is no guarantee
    * that vk_sync_wait_many() will return immediately after the first
    * operation completes, but it will make a best effort to return as soon
    * as possible.
    */
   VK_SYNC_WAIT_ANY        = (1 << 1),
};
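
/* Illustrative only: these flags are passed straight through to the wait
 * entry points declared later in this header.  A caller that only needs the
 * signaling work to have been submitted (not finished) might write something
 * like the following; the variable names are assumptions of this sketch.
 *
 *    result = vk_sync_wait(device, sync, wait_value,
 *                          VK_SYNC_WAIT_PENDING, abs_timeout_ns);
 *
 * whereas passing VK_SYNC_WAIT_COMPLETE (i.e. 0) waits for the actual
 * signal.
 */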

struct vk_sync_type {
   /** Size of this sync type */
   size_t size;

   /** Features supported by this sync type */
   enum vk_sync_features features;

   /** Initialize a vk_sync
    *
    * The base vk_sync will already be initialized and the sync type set
    * before this function is called.  If any OS primitives need to be
    * allocated, that should be done here.
    */
   VkResult (*init)(struct vk_device *device,
                    struct vk_sync *sync,
                    uint64_t initial_value);

   /** Finish a vk_sync
    *
    * This should free any internal data stored in this vk_sync.
    */
   void (*finish)(struct vk_device *device,
                  struct vk_sync *sync);

   /** Signal a vk_sync
    *
    * For non-timeline sync types, value == 0.
    */
   VkResult (*signal)(struct vk_device *device,
                      struct vk_sync *sync,
                      uint64_t value);

   /** Get the timeline value for a vk_sync */
   VkResult (*get_value)(struct vk_device *device,
                         struct vk_sync *sync,
                         uint64_t *value);

   /** Reset a non-timeline vk_sync */
   VkResult (*reset)(struct vk_device *device,
                     struct vk_sync *sync);

   /** Moves the guts of one binary vk_sync to another
    *
    * This moves the current binary vk_sync event from src to dst and resets
    * src.  If dst contained an event, it is discarded.
    *
    * This is required for all binary vk_sync types that can be used for a
    * semaphore wait in conjunction with real timeline semaphores.
    */
   VkResult (*move)(struct vk_device *device,
                    struct vk_sync *dst,
                    struct vk_sync *src);

   /** Wait on a vk_sync
    *
    * For a timeline vk_sync, wait_value is the timeline value to wait for.
    * This function should not return VK_SUCCESS until get_value on that
    * vk_sync would return a value >= wait_value.  A wait_value of zero is
    * allowed in which case the wait is a no-op.  For a non-timeline vk_sync,
    * wait_value should be ignored.
    *
    * This function is optional.  If the sync type needs to support CPU waits,
    * at least one of wait or wait_many must be provided.  If one is missing,
    * it will be implemented in terms of the other.
    */
   VkResult (*wait)(struct vk_device *device,
                    struct vk_sync *sync,
                    uint64_t wait_value,
                    enum vk_sync_wait_flags wait_flags,
                    uint64_t abs_timeout_ns);

   /** Wait for multiple vk_sync events
    *
    * If VK_SYNC_WAIT_ANY is set, it will return after at least one of the
    * wait events is complete instead of waiting for all of them.
    *
    * See wait for more details.
    */
   VkResult (*wait_many)(struct vk_device *device,
                         uint32_t wait_count,
                         const struct vk_sync_wait *waits,
                         enum vk_sync_wait_flags wait_flags,
                         uint64_t abs_timeout_ns);

   /** Permanently imports the given FD into this vk_sync
    *
    * This replaces the guts of the given vk_sync with whatever is in the FD.
    * In a sense, this vk_sync now aliases whatever vk_sync the FD was
    * exported from.
    */
   VkResult (*import_opaque_fd)(struct vk_device *device,
                                struct vk_sync *sync,
                                int fd);

   /** Export the guts of this vk_sync to an FD */
   VkResult (*export_opaque_fd)(struct vk_device *device,
                                struct vk_sync *sync,
                                int *fd);

   /** Imports a sync file into this binary vk_sync
    *
    * If this completes successfully, the vk_sync will now signal whenever
    * the sync file signals.
    *
    * If sync_file == -1, the vk_sync should be signaled immediately.  If
    * the vk_sync_type implements signal, sync_file will never be -1.
    */
   VkResult (*import_sync_file)(struct vk_device *device,
                                struct vk_sync *sync,
                                int sync_file);

   /** Exports the current binary vk_sync state as a sync file
    *
    * The resulting sync file will contain the event currently stored in this
    * binary vk_sync.  If the vk_sync is later modified to contain a new
    * event, the sync file is unaffected.
    */
   VkResult (*export_sync_file)(struct vk_device *device,
                                struct vk_sync *sync,
                                int *sync_file);
};
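
/* A hypothetical sketch (names below are made up) of how a driver might
 * declare its sync type with designated initializers; which entry points a
 * real driver fills in depends on the features it advertises.
 *
 *    const struct vk_sync_type drv_syncobj_type = {
 *       .size = sizeof(struct drv_syncobj),
 *       .features = VK_SYNC_FEATURE_BINARY |
 *                   VK_SYNC_FEATURE_GPU_WAIT |
 *                   VK_SYNC_FEATURE_CPU_WAIT |
 *                   VK_SYNC_FEATURE_CPU_RESET |
 *                   VK_SYNC_FEATURE_CPU_SIGNAL,
 *       .init = drv_syncobj_init,
 *       .finish = drv_syncobj_finish,
 *       .signal = drv_syncobj_signal,
 *       .reset = drv_syncobj_reset,
 *       .wait_many = drv_syncobj_wait_many,
 *    };
 */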

enum vk_sync_flags {
   /** Set if the vk_sync is a timeline */
   VK_SYNC_IS_TIMELINE  = (1 << 0),

   /** Set if the vk_sync can have its payload shared */
   VK_SYNC_IS_SHAREABLE = (1 << 1),

   /** Set if the vk_sync has a shared payload */
   VK_SYNC_IS_SHARED    = (1 << 2),
};

struct vk_sync {
   const struct vk_sync_type *type;
   enum vk_sync_flags flags;
};

/* See VkSemaphoreSubmitInfo */
struct vk_sync_wait {
   struct vk_sync *sync;
   VkPipelineStageFlags2 stage_mask;
   uint64_t wait_value;
};

/* See VkSemaphoreSubmitInfo */
struct vk_sync_signal {
   struct vk_sync *sync;
   VkPipelineStageFlags2 stage_mask;
   uint64_t signal_value;
};
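
/* Illustrative only: a submit path might describe a timeline wait and the
 * corresponding signal like this; the surrounding submit structure and the
 * value 42 are assumptions of this sketch.
 *
 *    struct vk_sync_wait wait = {
 *       .sync = timeline_sync,
 *       .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
 *       .wait_value = 42,
 *    };
 *    struct vk_sync_signal signal = {
 *       .sync = timeline_sync,
 *       .stage_mask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT,
 *       .signal_value = 43,
 *    };
 *
 * For binary (non-timeline) syncs, the wait/signal values are not used,
 * mirroring how VkSemaphoreSubmitInfo::value is ignored for binary
 * semaphores.
 */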

VkResult MUST_CHECK vk_sync_init(struct vk_device *device,
                                 struct vk_sync *sync,
                                 const struct vk_sync_type *type,
                                 enum vk_sync_flags flags,
                                 uint64_t initial_value);

void vk_sync_finish(struct vk_device *device,
                    struct vk_sync *sync);

VkResult MUST_CHECK vk_sync_create(struct vk_device *device,
                                   const struct vk_sync_type *type,
                                   enum vk_sync_flags flags,
                                   uint64_t initial_value,
                                   struct vk_sync **sync_out);

void vk_sync_destroy(struct vk_device *device,
                     struct vk_sync *sync);

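/* Illustrative only: creating a standalone timeline sync with an initial
 * value of 0 and destroying it again.  The `type` pointer would come from
 * the driver, and error handling beyond the first check is elided; both are
 * assumptions of this sketch.
 *
 *    struct vk_sync *sync;
 *    VkResult result = vk_sync_create(device, type, VK_SYNC_IS_TIMELINE,
 *                                     0, &sync);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    ...
 *    vk_sync_destroy(device, sync);
 */
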
VkResult MUST_CHECK vk_sync_signal(struct vk_device *device,
                                   struct vk_sync *sync,
                                   uint64_t value);

VkResult MUST_CHECK vk_sync_get_value(struct vk_device *device,
                                      struct vk_sync *sync,
                                      uint64_t *value);

VkResult MUST_CHECK vk_sync_reset(struct vk_device *device,
                                  struct vk_sync *sync);

VkResult MUST_CHECK vk_sync_wait(struct vk_device *device,
                                 struct vk_sync *sync,
                                 uint64_t wait_value,
                                 enum vk_sync_wait_flags wait_flags,
                                 uint64_t abs_timeout_ns);

VkResult MUST_CHECK vk_sync_wait_many(struct vk_device *device,
                                      uint32_t wait_count,
                                      const struct vk_sync_wait *waits,
                                      enum vk_sync_wait_flags wait_flags,
                                      uint64_t abs_timeout_ns);

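/* Illustrative only: a CPU-side wait for whichever of two syncs signals
 * first.  Treating UINT64_MAX as "no timeout" and the zero wait values are
 * assumptions of this sketch.
 *
 *    struct vk_sync_wait waits[2] = {
 *       { .sync = sync_a, .wait_value = 0 },
 *       { .sync = sync_b, .wait_value = 0 },
 *    };
 *    VkResult result = vk_sync_wait_many(device, 2, waits,
 *                                        VK_SYNC_WAIT_ANY, UINT64_MAX);
 */
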
VkResult MUST_CHECK vk_sync_import_opaque_fd(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int fd);

VkResult MUST_CHECK vk_sync_export_opaque_fd(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int *fd);

VkResult MUST_CHECK vk_sync_import_sync_file(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int sync_file);

VkResult MUST_CHECK vk_sync_export_sync_file(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int *sync_file);

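/* Illustrative only: exporting a binary vk_sync's current payload as a sync
 * file FD for another API or process to wait on.  What the caller does with
 * `fd` afterwards (and when it closes it) is an assumption of this sketch.
 *
 *    int fd = -1;
 *    VkResult result = vk_sync_export_sync_file(device, sync, &fd);
 *    if (result == VK_SUCCESS) {
 *       ... hand fd to the consumer ...
 *    }
 */
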
VkResult MUST_CHECK vk_sync_move(struct vk_device *device,
                                 struct vk_sync *dst,
                                 struct vk_sync *src);

#ifdef __cplusplus
}
#endif

#endif /* VK_SYNC_H */