/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef VK_SYNC_H
#define VK_SYNC_H

#include <stdbool.h>
#include <vulkan/vulkan_core.h>

#include "util/macros.h"

#ifdef __cplusplus
extern "C" {
#endif

struct vk_device;
struct vk_sync;

enum vk_sync_features {
   /** Set if a sync type supports the binary mode of operation
    *
    * In binary mode, a vk_sync has two states: signaled and unsignaled. If
    * it supports CPU_RESET, it can be changed from signaled to unsignaled on
    * the CPU via vk_sync_reset(). If it supports CPU_SIGNAL, it can be
    * changed from unsignaled to signaled on the CPU via vk_sync_signal().
    *
    * Binary vk_sync types may also support WAIT_PENDING, in which case they
    * have a third, hidden pending state. Once such a vk_sync has been
    * submitted to the kernel driver for signaling, it is in the pending state
    * and remains there until the work is complete, at which point it enters
    * the signaled state. This pending state is visible across processes for
    * shared vk_sync types. This is used by the threaded submit mode to
    * ensure that everything gets submitted to the kernel driver in order.
    *
    * A vk_sync operates in binary mode if VK_SYNC_IS_TIMELINE is not set
    * in vk_sync::flags.
    */
   VK_SYNC_FEATURE_BINARY = (1 << 0),

   /** Set if a sync type supports the timeline mode of operation
    *
    * In timeline mode, a vk_sync has a monotonically increasing 64-bit value
    * which represents the most recently signaled time point. Waits are
    * relative to time points. Instead of waiting for the vk_sync to enter a
    * signaled state, you wait for its 64-bit value to be at least some wait
    * value.
    *
    * Timeline vk_sync types can also support WAIT_PENDING. In this case, the
    * wait is not for a pending state, as such, but rather for someone to have
    * submitted a kernel request which will signal a time point with at least
    * that value. Logically, you can think of this as having two timelines,
    * the real timeline and a pending timeline which runs slightly ahead of
    * the real one. As with binary vk_sync types, this is used by threaded
    * submit to re-order things so that the kernel requests happen in a valid
    * linear order.
    *
    * A vk_sync operates in timeline mode if VK_SYNC_IS_TIMELINE is set in
    * vk_sync::flags.
    */
   VK_SYNC_FEATURE_TIMELINE = (1 << 1),

   /** Set if this sync supports GPU waits */
   VK_SYNC_FEATURE_GPU_WAIT = (1 << 2),

   /** Set if a sync type supports multiple GPU waits on one signal state
    *
    * The Vulkan spec for VkSemaphore requires GPU wait and signal operations
    * to have a one-to-one relationship. This is formally described by saying
    * that the VkSemaphore gets implicitly reset on wait. However, it is
    * often useful to have well-defined multi-wait. If a binary vk_sync
    * supports multi-wait, then any number of kernel requests can be submitted
    * which wait on one signal operation. This also implies that you can
    * signal twice back-to-back (there are 0 waits on the first signal).
    *
    * This feature only applies to binary vk_sync objects.
    */
   VK_SYNC_FEATURE_GPU_MULTI_WAIT = (1 << 3),

   /** Set if a sync type supports vk_sync_wait() and vk_sync_wait_many() */
   VK_SYNC_FEATURE_CPU_WAIT = (1 << 4),

   /** Set if a sync type supports vk_sync_reset()
    *
    * This feature only applies to binary vk_sync objects.
    */
   VK_SYNC_FEATURE_CPU_RESET = (1 << 5),

   /** Set if a sync type supports vk_sync_signal() */
   VK_SYNC_FEATURE_CPU_SIGNAL = (1 << 6),

   /** Set if vk_sync_type::wait_many supports the VK_SYNC_WAIT_ANY bit
    *
    * vk_sync_wait_many() will support the bit regardless. If the sync type
    * doesn't support it natively, it will be emulated.
    */
   VK_SYNC_FEATURE_WAIT_ANY = (1 << 7),

   /** Set if a sync type supports the VK_SYNC_WAIT_PENDING bit
    *
    * See VK_SYNC_FEATURE_BINARY and VK_SYNC_FEATURE_TIMELINE for descriptions
    * of what this does in each case.
    */
   VK_SYNC_FEATURE_WAIT_PENDING = (1 << 8),

   /** Set if a sync type natively supports wait-before-signal
    *
    * If this is set, then the underlying OS primitive supports submitting
    * kernel requests which wait on the vk_sync before submitting a kernel
    * request which would cause that wait to unblock.
    */
   VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL = (1 << 9),
};
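
/* Illustrative sketch (not part of the upstream header): whether a vk_sync
 * behaves as a binary or a timeline object is chosen at creation time via
 * VK_SYNC_IS_TIMELINE, and the chosen sync type should advertise the matching
 * feature bit. Assuming a hypothetical driver-provided drv_sync_type:
 *
 *    assert(drv_sync_type.features & VK_SYNC_FEATURE_TIMELINE);
 *    result = vk_sync_create(device, &drv_sync_type, VK_SYNC_IS_TIMELINE,
 *                            initial_value, &sync);
 */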

struct vk_sync_wait;

enum vk_sync_wait_flags {
   /** Placeholder for 0 to make vk_sync_wait() calls more clear */
   VK_SYNC_WAIT_COMPLETE = 0,

   /** If set, only wait for the vk_sync operation to be pending
    *
    * See VK_SYNC_FEATURE_BINARY and VK_SYNC_FEATURE_TIMELINE for descriptions
    * of what this does in each case.
    */
   VK_SYNC_WAIT_PENDING = (1 << 0),

   /** If set, wait for any of the vk_sync operations to complete
    *
    * This is as opposed to waiting for all of them. There is no guarantee
    * that vk_sync_wait_many() will return immediately after the first
    * operation completes, but it will make a best effort to return as soon
    * as possible.
    */
   VK_SYNC_WAIT_ANY = (1 << 1),
};
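
/* Illustrative sketch (not part of the upstream header): the wait flags are
 * passed straight through to vk_sync_wait()/vk_sync_wait_many(), declared
 * later in this header. VK_SYNC_WAIT_COMPLETE is just a readable name for 0.
 * UINT64_MAX is used here as an effectively-infinite absolute timeout.
 *
 *    // Block until time point 8 has actually signaled.
 *    result = vk_sync_wait(device, sync, 8, VK_SYNC_WAIT_COMPLETE, UINT64_MAX);
 *
 *    // Only wait until a kernel request that will signal a time point of at
 *    // least 8 has been submitted.
 *    result = vk_sync_wait(device, sync, 8, VK_SYNC_WAIT_PENDING, UINT64_MAX);
 */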

struct vk_sync_type {
   /** Size of this sync type */
   size_t size;

   /** Features supported by this sync type */
   enum vk_sync_features features;

   /** Initialize a vk_sync
    *
    * The base vk_sync will already be initialized and the sync type set
    * before this function is called. If any OS primitives need to be
    * allocated, that should be done here.
    */
   VkResult (*init)(struct vk_device *device,
                    struct vk_sync *sync,
                    uint64_t initial_value);

   /** Finish a vk_sync
    *
    * This should free any internal data stored in this vk_sync.
    */
   void (*finish)(struct vk_device *device,
                  struct vk_sync *sync);

   /** Signal a vk_sync
    *
    * For non-timeline sync types, value == 0.
    */
   VkResult (*signal)(struct vk_device *device,
                      struct vk_sync *sync,
                      uint64_t value);

   /** Get the timeline value for a vk_sync */
   VkResult (*get_value)(struct vk_device *device,
                         struct vk_sync *sync,
                         uint64_t *value);

   /** Reset a non-timeline vk_sync */
   VkResult (*reset)(struct vk_device *device,
                     struct vk_sync *sync);

   /** Moves the guts of one binary vk_sync to another
    *
    * This moves the current binary vk_sync event from src to dst and resets
    * src. If dst contained an event, it is discarded.
    *
    * This is required for all binary vk_sync types that can be used for a
    * semaphore wait in conjunction with real timeline semaphores.
    */
   VkResult (*move)(struct vk_device *device,
                    struct vk_sync *dst,
                    struct vk_sync *src);

   /** Wait on a vk_sync
    *
    * For a timeline vk_sync, wait_value is the timeline value to wait for.
    * This function should not return VK_SUCCESS until get_value on that
    * vk_sync would return a value >= wait_value. A wait_value of zero is
    * allowed, in which case the wait is a no-op. For a non-timeline vk_sync,
    * wait_value should be ignored.
    *
    * This function is optional. If the sync type needs to support CPU waits,
    * at least one of wait or wait_many must be provided. If one is missing,
    * it will be implemented in terms of the other.
    */
   VkResult (*wait)(struct vk_device *device,
                    struct vk_sync *sync,
                    uint64_t wait_value,
                    enum vk_sync_wait_flags wait_flags,
                    uint64_t abs_timeout_ns);

   /** Wait for multiple vk_sync events
    *
    * If VK_SYNC_WAIT_ANY is set, it will return after at least one of the
    * wait events is complete instead of waiting for all of them.
    *
    * See wait for more details.
    */
   VkResult (*wait_many)(struct vk_device *device,
                         uint32_t wait_count,
                         const struct vk_sync_wait *waits,
                         enum vk_sync_wait_flags wait_flags,
                         uint64_t abs_timeout_ns);

   /** Permanently imports the given FD into this vk_sync
    *
    * This replaces the guts of the given vk_sync with whatever is in the FD.
    * In a sense, this vk_sync now aliases whatever vk_sync the FD was
    * exported from.
    */
   VkResult (*import_opaque_fd)(struct vk_device *device,
                                struct vk_sync *sync,
                                int fd);

   /** Export the guts of this vk_sync to an FD */
   VkResult (*export_opaque_fd)(struct vk_device *device,
                                struct vk_sync *sync,
                                int *fd);

   /** Imports a sync file into this binary vk_sync
    *
    * If this completes successfully, the vk_sync will now signal whenever
    * the sync file signals.
    *
    * If sync_file == -1, the vk_sync should be signaled immediately. If
    * the vk_sync_type implements signal, sync_file will never be -1.
    */
   VkResult (*import_sync_file)(struct vk_device *device,
                                struct vk_sync *sync,
                                int sync_file);

   /** Exports the current binary vk_sync state as a sync file
    *
    * The resulting sync file contains the current event stored in this
    * binary vk_sync. If the vk_sync is later modified to contain a new
    * event, the sync file is unaffected.
    */
   VkResult (*export_sync_file)(struct vk_device *device,
                                struct vk_sync *sync,
                                int *sync_file);

   /** Permanently imports the given handle or name into this vk_sync
    *
    * This replaces the guts of the given vk_sync with whatever is in the
    * object. In a sense, this vk_sync now aliases whatever vk_sync the
    * handle was exported from.
    */
   VkResult (*import_win32_handle)(struct vk_device *device,
                                   struct vk_sync *sync,
                                   void *handle,
                                   const wchar_t *name);

   /** Export the guts of this vk_sync to a handle and/or name */
   VkResult (*export_win32_handle)(struct vk_device *device,
                                   struct vk_sync *sync,
                                   void **handle);

   /** Vulkan puts these as creation params instead of export params */
   VkResult (*set_win32_export_params)(struct vk_device *device,
                                       struct vk_sync *sync,
                                       const void *security_attributes,
                                       uint32_t access,
                                       const wchar_t *name);
};
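
/* Illustrative sketch (not part of the upstream header): a driver typically
 * exposes its sync primitive as a constant vk_sync_type filled with designated
 * initializers. All driver-side names below are hypothetical, and
 * struct drv_timeline_sync is assumed to be a driver struct that embeds
 * struct vk_sync as its base.
 *
 *    const struct vk_sync_type drv_timeline_sync_type = {
 *       .size = sizeof(struct drv_timeline_sync),
 *       .features = VK_SYNC_FEATURE_TIMELINE |
 *                   VK_SYNC_FEATURE_GPU_WAIT |
 *                   VK_SYNC_FEATURE_CPU_WAIT |
 *                   VK_SYNC_FEATURE_CPU_SIGNAL |
 *                   VK_SYNC_FEATURE_WAIT_ANY,
 *       .init = drv_timeline_sync_init,
 *       .finish = drv_timeline_sync_finish,
 *       .signal = drv_timeline_sync_signal,
 *       .get_value = drv_timeline_sync_get_value,
 *       .wait_many = drv_timeline_sync_wait_many,
 *    };
 */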

enum vk_sync_flags {
   /** Set if the vk_sync is a timeline */
   VK_SYNC_IS_TIMELINE = (1 << 0),

   /** Set if the vk_sync can have its payload shared */
   VK_SYNC_IS_SHAREABLE = (1 << 1),

   /** Set if the vk_sync has a shared payload */
   VK_SYNC_IS_SHARED = (1 << 2),
};

struct vk_sync {
   const struct vk_sync_type *type;
   enum vk_sync_flags flags;
};

/* See VkSemaphoreSubmitInfo */
struct vk_sync_wait {
   struct vk_sync *sync;
   VkPipelineStageFlags2 stage_mask;
   uint64_t wait_value;
};

/* See VkSemaphoreSubmitInfo */
struct vk_sync_signal {
   struct vk_sync *sync;
   VkPipelineStageFlags2 stage_mask;
   uint64_t signal_value;
};
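
/* Illustrative sketch (not part of the upstream header): waiting on the CPU
 * for any one of several time points using vk_sync_wait_many(), declared
 * below. UINT64_MAX is used as an effectively-infinite absolute timeout, and
 * stage_mask is left zero-initialized since this is a CPU-side wait.
 *
 *    const struct vk_sync_wait waits[2] = {
 *       { .sync = sync_a, .wait_value = 10 },
 *       { .sync = sync_b, .wait_value = 4 },
 *    };
 *    result = vk_sync_wait_many(device, 2, waits,
 *                               VK_SYNC_WAIT_ANY, UINT64_MAX);
 */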

VkResult MUST_CHECK vk_sync_init(struct vk_device *device,
                                 struct vk_sync *sync,
                                 const struct vk_sync_type *type,
                                 enum vk_sync_flags flags,
                                 uint64_t initial_value);

void vk_sync_finish(struct vk_device *device,
                    struct vk_sync *sync);

VkResult MUST_CHECK vk_sync_create(struct vk_device *device,
                                   const struct vk_sync_type *type,
                                   enum vk_sync_flags flags,
                                   uint64_t initial_value,
                                   struct vk_sync **sync_out);

void vk_sync_destroy(struct vk_device *device,
                     struct vk_sync *sync);

VkResult MUST_CHECK vk_sync_signal(struct vk_device *device,
                                   struct vk_sync *sync,
                                   uint64_t value);

VkResult MUST_CHECK vk_sync_get_value(struct vk_device *device,
                                      struct vk_sync *sync,
                                      uint64_t *value);

VkResult MUST_CHECK vk_sync_reset(struct vk_device *device,
                                  struct vk_sync *sync);

VkResult MUST_CHECK vk_sync_wait(struct vk_device *device,
                                 struct vk_sync *sync,
                                 uint64_t wait_value,
                                 enum vk_sync_wait_flags wait_flags,
                                 uint64_t abs_timeout_ns);

VkResult MUST_CHECK vk_sync_wait_many(struct vk_device *device,
                                      uint32_t wait_count,
                                      const struct vk_sync_wait *waits,
                                      enum vk_sync_wait_flags wait_flags,
                                      uint64_t abs_timeout_ns);

VkResult MUST_CHECK vk_sync_import_opaque_fd(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int fd);

VkResult MUST_CHECK vk_sync_export_opaque_fd(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int *fd);

VkResult MUST_CHECK vk_sync_import_sync_file(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int sync_file);

VkResult MUST_CHECK vk_sync_export_sync_file(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int *sync_file);

VkResult MUST_CHECK vk_sync_import_win32_handle(struct vk_device *device,
                                                struct vk_sync *sync,
                                                void *handle,
                                                const wchar_t *name);

VkResult MUST_CHECK vk_sync_export_win32_handle(struct vk_device *device,
                                                struct vk_sync *sync,
                                                void **handle);

VkResult MUST_CHECK vk_sync_set_win32_export_params(struct vk_device *device,
                                                    struct vk_sync *sync,
                                                    const void *security_attributes,
                                                    uint32_t access,
                                                    const wchar_t *name);

VkResult MUST_CHECK vk_sync_move(struct vk_device *device,
                                 struct vk_sync *dst,
                                 struct vk_sync *src);

#ifdef __cplusplus
}
#endif

#endif /* VK_SYNC_H */