/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_sync.h"

#include <assert.h>
#include <string.h>

#include "util/u_debug.h"
#include "util/macros.h"
#include "util/os_time.h"

#include "vk_alloc.h"
#include "vk_device.h"
#include "vk_log.h"

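/* Example usage (a minimal sketch, not taken from this file): a driver that
 * defines its own vk_sync_type, hypothetically called drv_sync_type here and
 * embedding struct vk_device as device->vk, might drive these helpers as
 * follows:
 *
 *    struct vk_sync *sync;
 *    VkResult result = vk_sync_create(&device->vk, &drv_sync_type,
 *                                     0, 0, &sync);
 *    if (result == VK_SUCCESS) {
 *       result = vk_sync_signal(&device->vk, sync, 0);
 *       result = vk_sync_wait(&device->vk, sync, 0, 0, UINT64_MAX);
 *       vk_sync_destroy(&device->vk, sync);
 *    }
 */

/* Sanity-check that a vk_sync_type's advertised feature bits are consistent
 * with the entrypoints it actually implements.
 */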
static void
vk_sync_type_validate(const struct vk_sync_type *type)
{
   assert(type->init);
   assert(type->finish);

   assert(type->features & (VK_SYNC_FEATURE_BINARY |
                            VK_SYNC_FEATURE_TIMELINE));

   if (type->features & VK_SYNC_FEATURE_TIMELINE) {
      assert(type->features & VK_SYNC_FEATURE_GPU_WAIT);
      assert(type->features & VK_SYNC_FEATURE_CPU_WAIT);
      assert(type->features & VK_SYNC_FEATURE_CPU_SIGNAL);
      assert(type->features & (VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL |
                               VK_SYNC_FEATURE_WAIT_PENDING));
      assert(type->signal);
      assert(type->get_value);
   }

   if (!(type->features & VK_SYNC_FEATURE_BINARY)) {
      assert(!(type->features & (VK_SYNC_FEATURE_GPU_MULTI_WAIT |
                                 VK_SYNC_FEATURE_CPU_RESET)));
      assert(!type->import_sync_file);
      assert(!type->export_sync_file);
   }

   if (type->features & VK_SYNC_FEATURE_CPU_WAIT) {
      assert(type->wait || type->wait_many);
   } else {
      assert(!(type->features & (VK_SYNC_FEATURE_WAIT_ANY |
                                 VK_SYNC_FEATURE_WAIT_PENDING)));
   }

   if (type->features & VK_SYNC_FEATURE_GPU_MULTI_WAIT)
      assert(type->features & VK_SYNC_FEATURE_GPU_WAIT);

   if (type->features & VK_SYNC_FEATURE_CPU_RESET)
      assert(type->reset);

   if (type->features & VK_SYNC_FEATURE_CPU_SIGNAL)
      assert(type->signal);
}

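/* Initialize caller-provided storage as a vk_sync.  This is the in-place
 * variant for when a vk_sync is embedded in a larger driver object; use
 * vk_sync_create() when the object should be heap-allocated.
 */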
VkResult
vk_sync_init(struct vk_device *device,
             struct vk_sync *sync,
             const struct vk_sync_type *type,
             enum vk_sync_flags flags,
             uint64_t initial_value)
{
   vk_sync_type_validate(type);

   if (flags & VK_SYNC_IS_TIMELINE)
      assert(type->features & VK_SYNC_FEATURE_TIMELINE);
   else
      assert(type->features & VK_SYNC_FEATURE_BINARY);

   assert(type->size >= sizeof(*sync));
   memset(sync, 0, type->size);
   sync->type = type;
   sync->flags = flags;

   return type->init(device, sync, initial_value);
}

void
vk_sync_finish(struct vk_device *device,
               struct vk_sync *sync)
{
   sync->type->finish(device, sync);
}

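/* Heap-allocate and initialize a new vk_sync.  On success the new object is
 * returned in *sync_out; on failure the allocation is freed and the error is
 * propagated.
 */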
VkResult
vk_sync_create(struct vk_device *device,
               const struct vk_sync_type *type,
               enum vk_sync_flags flags,
               uint64_t initial_value,
               struct vk_sync **sync_out)
{
   struct vk_sync *sync;

   sync = vk_alloc(&device->alloc, type->size, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (sync == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult result = vk_sync_init(device, sync, type, flags, initial_value);
   if (result != VK_SUCCESS) {
      vk_free(&device->alloc, sync);
      return result;
   }

   *sync_out = sync;

   return VK_SUCCESS;
}

void
vk_sync_destroy(struct vk_device *device,
                struct vk_sync *sync)
{
   vk_sync_finish(device, sync);
   vk_free(&device->alloc, sync);
}

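/* Signal a vk_sync from the CPU.  Binary payloads are always signaled with a
 * value of 0; timeline payloads must be signaled with a value strictly
 * greater than 0, since a timeline value of 0 is reserved for the initial
 * unsignaled state.
 */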
VkResult
vk_sync_signal(struct vk_device *device,
               struct vk_sync *sync,
               uint64_t value)
{
   assert(sync->type->features & VK_SYNC_FEATURE_CPU_SIGNAL);

   if (sync->flags & VK_SYNC_IS_TIMELINE)
      assert(value > 0);
   else
      assert(value == 0);

   return sync->type->signal(device, sync, value);
}

VkResult
vk_sync_get_value(struct vk_device *device,
                  struct vk_sync *sync,
                  uint64_t *value)
{
   assert(sync->flags & VK_SYNC_IS_TIMELINE);
   return sync->type->get_value(device, sync, value);
}

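/* Reset a binary vk_sync to the unsignaled state.  Resetting is meaningless
 * for timelines, whose values only ever increase.
 */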
VkResult
vk_sync_reset(struct vk_device *device,
              struct vk_sync *sync)
{
   assert(sync->type->features & VK_SYNC_FEATURE_CPU_RESET);
   assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
   return sync->type->reset(device, sync);
}

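/* Move the payload from src to dst, leaving src unsignaled.  Both syncs must
 * be binary and of the same type.
 */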
VkResult
vk_sync_move(struct vk_device *device,
             struct vk_sync *dst,
             struct vk_sync *src)
{
   assert(!(dst->flags & VK_SYNC_IS_TIMELINE));
   assert(!(src->flags & VK_SYNC_IS_TIMELINE));
   assert(dst->type == src->type);

   return src->type->move(device, dst, src);
}

static void
assert_valid_wait(struct vk_sync *sync,
                  uint64_t wait_value,
                  enum vk_sync_wait_flags wait_flags)
{
   assert(sync->type->features & VK_SYNC_FEATURE_CPU_WAIT);

   if (!(sync->flags & VK_SYNC_IS_TIMELINE))
      assert(wait_value == 0);

   if (wait_flags & VK_SYNC_WAIT_PENDING)
      assert(sync->type->features & VK_SYNC_FEATURE_WAIT_PENDING);
}

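/* Return the largest absolute timeout (in ns) any CPU wait is allowed to
 * use.  The MESA_VK_MAX_TIMEOUT debug option gives the limit in
 * milliseconds; when it is unset or 0, waits are unbounded.
 */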
static uint64_t
get_max_abs_timeout_ns(void)
{
   static int max_timeout_ms = -1;
   if (max_timeout_ms < 0)
      max_timeout_ms = debug_get_num_option("MESA_VK_MAX_TIMEOUT", 0);

   if (max_timeout_ms == 0)
      return UINT64_MAX;
   else
      return os_time_get_absolute_timeout(max_timeout_ms * 1000000ull);
}

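/* Wait on a single vk_sync.  If the type only implements wait_many(), fall
 * back to a one-element wait array.
 */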
static VkResult
__vk_sync_wait(struct vk_device *device,
               struct vk_sync *sync,
               uint64_t wait_value,
               enum vk_sync_wait_flags wait_flags,
               uint64_t abs_timeout_ns)
{
   assert_valid_wait(sync, wait_value, wait_flags);

   /* This doesn't make sense for a single wait */
   assert(!(wait_flags & VK_SYNC_WAIT_ANY));

   if (sync->type->wait) {
      return sync->type->wait(device, sync, wait_value,
                              wait_flags, abs_timeout_ns);
   } else {
      struct vk_sync_wait wait = {
         .sync = sync,
         .stage_mask = ~(VkPipelineStageFlags2)0,
         .wait_value = wait_value,
      };
      return sync->type->wait_many(device, 1, &wait, wait_flags,
                                   abs_timeout_ns);
   }
}

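/* Wait on a single vk_sync, clamping the timeout to the MESA_VK_MAX_TIMEOUT
 * limit.  If the clamped wait times out while the caller asked for a longer
 * one, the device is declared lost rather than returning VK_TIMEOUT.
 */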
VkResult
vk_sync_wait(struct vk_device *device,
             struct vk_sync *sync,
             uint64_t wait_value,
             enum vk_sync_wait_flags wait_flags,
             uint64_t abs_timeout_ns)
{
   uint64_t max_abs_timeout_ns = get_max_abs_timeout_ns();
   if (abs_timeout_ns > max_abs_timeout_ns) {
      VkResult result =
         __vk_sync_wait(device, sync, wait_value, wait_flags,
                        max_abs_timeout_ns);
      if (unlikely(result == VK_TIMEOUT))
         return vk_device_set_lost(device, "Maximum timeout exceeded!");
      return result;
   } else {
      return __vk_sync_wait(device, sync, wait_value, wait_flags,
                            abs_timeout_ns);
   }
}

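/* Check whether a set of waits can be handled by a single wait_many() call:
 * every sync must share the same type, that type must implement wait_many(),
 * and, for VK_SYNC_WAIT_ANY, it must advertise VK_SYNC_FEATURE_WAIT_ANY.
 */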
static bool
can_wait_many(uint32_t wait_count,
              const struct vk_sync_wait *waits,
              enum vk_sync_wait_flags wait_flags)
{
   if (waits[0].sync->type->wait_many == NULL)
      return false;

   if ((wait_flags & VK_SYNC_WAIT_ANY) &&
       !(waits[0].sync->type->features & VK_SYNC_FEATURE_WAIT_ANY))
      return false;

   for (uint32_t i = 0; i < wait_count; i++) {
      assert_valid_wait(waits[i].sync, waits[i].wait_value, wait_flags);
      if (waits[i].sync->type != waits[0].sync->type)
         return false;
   }

   return true;
}

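/* Wait on a set of vk_syncs.  Uses the type's wait_many() when possible;
 * otherwise falls back to waiting on each sync in sequence (wait-all) or
 * polling them in a loop (wait-any).
 */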
static VkResult
__vk_sync_wait_many(struct vk_device *device,
                    uint32_t wait_count,
                    const struct vk_sync_wait *waits,
                    enum vk_sync_wait_flags wait_flags,
                    uint64_t abs_timeout_ns)
{
   if (wait_count == 0)
      return VK_SUCCESS;

   if (wait_count == 1) {
      return __vk_sync_wait(device, waits[0].sync, waits[0].wait_value,
                            wait_flags & ~VK_SYNC_WAIT_ANY, abs_timeout_ns);
   }

   if (can_wait_many(wait_count, waits, wait_flags)) {
      return waits[0].sync->type->wait_many(device, wait_count, waits,
                                            wait_flags, abs_timeout_ns);
   } else if (wait_flags & VK_SYNC_WAIT_ANY) {
      /* If we have multiple syncs and they don't support wait_any or they're
       * not all the same type, there's nothing better we can do than spin.
       */
      do {
         for (uint32_t i = 0; i < wait_count; i++) {
            VkResult result = __vk_sync_wait(device, waits[i].sync,
                                             waits[i].wait_value,
                                             wait_flags & ~VK_SYNC_WAIT_ANY,
                                             0 /* abs_timeout_ns */);
            if (result != VK_TIMEOUT)
               return result;
         }
      } while (os_time_get_nano() < abs_timeout_ns);

      return VK_TIMEOUT;
   } else {
      for (uint32_t i = 0; i < wait_count; i++) {
         VkResult result = __vk_sync_wait(device, waits[i].sync,
                                          waits[i].wait_value,
                                          wait_flags, abs_timeout_ns);
         if (result != VK_SUCCESS)
            return result;
      }
      return VK_SUCCESS;
   }
}

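/* Multi-sync counterpart to vk_sync_wait(), with the same
 * MESA_VK_MAX_TIMEOUT clamping and device-loss behavior.
 */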
VkResult
vk_sync_wait_many(struct vk_device *device,
                  uint32_t wait_count,
                  const struct vk_sync_wait *waits,
                  enum vk_sync_wait_flags wait_flags,
                  uint64_t abs_timeout_ns)
{
   uint64_t max_abs_timeout_ns = get_max_abs_timeout_ns();
   if (abs_timeout_ns > max_abs_timeout_ns) {
      VkResult result =
         __vk_sync_wait_many(device, wait_count, waits, wait_flags,
                             max_abs_timeout_ns);
      if (unlikely(result == VK_TIMEOUT))
         return vk_device_set_lost(device, "Maximum timeout exceeded!");
      return result;
   } else {
      return __vk_sync_wait_many(device, wait_count, waits, wait_flags,
                                 abs_timeout_ns);
   }
}

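/* Import an opaque FD payload.  On success the sync is marked as both shared
 * and shareable so it can be re-exported later.
 */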
VkResult
vk_sync_import_opaque_fd(struct vk_device *device,
                         struct vk_sync *sync,
                         int fd)
{
   VkResult result = sync->type->import_opaque_fd(device, sync, fd);
   if (unlikely(result != VK_SUCCESS))
      return result;

   sync->flags |= VK_SYNC_IS_SHAREABLE |
                  VK_SYNC_IS_SHARED;

   return VK_SUCCESS;
}

VkResult
vk_sync_export_opaque_fd(struct vk_device *device,
                         struct vk_sync *sync,
                         int *fd)
{
   assert(sync->flags & VK_SYNC_IS_SHAREABLE);

   VkResult result = sync->type->export_opaque_fd(device, sync, fd);
   if (unlikely(result != VK_SUCCESS))
      return result;

   sync->flags |= VK_SYNC_IS_SHARED;

   return VK_SUCCESS;
}

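/* Import a sync file payload into a binary vk_sync.  A negative file
 * descriptor means "already signaled"; if the type supports CPU signaling,
 * that case is handled here so drivers don't have to.
 */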
VkResult
vk_sync_import_sync_file(struct vk_device *device,
                         struct vk_sync *sync,
                         int sync_file)
{
   assert(!(sync->flags & VK_SYNC_IS_TIMELINE));

   /* Silently handle negative file descriptors in case the driver doesn't
    * want to bother.
    */
   if (sync_file < 0 && sync->type->signal)
      return sync->type->signal(device, sync, 0);

   return sync->type->import_sync_file(device, sync, sync_file);
}

VkResult
vk_sync_export_sync_file(struct vk_device *device,
                         struct vk_sync *sync,
                         int *sync_file)
{
   assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
   return sync->type->export_sync_file(device, sync, sync_file);
}

VkResult
vk_sync_import_win32_handle(struct vk_device *device,
                            struct vk_sync *sync,
                            void *handle,
                            const wchar_t *name)
{
   VkResult result = sync->type->import_win32_handle(device, sync, handle, name);
   if (unlikely(result != VK_SUCCESS))
      return result;

   sync->flags |= VK_SYNC_IS_SHAREABLE |
                  VK_SYNC_IS_SHARED;

   return VK_SUCCESS;
}

VkResult
vk_sync_export_win32_handle(struct vk_device *device,
                            struct vk_sync *sync,
                            void **handle)
{
   assert(sync->flags & VK_SYNC_IS_SHAREABLE);

   VkResult result = sync->type->export_win32_handle(device, sync, handle);
   if (unlikely(result != VK_SUCCESS))
      return result;

   sync->flags |= VK_SYNC_IS_SHARED;

   return VK_SUCCESS;
}

VkResult
vk_sync_set_win32_export_params(struct vk_device *device,
                                struct vk_sync *sync,
                                const void *security_attributes,
                                uint32_t access,
                                const wchar_t *name)
{
   assert(sync->flags & VK_SYNC_IS_SHARED);

   return sync->type->set_win32_export_params(device, sync, security_attributes, access, name);
}