1 /*
2 * Copyright © 2021 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "vk_drm_syncobj.h"
25
26 #include <sched.h>
27 #include <xf86drm.h>
28
29 #include "drm-uapi/drm.h"
30
31 #include "util/os_time.h"
32
33 #include "vk_device.h"
34 #include "vk_log.h"
35 #include "vk_util.h"
36
37 static struct vk_drm_syncobj *
to_drm_syncobj(struct vk_sync * sync)38 to_drm_syncobj(struct vk_sync *sync)
39 {
40 assert(vk_sync_type_is_drm_syncobj(sync->type));
41 return container_of(sync, struct vk_drm_syncobj, base);
42 }
43
44 static VkResult
vk_drm_syncobj_init(struct vk_device * device,struct vk_sync * sync,uint64_t initial_value)45 vk_drm_syncobj_init(struct vk_device *device,
46 struct vk_sync *sync,
47 uint64_t initial_value)
48 {
49 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
50
51 uint32_t flags = 0;
52 if (!(sync->flags & VK_SYNC_IS_TIMELINE) && initial_value)
53 flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
54
55 assert(device->drm_fd >= 0);
56 int err = drmSyncobjCreate(device->drm_fd, flags, &sobj->syncobj);
57 if (err < 0) {
58 return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
59 "DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
60 }
61
62 if ((sync->flags & VK_SYNC_IS_TIMELINE) && initial_value) {
63 err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj,
64 &initial_value, 1);
65 if (err < 0) {
66 vk_drm_syncobj_finish(device, sync);
67 return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
68 "DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
69 }
70 }
71
72 return VK_SUCCESS;
73 }
74
75 void
vk_drm_syncobj_finish(struct vk_device * device,struct vk_sync * sync)76 vk_drm_syncobj_finish(struct vk_device *device,
77 struct vk_sync *sync)
78 {
79 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
80
81 assert(device->drm_fd >= 0);
82 ASSERTED int err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
83 assert(err == 0);
84 }
85
86 static VkResult
vk_drm_syncobj_signal(struct vk_device * device,struct vk_sync * sync,uint64_t value)87 vk_drm_syncobj_signal(struct vk_device *device,
88 struct vk_sync *sync,
89 uint64_t value)
90 {
91 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
92
93 assert(device->drm_fd >= 0);
94 int err;
95 if (sync->flags & VK_SYNC_IS_TIMELINE)
96 err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj, &value, 1);
97 else
98 err = drmSyncobjSignal(device->drm_fd, &sobj->syncobj, 1);
99 if (err) {
100 return vk_errorf(device, VK_ERROR_UNKNOWN,
101 "DRM_IOCTL_SYNCOBJ_SIGNAL failed: %m");
102 }
103
104 return VK_SUCCESS;
105 }
106
107 static VkResult
vk_drm_syncobj_get_value(struct vk_device * device,struct vk_sync * sync,uint64_t * value)108 vk_drm_syncobj_get_value(struct vk_device *device,
109 struct vk_sync *sync,
110 uint64_t *value)
111 {
112 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
113
114 assert(device->drm_fd >= 0);
115 int err = drmSyncobjQuery(device->drm_fd, &sobj->syncobj, value, 1);
116 if (err) {
117 return vk_errorf(device, VK_ERROR_UNKNOWN,
118 "DRM_IOCTL_SYNCOBJ_QUERY failed: %m");
119 }
120
121 return VK_SUCCESS;
122 }
123
124 static VkResult
vk_drm_syncobj_reset(struct vk_device * device,struct vk_sync * sync)125 vk_drm_syncobj_reset(struct vk_device *device,
126 struct vk_sync *sync)
127 {
128 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
129
130 assert(device->drm_fd >= 0);
131 int err = drmSyncobjReset(device->drm_fd, &sobj->syncobj, 1);
132 if (err) {
133 return vk_errorf(device, VK_ERROR_UNKNOWN,
134 "DRM_IOCTL_SYNCOBJ_RESET failed: %m");
135 }
136
137 return VK_SUCCESS;
138 }
139
140 static VkResult
sync_has_sync_file(struct vk_device * device,struct vk_sync * sync)141 sync_has_sync_file(struct vk_device *device, struct vk_sync *sync)
142 {
143 uint32_t handle = to_drm_syncobj(sync)->syncobj;
144
145 int fd = -1;
146 int err = drmSyncobjExportSyncFile(device->drm_fd, handle, &fd);
147 if (!err) {
148 close(fd);
149 return VK_SUCCESS;
150 }
151
152 /* On the off chance the sync_file export repeatedly fails for some
153 * unexpected reason, we want to ensure this function will return success
154 * eventually. Do a zero-time syncobj wait if the export failed.
155 */
156 err = drmSyncobjWait(device->drm_fd, &handle, 1, 0 /* timeout */,
157 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
158 NULL /* first_signaled */);
159 if (!err) {
160 return VK_SUCCESS;
161 } else if (errno == ETIME) {
162 return VK_TIMEOUT;
163 } else {
164 return vk_errorf(device, VK_ERROR_UNKNOWN,
165 "DRM_IOCTL_SYNCOBJ_WAIT failed: %m");
166 }
167 }
168
169 static VkResult
spin_wait_for_sync_file(struct vk_device * device,uint32_t wait_count,const struct vk_sync_wait * waits,enum vk_sync_wait_flags wait_flags,uint64_t abs_timeout_ns)170 spin_wait_for_sync_file(struct vk_device *device,
171 uint32_t wait_count,
172 const struct vk_sync_wait *waits,
173 enum vk_sync_wait_flags wait_flags,
174 uint64_t abs_timeout_ns)
175 {
176 if (wait_flags & VK_SYNC_WAIT_ANY) {
177 while (1) {
178 for (uint32_t i = 0; i < wait_count; i++) {
179 VkResult result = sync_has_sync_file(device, waits[i].sync);
180 if (result != VK_TIMEOUT)
181 return result;
182 }
183
184 if (os_time_get_nano() >= abs_timeout_ns)
185 return VK_TIMEOUT;
186
187 sched_yield();
188 }
189 } else {
190 for (uint32_t i = 0; i < wait_count; i++) {
191 while (1) {
192 VkResult result = sync_has_sync_file(device, waits[i].sync);
193 if (result != VK_TIMEOUT)
194 return result;
195
196 if (os_time_get_nano() >= abs_timeout_ns)
197 return VK_TIMEOUT;
198
199 sched_yield();
200 }
201 }
202 }
203
204 return VK_SUCCESS;
205 }
206
static VkResult
vk_drm_syncobj_wait_many(struct vk_device *device,
                         uint32_t wait_count,
                         const struct vk_sync_wait *waits,
                         enum vk_sync_wait_flags wait_flags,
                         uint64_t abs_timeout_ns)
{
   /* CPU-wait on a set of syncobjs, honoring ANY/ALL and WAIT_PENDING
    * semantics, until abs_timeout_ns (CLOCK_MONOTONIC nanoseconds).
    *
    * NOTE(review): the WAIT_PENDING check below dereferences waits[0], so
    * this appears to assume wait_count >= 1 and that all waits share a sync
    * type — confirm against callers.
    */
   if ((wait_flags & VK_SYNC_WAIT_PENDING) &&
       !(waits[0].sync->type->features & VK_SYNC_FEATURE_TIMELINE)) {
      /* Sadly, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE was never implemented
       * for drivers that don't support timelines.  Instead, we have to spin
       * on DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE until it succeeds.
       */
      return spin_wait_for_sync_file(device, wait_count, waits,
                                     wait_flags, abs_timeout_ns);
   }

   /* Syncobj timeouts are signed */
   abs_timeout_ns = MIN2(abs_timeout_ns, (uint64_t)INT64_MAX);

   /* Build compacted parallel arrays of handles and wait values for the
    * ioctl.  Freed via STACK_ARRAY_FINISH below.
    */
   STACK_ARRAY(uint32_t, handles, wait_count);
   STACK_ARRAY(uint64_t, wait_values, wait_count);

   uint32_t j = 0;
   bool has_timeline = false;
   for (uint32_t i = 0; i < wait_count; i++) {
      /* The syncobj API doesn't like wait values of 0 but it's safe to skip
       * them because a wait for 0 is a no-op.
       */
      if (waits[i].sync->flags & VK_SYNC_IS_TIMELINE) {
         if (waits[i].wait_value == 0)
            continue;

         has_timeline = true;
      }

      handles[j] = to_drm_syncobj(waits[i].sync)->syncobj;
      wait_values[j] = waits[i].wait_value;
      j++;
   }
   assert(j <= wait_count);
   /* From here on, wait_count is the compacted count. */
   wait_count = j;

   /* WAIT_FOR_SUBMIT: also wait for a fence to appear in the syncobj,
    * not just for it to signal.
    */
   uint32_t syncobj_wait_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   if (!(wait_flags & VK_SYNC_WAIT_ANY))
      syncobj_wait_flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   assert(device->drm_fd >= 0);
   int err;
   if (wait_count == 0) {
      /* Everything was skipped (all zero-value timeline waits). */
      err = 0;
   } else if (wait_flags & VK_SYNC_WAIT_PENDING) {
      /* We always use a timeline wait for WAIT_PENDING, even for binary
       * syncobjs because the non-timeline wait doesn't support
       * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE.
       */
      err = drmSyncobjTimelineWait(device->drm_fd, handles, wait_values,
                                   wait_count, abs_timeout_ns,
                                   syncobj_wait_flags |
                                   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
                                   NULL /* first_signaled */);
   } else if (has_timeline) {
      err = drmSyncobjTimelineWait(device->drm_fd, handles, wait_values,
                                   wait_count, abs_timeout_ns,
                                   syncobj_wait_flags,
                                   NULL /* first_signaled */);
   } else {
      /* Pure binary waits can use the simpler ioctl. */
      err = drmSyncobjWait(device->drm_fd, handles,
                           wait_count, abs_timeout_ns,
                           syncobj_wait_flags,
                           NULL /* first_signaled */);
   }

   STACK_ARRAY_FINISH(handles);
   STACK_ARRAY_FINISH(wait_values);

   /* ETIME from the wait ioctls means the timeout expired, not an error. */
   if (err && errno == ETIME) {
      return VK_TIMEOUT;
   } else if (err) {
      return vk_errorf(device, VK_ERROR_UNKNOWN,
                       "DRM_IOCTL_SYNCOBJ_WAIT failed: %m");
   }

   return VK_SUCCESS;
}
292
293 static VkResult
vk_drm_syncobj_import_opaque_fd(struct vk_device * device,struct vk_sync * sync,int fd)294 vk_drm_syncobj_import_opaque_fd(struct vk_device *device,
295 struct vk_sync *sync,
296 int fd)
297 {
298 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
299
300 assert(device->drm_fd >= 0);
301 uint32_t new_handle;
302 int err = drmSyncobjFDToHandle(device->drm_fd, fd, &new_handle);
303 if (err) {
304 return vk_errorf(device, VK_ERROR_UNKNOWN,
305 "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m");
306 }
307
308 err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
309 assert(!err);
310
311 sobj->syncobj = new_handle;
312
313 return VK_SUCCESS;
314 }
315
316 static VkResult
vk_drm_syncobj_export_opaque_fd(struct vk_device * device,struct vk_sync * sync,int * fd)317 vk_drm_syncobj_export_opaque_fd(struct vk_device *device,
318 struct vk_sync *sync,
319 int *fd)
320 {
321 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
322
323 assert(device->drm_fd >= 0);
324 int err = drmSyncobjHandleToFD(device->drm_fd, sobj->syncobj, fd);
325 if (err) {
326 return vk_errorf(device, VK_ERROR_UNKNOWN,
327 "DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m");
328 }
329
330 return VK_SUCCESS;
331 }
332
333 static VkResult
vk_drm_syncobj_import_sync_file(struct vk_device * device,struct vk_sync * sync,int sync_file)334 vk_drm_syncobj_import_sync_file(struct vk_device *device,
335 struct vk_sync *sync,
336 int sync_file)
337 {
338 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
339
340 assert(device->drm_fd >= 0);
341 int err = drmSyncobjImportSyncFile(device->drm_fd, sobj->syncobj, sync_file);
342 if (err) {
343 return vk_errorf(device, VK_ERROR_UNKNOWN,
344 "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m");
345 }
346
347 return VK_SUCCESS;
348 }
349
350 static VkResult
vk_drm_syncobj_export_sync_file(struct vk_device * device,struct vk_sync * sync,int * sync_file)351 vk_drm_syncobj_export_sync_file(struct vk_device *device,
352 struct vk_sync *sync,
353 int *sync_file)
354 {
355 struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
356
357 assert(device->drm_fd >= 0);
358 int err = drmSyncobjExportSyncFile(device->drm_fd, sobj->syncobj, sync_file);
359 if (err) {
360 return vk_errorf(device, VK_ERROR_UNKNOWN,
361 "DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m");
362 }
363
364 return VK_SUCCESS;
365 }
366
367 static VkResult
vk_drm_syncobj_move(struct vk_device * device,struct vk_sync * dst,struct vk_sync * src)368 vk_drm_syncobj_move(struct vk_device *device,
369 struct vk_sync *dst,
370 struct vk_sync *src)
371 {
372 struct vk_drm_syncobj *dst_sobj = to_drm_syncobj(dst);
373 struct vk_drm_syncobj *src_sobj = to_drm_syncobj(src);
374 VkResult result;
375
376 if (!(dst->flags & VK_SYNC_IS_SHARED) &&
377 !(src->flags & VK_SYNC_IS_SHARED)) {
378 result = vk_drm_syncobj_reset(device, dst);
379 if (unlikely(result != VK_SUCCESS))
380 return result;
381
382 uint32_t tmp = dst_sobj->syncobj;
383 dst_sobj->syncobj = src_sobj->syncobj;
384 src_sobj->syncobj = tmp;
385
386 return VK_SUCCESS;
387 } else {
388 int fd;
389 result = vk_drm_syncobj_export_sync_file(device, src, &fd);
390 if (result != VK_SUCCESS)
391 return result;
392
393 result = vk_drm_syncobj_import_sync_file(device, dst, fd);
394 if (fd >= 0)
395 close(fd);
396 if (result != VK_SUCCESS)
397 return result;
398
399 return vk_drm_syncobj_reset(device, src);
400 }
401 }
402
struct vk_sync_type
vk_drm_syncobj_get_type(int drm_fd)
{
   /* Probe drm_fd for syncobj support and build the vk_sync_type vtable
    * describing what this kernel/driver combination can do.  Returns a
    * type with .features == 0 when syncobjs are unsupported.
    */
   uint32_t syncobj = 0;
   int err = drmSyncobjCreate(drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj);
   if (err < 0)
      return (struct vk_sync_type) { .features = 0 };

   /* Baseline features available whenever syncobj creation works. */
   struct vk_sync_type type = {
      .size = sizeof(struct vk_drm_syncobj),
      .features = VK_SYNC_FEATURE_BINARY |
                  VK_SYNC_FEATURE_GPU_WAIT |
                  VK_SYNC_FEATURE_CPU_RESET |
                  VK_SYNC_FEATURE_CPU_SIGNAL |
                  VK_SYNC_FEATURE_WAIT_PENDING,
      .init = vk_drm_syncobj_init,
      .finish = vk_drm_syncobj_finish,
      .signal = vk_drm_syncobj_signal,
      .reset = vk_drm_syncobj_reset,
      .move = vk_drm_syncobj_move,
      .import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
      .export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
      .import_sync_file = vk_drm_syncobj_import_sync_file,
      .export_sync_file = vk_drm_syncobj_export_sync_file,
   };

   /* CPU waits: probe with a zero-timeout wait on the (already signaled)
    * probe syncobj; success means the wait ioctl is available.
    */
   err = drmSyncobjWait(drm_fd, &syncobj, 1, 0,
                        DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
                        NULL /* first_signaled */);
   if (err == 0) {
      type.wait_many = vk_drm_syncobj_wait_many;
      type.features |= VK_SYNC_FEATURE_CPU_WAIT |
                       VK_SYNC_FEATURE_WAIT_ANY;
   }

   /* Timeline support is advertised via a DRM capability query. */
   uint64_t cap;
   err = drmGetCap(drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap);
   if (err == 0 && cap != 0) {
      type.get_value = vk_drm_syncobj_get_value;
      type.features |= VK_SYNC_FEATURE_TIMELINE;
   }

   /* Clean up the probe syncobj. */
   err = drmSyncobjDestroy(drm_fd, syncobj);
   assert(err == 0);

   return type;
}
450