/*
 * Copyright (C) 2021 Collabora Ltd.
 *
 * Derived from tu_drm.c which is:
 * Copyright © 2018 Google, Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <xf86drm.h>

#include "panvk_private.h"

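/*
 * Each panvk_syncobj wraps two DRM syncobj handles: a permanent payload that
 * lives as long as the semaphore/fence, and an optional temporary payload
 * installed by temporary imports (e.g. sync-file imports). When both are
 * present, the temporary payload takes precedence; it is dropped again on
 * export and on fence reset, following the Vulkan rules for temporary
 * external handle imports.
 */
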
static VkResult
sync_create(struct panvk_device *device,
            struct panvk_syncobj *sync,
            bool signaled)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;

   struct drm_syncobj_create create = {
      .flags = signaled ? DRM_SYNCOBJ_CREATE_SIGNALED : 0,
   };

   int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
   if (ret)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   sync->permanent = create.handle;

   return VK_SUCCESS;
}

static void
sync_set_temporary(struct panvk_device *device, struct panvk_syncobj *sync,
                   uint32_t syncobj)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;

   if (sync->temporary) {
      struct drm_syncobj_destroy destroy = { .handle = sync->temporary };
      drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
   }

   sync->temporary = syncobj;
}

static void
sync_destroy(struct panvk_device *device, struct panvk_syncobj *sync)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;

   if (!sync)
      return;

   sync_set_temporary(device, sync, 0);
   struct drm_syncobj_destroy destroy = { .handle = sync->permanent };
   drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
}

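/*
 * Two import paths:
 *  - opaque fd (sync_fd == false): the fd refers to a DRM syncobj; turn it
 *    into a handle and install it as the temporary or permanent payload.
 *  - sync fd (sync_fd == true): always a temporary import; wrap the sync
 *    file in a freshly created syncobj (fd == -1 means already signaled).
 */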
static VkResult
sync_import(struct panvk_device *device, struct panvk_syncobj *sync,
            bool temporary, bool sync_fd, int fd)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   int ret;

   if (!sync_fd) {
      uint32_t *dst = temporary ? &sync->temporary : &sync->permanent;

      struct drm_syncobj_handle handle = { .fd = fd };
      ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &handle);
      if (ret)
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;

      if (*dst) {
         struct drm_syncobj_destroy destroy = { .handle = *dst };
         drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
      }
      *dst = handle.handle;
      close(fd);
   } else {
      assert(temporary);

      struct drm_syncobj_create create = {};

      if (fd == -1)
         create.flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
      if (ret)
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;

      if (fd != -1) {
         struct drm_syncobj_handle handle = {
            .fd = fd,
            .handle = create.handle,
            .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
         };

         ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &handle);
         if (ret) {
            struct drm_syncobj_destroy destroy = { .handle = create.handle };
            drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
            return VK_ERROR_INVALID_EXTERNAL_HANDLE;
         }
         close(fd);
      }

      sync_set_temporary(device, sync, create.handle);
   }

   return VK_SUCCESS;
}

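/*
 * Export the current payload (temporary if set, permanent otherwise) as an
 * opaque syncobj fd or a sync file, then drop the temporary payload so the
 * permanent one becomes visible again.
 */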
static VkResult
sync_export(struct panvk_device *device, struct panvk_syncobj *sync,
            bool sync_fd, int *p_fd)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;

   struct drm_syncobj_handle handle = {
      .handle = sync->temporary ? : sync->permanent,
      .flags = sync_fd ? DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE : 0,
      .fd = -1,
   };
   int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);
   if (ret)
      return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   /* restore permanent payload on export */
   sync_set_temporary(device, sync, 0);

   *p_fd = handle.fd;
   return VK_SUCCESS;
}

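/*
 * Only binary semaphores are handled here: pCreateInfo's pNext chain (e.g. a
 * VkSemaphoreTypeCreateInfo) is not inspected.
 */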
VkResult
panvk_CreateSemaphore(VkDevice _device,
                      const VkSemaphoreCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkSemaphore *pSemaphore)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   struct panvk_semaphore *sem =
         vk_object_zalloc(&device->vk, pAllocator, sizeof(*sem),
                          VK_OBJECT_TYPE_SEMAPHORE);
   if (!sem)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult ret = sync_create(device, &sem->syncobj, false);
   if (ret != VK_SUCCESS) {
      vk_free2(&device->vk.alloc, pAllocator, sem);
      return ret;
   }

   *pSemaphore = panvk_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
panvk_DestroySemaphore(VkDevice _device, VkSemaphore _sem, const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_semaphore, sem, _sem);

   sync_destroy(device, &sem->syncobj);
   vk_object_free(&device->vk, pAllocator, sem);
}

VkResult
panvk_ImportSemaphoreFdKHR(VkDevice _device, const VkImportSemaphoreFdInfoKHR *info)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_semaphore, sem, info->semaphore);
   bool temp = info->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
   bool sync_fd = info->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

   return sync_import(device, &sem->syncobj, temp, sync_fd, info->fd);
}

VkResult
panvk_GetSemaphoreFdKHR(VkDevice _device, const VkSemaphoreGetFdInfoKHR *info, int *pFd)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_semaphore, sem, info->semaphore);
   bool sync_fd = info->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

   return sync_export(device, &sem->syncobj, sync_fd, pFd);
}

VkResult
panvk_CreateFence(VkDevice _device,
                  const VkFenceCreateInfo *info,
                  const VkAllocationCallbacks *pAllocator,
                  VkFence *pFence)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   struct panvk_fence *fence =
         vk_object_zalloc(&device->vk, pAllocator, sizeof(*fence),
                          VK_OBJECT_TYPE_FENCE);
   if (!fence)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult ret = sync_create(device, &fence->syncobj,
                              info->flags & VK_FENCE_CREATE_SIGNALED_BIT);
   if (ret != VK_SUCCESS) {
      vk_free2(&device->vk.alloc, pAllocator, fence);
      return ret;
   }

   *pFence = panvk_fence_to_handle(fence);
   return VK_SUCCESS;
}

void
panvk_DestroyFence(VkDevice _device, VkFence _fence,
                   const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_fence, fence, _fence);

   sync_destroy(device, &fence->syncobj);
   vk_object_free(&device->vk, pAllocator, fence);
}

VkResult
panvk_ImportFenceFdKHR(VkDevice _device, const VkImportFenceFdInfoKHR *info)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_fence, fence, info->fence);
   bool sync_fd = info->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
   bool temp = info->flags & VK_FENCE_IMPORT_TEMPORARY_BIT;

   return sync_import(device, &fence->syncobj, temp, sync_fd, info->fd);
}

VkResult
panvk_GetFenceFdKHR(VkDevice _device, const VkFenceGetFdInfoKHR *info, int *pFd)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_fence, fence, info->fence);
   bool sync_fd = info->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

   return sync_export(device, &fence->syncobj, sync_fd, pFd);
}

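/*
 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT makes the kernel first wait for a
 * fence to be attached to each syncobj, which Vulkan requires since fences
 * and semaphores may be waited on before the corresponding submission has
 * materialized their payload. ETIME from the ioctl maps to VK_TIMEOUT.
 */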
static VkResult
drm_syncobj_wait(struct panvk_device *device,
                 const uint32_t *handles, uint32_t count_handles,
                 int64_t timeout_nsec, bool wait_all)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   struct drm_syncobj_wait wait = {
      .handles = (uint64_t) (uintptr_t) handles,
      .count_handles = count_handles,
      .timeout_nsec = timeout_nsec,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
               (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0)
   };

   int ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
   if (ret) {
      if (errno == ETIME)
         return VK_TIMEOUT;

      assert(0);
      return VK_ERROR_DEVICE_LOST; /* TODO */
   }
   return VK_SUCCESS;
}

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * 1000000000 + current.tv_nsec;
}

/* DRM_IOCTL_SYNCOBJ_WAIT takes an absolute CLOCK_MONOTONIC timeout, so the
 * relative Vulkan timeout is converted here (and the kernel converts it right
 * back to a relative timeout - very smart UAPI). Clamp so the addition cannot
 * overflow INT64_MAX.
 */
static uint64_t
absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

VkResult
panvk_WaitForFences(VkDevice _device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    VkBool32 waitAll,
                    uint64_t timeout)
{
   VK_FROM_HANDLE(panvk_device, device, _device);

   if (panvk_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint32_t handles[fenceCount];
   for (unsigned i = 0; i < fenceCount; ++i) {
      VK_FROM_HANDLE(panvk_fence, fence, pFences[i]);

      if (fence->syncobj.temporary) {
         handles[i] = fence->syncobj.temporary;
      } else {
         handles[i] = fence->syncobj.permanent;
      }
   }

   return drm_syncobj_wait(device, handles, fenceCount, absolute_timeout(timeout), waitAll);
}

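/*
 * Resetting a fence drops any temporary payload and resets the permanent
 * syncobj, as the spec requires for temporarily imported external fences.
 */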
VkResult
panvk_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   int ret;

   uint32_t handles[fenceCount];
   for (unsigned i = 0; i < fenceCount; ++i) {
      VK_FROM_HANDLE(panvk_fence, fence, pFences[i]);

      sync_set_temporary(device, &fence->syncobj, 0);
      handles[i] = fence->syncobj.permanent;
   }

   struct drm_syncobj_array objs = {
      .handles = (uint64_t) (uintptr_t) handles,
      .count_handles = fenceCount,
   };

   ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_RESET, &objs);
   if (ret) {
      panvk_device_set_lost(device, "DRM_IOCTL_SYNCOBJ_RESET failure: %s",
                            strerror(errno));
   }

   return VK_SUCCESS;
}

VkResult
panvk_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   VK_FROM_HANDLE(panvk_device, device, _device);
   VK_FROM_HANDLE(panvk_fence, fence, _fence);
   uint32_t handle = fence->syncobj.temporary ? : fence->syncobj.permanent;
   VkResult result;

   result = drm_syncobj_wait(device, &handle, 1, 0, false);
   if (result == VK_TIMEOUT)
      result = VK_NOT_READY;
   return result;
}

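/*
 * Signal up to two syncobjs (preferring their temporary payloads) with a
 * single DRM_IOCTL_SYNCOBJ_SIGNAL call; returns the raw ioctl result.
 */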
int
panvk_signal_syncobjs(struct panvk_device *device,
                      struct panvk_syncobj *syncobj1,
                      struct panvk_syncobj *syncobj2)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   uint32_t handles[2], count = 0;

   if (syncobj1)
      handles[count++] = syncobj1->temporary ?: syncobj1->permanent;

   if (syncobj2)
      handles[count++] = syncobj2->temporary ?: syncobj2->permanent;

   if (!count)
      return 0;

   struct drm_syncobj_array objs = {
      .handles = (uintptr_t) handles,
      .count_handles = count
   };

   return drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &objs);
}

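/*
 * Export the permanent payload as an opaque syncobj fd; returns -1 on failure.
 */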
int
panvk_syncobj_to_fd(struct panvk_device *device, struct panvk_syncobj *sync)
{
   const struct panfrost_device *pdev = &device->physical_device->pdev;
   struct drm_syncobj_handle handle = { .handle = sync->permanent };
   int ret;

   ret = drmIoctl(pdev->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);

   return ret ? -1 : handle.fd;
}