/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

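/* Thin wrapper around the execbuffer2 ioctl.  On success, the offsets the
 * kernel actually placed each buffer at are written back into the
 * corresponding anv_bo structs.  On failure, we can't tell how far the
 * kernel got, so the device is simply marked lost.
 */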
VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      device->lost = true;
      return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                       "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++)
      execbuf_bos[k]->offset = objects[k].offset;

   return VK_SUCCESS;
}

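/* Submits a small batch synchronously: the batch is copied into a fresh BO
 * from the batch BO pool, flushed if the platform has no LLC, executed on
 * the render ring, and waited on before the BO is returned to the pool.
 */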
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

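/* vkQueueSubmit is implemented as one execbuf call per command buffer.
 * Wait semaphores are attached only to the first execbuf of each submit,
 * signal semaphores only to the last, and the fence only to the final
 * execbuf overall.  As an illustrative sketch (not actual code below), a
 * submit with wait semaphore W, command buffers A and B, signal semaphore
 * S, and fence F becomes roughly:
 *
 *    anv_cmd_buffer_execbuf(device, A, &W, 1, NULL, 0, VK_NULL_HANDLE);
 *    anv_cmd_buffer_execbuf(device, B, NULL, 0, &S, 1, F);
 */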
VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting.  Technically, we don't
    * need to do this.  However, if we have a client that's submitting piles
    * of garbage, we would rather break as early as possible to keep the
    * damage from a GPU hang contained.  If we don't check here, we'll either
    * be waiting for the kernel to kick us or we'll have to wait until the
    * client waits on a fence before we actually know whether or not we've
    * hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    *  1) When a block pool is resized, we create a new gem handle with a
    *     different size and, in the case of surface states, possibly a
    *     different center offset but we re-use the same anv_bo struct when
    *     we do so.  If this happens in the middle of setting up an execbuf,
    *     we could end up with our list of BOs out of sync with our list of
    *     gem handles.
    *
    *  2) The algorithm we use for building the list of unique buffers isn't
    *     thread-safe.  While the client is supposed to synchronize around
    *     QueueSubmit, this would be extremely difficult to debug if it ever
    *     came up in the wild due to a broken app.  It's better to play it
    *     safe and just lock around QueueSubmit.
    *
    *  3) The anv_cmd_buffer_execbuf function may perform relocations in
    *     userspace.  Because the surface state buffer is shared between
    *     batches, we can't afford to have that happen from multiple threads
    *     at the same time.  Even though the user is supposed to ensure this
    *     doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other operations that ever take the device lock, such
    * as block pool resizes, happen rarely, the lock is almost never
    * contended, so taking it isn't really an expensive operation in this
    * case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations, and any
       * future attempt to re-submit this job will need to know about this
       * and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something was wrong
       * during submission we must already be in a really bad situation
       * anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                         "vkQueueSubmit() failed");
      device->lost = true;
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

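/* anv currently exposes only a single queue, so waiting for that queue to
 * go idle is the same as waiting for the whole device.
 */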
VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

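/* A fence is backed by one of two implementations, chosen at create time:
 *
 *  - ANV_FENCE_TYPE_SYNCOBJ: a DRM syncobj, used when the kernel supports
 *    the syncobj wait ioctl (has_syncobj_wait).  Only this flavor supports
 *    import/export.
 *
 *  - ANV_FENCE_TYPE_BO: a dummy BO plus a software state machine
 *    (RESET -> SUBMITTED -> SIGNALED); signaling is detected by asking the
 *    kernel whether the BO is still busy.
 */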
VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                          &fence->permanent.bo.bo, 4096);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy.  Nothing to do */
      return;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
      return;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      return;
   }

   unreachable("Invalid fence type");
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE) {
         anv_fence_impl_cleanup(device, &fence->temporary);
         fence->temporary.type = ANV_FENCE_TYPE_NONE;
      }

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            device->lost = true;
            return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                             "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

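/* INT_TYPE_MAX(type) evaluates to the largest value representable in a
 * signed two's-complement integer the size of 'type'; for example,
 * INT_TYPE_MAX(int64_t) == (1ull << 63) - 1 == INT64_MAX.  It is used
 * below to clamp a computed timeout before storing it in
 * timespec::tv_sec, whose width differs between 32-bit and 64-bit systems.
 */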
#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

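/* Waits for a set of syncobj-backed fences by gathering the raw syncobj
 * handles into a temporary array and handing them to the kernel's syncobj
 * wait ioctl in a single call.  The relative Vulkan timeout is first
 * converted to the absolute CLOCK_MONOTONIC timeout the kernel expects.
 */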
static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t _timeout)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   int64_t abs_timeout_ns = 0;
   if (_timeout > 0) {
      uint64_t current_ns = gettime_ns();

      /* Add but saturate to INT64_MAX */
      if (current_ns + _timeout < current_ns)
         abs_timeout_ns = INT64_MAX;
      else if (current_ns + _timeout > INT64_MAX)
         abs_timeout_ns = INT64_MAX;
      else
         abs_timeout_ns = current_ns + _timeout;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts.  Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         device->lost = true;
         return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                          "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

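/* Legacy fence waiting for kernels without syncobj support.  A BO fence
 * has no kernel object we can sleep on before submission, so we poll: wait
 * on the BOs of submitted fences with the gem wait ioctl, and park on the
 * device's queue_submit condition variable whenever every remaining fence
 * is still unsubmitted.
 */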
static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t _timeout)
{
   int ret;

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0.  Unfortunately, this was
    * broken for a couple of kernel releases.  Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX.  This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   int64_t timeout = MIN2(_timeout, INT64_MAX);

   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state.  Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around.  Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much that
             * we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can return
             * early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* This is a fence we really care about.  Go ahead and wait on it
             * until we hit a timeout.
             */
            result = anv_device_wait(device, &impl->bo.bo, timeout);
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since the
          * last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec before;
            clock_gettime(CLOCK_MONOTONIC, &before);

            uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
            uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
                               (timeout / NSEC_PER_SEC);
            abs_nsec %= NSEC_PER_SEC;

            /* Avoid roll-over in tv_sec on 32-bit systems if the user
             * provided timeout is UINT64_MAX
             */
            struct timespec abstime;
            abstime.tv_nsec = abs_nsec;
            abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));

            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);

            struct timespec after;
            clock_gettime(CLOCK_MONOTONIC, &after);
            uint64_t time_elapsed =
               ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
               ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);

            if (time_elapsed >= timeout) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }

            timeout -= time_elapsed;
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   if (device->instance->physicalDevice.has_syncobj_wait) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, timeout);
   } else {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, timeout);
   }
}

void anv_GetPhysicalDeviceExternalFencePropertiesKHR(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
    VkExternalFencePropertiesKHR*               pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
      /* Sync files are a bit tricky.  Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

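/* Like fences, a semaphore holds a permanent and an optional temporary
 * implementation, and each implementation is one of several flavors:
 *
 *  - DUMMY: no external sharing requested; in-order execution on a single
 *    ring already provides the needed ordering.
 *  - BO: a 4096-byte BO relying on implicit synchronization at execbuf
 *    time.
 *  - SYNC_FILE: a sync file descriptor handed to or received from the
 *    kernel.
 *  - DRM_SYNCOBJ: a DRM synchronization object, used for opaque FD sharing
 *    when the kernel supports syncobjs.
 */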
VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfoKHR *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
   VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in order so long as you
       * stay on the same ring.  Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR);

      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
      semaphore->permanent.fd = -1;
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy.  Nothing to do */
      return;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      return;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      return;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      return;
   }

   unreachable("Invalid semaphore type");
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphorePropertiesKHR(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
    VkExternalSemaphorePropertiesKHR*           pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

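/* Importing an opaque FD consumes the file descriptor: it is converted
 * into a syncobj (or a BO on kernels without syncobj support) and then
 * closed.  Importing a SYNC_FD instead stores the descriptor itself so a
 * later queue submission can hand it to the kernel.
 */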
VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
      new_impl = (struct anv_semaphore_impl) {
         .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
         .fd = fd,
      };
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       *  1) The user is trying to export without submitting something that
       *     signals the semaphore.  If this is the case, it's their bug so
       *     what we return here doesn't matter.
       *
       *  2) The kernel didn't give us a file descriptor.  The most likely
       *     reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent payload
    *    will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}