/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "msm_kgsl.h"

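/* There is no DRM syncobj equivalent on kgsl: fences and semaphores are
 * both backed by a per-context timestamp, which counts as signaled once
 * the GPU has retired that timestamp on its context.
 */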
struct tu_syncobj {
   struct vk_object_base base;
   uint32_t timestamp;
   bool timestamp_valid;
};

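/* Wrapper that retries the ioctl when it is interrupted by a signal
 * (EINTR) or the kernel asks for a retry (EAGAIN).
 */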
static int
safe_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

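/* A Vulkan queue maps onto a kgsl "draw context"; the returned context id
 * is what submissions and timestamp waits are issued against.
 */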
int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id)
{
   struct kgsl_drawctxt_create req = {
      .flags = KGSL_CONTEXT_SAVE_GMEM |
               KGSL_CONTEXT_NO_GMEM_ALLOC |
               KGSL_CONTEXT_PREAMBLE,
   };

   int ret = safe_ioctl(dev->physical_device->local_fd,
                        IOCTL_KGSL_DRAWCTXT_CREATE, &req);
   if (ret)
      return ret;

   *queue_id = req.drawctxt_id;

   return 0;
}

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
   struct kgsl_drawctxt_destroy req = {
      .drawctxt_id = queue_id,
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &req);
}

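/* GPUMEM_ALLOC_ID both allocates the memory and maps it into the GPU
 * address space: the returned id doubles as our GEM handle and the
 * returned gpuaddr is the fixed iova of the buffer.
 */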
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size, bool dump)
{
   struct kgsl_gpumem_alloc_id req = {
      .size = size,
   };
   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUMEM_ALLOC_ID, &req);
   if (ret) {
      return vk_errorf(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "GPUMEM_ALLOC_ID failed (%s)", strerror(errno));
   }

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = req.mmapsize,
      .iova = req.gpuaddr,
   };

   return VK_SUCCESS;
}

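/* Importing a dma-buf is a two-step process on kgsl: GPUOBJ_IMPORT turns
 * the fd into a GPU object, then GPUOBJ_INFO is queried for the size and
 * iova the kernel assigned to it.
 */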
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   struct kgsl_gpuobj_import_dma_buf import_dmabuf = {
      .fd = fd,
   };
   struct kgsl_gpuobj_import req = {
      .priv = (uintptr_t)&import_dmabuf,
      .priv_len = sizeof(import_dmabuf),
      .flags = 0,
      .type = KGSL_USER_MEM_TYPE_DMABUF,
   };
   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_IMPORT, &req);
   if (ret)
      return vk_errorf(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to import dma-buf (%s)\n", strerror(errno));

   struct kgsl_gpuobj_info info_req = {
      .id = req.id,
   };

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_INFO, &info_req);
   if (ret)
      return vk_errorf(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to get dma-buf info (%s)\n", strerror(errno));

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = info_req.size,
      .iova = info_req.gpuaddr,
   };

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   tu_stub();

   return -1;
}

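/* kgsl selects which buffer to map via the mmap offset: allocations made
 * with GPUMEM_ALLOC_ID are addressed on the device fd by their id shifted
 * up by the page shift (id << 12).
 */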
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = (uint64_t) bo->gem_handle << 12;
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;

   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   struct kgsl_gpumem_free_id req = {
      .id = bo->gem_handle
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_GPUMEM_FREE_ID, &req);
}

static int
get_kgsl_prop(int fd, unsigned int type, void *value, size_t size)
{
   struct kgsl_device_getproperty getprop = {
      .type = type,
      .value = value,
      .sizebytes = size,
   };

   return safe_ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &getprop);
}

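/* kgsl exposes a single, statically named device node, so enumeration
 * amounts to opening /dev/kgsl-3d0 and querying its properties.
 */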
VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   static const char path[] = "/dev/kgsl-3d0";
   int fd;

   struct tu_physical_device *device = &instance->physical_devices[0];

   if (instance->enabled_extensions.KHR_display)
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "KHR_display is not supported");

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      instance->physical_device_count = 0;
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   struct kgsl_devinfo info;
   if (get_kgsl_prop(fd, KGSL_PROP_DEVICE_INFO, &info, sizeof(info)))
      goto fail;

   uint64_t gmem_iova;
   if (get_kgsl_prop(fd, KGSL_PROP_UCHE_GMEM_VADDR, &gmem_iova, sizeof(gmem_iova)))
      goto fail;

   /* kgsl version check? */

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      mesa_logi("Found compatible device '%s'.", path);

   vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
   device->instance = instance;
   device->master_fd = -1;
   device->local_fd = fd;

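   /* chip_id packs the GPU revision a byte per digit, so e.g. an a630
    * reports 0x06030000: (6 * 100) + (3 * 10) + 0 = gpu_id 630.
    */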
   device->gpu_id =
      ((info.chip_id >> 24) & 0xff) * 100 +
      ((info.chip_id >> 16) & 0xff) * 10 +
      ((info.chip_id >> 8) & 0xff);
   device->gmem_size = info.gmem_sizebytes;
   device->gmem_base = gmem_iova;

   if (tu_physical_device_init(device, instance) != VK_SUCCESS)
      goto fail;

   instance->physical_device_count = 1;

   return VK_SUCCESS;

fail:
   close(fd);
   return VK_ERROR_INITIALIZATION_FAILED;
}

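/* Ask the kernel to back a timestamp with a sync file: the returned fd
 * signals once the context retires the given timestamp.
 */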
static int
timestamp_to_fd(struct tu_queue *queue, uint32_t timestamp)
{
   int fd;
   struct kgsl_timestamp_event event = {
      .type = KGSL_TIMESTAMP_EVENT_FENCE,
      .context_id = queue->msm_queue_id,
      .timestamp = timestamp,
      .priv = &fd,
      .len = sizeof(fd),
   };

   int ret = safe_ioctl(queue->device->fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
   if (ret)
      return -1;

   return fd;
}

/* return true if timestamp a is greater (more recent) than b
 * this relies on timestamps never having a difference > (1<<31)
 */
static inline bool
timestamp_cmp(uint32_t a, uint32_t b)
{
   return (int32_t) (a - b) >= 0;
}
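
/* A worked example of the wraparound-safe comparison above: a = 2 and
 * b = 0xfffffffe gives a - b = 4 (mod 2^32), a small positive value, so
 * a is correctly treated as the more recent timestamp even though it is
 * numerically smaller.
 */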

static uint32_t
max_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? a : b;
}

static uint32_t
min_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? b : a;
}

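/* Collapse a set of timestamp-based syncobjs into a single timestamp to
 * wait on: the latest one when waiting for all, the earliest one when any
 * will do. With reset, the merged syncobjs are consumed (marked
 * unsignaled) in the process.
 */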
static struct tu_syncobj
sync_merge(const VkSemaphore *syncobjs, uint32_t count, bool wait_all, bool reset)
{
   struct tu_syncobj ret;

   ret.timestamp_valid = false;

   for (uint32_t i = 0; i < count; ++i) {
      TU_FROM_HANDLE(tu_syncobj, sync, syncobjs[i]);

      /* TODO: this means the fence is unsignaled and will never become signaled */
      if (!sync->timestamp_valid)
         continue;

      if (!ret.timestamp_valid)
         ret.timestamp = sync->timestamp;
      else if (wait_all)
         ret.timestamp = max_ts(ret.timestamp, sync->timestamp);
      else
         ret.timestamp = min_ts(ret.timestamp, sync->timestamp);

      ret.timestamp_valid = true;
      if (reset)
         sync->timestamp_valid = false;
   }
   return ret;
}

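/* Submission works in two passes: first size a kgsl_command_object array
 * for the largest submit, then for each submit flatten every command
 * buffer's IBs into that array, fold the wait semaphores into a single
 * timestamp syncpoint, and fire one GPU_COMMAND ioctl. The timestamp the
 * kernel hands back is what signals the semaphores and fence.
 */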
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   TU_FROM_HANDLE(tu_syncobj, fence, _fence);
   VkResult result = VK_SUCCESS;

   uint32_t max_entry_count = 0;
   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      max_entry_count = MAX2(max_entry_count, entry_count);
   }

   struct kgsl_command_object *cmds =
      vk_alloc(&queue->device->vk.alloc,
               sizeof(cmds[0]) * max_entry_count, 8,
               VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (cmds == NULL)
      return vk_error(queue->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      uint32_t entry_idx = 0;

      for (uint32_t j = 0; j < submit->commandBufferCount; j++) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned k = 0; k < cs->entry_count; k++) {
            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = cs->entries[k].offset,
               .gpuaddr = cs->entries[k].bo->iova,
               .size = cs->entries[k].size,
               .flags = KGSL_CMDLIST_IB,
               .id = cs->entries[k].bo->gem_handle,
            };
         }
      }

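      /* Fold the wait semaphores into a single timestamp syncpoint: the
       * command batch will not start until the merged timestamp retires.
       */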
      struct tu_syncobj s = sync_merge(submit->pWaitSemaphores,
                                       submit->waitSemaphoreCount,
                                       true, true);

      struct kgsl_cmd_syncpoint_timestamp ts = {
         .context_id = queue->msm_queue_id,
         .timestamp = s.timestamp,
      };
      struct kgsl_command_syncpoint sync = {
         .type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP,
         .size = sizeof(ts),
         .priv = (uintptr_t) &ts,
      };

      struct kgsl_gpu_command req = {
         .flags = KGSL_CMDBATCH_SUBMIT_IB_LIST,
         .context_id = queue->msm_queue_id,
         .cmdlist = (uint64_t) (uintptr_t) cmds,
         .numcmds = entry_idx,
         .cmdsize = sizeof(struct kgsl_command_object),
         .synclist = (uintptr_t) &sync,
         .syncsize = sizeof(struct kgsl_command_syncpoint),
         .numsyncs = s.timestamp_valid ? 1 : 0,
      };

      int ret = safe_ioctl(queue->device->physical_device->local_fd,
                           IOCTL_KGSL_GPU_COMMAND, &req);
      if (ret) {
         result = tu_device_set_lost(queue->device,
                                     "submit failed: %s\n", strerror(errno));
         goto fail;
      }

      for (uint32_t j = 0; j < submit->signalSemaphoreCount; j++) {
         TU_FROM_HANDLE(tu_syncobj, sem, submit->pSignalSemaphores[j]);
         sem->timestamp = req.timestamp;
         sem->timestamp_valid = true;
      }

      /* no need to merge fences as queue execution is serialized */
      if (i == submitCount - 1) {
         int fd = timestamp_to_fd(queue, req.timestamp);
         if (fd < 0) {
            result = tu_device_set_lost(queue->device,
                                        "Failed to create sync file for timestamp: %s\n",
                                        strerror(errno));
            goto fail;
         }

         if (queue->fence >= 0)
            close(queue->fence);
         queue->fence = fd;

         if (fence) {
            fence->timestamp = req.timestamp;
            fence->timestamp_valid = true;
         }
      }
   }
fail:
   vk_free(&queue->device->vk.alloc, cmds);

   return result;
}

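/* Fences and semaphores share the same tu_syncobj storage; only the
 * Vulkan object type differs. Creating one in the signaled state is not
 * implemented yet, hence the tu_finishme below.
 */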
static VkResult
sync_create(VkDevice _device,
            bool signaled,
            bool fence,
            const VkAllocationCallbacks *pAllocator,
            void **p_sync)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_syncobj *sync =
      vk_object_alloc(&device->vk, pAllocator, sizeof(*sync),
                      fence ? VK_OBJECT_TYPE_FENCE : VK_OBJECT_TYPE_SEMAPHORE);
   if (!sync)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (signaled)
      tu_finishme("CREATE FENCE SIGNALED");

   sync->timestamp_valid = false;
   *p_sync = sync;

   return VK_SUCCESS;
}

VkResult
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   tu_finishme("ImportSemaphoreFdKHR");
   return VK_SUCCESS;
}

VkResult
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   tu_finishme("GetSemaphoreFdKHR");
   return VK_SUCCESS;
}

VkResult
tu_CreateSemaphore(VkDevice device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   return sync_create(device, false, false, pAllocator, (void**) pSemaphore);
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, semaphore);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

VkResult
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}

VkResult
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}

VkResult
tu_CreateFence(VkDevice device,
               const VkFenceCreateInfo *info,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   return sync_create(device, info->flags & VK_FENCE_CREATE_SIGNALED_BIT, true,
                      pAllocator, (void**) pFence);
}

void
tu_DestroyFence(VkDevice _device, VkFence fence, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, fence);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

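/* Fences are waited on by merging them into a single timestamp and asking
 * the kernel to wait for it on the first queue's context. Note that the
 * Vulkan timeout is in nanoseconds while kgsl takes milliseconds, hence
 * the division below.
 */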
VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t count,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_syncobj s = sync_merge((const VkSemaphore*) pFences, count, waitAll, false);

   if (!s.timestamp_valid)
      return VK_SUCCESS;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
      .context_id = device->queues[0]->msm_queue_id,
      .timestamp = s.timestamp,
      .timeout = timeout / 1000000,
   });
   if (ret) {
      assert(errno == ETIME);
      return VK_TIMEOUT;
   }

   return VK_SUCCESS;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t count, const VkFence *pFences)
{
   for (uint32_t i = 0; i < count; i++) {
      TU_FROM_HANDLE(tu_syncobj, sync, pFences[i]);
      sync->timestamp_valid = false;
   }
   return VK_SUCCESS;
}

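/* A fence's status can be polled with a zero-timeout wait: ETIME means
 * the timestamp has not retired yet.
 */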
VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, _fence);

   if (!sync->timestamp_valid)
      return VK_NOT_READY;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
      .context_id = device->queues[0]->msm_queue_id,
      .timestamp = sync->timestamp,
      .timeout = 0,
   });
   if (ret) {
      assert(errno == ETIME);
      return VK_NOT_READY;
   }

   return VK_SUCCESS;
}

int
tu_signal_fences(struct tu_device *device, struct tu_syncobj *fence1, struct tu_syncobj *fence2)
{
   tu_finishme("tu_signal_fences");
   return 0;
}

int
tu_syncobj_to_fd(struct tu_device *device, struct tu_syncobj *sync)
{
   tu_finishme("tu_syncobj_to_fd");
   return -1;
}

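/* The Android release fence is produced the same way as the queue fence:
 * merge the wait semaphores into one timestamp and export it as a sync
 * file fd.
 */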
#ifdef ANDROID
VkResult
tu_QueueSignalReleaseImageANDROID(VkQueue _queue,
                                  uint32_t waitSemaphoreCount,
                                  const VkSemaphore *pWaitSemaphores,
                                  VkImage image,
                                  int *pNativeFenceFd)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   if (!pNativeFenceFd)
      return VK_SUCCESS;

   struct tu_syncobj s = sync_merge(pWaitSemaphores, waitSemaphoreCount, true, true);

   if (!s.timestamp_valid) {
      *pNativeFenceFd = -1;
      return VK_SUCCESS;
   }

   *pNativeFenceFd = timestamp_to_fd(queue, s.timestamp);

   return VK_SUCCESS;
}
#endif