/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_device.h"

#include "util/disk_cache.h"
#include "util/hex.h"
#include "venus-protocol/vn_protocol_driver_device.h"

#include "vn_android.h"
#include "vn_instance.h"
#include "vn_physical_device.h"
#include "vn_queue.h"

/* device commands */

static void
vn_queue_fini(struct vn_queue *queue)
{
   VkDevice dev_handle = vk_device_to_handle(queue->base.base.base.device);

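   /* destroy the driver-owned per-queue objects before the base fini */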
   if (queue->wait_fence != VK_NULL_HANDLE) {
      vn_DestroyFence(dev_handle, queue->wait_fence, NULL);
   }
   if (queue->sparse_semaphore != VK_NULL_HANDLE) {
      vn_DestroySemaphore(dev_handle, queue->sparse_semaphore, NULL);
   }
   vn_cached_storage_fini(&queue->storage);
   vn_queue_base_fini(&queue->base);
}

static VkResult
vn_queue_init(struct vn_device *dev,
              struct vn_queue *queue,
              const VkDeviceQueueCreateInfo *queue_info,
              uint32_t queue_index)
{
   VkResult result =
      vn_queue_base_init(&queue->base, &dev->base, queue_info, queue_index);
   if (result != VK_SUCCESS)
      return result;

   vn_cached_storage_init(&queue->storage, &dev->base.base.alloc);

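   /* Each VkQueue is bound to its own renderer timeline, identified by a
    * ring index acquired from the instance. The index is released in
    * vn_DestroyDevice after the renderer-side device is destroyed.
    */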
   const int ring_idx = vn_instance_acquire_ring_idx(dev->instance);
   if (ring_idx < 0) {
      vn_log(dev->instance, "failed binding VkQueue to renderer timeline");
      return VK_ERROR_INITIALIZATION_FAILED;
   }
   queue->ring_idx = (uint32_t)ring_idx;

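   /* tell the renderer which ring this queue is bound to */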
   const VkDeviceQueueTimelineInfoMESA timeline_info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA,
      .ringIdx = queue->ring_idx,
   };
   const VkDeviceQueueInfo2 device_queue_info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
      .pNext = &timeline_info,
      .flags = queue_info->flags,
      .queueFamilyIndex = queue_info->queueFamilyIndex,
      .queueIndex = queue_index,
   };

   VkQueue queue_handle = vn_queue_to_handle(queue);
   vn_async_vkGetDeviceQueue2(dev->primary_ring, vn_device_to_handle(dev),
                              &device_queue_info, &queue_handle);

   return VK_SUCCESS;
}

static VkResult
vn_device_init_queues(struct vn_device *dev,
                      const VkDeviceCreateInfo *create_info)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   uint32_t count = 0;
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++)
      count += create_info->pQueueCreateInfos[i].queueCount;

   struct vn_queue *queues =
      vk_zalloc(alloc, sizeof(*queues) * count, VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!queues)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

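   /* second pass: initialize each queue, unwinding on failure */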
   count = 0;
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
      VkResult result;

      const VkDeviceQueueCreateInfo *queue_info =
         &create_info->pQueueCreateInfos[i];
      for (uint32_t j = 0; j < queue_info->queueCount; j++) {
         result = vn_queue_init(dev, &queues[count], queue_info, j);
         if (result != VK_SUCCESS) {
            for (uint32_t k = 0; k < count; k++)
               vn_queue_fini(&queues[k]);
            vk_free(alloc, queues);

            return result;
         }

         count++;
      }
   }

   dev->queues = queues;
   dev->queue_count = count;

   return VK_SUCCESS;
}

static bool
vn_device_queue_family_init(struct vn_device *dev,
                            const VkDeviceCreateInfo *create_info)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   uint32_t *queue_families = NULL;
   uint32_t count = 0;
   queue_families = vk_zalloc(
      alloc, sizeof(*queue_families) * create_info->queueCreateInfoCount,
      VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!queue_families)
      return false;

   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
      const uint32_t index =
         create_info->pQueueCreateInfos[i].queueFamilyIndex;
      bool new_index = true;

      for (uint32_t j = 0; j < count; j++) {
         if (queue_families[j] == index) {
            new_index = false;
            break;
         }
      }
      if (new_index)
         queue_families[count++] = index;
   }

   dev->queue_families = queue_families;
   dev->queue_family_count = count;

   return true;
}

static inline void
vn_device_queue_family_fini(struct vn_device *dev)
{
   vk_free(&dev->base.base.alloc, dev->queue_families);
}

static VkResult
vn_device_memory_report_init(struct vn_device *dev,
                             const VkDeviceCreateInfo *create_info)
{
   const struct vk_features *app_feats = &dev->base.base.enabled_features;
   if (!app_feats->deviceMemoryReport)
      return VK_SUCCESS;

   uint32_t count = 0;
   vk_foreach_struct_const(pnext, create_info->pNext) {
      if (pnext->sType ==
          VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT)
         count++;
   }

   struct vn_device_memory_report *mem_reports = NULL;
   if (count) {
      mem_reports =
         vk_alloc(&dev->base.base.alloc, sizeof(*mem_reports) * count,
                  VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!mem_reports)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

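   /* second pass: record each callback with its user data */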
   count = 0;
   vk_foreach_struct_const(pnext, create_info->pNext) {
      if (pnext->sType ==
          VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
         const struct VkDeviceDeviceMemoryReportCreateInfoEXT *report =
            (void *)pnext;
         mem_reports[count].callback = report->pfnUserCallback;
         mem_reports[count].data = report->pUserData;
         count++;
      }
   }

   dev->memory_report_count = count;
   dev->memory_reports = mem_reports;

   return VK_SUCCESS;
}

static inline void
vn_device_memory_report_fini(struct vn_device *dev)
{
   vk_free(&dev->base.base.alloc, dev->memory_reports);
}

static bool
find_extension_names(const char *const *exts,
                     uint32_t ext_count,
                     const char *name)
{
   for (uint32_t i = 0; i < ext_count; i++) {
      if (!strcmp(exts[i], name))
         return true;
   }
   return false;
}

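/* Merge the app's extension list with driver-internal changes:
 * merged = (exts - block_exts) + (extra_exts - exts). Only the string
 * pointers are copied; the caller owns the returned array and frees it with
 * the same allocator.
 */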
static bool
merge_extension_names(const char *const *exts,
                      uint32_t ext_count,
                      const char *const *extra_exts,
                      uint32_t extra_count,
                      const char *const *block_exts,
                      uint32_t block_count,
                      const VkAllocationCallbacks *alloc,
                      const char *const **out_exts,
                      uint32_t *out_count)
{
   const char **merged =
      vk_alloc(alloc, sizeof(*merged) * (ext_count + extra_count),
               VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!merged)
      return false;

   uint32_t count = 0;
   for (uint32_t i = 0; i < ext_count; i++) {
      if (!find_extension_names(block_exts, block_count, exts[i]))
         merged[count++] = exts[i];
   }
   for (uint32_t i = 0; i < extra_count; i++) {
      if (!find_extension_names(exts, ext_count, extra_exts[i]))
         merged[count++] = extra_exts[i];
   }

   *out_exts = merged;
   *out_count = count;
   return true;
}

static const VkDeviceCreateInfo *
vn_device_fix_create_info(const struct vn_device *dev,
                          const VkDeviceCreateInfo *dev_info,
                          const VkAllocationCallbacks *alloc,
                          VkDeviceCreateInfo *local_info)
{
   const struct vn_physical_device *physical_dev = dev->physical_device;
   const struct vk_device_extension_table *app_exts =
      &dev->base.base.enabled_extensions;
   /* extra_exts and block_exts must not overlap */
   const char *extra_exts[16];
   const char *block_exts[16];
   uint32_t extra_count = 0;
   uint32_t block_count = 0;

   /* fix for WSI (treat AHB as a WSI extension for simplicity) */
   const bool has_wsi =
      app_exts->KHR_swapchain || app_exts->ANDROID_native_buffer ||
      app_exts->ANDROID_external_memory_android_hardware_buffer;
   if (has_wsi) {
      if (!app_exts->EXT_image_drm_format_modifier) {
         extra_exts[extra_count++] =
            VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME;

         if (physical_dev->renderer_version < VK_API_VERSION_1_2 &&
             !app_exts->KHR_image_format_list) {
            extra_exts[extra_count++] =
               VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME;
         }
      }

      if (!app_exts->EXT_queue_family_foreign) {
         extra_exts[extra_count++] =
            VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME;
      }

      if (app_exts->KHR_swapchain) {
         /* see vn_physical_device_get_native_extensions */
         block_exts[block_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
      }

      if (app_exts->ANDROID_native_buffer) {
         /* see vn_QueueSignalReleaseImageANDROID */
         if (!app_exts->KHR_external_fence_fd) {
            assert(physical_dev->renderer_sync_fd.fence_exportable);
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
         }

         block_exts[block_count++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;
      }

      if (app_exts->ANDROID_external_memory_android_hardware_buffer) {
         block_exts[block_count++] =
            VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME;
      }
   }

   if (app_exts->KHR_external_memory_fd ||
       app_exts->EXT_external_memory_dma_buf || has_wsi) {
      if (physical_dev->external_memory.renderer_handle_type ==
          VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) {
         if (!app_exts->EXT_external_memory_dma_buf) {
            extra_exts[extra_count++] =
               VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME;
         }
         if (!app_exts->KHR_external_memory_fd) {
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
         }
      }
   }

   /* see vn_queue_submission_count_batch_semaphores */
   if (!app_exts->KHR_external_semaphore_fd && has_wsi) {
      assert(physical_dev->renderer_sync_fd.semaphore_importable);
      extra_exts[extra_count++] = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
   }

   /* see vn_cmd_set_external_acquire_unmodified */
   if (VN_PRESENT_SRC_INTERNAL_LAYOUT != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR &&
       physical_dev->renderer_extensions
          .EXT_external_memory_acquire_unmodified &&
       !app_exts->EXT_external_memory_acquire_unmodified && has_wsi) {
      extra_exts[extra_count++] =
         VK_EXT_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXTENSION_NAME;
   }

   if (app_exts->EXT_device_memory_report) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME;
   }

   if (app_exts->EXT_physical_device_drm) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME;
   }

   if (app_exts->EXT_tooling_info) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_TOOLING_INFO_EXTENSION_NAME;
   }

   if (app_exts->EXT_pci_bus_info) {
      /* always filter for simplicity */
      block_exts[block_count++] = VK_EXT_PCI_BUS_INFO_EXTENSION_NAME;
   }

   assert(extra_count <= ARRAY_SIZE(extra_exts));
   assert(block_count <= ARRAY_SIZE(block_exts));

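   /* no fixup needed: nothing to add, and nothing enabled that needs to be
    * blocked
    */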
   if (!extra_count && (!block_count || !dev_info->enabledExtensionCount))
      return dev_info;

   *local_info = *dev_info;
   if (!merge_extension_names(dev_info->ppEnabledExtensionNames,
                              dev_info->enabledExtensionCount, extra_exts,
                              extra_count, block_exts, block_count, alloc,
                              &local_info->ppEnabledExtensionNames,
                              &local_info->enabledExtensionCount))
      return NULL;

   return local_info;
}

static inline VkResult
vn_device_feedback_pool_init(struct vn_device *dev)
{
   /* The feedback pool suballocates slots of 8 bytes each by default. An
    * initial pool size of 4096 bytes corresponds to a total of 512 fences,
    * semaphores and events, which covers the common scenarios well. The pool
    * can grow on demand anyway.
    */
   static const uint32_t pool_size = 4096;
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   if (VN_PERF(NO_EVENT_FEEDBACK) && VN_PERF(NO_FENCE_FEEDBACK) &&
       VN_PERF(NO_SEMAPHORE_FEEDBACK))
      return VK_SUCCESS;

   return vn_feedback_pool_init(dev, &dev->feedback_pool, pool_size, alloc);
}

static inline void
vn_device_feedback_pool_fini(struct vn_device *dev)
{
   if (VN_PERF(NO_EVENT_FEEDBACK) && VN_PERF(NO_FENCE_FEEDBACK) &&
       VN_PERF(NO_SEMAPHORE_FEEDBACK))
      return;

   vn_feedback_pool_fini(&dev->feedback_pool);
}

static void
vn_device_update_shader_cache_id(struct vn_device *dev)
{
   /* venus uses the host-side shader cache. This is a workaround to generate
    * shader cache files whose headers carry a unique cache id that changes
    * with the host driver identifiers, which allows fossilize replay to
    * detect when the host-side shader cache is no longer up to date. The
    * shader cache is destroyed after creating the necessary files and is not
    * otherwise used by venus.
    */
#if !DETECT_OS_ANDROID && defined(ENABLE_SHADER_CACHE)
   const uint8_t *device_uuid =
      dev->physical_device->base.base.properties.pipelineCacheUUID;

   char uuid[VK_UUID_SIZE * 2 + 1];
   mesa_bytes_to_hex(uuid, device_uuid, VK_UUID_SIZE);

   struct disk_cache *cache = disk_cache_create("venus", uuid, 0);
   if (!cache)
      return;

   /* The entry header is what contains the cache id / timestamp, so we need
    * to create a fake entry.
    */
   uint8_t key[20];
   char data[] = "Fake Shader";

   disk_cache_compute_key(cache, data, sizeof(data), key);
   disk_cache_put(cache, key, data, sizeof(data), NULL);

   disk_cache_destroy(cache);
#endif
}

static VkResult
vn_device_init(struct vn_device *dev,
               struct vn_physical_device *physical_dev,
               const VkDeviceCreateInfo *create_info,
               const VkAllocationCallbacks *alloc)
{
   struct vn_instance *instance = physical_dev->instance;
   VkPhysicalDevice physical_dev_handle =
      vn_physical_device_to_handle(physical_dev);
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceCreateInfo local_create_info;
   VkResult result;

   dev->instance = instance;
   dev->physical_device = physical_dev;
   dev->device_mask = 1;
   dev->renderer = instance->renderer;
   dev->primary_ring = instance->ring.ring;

   create_info =
      vn_device_fix_create_info(dev, create_info, alloc, &local_create_info);
   if (!create_info)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

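   /* set one device_mask bit per physical device in the group */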
   const VkDeviceGroupDeviceCreateInfo *group = vk_find_struct_const(
      create_info->pNext, DEVICE_GROUP_DEVICE_CREATE_INFO);
   if (group && group->physicalDeviceCount)
      dev->device_mask = (1 << group->physicalDeviceCount) - 1;

   result = vn_call_vkCreateDevice(dev->primary_ring, physical_dev_handle,
                                   create_info, NULL, &dev_handle);

   /* free the fixed-up extension list since it is no longer needed below */
   if (create_info == &local_create_info)
      vk_free(alloc, (void *)create_info->ppEnabledExtensionNames);

   if (result != VK_SUCCESS)
      return result;

   result = vn_device_memory_report_init(dev, create_info);
   if (result != VK_SUCCESS)
      goto out_destroy_device;

   if (!vn_device_queue_family_init(dev, create_info)) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto out_memory_report_fini;
   }

   result = vn_device_feedback_pool_init(dev);
   if (result != VK_SUCCESS)
      goto out_queue_family_fini;

   result = vn_feedback_cmd_pools_init(dev);
   if (result != VK_SUCCESS)
      goto out_feedback_pool_fini;

   result = vn_device_init_queues(dev, create_info);
   if (result != VK_SUCCESS)
      goto out_feedback_cmd_pools_fini;

   vn_buffer_reqs_cache_init(dev);
   vn_image_reqs_cache_init(dev);

   /* This is a workaround to allow fossilize replay to detect if the
    * host-side shader cache is no longer up to date.
    */
   vn_device_update_shader_cache_id(dev);

   return VK_SUCCESS;

out_feedback_cmd_pools_fini:
   vn_feedback_cmd_pools_fini(dev);

out_feedback_pool_fini:
   vn_device_feedback_pool_fini(dev);

out_queue_family_fini:
   vn_device_queue_family_fini(dev);

out_memory_report_fini:
   vn_device_memory_report_fini(dev);

out_destroy_device:
   vn_call_vkDestroyDevice(dev->primary_ring, dev_handle, NULL);

   return result;
}

VkResult
vn_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   VN_TRACE_FUNC();
   struct vn_physical_device *physical_dev =
      vn_physical_device_from_handle(physicalDevice);
   struct vn_instance *instance = physical_dev->instance;
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &instance->base.base.alloc;
   struct vn_device *dev;
   VkResult result;

   dev = vk_zalloc(alloc, sizeof(*dev), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!dev)
      return vn_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);

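   /* populate the dispatch table from the venus entrypoints first, then let
    * the common wsi entrypoints fill only the slots that are still unset
    */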
   struct vk_device_dispatch_table dispatch_table;
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                             &vn_device_entrypoints, true);
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                             &wsi_device_entrypoints, false);
   result = vn_device_base_init(&dev->base, &physical_dev->base,
                                &dispatch_table, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, dev);
      return vn_error(instance, result);
   }

   result = vn_device_init(dev, physical_dev, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vn_device_base_fini(&dev->base);
      vk_free(alloc, dev);
      return vn_error(instance, result);
   }

   if (VN_DEBUG(LOG_CTX_INFO)) {
      vn_log(instance, "%s", physical_dev->base.base.properties.deviceName);
      vn_log(instance, "%s", physical_dev->base.base.properties.driverInfo);
   }

   vn_tls_set_async_pipeline_create();

   *pDevice = vn_device_to_handle(dev);

   return VK_SUCCESS;
}

void
vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   if (!dev)
      return;

   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   vn_image_reqs_cache_fini(dev);
   vn_buffer_reqs_cache_fini(dev);

   for (uint32_t i = 0; i < dev->queue_count; i++)
      vn_queue_fini(&dev->queues[i]);

   vn_feedback_cmd_pools_fini(dev);

   vn_device_feedback_pool_fini(dev);

   vn_device_queue_family_fini(dev);

   vn_device_memory_report_fini(dev);

   vn_async_vkDestroyDevice(dev->primary_ring, device, NULL);

   /* We must emit vn_async_vkDestroyDevice before releasing the bound
    * ring_idx values. Otherwise, another thread might reuse a ring_idx while
    * it is still bound to a queue in the renderer.
    */
   for (uint32_t i = 0; i < dev->queue_count; i++) {
      vn_instance_release_ring_idx(dev->instance, dev->queues[i].ring_idx);
   }

   vk_free(alloc, dev->queues);

   vn_device_base_fini(&dev->base);
   vk_free(alloc, dev);
}

PFN_vkVoidFunction
vn_GetDeviceProcAddr(VkDevice device, const char *pName)
{
   struct vn_device *dev = vn_device_from_handle(device);
   return vk_device_get_proc_addr(&dev->base.base, pName);
}

void
vn_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO get and cache the values in vkCreateDevice */
   vn_call_vkGetDeviceGroupPeerMemoryFeatures(
      dev->primary_ring, device, heapIndex, localDeviceIndex,
      remoteDeviceIndex, pPeerMemoryFeatures);
}

VkResult
vn_GetCalibratedTimestampsEXT(
   VkDevice device,
   uint32_t timestampCount,
   const VkCalibratedTimestampInfoEXT *pTimestampInfos,
   uint64_t *pTimestamps,
   uint64_t *pMaxDeviation)
{
   struct vn_device *dev = vn_device_from_handle(device);
   uint64_t begin, end, max_clock_period = 0;
   VkResult ret;
   int domain;

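   /* sample the host clock before and after the queries so that the elapsed
    * time can bound the reported max deviation
    */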
#ifdef CLOCK_MONOTONIC_RAW
   begin = vk_clock_gettime(CLOCK_MONOTONIC_RAW);
#else
   begin = vk_clock_gettime(CLOCK_MONOTONIC);
#endif

   for (domain = 0; domain < timestampCount; domain++) {
      switch (pTimestampInfos[domain].timeDomain) {
      case VK_TIME_DOMAIN_DEVICE_EXT: {
         uint64_t device_max_deviation = 0;

         ret = vn_call_vkGetCalibratedTimestampsEXT(
            dev->primary_ring, device, 1, &pTimestampInfos[domain],
            &pTimestamps[domain], &device_max_deviation);

         if (ret != VK_SUCCESS)
            return vn_error(dev->instance, ret);

         max_clock_period = MAX2(max_clock_period, device_max_deviation);
         break;
      }
      case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
         pTimestamps[domain] = vk_clock_gettime(CLOCK_MONOTONIC);
         max_clock_period = MAX2(max_clock_period, 1);
         break;
#ifdef CLOCK_MONOTONIC_RAW
      case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
         pTimestamps[domain] = begin;
         break;
#endif
      default:
         pTimestamps[domain] = 0;
         break;
      }
   }

#ifdef CLOCK_MONOTONIC_RAW
   end = vk_clock_gettime(CLOCK_MONOTONIC_RAW);
#else
   end = vk_clock_gettime(CLOCK_MONOTONIC);
#endif

   *pMaxDeviation = vk_time_max_deviation(begin, end, max_clock_period);

   return VK_SUCCESS;
}