/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_device.h"

#include "util/disk_cache.h"
#include "util/hex.h"
#include "venus-protocol/vn_protocol_driver_device.h"

#include "vn_android.h"
#include "vn_instance.h"
#include "vn_physical_device.h"
#include "vn_queue.h"

/* device commands */

static void
vn_queue_fini(struct vn_queue *queue)
{
   VkDevice dev_handle = vk_device_to_handle(queue->base.base.base.device);

   if (queue->wait_fence != VK_NULL_HANDLE) {
      vn_DestroyFence(dev_handle, queue->wait_fence, NULL);
   }
   if (queue->sparse_semaphore != VK_NULL_HANDLE) {
      vn_DestroySemaphore(dev_handle, queue->sparse_semaphore, NULL);
   }
   vn_cached_storage_fini(&queue->storage);
   vn_queue_base_fini(&queue->base);
}

static VkResult
vn_queue_init(struct vn_device *dev,
              struct vn_queue *queue,
              const VkDeviceQueueCreateInfo *queue_info,
              uint32_t queue_index)
{
   VkResult result =
      vn_queue_base_init(&queue->base, &dev->base, queue_info, queue_index);
   if (result != VK_SUCCESS)
      return result;

   vn_cached_storage_init(&queue->storage, &dev->base.base.alloc);

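   /* When the renderer supports multiple timelines, bind this queue to its
    * own ring index so its submissions run on a dedicated renderer timeline.
    */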
   VkDeviceQueueTimelineInfoMESA timeline_info;
   const struct vn_renderer_info *renderer_info =
      &dev->instance->renderer->info;
   if (renderer_info->supports_multiple_timelines) {
      int ring_idx = vn_instance_acquire_ring_idx(dev->instance);
      if (ring_idx < 0) {
         vn_log(dev->instance, "failed binding VkQueue to renderer timeline");
         return VK_ERROR_INITIALIZATION_FAILED;
      }
      queue->ring_idx = (uint32_t)ring_idx;

      timeline_info = (VkDeviceQueueTimelineInfoMESA){
         .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA,
         .ringIdx = queue->ring_idx,
      };
   }

   const VkDeviceQueueInfo2 device_queue_info = {
      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
      .pNext =
         renderer_info->supports_multiple_timelines ? &timeline_info : NULL,
      .flags = queue_info->flags,
      .queueFamilyIndex = queue_info->queueFamilyIndex,
      .queueIndex = queue_index,
   };

   VkQueue queue_handle = vn_queue_to_handle(queue);
   vn_async_vkGetDeviceQueue2(dev->primary_ring, vn_device_to_handle(dev),
                              &device_queue_info, &queue_handle);

   return VK_SUCCESS;
}

static VkResult
vn_device_init_queues(struct vn_device *dev,
                      const VkDeviceCreateInfo *create_info)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

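   /* Count the total number of queues requested across all queue create
    * infos before allocating the array.
    */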
   uint32_t count = 0;
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++)
      count += create_info->pQueueCreateInfos[i].queueCount;

   struct vn_queue *queues =
      vk_zalloc(alloc, sizeof(*queues) * count, VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!queues)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   count = 0;
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
      VkResult result;

      const VkDeviceQueueCreateInfo *queue_info =
         &create_info->pQueueCreateInfos[i];
      for (uint32_t j = 0; j < queue_info->queueCount; j++) {
         result = vn_queue_init(dev, &queues[count], queue_info, j);
         if (result != VK_SUCCESS) {
            for (uint32_t k = 0; k < count; k++)
               vn_queue_fini(&queues[k]);
            vk_free(alloc, queues);

            return result;
         }

         count++;
      }
   }

   dev->queues = queues;
   dev->queue_count = count;

   return VK_SUCCESS;
}

static bool
vn_device_queue_family_init(struct vn_device *dev,
                            const VkDeviceCreateInfo *create_info)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   uint32_t *queue_families = NULL;
   uint32_t count = 0;

   queue_families = vk_zalloc(
      alloc, sizeof(*queue_families) * create_info->queueCreateInfoCount,
      VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!queue_families)
      return false;

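   /* Deduplicate: record each queue family index referenced by the app
    * exactly once.
    */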
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
      const uint32_t index =
         create_info->pQueueCreateInfos[i].queueFamilyIndex;
      bool new_index = true;

      for (uint32_t j = 0; j < count; j++) {
         if (queue_families[j] == index) {
            new_index = false;
            break;
         }
      }
      if (new_index)
         queue_families[count++] = index;
   }

   dev->queue_families = queue_families;
   dev->queue_family_count = count;

   return true;
}

static inline void
vn_device_queue_family_fini(struct vn_device *dev)
{
   vk_free(&dev->base.base.alloc, dev->queue_families);
}

static VkResult
vn_device_memory_report_init(struct vn_device *dev,
                             const VkDeviceCreateInfo *create_info)
{
   const struct vk_features *app_feats = &dev->base.base.enabled_features;
   if (!app_feats->deviceMemoryReport)
      return VK_SUCCESS;

   uint32_t count = 0;
   vk_foreach_struct_const(pnext, create_info->pNext) {
      if (pnext->sType ==
          VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT)
         count++;
   }

   struct vn_device_memory_report *mem_reports = NULL;
   if (count) {
      mem_reports =
         vk_alloc(&dev->base.base.alloc, sizeof(*mem_reports) * count,
                  VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!mem_reports)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

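   /* Second pass: record the callback and user data of every
    * VkDeviceDeviceMemoryReportCreateInfoEXT in the pNext chain.
    */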
   count = 0;
   vk_foreach_struct_const(pnext, create_info->pNext) {
      if (pnext->sType ==
          VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
         const struct VkDeviceDeviceMemoryReportCreateInfoEXT *report =
            (void *)pnext;
         mem_reports[count].callback = report->pfnUserCallback;
         mem_reports[count].data = report->pUserData;
         count++;
      }
   }

   dev->memory_report_count = count;
   dev->memory_reports = mem_reports;

   return VK_SUCCESS;
}

static inline void
vn_device_memory_report_fini(struct vn_device *dev)
{
   vk_free(&dev->base.base.alloc, dev->memory_reports);
}

static bool
find_extension_names(const char *const *exts,
                     uint32_t ext_count,
                     const char *name)
{
   for (uint32_t i = 0; i < ext_count; i++) {
      if (!strcmp(exts[i], name))
         return true;
   }
   return false;
}

static bool
merge_extension_names(const char *const *exts,
                      uint32_t ext_count,
                      const char *const *extra_exts,
                      uint32_t extra_count,
                      const char *const *block_exts,
                      uint32_t block_count,
                      const VkAllocationCallbacks *alloc,
                      const char *const **out_exts,
                      uint32_t *out_count)
{
   const char **merged =
      vk_alloc(alloc, sizeof(*merged) * (ext_count + extra_count),
               VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!merged)
      return false;

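   /* Keep app extensions that are not blocked, then append the extra
    * extensions the app did not already enable.
    */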
   uint32_t count = 0;
   for (uint32_t i = 0; i < ext_count; i++) {
      if (!find_extension_names(block_exts, block_count, exts[i]))
         merged[count++] = exts[i];
   }
   for (uint32_t i = 0; i < extra_count; i++) {
      if (!find_extension_names(exts, ext_count, extra_exts[i]))
         merged[count++] = extra_exts[i];
   }

   *out_exts = merged;
   *out_count = count;
   return true;
}

static const VkDeviceCreateInfo *
vn_device_fix_create_info(const struct vn_device *dev,
                          const VkDeviceCreateInfo *dev_info,
                          const VkAllocationCallbacks *alloc,
                          VkDeviceCreateInfo *local_info)
{
   const struct vn_physical_device *physical_dev = dev->physical_device;
   const struct vk_device_extension_table *app_exts =
      &dev->base.base.enabled_extensions;
   /* extra_exts and block_exts must not overlap */
   const char *extra_exts[16];
   const char *block_exts[16];
   uint32_t extra_count = 0;
   uint32_t block_count = 0;

   /* fix for WSI (treat AHB as WSI extension for simplicity) */
   const bool has_wsi =
      app_exts->KHR_swapchain || app_exts->ANDROID_native_buffer ||
      app_exts->ANDROID_external_memory_android_hardware_buffer;
   if (has_wsi) {
      if (!app_exts->EXT_image_drm_format_modifier) {
         extra_exts[extra_count++] =
            VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME;

         if (physical_dev->renderer_version < VK_API_VERSION_1_2 &&
             !app_exts->KHR_image_format_list) {
            extra_exts[extra_count++] =
               VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME;
         }
      }

      if (!app_exts->EXT_queue_family_foreign) {
         extra_exts[extra_count++] =
            VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME;
      }

      if (app_exts->KHR_swapchain) {
         /* see vn_physical_device_get_native_extensions */
         block_exts[block_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
      }

      if (app_exts->ANDROID_native_buffer) {
         /* see vn_QueueSignalReleaseImageANDROID */
         if (!app_exts->KHR_external_fence_fd) {
            assert(physical_dev->renderer_sync_fd.fence_exportable);
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
         }

         block_exts[block_count++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;
      }

      if (app_exts->ANDROID_external_memory_android_hardware_buffer) {
         block_exts[block_count++] =
            VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME;
      }
   }

   if (app_exts->KHR_external_memory_fd ||
       app_exts->EXT_external_memory_dma_buf || has_wsi) {
      if (physical_dev->external_memory.renderer_handle_type ==
          VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) {
         if (!app_exts->EXT_external_memory_dma_buf) {
            extra_exts[extra_count++] =
               VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME;
         }
         if (!app_exts->KHR_external_memory_fd) {
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
         }
      }
   }

   /* see vn_queue_submission_count_batch_semaphores */
   if (!app_exts->KHR_external_semaphore_fd && has_wsi) {
      assert(physical_dev->renderer_sync_fd.semaphore_importable);
      extra_exts[extra_count++] = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
   }

   if (app_exts->EXT_device_memory_report) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME;
   }

   if (app_exts->EXT_physical_device_drm) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME;
   }

   if (app_exts->EXT_tooling_info) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_TOOLING_INFO_EXTENSION_NAME;
   }

   if (app_exts->EXT_pci_bus_info) {
      /* always filter for simplicity */
      block_exts[block_count++] = VK_EXT_PCI_BUS_INFO_EXTENSION_NAME;
   }

   assert(extra_count <= ARRAY_SIZE(extra_exts));
   assert(block_count <= ARRAY_SIZE(block_exts));

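   /* No fixup needed: hand the app's create info back unmodified. */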
   if (!extra_count && (!block_count || !dev_info->enabledExtensionCount))
      return dev_info;

   *local_info = *dev_info;
   if (!merge_extension_names(dev_info->ppEnabledExtensionNames,
                              dev_info->enabledExtensionCount, extra_exts,
                              extra_count, block_exts, block_count, alloc,
                              &local_info->ppEnabledExtensionNames,
                              &local_info->enabledExtensionCount))
      return NULL;

   return local_info;
}

static inline VkResult
vn_device_feedback_pool_init(struct vn_device *dev)
{
   /* The feedback pool suballocates slots of 8 bytes each by default. An
    * initial pool size of 4096 bytes therefore covers a total of 512 fences,
    * semaphores and events, which is enough for the common scenarios. The
    * pool can grow anyway.
    */
   static const uint32_t pool_size = 4096;
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   if (VN_PERF(NO_EVENT_FEEDBACK) && VN_PERF(NO_FENCE_FEEDBACK) &&
       VN_PERF(NO_SEMAPHORE_FEEDBACK))
      return VK_SUCCESS;

   return vn_feedback_pool_init(dev, &dev->feedback_pool, pool_size, alloc);
}

static inline void
vn_device_feedback_pool_fini(struct vn_device *dev)
{
   if (VN_PERF(NO_EVENT_FEEDBACK) && VN_PERF(NO_FENCE_FEEDBACK) &&
       VN_PERF(NO_SEMAPHORE_FEEDBACK))
      return;

   vn_feedback_pool_fini(&dev->feedback_pool);
}

static void
vn_device_update_shader_cache_id(struct vn_device *dev)
{
   /* Venus uses the host-side shader cache. This is a workaround to generate
    * shader cache files containing headers with a unique cache id that
    * changes based on the host driver identifiers, which lets fossilize
    * replay detect when the host-side shader cache is no longer up to date.
    * The shader cache is destroyed after creating the necessary files and is
    * not otherwise used by venus.
    */
#if !DETECT_OS_ANDROID && defined(ENABLE_SHADER_CACHE)
   const VkPhysicalDeviceProperties *vulkan_1_0_props =
      &dev->physical_device->properties.vulkan_1_0;

   char uuid[VK_UUID_SIZE * 2 + 1];
   mesa_bytes_to_hex(uuid, vulkan_1_0_props->pipelineCacheUUID, VK_UUID_SIZE);

   struct disk_cache *cache = disk_cache_create("venus", uuid, 0);
   if (!cache)
      return;

   /* The entry header is what contains the cache id / timestamp, so we need
    * to create a fake entry.
    */
   uint8_t key[20];
   char data[] = "Fake Shader";

   disk_cache_compute_key(cache, data, sizeof(data), key);
   disk_cache_put(cache, key, data, sizeof(data), NULL);

   disk_cache_destroy(cache);
#endif
}

static VkResult
vn_device_init(struct vn_device *dev,
               struct vn_physical_device *physical_dev,
               const VkDeviceCreateInfo *create_info,
               const VkAllocationCallbacks *alloc)
{
   struct vn_instance *instance = physical_dev->instance;
   VkPhysicalDevice physical_dev_handle =
      vn_physical_device_to_handle(physical_dev);
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceCreateInfo local_create_info;
   VkResult result;

   dev->instance = instance;
   dev->physical_device = physical_dev;
   dev->renderer = instance->renderer;
   dev->primary_ring = instance->ring.ring;

   create_info =
      vn_device_fix_create_info(dev, create_info, alloc, &local_create_info);
   if (!create_info)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = vn_call_vkCreateDevice(dev->primary_ring, physical_dev_handle,
                                   create_info, NULL, &dev_handle);

   /* free the fixed extension list here since it is no longer needed below */
   if (create_info == &local_create_info)
      vk_free(alloc, (void *)create_info->ppEnabledExtensionNames);

   if (result != VK_SUCCESS)
      return result;

   result = vn_device_memory_report_init(dev, create_info);
   if (result != VK_SUCCESS)
      goto out_destroy_device;

   if (!vn_device_queue_family_init(dev, create_info)) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto out_memory_report_fini;
   }

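   /* Initialize the mutex guarding each device memory pool up front so the
    * pools can be used and torn down unconditionally later on.
    */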
   for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++) {
      struct vn_device_memory_pool *pool = &dev->memory_pools[i];
      mtx_init(&pool->mutex, mtx_plain);
   }

   result = vn_device_feedback_pool_init(dev);
   if (result != VK_SUCCESS)
      goto out_memory_pool_fini;

   result = vn_feedback_cmd_pools_init(dev);
   if (result != VK_SUCCESS)
      goto out_feedback_pool_fini;

   result = vn_device_init_queues(dev, create_info);
   if (result != VK_SUCCESS)
      goto out_feedback_cmd_pools_fini;

   vn_buffer_reqs_cache_init(dev);
   vn_image_reqs_cache_init(dev);

   /* This is a WA to allow fossilize replay to detect if the host side shader
    * cache is no longer up to date.
    */
   vn_device_update_shader_cache_id(dev);

   return VK_SUCCESS;

out_feedback_cmd_pools_fini:
   vn_feedback_cmd_pools_fini(dev);

out_feedback_pool_fini:
   vn_device_feedback_pool_fini(dev);

out_memory_pool_fini:
   for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++)
      vn_device_memory_pool_fini(dev, i);

   vn_device_queue_family_fini(dev);

out_memory_report_fini:
   vn_device_memory_report_fini(dev);

out_destroy_device:
   vn_call_vkDestroyDevice(dev->primary_ring, dev_handle, NULL);

   return result;
}

VkResult
vn_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   VN_TRACE_FUNC();
   struct vn_physical_device *physical_dev =
      vn_physical_device_from_handle(physicalDevice);
   struct vn_instance *instance = physical_dev->instance;
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &instance->base.base.alloc;
   struct vn_device *dev;
   VkResult result;

   dev = vk_zalloc(alloc, sizeof(*dev), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!dev)
      return vn_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);

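   /* Route venus entrypoints first, then let the common WSI entrypoints fill
    * any remaining dispatch table slots.
    */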
   struct vk_device_dispatch_table dispatch_table;
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                             &vn_device_entrypoints, true);
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                             &wsi_device_entrypoints, false);
   result = vn_device_base_init(&dev->base, &physical_dev->base,
                                &dispatch_table, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, dev);
      return vn_error(instance, result);
   }

   result = vn_device_init(dev, physical_dev, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vn_device_base_fini(&dev->base);
      vk_free(alloc, dev);
      return vn_error(instance, result);
   }

   if (VN_DEBUG(LOG_CTX_INFO)) {
      vn_log(instance, "%s", physical_dev->properties.vulkan_1_0.deviceName);
      vn_log(instance, "%s", physical_dev->properties.vulkan_1_2.driverInfo);
   }

   vn_tls_set_async_pipeline_create();

   *pDevice = vn_device_to_handle(dev);

   return VK_SUCCESS;
}

void
vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   if (!dev)
      return;

   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   vn_image_reqs_cache_fini(dev);
   vn_buffer_reqs_cache_fini(dev);

   for (uint32_t i = 0; i < dev->queue_count; i++)
      vn_queue_fini(&dev->queues[i]);

   vn_feedback_cmd_pools_fini(dev);

   vn_device_feedback_pool_fini(dev);

   for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++)
      vn_device_memory_pool_fini(dev, i);

   vn_device_queue_family_fini(dev);

   vn_device_memory_report_fini(dev);

   vn_async_vkDestroyDevice(dev->primary_ring, device, NULL);

   /* We must emit vn_async_vkDestroyDevice before releasing the bound
    * ring_idx. Otherwise, another thread might reuse a ring_idx while it is
    * still bound to the queues in the renderer.
    */
   if (dev->renderer->info.supports_multiple_timelines) {
      for (uint32_t i = 0; i < dev->queue_count; i++) {
         vn_instance_release_ring_idx(dev->instance, dev->queues[i].ring_idx);
      }
   }

   vk_free(alloc, dev->queues);

   vn_device_base_fini(&dev->base);
   vk_free(alloc, dev);
}

PFN_vkVoidFunction
vn_GetDeviceProcAddr(VkDevice device, const char *pName)
{
   struct vn_device *dev = vn_device_from_handle(device);
   return vk_device_get_proc_addr(&dev->base.base, pName);
}

void
vn_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO get and cache the values in vkCreateDevice */
   vn_call_vkGetDeviceGroupPeerMemoryFeatures(
      dev->primary_ring, device, heapIndex, localDeviceIndex,
      remoteDeviceIndex, pPeerMemoryFeatures);
}

VkResult
vn_GetCalibratedTimestampsEXT(
   VkDevice device,
   uint32_t timestampCount,
   const VkCalibratedTimestampInfoEXT *pTimestampInfos,
   uint64_t *pTimestamps,
   uint64_t *pMaxDeviation)
{
   struct vn_device *dev = vn_device_from_handle(device);
   uint64_t begin, end, max_clock_period = 0;
   VkResult ret;
   int domain;

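   /* Sample the host monotonic clock before and after the per-domain queries
    * so the total deviation across all domains can be bounded below.
    */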
#ifdef CLOCK_MONOTONIC_RAW
   begin = vk_clock_gettime(CLOCK_MONOTONIC_RAW);
#else
   begin = vk_clock_gettime(CLOCK_MONOTONIC);
#endif

   for (domain = 0; domain < timestampCount; domain++) {
      switch (pTimestampInfos[domain].timeDomain) {
      case VK_TIME_DOMAIN_DEVICE_EXT: {
         uint64_t device_max_deviation = 0;

         ret = vn_call_vkGetCalibratedTimestampsEXT(
            dev->primary_ring, device, 1, &pTimestampInfos[domain],
            &pTimestamps[domain], &device_max_deviation);

         if (ret != VK_SUCCESS)
            return vn_error(dev->instance, ret);

         max_clock_period = MAX2(max_clock_period, device_max_deviation);
         break;
      }
      case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
         pTimestamps[domain] = vk_clock_gettime(CLOCK_MONOTONIC);
         max_clock_period = MAX2(max_clock_period, 1);
         break;
#ifdef CLOCK_MONOTONIC_RAW
      case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
         pTimestamps[domain] = begin;
         break;
#endif
      default:
         pTimestamps[domain] = 0;
         break;
      }
   }

#ifdef CLOCK_MONOTONIC_RAW
   end = vk_clock_gettime(CLOCK_MONOTONIC_RAW);
#else
   end = vk_clock_gettime(CLOCK_MONOTONIC);
#endif

   *pMaxDeviation = vk_time_max_deviation(begin, end, max_clock_period);

   return VK_SUCCESS;
}