/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_device.h"

#include "venus-protocol/vn_protocol_driver_device.h"

#include "vn_android.h"
#include "vn_instance.h"
#include "vn_physical_device.h"
#include "vn_queue.h"

/* device commands */

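/* Destroy the queue's internal wait fence, if one was created, and finalize
 * the base object.
 */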
static void
vn_queue_fini(struct vn_queue *queue)
{
   if (queue->wait_fence != VK_NULL_HANDLE) {
      vn_DestroyFence(vn_device_to_handle(queue->device), queue->wait_fence,
                      NULL);
   }
   vn_object_base_fini(&queue->base);
}

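/* Initialize a queue: asynchronously request the renderer-side queue with
 * vkGetDeviceQueue2 and create an internal wait fence for it. When the
 * experimental global fencing feature is advertised, the fence is created
 * with sync_fd export enabled.
 */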
static VkResult
vn_queue_init(struct vn_device *dev,
              struct vn_queue *queue,
              const VkDeviceQueueCreateInfo *queue_info,
              uint32_t queue_index)
{
   vn_object_base_init(&queue->base, VK_OBJECT_TYPE_QUEUE, &dev->base);

   VkQueue queue_handle = vn_queue_to_handle(queue);
   vn_async_vkGetDeviceQueue2(
      dev->instance, vn_device_to_handle(dev),
      &(VkDeviceQueueInfo2){
         .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
         .flags = queue_info->flags,
         .queueFamilyIndex = queue_info->queueFamilyIndex,
         .queueIndex = queue_index,
      },
      &queue_handle);

   queue->device = dev;
   queue->family = queue_info->queueFamilyIndex;
   queue->index = queue_index;
   queue->flags = queue_info->flags;

   const VkExportFenceCreateInfo export_fence_info = {
      .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
      .pNext = NULL,
      .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
   };
   const VkFenceCreateInfo fence_info = {
      .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
      .pNext = dev->instance->experimental.globalFencing == VK_TRUE
                  ? &export_fence_info
                  : NULL,
      .flags = 0,
   };
   return vn_CreateFence(vn_device_to_handle(dev), &fence_info, NULL,
                         &queue->wait_fence);
}

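/* Create one vn_queue for every queue requested across all
 * VkDeviceQueueCreateInfo entries. On failure, the queues initialized so
 * far are finalized and the array is freed.
 */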
static VkResult
vn_device_init_queues(struct vn_device *dev,
                      const VkDeviceCreateInfo *create_info)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   uint32_t count = 0;
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++)
      count += create_info->pQueueCreateInfos[i].queueCount;

   struct vn_queue *queues =
      vk_zalloc(alloc, sizeof(*queues) * count, VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!queues)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   count = 0;
   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
      VkResult result;

      const VkDeviceQueueCreateInfo *queue_info =
         &create_info->pQueueCreateInfos[i];
      for (uint32_t j = 0; j < queue_info->queueCount; j++) {
         result = vn_queue_init(dev, &queues[count], queue_info, j);
         if (result != VK_SUCCESS) {
            for (uint32_t k = 0; k < count; k++)
               vn_queue_fini(&queues[k]);
            vk_free(alloc, queues);

            return result;
         }

         count++;
      }
   }

   dev->queues = queues;
   dev->queue_count = count;

   return VK_SUCCESS;
}

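/* Collect the distinct queue family indices requested at device creation
 * into dev->queue_families.
 */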
static bool
vn_device_queue_family_init(struct vn_device *dev,
                            const VkDeviceCreateInfo *create_info)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   uint32_t *queue_families = NULL;
   uint32_t count = 0;

   queue_families = vk_zalloc(
      alloc, sizeof(*queue_families) * create_info->queueCreateInfoCount,
      VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!queue_families)
      return false;

   for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
      const uint32_t index =
         create_info->pQueueCreateInfos[i].queueFamilyIndex;
      bool new_index = true;

      for (uint32_t j = 0; j < count; j++) {
         if (queue_families[j] == index) {
            new_index = false;
            break;
         }
      }
      if (new_index)
         queue_families[count++] = index;
   }

   dev->queue_families = queue_families;
   dev->queue_family_count = count;

   return true;
}

static inline void
vn_device_queue_family_fini(struct vn_device *dev)
{
   vk_free(&dev->base.base.alloc, dev->queue_families);
}

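/* Return true if name appears in the exts array. */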
static bool
find_extension_names(const char *const *exts,
                     uint32_t ext_count,
                     const char *name)
{
   for (uint32_t i = 0; i < ext_count; i++) {
      if (!strcmp(exts[i], name))
         return true;
   }
   return false;
}

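/* Build a merged extension list: keep every name in exts that is not in
 * block_exts, then append every name in extra_exts that is not already in
 * exts. For example, exts = { A, B }, extra_exts = { C } and
 * block_exts = { B } yield { A, C }. An allocation of
 * ext_count + extra_count entries is always a sufficient upper bound.
 */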
static bool
merge_extension_names(const char *const *exts,
                      uint32_t ext_count,
                      const char *const *extra_exts,
                      uint32_t extra_count,
                      const char *const *block_exts,
                      uint32_t block_count,
                      const VkAllocationCallbacks *alloc,
                      const char *const **out_exts,
                      uint32_t *out_count)
{
   const char **merged =
      vk_alloc(alloc, sizeof(*merged) * (ext_count + extra_count),
               VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!merged)
      return false;

   uint32_t count = 0;
   for (uint32_t i = 0; i < ext_count; i++) {
      if (!find_extension_names(block_exts, block_count, exts[i]))
         merged[count++] = exts[i];
   }
   for (uint32_t i = 0; i < extra_count; i++) {
      if (!find_extension_names(exts, ext_count, extra_exts[i]))
         merged[count++] = extra_exts[i];
   }

   *out_exts = merged;
   *out_count = count;
   return true;
}

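/* Rewrite the app's VkDeviceCreateInfo for the renderer: extensions that
 * venus implements on top of the renderer (e.g. KHR_swapchain and AHB) are
 * blocked, while renderer-side extensions they depend on (e.g.
 * EXT_image_drm_format_modifier) are injected. Returns dev_info unchanged
 * when no fix-up is needed, local_info with a freshly allocated extension
 * list otherwise, or NULL on allocation failure.
 */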
static const VkDeviceCreateInfo *
vn_device_fix_create_info(const struct vn_device *dev,
                          const VkDeviceCreateInfo *dev_info,
                          const VkAllocationCallbacks *alloc,
                          VkDeviceCreateInfo *local_info)
{
   const struct vn_physical_device *physical_dev = dev->physical_device;
   const struct vk_device_extension_table *app_exts =
      &dev->base.base.enabled_extensions;
   /* extra_exts and block_exts must not overlap */
   const char *extra_exts[16];
   const char *block_exts[16];
   uint32_t extra_count = 0;
   uint32_t block_count = 0;

   /* fix for WSI (treat AHB as WSI extension for simplicity) */
   const bool has_wsi =
      app_exts->KHR_swapchain || app_exts->ANDROID_native_buffer ||
      app_exts->ANDROID_external_memory_android_hardware_buffer;
   if (has_wsi) {
      /* KHR_swapchain may be advertised without renderer support for
       * EXT_image_drm_format_modifier
       */
      if (!app_exts->EXT_image_drm_format_modifier &&
          physical_dev->renderer_extensions.EXT_image_drm_format_modifier) {
         extra_exts[extra_count++] =
            VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME;

         if (physical_dev->renderer_version < VK_API_VERSION_1_2 &&
             !app_exts->KHR_image_format_list) {
            extra_exts[extra_count++] =
               VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME;
         }
      }

      /* XXX KHR_swapchain may be advertised without renderer support for
       * EXT_queue_family_foreign
       */
      if (!app_exts->EXT_queue_family_foreign &&
          physical_dev->renderer_extensions.EXT_queue_family_foreign) {
         extra_exts[extra_count++] =
            VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME;
      }

      if (app_exts->KHR_swapchain) {
         /* see vn_physical_device_get_native_extensions */
         block_exts[block_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
      }

      if (app_exts->ANDROID_native_buffer)
         block_exts[block_count++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;

      if (app_exts->ANDROID_external_memory_android_hardware_buffer) {
         block_exts[block_count++] =
            VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME;
      }
   }

   if (app_exts->KHR_external_memory_fd ||
       app_exts->EXT_external_memory_dma_buf || has_wsi) {
      switch (physical_dev->external_memory.renderer_handle_type) {
      case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
         if (!app_exts->EXT_external_memory_dma_buf) {
            extra_exts[extra_count++] =
               VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME;
         }
         if (!app_exts->KHR_external_memory_fd) {
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
         }
         break;
      case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
         if (app_exts->EXT_external_memory_dma_buf) {
            block_exts[block_count++] =
               VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME;
         }
         if (!app_exts->KHR_external_memory_fd) {
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
         }
         break;
      default:
         assert(!physical_dev->instance->renderer->info.has_dma_buf_import);
         break;
      }
   }

   if (app_exts->EXT_physical_device_drm) {
      /* see vn_physical_device_get_native_extensions */
      block_exts[block_count++] = VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME;
   }

   assert(extra_count <= ARRAY_SIZE(extra_exts));
   assert(block_count <= ARRAY_SIZE(block_exts));

   if (!extra_count && (!block_count || !dev_info->enabledExtensionCount))
      return dev_info;

   *local_info = *dev_info;
   if (!merge_extension_names(dev_info->ppEnabledExtensionNames,
                              dev_info->enabledExtensionCount, extra_exts,
                              extra_count, block_exts, block_count, alloc,
                              &local_info->ppEnabledExtensionNames,
                              &local_info->enabledExtensionCount))
      return NULL;

   return local_info;
}

static inline VkResult
vn_device_feedback_pool_init(struct vn_device *dev)
{
   /* The feedback pool defaults to suballocating slots of 8 bytes each. An
    * initial pool size of 4096 bytes corresponds to a total of 512 fences,
    * semaphores and events, which covers the common scenarios well. The
    * pool can grow beyond the initial size anyway.
    */
   static const uint32_t pool_size = 4096;
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   if (VN_PERF(NO_EVENT_FEEDBACK) && VN_PERF(NO_FENCE_FEEDBACK))
      return VK_SUCCESS;

   return vn_feedback_pool_init(dev, &dev->feedback_pool, pool_size, alloc);
}

static inline void
vn_device_feedback_pool_fini(struct vn_device *dev)
{
   if (VN_PERF(NO_EVENT_FEEDBACK) && VN_PERF(NO_FENCE_FEEDBACK))
      return;

   vn_feedback_pool_fini(&dev->feedback_pool);
}

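/* Initialize the device in dependency order: renderer device, queue
 * families, memory pools, buffer cache, feedback pool, feedback command
 * pools and finally queues. The error labels below undo these steps in
 * reverse order.
 */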
static VkResult
vn_device_init(struct vn_device *dev,
               struct vn_physical_device *physical_dev,
               const VkDeviceCreateInfo *create_info,
               const VkAllocationCallbacks *alloc)
{
   struct vn_instance *instance = physical_dev->instance;
   VkPhysicalDevice physical_dev_handle =
      vn_physical_device_to_handle(physical_dev);
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceCreateInfo local_create_info;
   VkResult result;

   dev->instance = instance;
   dev->physical_device = physical_dev;
   dev->renderer = instance->renderer;

   create_info =
      vn_device_fix_create_info(dev, create_info, alloc, &local_create_info);
   if (!create_info)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = vn_call_vkCreateDevice(instance, physical_dev_handle, create_info,
                                   NULL, &dev_handle);

   /* free the fixed extension list; it is no longer needed below */
   if (create_info == &local_create_info)
      vk_free(alloc, (void *)create_info->ppEnabledExtensionNames);

   if (result != VK_SUCCESS)
      return result;

   if (!vn_device_queue_family_init(dev, create_info)) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto out_destroy_device;
   }

   for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++) {
      struct vn_device_memory_pool *pool = &dev->memory_pools[i];
      mtx_init(&pool->mutex, mtx_plain);
   }

   result = vn_buffer_cache_init(dev);
   if (result != VK_SUCCESS)
      goto out_memory_pool_fini;

   result = vn_device_feedback_pool_init(dev);
   if (result != VK_SUCCESS)
      goto out_buffer_cache_fini;

   result = vn_feedback_cmd_pools_init(dev);
   if (result != VK_SUCCESS)
      goto out_feedback_pool_fini;

   result = vn_device_init_queues(dev, create_info);
   if (result != VK_SUCCESS)
      goto out_cmd_pools_fini;

   return VK_SUCCESS;

out_cmd_pools_fini:
   vn_feedback_cmd_pools_fini(dev);

out_feedback_pool_fini:
   vn_device_feedback_pool_fini(dev);

out_buffer_cache_fini:
   vn_buffer_cache_fini(dev);

out_memory_pool_fini:
   for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++)
      vn_device_memory_pool_fini(dev, i);

   vn_device_queue_family_fini(dev);

out_destroy_device:
   vn_call_vkDestroyDevice(instance, dev_handle, NULL);

   return result;
}

VkResult
vn_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   VN_TRACE_FUNC();
   struct vn_physical_device *physical_dev =
      vn_physical_device_from_handle(physicalDevice);
   struct vn_instance *instance = physical_dev->instance;
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &instance->base.base.alloc;
   struct vn_device *dev;
   VkResult result;

   dev = vk_zalloc(alloc, sizeof(*dev), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!dev)
      return vn_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);

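   /* Fill the dispatch table with venus entrypoints first, overwriting
    * everything, then let the common WSI entrypoints fill only the slots
    * that are still unset.
    */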
   struct vk_device_dispatch_table dispatch_table;
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                             &vn_device_entrypoints, true);
   vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                             &wsi_device_entrypoints, false);
   result = vn_device_base_init(&dev->base, &physical_dev->base,
                                &dispatch_table, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, dev);
      return vn_error(instance, result);
   }

   result = vn_device_init(dev, physical_dev, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vn_device_base_fini(&dev->base);
      vk_free(alloc, dev);
      return vn_error(instance, result);
   }

   *pDevice = vn_device_to_handle(dev);

   return VK_SUCCESS;
}

void
vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   if (!dev)
      return;

   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   for (uint32_t i = 0; i < dev->queue_count; i++)
      vn_queue_fini(&dev->queues[i]);

   vn_feedback_cmd_pools_fini(dev);

   vn_device_feedback_pool_fini(dev);

   vn_buffer_cache_fini(dev);

   for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++)
      vn_device_memory_pool_fini(dev, i);

   vn_device_queue_family_fini(dev);

   /* We must emit vkDestroyDevice before freeing dev->queues. Otherwise,
    * another thread might reuse their object ids while they still refer to
    * the queues in the renderer.
    */
   vn_async_vkDestroyDevice(dev->instance, device, NULL);

   vk_free(alloc, dev->queues);

   vn_device_base_fini(&dev->base);
   vk_free(alloc, dev);
}

PFN_vkVoidFunction
vn_GetDeviceProcAddr(VkDevice device, const char *pName)
{
   struct vn_device *dev = vn_device_from_handle(device);
   return vk_device_get_proc_addr(&dev->base.base, pName);
}

void
vn_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO get and cache the values in vkCreateDevice */
   vn_call_vkGetDeviceGroupPeerMemoryFeatures(
      dev->instance, device, heapIndex, localDeviceIndex, remoteDeviceIndex,
      pPeerMemoryFeatures);
}

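/* There is no device-level wait here; vkDeviceWaitIdle is emulated by
 * waiting for every queue of the device to go idle.
 */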
VkResult
vn_DeviceWaitIdle(VkDevice device)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   for (uint32_t i = 0; i < dev->queue_count; i++) {
      struct vn_queue *queue = &dev->queues[i];
      VkResult result = vn_QueueWaitIdle(vn_queue_to_handle(queue));
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

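/* Forward calibrated timestamp queries to the renderer; the timestamps are
 * thus sampled renderer-side.
 */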
VkResult
vn_GetCalibratedTimestampsEXT(
   VkDevice device,
   uint32_t timestampCount,
   const VkCalibratedTimestampInfoEXT *pTimestampInfos,
   uint64_t *pTimestamps,
   uint64_t *pMaxDeviation)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_call_vkGetCalibratedTimestampsEXT(
      dev->instance, device, timestampCount, pTimestampInfos, pTimestamps,
      pMaxDeviation);
}