• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2020 Google LLC
3  * SPDX-License-Identifier: MIT
4  */
5 
6 #include "vkr_device.h"
7 
8 #include "venus-protocol/vn_protocol_renderer_device.h"
9 
10 #include "vkr_command_buffer.h"
11 #include "vkr_context.h"
12 #include "vkr_descriptor_set.h"
13 #include "vkr_device_memory.h"
14 #include "vkr_physical_device.h"
15 #include "vkr_queue.h"
16 
17 static VkResult
vkr_device_create_queues(struct vkr_context * ctx,struct vkr_device * dev,uint32_t create_info_count,const VkDeviceQueueCreateInfo * create_infos)18 vkr_device_create_queues(struct vkr_context *ctx,
19                          struct vkr_device *dev,
20                          uint32_t create_info_count,
21                          const VkDeviceQueueCreateInfo *create_infos)
22 {
23    list_inithead(&dev->queues);
24 
25    for (uint32_t i = 0; i < create_info_count; i++) {
26       for (uint32_t j = 0; j < create_infos[i].queueCount; j++) {
27          const VkDeviceQueueInfo2 info = {
28             .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
29             .pNext = NULL,
30             .flags = create_infos[i].flags,
31             .queueFamilyIndex = create_infos[i].queueFamilyIndex,
32             .queueIndex = j,
33          };
34          VkQueue handle = VK_NULL_HANDLE;
35          vkGetDeviceQueue2(dev->base.handle.device, &info, &handle);
36 
37          struct vkr_queue *queue = vkr_queue_create(
38             ctx, dev, info.flags, info.queueFamilyIndex, info.queueIndex, handle);
39          if (!queue) {
40             struct vkr_queue *entry, *tmp;
41             LIST_FOR_EACH_ENTRY_SAFE (entry, tmp, &dev->queues, base.track_head)
42                vkr_queue_destroy(ctx, entry);
43 
44             return VK_ERROR_OUT_OF_HOST_MEMORY;
45          }
46 
47          /* queues are not tracked as device objects */
48          list_add(&queue->base.track_head, &dev->queues);
49       }
50    }
51 
52    return VK_SUCCESS;
53 }
54 
55 static void
vkr_device_init_entry_points(struct vkr_device * dev,uint32_t api_version)56 vkr_device_init_entry_points(struct vkr_device *dev, uint32_t api_version)
57 {
58    VkDevice handle = dev->base.handle.device;
59    if (api_version >= VK_API_VERSION_1_2) {
60       dev->GetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)vkGetDeviceProcAddr(
61          handle, "vkGetSemaphoreCounterValue");
62       dev->WaitSemaphores =
63          (PFN_vkWaitSemaphores)vkGetDeviceProcAddr(handle, "vkWaitSemaphores");
64       dev->SignalSemaphore =
65          (PFN_vkSignalSemaphore)vkGetDeviceProcAddr(handle, "vkSignalSemaphore");
66       dev->GetDeviceMemoryOpaqueCaptureAddress =
67          (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)vkGetDeviceProcAddr(
68             handle, "vkGetDeviceMemoryOpaqueCaptureAddress");
69       dev->GetBufferOpaqueCaptureAddress =
70          (PFN_vkGetBufferOpaqueCaptureAddress)vkGetDeviceProcAddr(
71             handle, "vkGetBufferOpaqueCaptureAddress");
72       dev->GetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)vkGetDeviceProcAddr(
73          handle, "vkGetBufferDeviceAddress");
74       dev->ResetQueryPool =
75          (PFN_vkResetQueryPool)vkGetDeviceProcAddr(handle, "vkResetQueryPool");
76       dev->CreateRenderPass2 =
77          (PFN_vkCreateRenderPass2)vkGetDeviceProcAddr(handle, "vkCreateRenderPass2");
78       dev->CmdBeginRenderPass2 =
79          (PFN_vkCmdBeginRenderPass2)vkGetDeviceProcAddr(handle, "vkCmdBeginRenderPass2");
80       dev->CmdNextSubpass2 =
81          (PFN_vkCmdNextSubpass2)vkGetDeviceProcAddr(handle, "vkCmdNextSubpass2");
82       dev->CmdEndRenderPass2 =
83          (PFN_vkCmdEndRenderPass2)vkGetDeviceProcAddr(handle, "vkCmdEndRenderPass2");
84       dev->CmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)vkGetDeviceProcAddr(
85          handle, "vkCmdDrawIndirectCount");
86       dev->CmdDrawIndexedIndirectCount =
87          (PFN_vkCmdDrawIndexedIndirectCount)vkGetDeviceProcAddr(
88             handle, "vkCmdDrawIndexedIndirectCount");
89    } else {
90       dev->GetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)vkGetDeviceProcAddr(
91          handle, "vkGetSemaphoreCounterValueKHR");
92       dev->WaitSemaphores =
93          (PFN_vkWaitSemaphores)vkGetDeviceProcAddr(handle, "vkWaitSemaphoresKHR");
94       dev->SignalSemaphore =
95          (PFN_vkSignalSemaphore)vkGetDeviceProcAddr(handle, "vkSignalSemaphoreKHR");
96       dev->GetDeviceMemoryOpaqueCaptureAddress =
97          (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)vkGetDeviceProcAddr(
98             handle, "vkGetDeviceMemoryOpaqueCaptureAddressKHR");
99       dev->GetBufferOpaqueCaptureAddress =
100          (PFN_vkGetBufferOpaqueCaptureAddress)vkGetDeviceProcAddr(
101             handle, "vkGetBufferOpaqueCaptureAddressKHR");
102       dev->GetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)vkGetDeviceProcAddr(
103          handle, "vkGetBufferDeviceAddressKHR");
104       dev->ResetQueryPool =
105          (PFN_vkResetQueryPool)vkGetDeviceProcAddr(handle, "vkResetQueryPoolEXT");
106       dev->CreateRenderPass2 =
107          (PFN_vkCreateRenderPass2)vkGetDeviceProcAddr(handle, "vkCreateRenderPass2KHR");
108       dev->CmdBeginRenderPass2 = (PFN_vkCmdBeginRenderPass2)vkGetDeviceProcAddr(
109          handle, "vkCmdBeginRenderPass2KHR");
110       dev->CmdNextSubpass2 =
111          (PFN_vkCmdNextSubpass2)vkGetDeviceProcAddr(handle, "vkCmdNextSubpass2KHR");
112       dev->CmdEndRenderPass2 =
113          (PFN_vkCmdEndRenderPass2)vkGetDeviceProcAddr(handle, "vkCmdEndRenderPass2KHR");
114       dev->CmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)vkGetDeviceProcAddr(
115          handle, "vkCmdDrawIndirectCountKHR");
116       dev->CmdDrawIndexedIndirectCount =
117          (PFN_vkCmdDrawIndexedIndirectCount)vkGetDeviceProcAddr(
118             handle, "vkCmdDrawIndexedIndirectCountKHR");
119    }
120 
121    dev->cmd_bind_transform_feedback_buffers =
122       (PFN_vkCmdBindTransformFeedbackBuffersEXT)vkGetDeviceProcAddr(
123          handle, "vkCmdBindTransformFeedbackBuffersEXT");
124    dev->cmd_begin_transform_feedback =
125       (PFN_vkCmdBeginTransformFeedbackEXT)vkGetDeviceProcAddr(
126          handle, "vkCmdBeginTransformFeedbackEXT");
127    dev->cmd_end_transform_feedback =
128       (PFN_vkCmdEndTransformFeedbackEXT)vkGetDeviceProcAddr(
129          handle, "vkCmdEndTransformFeedbackEXT");
130    dev->cmd_begin_query_indexed = (PFN_vkCmdBeginQueryIndexedEXT)vkGetDeviceProcAddr(
131       handle, "vkCmdBeginQueryIndexedEXT");
132    dev->cmd_end_query_indexed =
133       (PFN_vkCmdEndQueryIndexedEXT)vkGetDeviceProcAddr(handle, "vkCmdEndQueryIndexedEXT");
134    dev->cmd_draw_indirect_byte_count =
135       (PFN_vkCmdDrawIndirectByteCountEXT)vkGetDeviceProcAddr(
136          handle, "vkCmdDrawIndirectByteCountEXT");
137 
138    dev->get_image_drm_format_modifier_properties =
139       (PFN_vkGetImageDrmFormatModifierPropertiesEXT)vkGetDeviceProcAddr(
140          handle, "vkGetImageDrmFormatModifierPropertiesEXT");
141 
142    dev->get_memory_fd_properties = (PFN_vkGetMemoryFdPropertiesKHR)vkGetDeviceProcAddr(
143       handle, "vkGetMemoryFdPropertiesKHR");
144 }
145 
/* Dispatch handler for vkCreateDevice.
 *
 * Patches the guest-supplied VkDeviceCreateInfo to additionally enable the
 * external-memory/fence extensions the renderer itself relies on, creates
 * the driver device, then sets up queues, entry points, and tracking lists.
 * On any failure, args->ret carries the error and all partial state is
 * released.
 */
static void
vkr_dispatch_vkCreateDevice(struct vn_dispatch_context *dispatch,
                            struct vn_command_vkCreateDevice *args)
{
   struct vkr_context *ctx = dispatch->data;

   struct vkr_physical_device *physical_dev =
      vkr_physical_device_from_handle(args->physicalDevice);

   /* append extensions for our own use; each flag below contributes 0 or 1
    * to the worst-case extension count */
   const char **exts = NULL;
   uint32_t ext_count = args->pCreateInfo->enabledExtensionCount;
   ext_count += physical_dev->KHR_external_memory_fd;
   ext_count += physical_dev->EXT_external_memory_dma_buf;
   ext_count += physical_dev->KHR_external_fence_fd;
   if (ext_count > args->pCreateInfo->enabledExtensionCount) {
      exts = malloc(sizeof(*exts) * ext_count);
      if (!exts) {
         args->ret = VK_ERROR_OUT_OF_HOST_MEMORY;
         return;
      }
      /* copy the guest's extension list, then append ours */
      for (uint32_t i = 0; i < args->pCreateInfo->enabledExtensionCount; i++)
         exts[i] = args->pCreateInfo->ppEnabledExtensionNames[i];

      ext_count = args->pCreateInfo->enabledExtensionCount;
      if (physical_dev->KHR_external_memory_fd)
         exts[ext_count++] = "VK_KHR_external_memory_fd";
      if (physical_dev->EXT_external_memory_dma_buf)
         exts[ext_count++] = "VK_EXT_external_memory_dma_buf";
      if (physical_dev->KHR_external_fence_fd)
         exts[ext_count++] = "VK_KHR_external_fence_fd";

      /* pCreateInfo is const in the command struct; cast away const to
       * splice in the augmented list before calling the driver */
      ((VkDeviceCreateInfo *)args->pCreateInfo)->ppEnabledExtensionNames = exts;
      ((VkDeviceCreateInfo *)args->pCreateInfo)->enabledExtensionCount = ext_count;
   }

   struct vkr_device *dev =
      vkr_context_alloc_object(ctx, sizeof(*dev), VK_OBJECT_TYPE_DEVICE, args->pDevice);
   if (!dev) {
      args->ret = VK_ERROR_OUT_OF_HOST_MEMORY;
      free(exts);
      return;
   }

   vn_replace_vkCreateDevice_args_handle(args);
   args->ret = vkCreateDevice(args->physicalDevice, args->pCreateInfo, NULL,
                              &dev->base.handle.device);
   if (args->ret != VK_SUCCESS) {
      free(exts);
      free(dev);
      return;
   }

   /* the driver does not retain the create-info pointers past the call, so
    * the augmented list can be released now */
   free(exts);

   dev->physical_device = physical_dev;

   args->ret = vkr_device_create_queues(ctx, dev, args->pCreateInfo->queueCreateInfoCount,
                                        args->pCreateInfo->pQueueCreateInfos);
   if (args->ret != VK_SUCCESS) {
      vkDestroyDevice(dev->base.handle.device, NULL);
      free(dev);
      return;
   }

   vkr_device_init_entry_points(dev, physical_dev->api_version);

   /* free_syncs caches recycled fences; guarded by free_sync_mutex */
   mtx_init(&dev->free_sync_mutex, mtx_plain);
   list_inithead(&dev->free_syncs);

   /* objects owned by this device, torn down in vkr_device_destroy */
   list_inithead(&dev->objects);

   list_add(&dev->base.track_head, &physical_dev->devices);

   vkr_context_add_object(ctx, &dev->base);
}
222 
/* Destroy the driver handle behind a tracked device object and remove the
 * object from the context's and device's tracking.
 *
 * For pool objects (descriptor/command pools), destroying the pool also
 * frees everything allocated from it, so the pool's child objects are
 * removed from tracking here as well.
 */
static void
vkr_device_object_destroy(struct vkr_context *ctx,
                          struct vkr_device *dev,
                          struct vkr_object *obj)
{
   VkDevice device = dev->base.handle.device;

   assert(vkr_device_should_track_object(obj));

   switch (obj->type) {
   case VK_OBJECT_TYPE_SEMAPHORE:
      vkDestroySemaphore(device, obj->handle.semaphore, NULL);
      break;
   case VK_OBJECT_TYPE_FENCE:
      vkDestroyFence(device, obj->handle.fence, NULL);
      break;
   case VK_OBJECT_TYPE_DEVICE_MEMORY:
      vkFreeMemory(device, obj->handle.device_memory, NULL);

      /* remove device memory from exported or attachment list */
      list_del(&((struct vkr_device_memory *)obj)->exported_head);
      break;
   case VK_OBJECT_TYPE_BUFFER:
      vkDestroyBuffer(device, obj->handle.buffer, NULL);
      break;
   case VK_OBJECT_TYPE_IMAGE:
      vkDestroyImage(device, obj->handle.image, NULL);
      break;
   case VK_OBJECT_TYPE_EVENT:
      vkDestroyEvent(device, obj->handle.event, NULL);
      break;
   case VK_OBJECT_TYPE_QUERY_POOL:
      vkDestroyQueryPool(device, obj->handle.query_pool, NULL);
      break;
   case VK_OBJECT_TYPE_BUFFER_VIEW:
      vkDestroyBufferView(device, obj->handle.buffer_view, NULL);
      break;
   case VK_OBJECT_TYPE_IMAGE_VIEW:
      vkDestroyImageView(device, obj->handle.image_view, NULL);
      break;
   case VK_OBJECT_TYPE_SHADER_MODULE:
      vkDestroyShaderModule(device, obj->handle.shader_module, NULL);
      break;
   case VK_OBJECT_TYPE_PIPELINE_CACHE:
      vkDestroyPipelineCache(device, obj->handle.pipeline_cache, NULL);
      break;
   case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
      vkDestroyPipelineLayout(device, obj->handle.pipeline_layout, NULL);
      break;
   case VK_OBJECT_TYPE_RENDER_PASS:
      vkDestroyRenderPass(device, obj->handle.render_pass, NULL);
      break;
   case VK_OBJECT_TYPE_PIPELINE:
      vkDestroyPipeline(device, obj->handle.pipeline, NULL);
      break;
   case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
      vkDestroyDescriptorSetLayout(device, obj->handle.descriptor_set_layout, NULL);
      break;
   case VK_OBJECT_TYPE_SAMPLER:
      vkDestroySampler(device, obj->handle.sampler, NULL);
      break;
   case VK_OBJECT_TYPE_DESCRIPTOR_POOL: {
      /* Destroying VkDescriptorPool frees all VkDescriptorSet objects that were allocated
       * from it.
       */
      vkDestroyDescriptorPool(device, obj->handle.descriptor_pool, NULL);

      /* drop the now-dangling descriptor sets from tracking */
      struct vkr_descriptor_pool *pool = (struct vkr_descriptor_pool *)obj;
      vkr_context_remove_objects(ctx, &pool->descriptor_sets);
      break;
   }
   case VK_OBJECT_TYPE_FRAMEBUFFER:
      vkDestroyFramebuffer(device, obj->handle.framebuffer, NULL);
      break;
   case VK_OBJECT_TYPE_COMMAND_POOL: {
      /* Destroying VkCommandPool frees all VkCommandBuffer objects that were allocated
       * from it.
       */
      vkDestroyCommandPool(device, obj->handle.command_pool, NULL);

      /* drop the now-dangling command buffers from tracking */
      struct vkr_command_pool *pool = (struct vkr_command_pool *)obj;
      vkr_context_remove_objects(ctx, &pool->command_buffers);
      break;
   }
   case VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION:
      vkDestroySamplerYcbcrConversion(device, obj->handle.sampler_ycbcr_conversion, NULL);
      break;
   case VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE:
      vkDestroyDescriptorUpdateTemplate(device, obj->handle.descriptor_update_template,
                                        NULL);
      break;
   default:
      /* a tracked type without a destroy path is a programming error */
      vkr_log("Unhandled vkr_object(%p) with VkObjectType(%u)", obj, (uint32_t)obj->type);
      assert(false);
      break;
   };

   vkr_device_remove_object(ctx, dev, obj);
}
322 
323 void
vkr_device_destroy(struct vkr_context * ctx,struct vkr_device * dev)324 vkr_device_destroy(struct vkr_context *ctx, struct vkr_device *dev)
325 {
326    VkDevice device = dev->base.handle.device;
327 
328    if (!LIST_IS_EMPTY(&dev->objects))
329       vkr_log("destroying device with valid objects");
330 
331    VkResult result = vkDeviceWaitIdle(device);
332    if (result != VK_SUCCESS)
333       vkr_log("vkDeviceWaitIdle(%p) failed(%d)", dev, (int32_t)result);
334 
335    if (!LIST_IS_EMPTY(&dev->objects)) {
336       struct vkr_object *obj, *obj_tmp;
337       LIST_FOR_EACH_ENTRY_SAFE (obj, obj_tmp, &dev->objects, track_head)
338          vkr_device_object_destroy(ctx, dev, obj);
339    }
340 
341    struct vkr_queue *queue, *queue_tmp;
342    LIST_FOR_EACH_ENTRY_SAFE (queue, queue_tmp, &dev->queues, base.track_head)
343       vkr_queue_destroy(ctx, queue);
344 
345    struct vkr_queue_sync *sync, *sync_tmp;
346    LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &dev->free_syncs, head) {
347       vkDestroyFence(dev->base.handle.device, sync->fence, NULL);
348       free(sync);
349    }
350 
351    mtx_destroy(&dev->free_sync_mutex);
352 
353    vkDestroyDevice(device, NULL);
354 
355    list_del(&dev->base.track_head);
356 
357    vkr_context_remove_object(ctx, &dev->base);
358 }
359 
360 static void
vkr_dispatch_vkDestroyDevice(struct vn_dispatch_context * dispatch,struct vn_command_vkDestroyDevice * args)361 vkr_dispatch_vkDestroyDevice(struct vn_dispatch_context *dispatch,
362                              struct vn_command_vkDestroyDevice *args)
363 {
364    struct vkr_context *ctx = dispatch->data;
365 
366    struct vkr_device *dev = vkr_device_from_handle(args->device);
367    /* this never happens */
368    if (!dev)
369       return;
370 
371    vkr_device_destroy(ctx, dev);
372 }
373 
374 static void
vkr_dispatch_vkGetDeviceGroupPeerMemoryFeatures(UNUSED struct vn_dispatch_context * dispatch,struct vn_command_vkGetDeviceGroupPeerMemoryFeatures * args)375 vkr_dispatch_vkGetDeviceGroupPeerMemoryFeatures(
376    UNUSED struct vn_dispatch_context *dispatch,
377    struct vn_command_vkGetDeviceGroupPeerMemoryFeatures *args)
378 {
379    vn_replace_vkGetDeviceGroupPeerMemoryFeatures_args_handle(args);
380    vkGetDeviceGroupPeerMemoryFeatures(args->device, args->heapIndex,
381                                       args->localDeviceIndex, args->remoteDeviceIndex,
382                                       args->pPeerMemoryFeatures);
383 }
384 
/* Dispatch handler for vkDeviceWaitIdle.
 *
 * The renderer must not block in a dispatch handler, so instead of calling
 * the driver this marks the command stream as fatally broken.
 */
static void
vkr_dispatch_vkDeviceWaitIdle(struct vn_dispatch_context *dispatch,
                              UNUSED struct vn_command_vkDeviceWaitIdle *args)
{
   struct vkr_context *ctx = dispatch->data;
   /* no blocking call */
   vkr_cs_decoder_set_fatal(&ctx->decoder);
}
393 
/* Wire the device-level command handlers into the context's dispatch table. */
void
vkr_context_init_device_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkCreateDevice = vkr_dispatch_vkCreateDevice;
   dispatch->dispatch_vkDestroyDevice = vkr_dispatch_vkDestroyDevice;
   /* NOTE(review): left NULL intentionally — presumably this command is
    * never forwarded by the guest; confirm against the protocol decoder */
   dispatch->dispatch_vkGetDeviceProcAddr = NULL;
   dispatch->dispatch_vkGetDeviceGroupPeerMemoryFeatures =
      vkr_dispatch_vkGetDeviceGroupPeerMemoryFeatures;
   dispatch->dispatch_vkDeviceWaitIdle = vkr_dispatch_vkDeviceWaitIdle;
}
406