/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_wsi.h"

#include "drm-uapi/drm_fourcc.h"
#include "vk_enum_to_str.h"
#include "wsi_common_entrypoints.h"

#include "vn_device.h"
#include "vn_image.h"
#include "vn_instance.h"
#include "vn_physical_device.h"
#include "vn_queue.h"

/* The common WSI support makes some assumptions about the driver.
 *
 * In wsi_device_init, it assumes VK_EXT_pci_bus_info is available.  In
 * wsi_create_native_image and wsi_create_prime_image, it assumes
 * VK_KHR_external_memory_fd and VK_EXT_external_memory_dma_buf are enabled.
 *
 * In wsi_create_native_image, if wsi_device::supports_modifiers is set and
 * the window system supports modifiers, it assumes
 * VK_EXT_image_drm_format_modifier is enabled.  Otherwise, it assumes that
 * wsi_image_create_info can be chained to VkImageCreateInfo and
 * vkGetImageSubresourceLayout can be called even when the tiling is
 * VK_IMAGE_TILING_OPTIMAL.
 *
 * Together, it knows how to share dma-bufs, with explicit or implicit
 * modifiers, to the window system.
 *
 * For venus, we use explicit modifiers when the renderer and the window
 * system support them.  Otherwise, we have to fall back to
 * VK_IMAGE_TILING_LINEAR (or trigger the prime blit path).  But the fallback
 * can be problematic when the memory is scanned out directly and special
 * requirements (e.g., alignments) must be met.
 *
 * The common WSI support makes other assumptions about the driver to support
 * implicit fencing.  In wsi_create_native_image and wsi_create_prime_image,
 * it assumes wsi_memory_allocate_info can be chained to VkMemoryAllocateInfo.
 * In wsi_common_queue_present, it assumes wsi_memory_signal_submit_info can
 * be chained to VkSubmitInfo.  Finally, in wsi_common_acquire_next_image2, it
 * calls wsi_device::signal_semaphore_for_memory, and
 * wsi_device::signal_fence_for_memory if the driver provides them.
 *
 * Some drivers use wsi_memory_allocate_info to set up implicit fencing.
 * Others use wsi_memory_signal_submit_info to set up implicit IN-fences and
 * use wsi_device::signal_*_for_memory to set up implicit OUT-fences.
 *
 * For venus, implicit fencing is broken (and there is no explicit fencing
 * support yet).  The kernel driver assumes everything is in the same fence
 * context and no synchronization is needed.  It should be fixed for
 * correctness, but it is still not ideal.  venus requires explicit fencing
 * (and renderer-side synchronization) to work well.
 */

/* cast a WSI object (dispatchable or non-dispatchable handle) to a
 * pointer for logging
 */
#define VN_WSI_PTR(obj) ((const void *)(uintptr_t)(obj))

67 static PFN_vkVoidFunction
vn_wsi_proc_addr(VkPhysicalDevice physicalDevice,const char * pName)68 vn_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
69 {
70    struct vn_physical_device *physical_dev =
71       vn_physical_device_from_handle(physicalDevice);
72    return vk_instance_get_proc_addr_unchecked(
73       &physical_dev->instance->base.base, pName);
74 }
75 
76 VkResult
vn_wsi_init(struct vn_physical_device * physical_dev)77 vn_wsi_init(struct vn_physical_device *physical_dev)
78 {
79    const VkAllocationCallbacks *alloc =
80       &physical_dev->instance->base.base.alloc;
81    VkResult result = wsi_device_init(
82       &physical_dev->wsi_device, vn_physical_device_to_handle(physical_dev),
83       vn_wsi_proc_addr, alloc, -1, &physical_dev->instance->dri_options,
84       false);
85    if (result != VK_SUCCESS)
86       return result;
87 
88    if (physical_dev->base.base.supported_extensions
89           .EXT_image_drm_format_modifier)
90       physical_dev->wsi_device.supports_modifiers = true;
91 
92    physical_dev->base.base.wsi_device = &physical_dev->wsi_device;
93 
94    return VK_SUCCESS;
95 }
96 
97 void
vn_wsi_fini(struct vn_physical_device * physical_dev)98 vn_wsi_fini(struct vn_physical_device *physical_dev)
99 {
100    const VkAllocationCallbacks *alloc =
101       &physical_dev->instance->base.base.alloc;
102    physical_dev->base.base.wsi_device = NULL;
103    wsi_device_finish(&physical_dev->wsi_device, alloc);
104 }
105 
106 VkResult
vn_wsi_create_image(struct vn_device * dev,const VkImageCreateInfo * create_info,const struct wsi_image_create_info * wsi_info,const VkAllocationCallbacks * alloc,struct vn_image ** out_img)107 vn_wsi_create_image(struct vn_device *dev,
108                     const VkImageCreateInfo *create_info,
109                     const struct wsi_image_create_info *wsi_info,
110                     const VkAllocationCallbacks *alloc,
111                     struct vn_image **out_img)
112 {
113    /* TODO This is the legacy path used by wsi_create_native_image when there
114     * is no modifier support.  Instead of forcing linear tiling, we should ask
115     * wsi to use wsi_create_prime_image instead.
116     *
117     * In fact, this is not enough when the image is truely used for scanout by
118     * the host compositor.  There can be requirements we fail to meet.  We
119     * should require modifier support at some point.
120     */
121    const uint64_t modifier = DRM_FORMAT_MOD_LINEAR;
122    const VkImageDrmFormatModifierListCreateInfoEXT mod_list_info = {
123       .sType =
124          VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
125       .pNext = create_info->pNext,
126       .drmFormatModifierCount = 1,
127       .pDrmFormatModifiers = &modifier,
128    };
129    VkImageCreateInfo local_create_info;
130    if (wsi_info->scanout) {
131       assert(!vk_find_struct_const(
132          create_info->pNext, IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT));
133 
134       local_create_info = *create_info;
135       local_create_info.pNext = &mod_list_info;
136       local_create_info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
137       create_info = &local_create_info;
138 
139       if (VN_DEBUG(WSI))
140          vn_log(dev->instance, "forcing scanout image linear");
141    }
142 
143    struct vn_image *img;
144    VkResult result = vn_image_create(dev, create_info, alloc, &img);
145    if (result != VK_SUCCESS)
146       return result;
147 
148    img->wsi.is_wsi = true;
149    img->wsi.is_prime_blit_src = wsi_info->buffer_blit_src;
150    img->wsi.tiling_override = create_info->tiling;
151 
152    if (create_info->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
153       VkDevice dev_handle = vn_device_to_handle(dev);
154       VkImage img_handle = vn_image_to_handle(img);
155 
156       VkImageDrmFormatModifierPropertiesEXT props = {
157          .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
158       };
159       result = vn_GetImageDrmFormatModifierPropertiesEXT(dev_handle,
160                                                          img_handle, &props);
161       if (result != VK_SUCCESS) {
162          vn_DestroyImage(dev_handle, img_handle, alloc);
163          return result;
164       }
165 
166       img->wsi.drm_format_modifier = props.drmFormatModifier;
167    }
168 
169    *out_img = img;
170    return VK_SUCCESS;
171 }
172 
173 VkResult
vn_wsi_create_image_from_swapchain(struct vn_device * dev,const VkImageCreateInfo * create_info,const VkImageSwapchainCreateInfoKHR * swapchain_info,const VkAllocationCallbacks * alloc,struct vn_image ** out_img)174 vn_wsi_create_image_from_swapchain(
175    struct vn_device *dev,
176    const VkImageCreateInfo *create_info,
177    const VkImageSwapchainCreateInfoKHR *swapchain_info,
178    const VkAllocationCallbacks *alloc,
179    struct vn_image **out_img)
180 {
181    const struct vn_image *swapchain_img = vn_image_from_handle(
182       wsi_common_get_image(swapchain_info->swapchain, 0));
183    assert(swapchain_img->wsi.is_wsi);
184 
185    /* must match what the common WSI and vn_wsi_create_image do */
186    VkImageCreateInfo local_create_info = *create_info;
187 
188    /* match external memory */
189    const VkExternalMemoryImageCreateInfo local_external_info = {
190       .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
191       .pNext = local_create_info.pNext,
192       .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
193    };
194    local_create_info.pNext = &local_external_info;
195 
196    /* match image tiling */
197    local_create_info.tiling = swapchain_img->wsi.tiling_override;
198 
199    VkImageDrmFormatModifierListCreateInfoEXT local_mod_info;
200    if (local_create_info.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
201       local_mod_info = (const VkImageDrmFormatModifierListCreateInfoEXT){
202          .sType =
203             VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
204          .pNext = local_create_info.pNext,
205          .drmFormatModifierCount = 1,
206          .pDrmFormatModifiers = &swapchain_img->wsi.drm_format_modifier,
207       };
208       local_create_info.pNext = &local_mod_info;
209    }
210 
211    /* match image usage */
212    if (swapchain_img->wsi.is_prime_blit_src)
213       local_create_info.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
214 
215    create_info = &local_create_info;
216 
217    struct vn_image *img;
218    VkResult result = vn_image_create(dev, create_info, alloc, &img);
219    if (result != VK_SUCCESS)
220       return result;
221 
222    img->wsi.is_wsi = true;
223    img->wsi.tiling_override = swapchain_img->wsi.tiling_override;
224    img->wsi.drm_format_modifier = swapchain_img->wsi.drm_format_modifier;
225 
226    *out_img = img;
227    return VK_SUCCESS;
228 }
229 
/* swapchain commands */

232 VkResult
vn_CreateSwapchainKHR(VkDevice device,const VkSwapchainCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSwapchainKHR * pSwapchain)233 vn_CreateSwapchainKHR(VkDevice device,
234                       const VkSwapchainCreateInfoKHR *pCreateInfo,
235                       const VkAllocationCallbacks *pAllocator,
236                       VkSwapchainKHR *pSwapchain)
237 {
238    struct vn_device *dev = vn_device_from_handle(device);
239 
240    VkResult result =
241       wsi_CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
242    if (VN_DEBUG(WSI) && result == VK_SUCCESS) {
243       vn_log(dev->instance,
244              "swapchain %p: created with surface %p, min count %d, size "
245              "%dx%d, mode %s, old %p",
246              VN_WSI_PTR(*pSwapchain), VN_WSI_PTR(pCreateInfo->surface),
247              pCreateInfo->minImageCount, pCreateInfo->imageExtent.width,
248              pCreateInfo->imageExtent.height,
249              vk_PresentModeKHR_to_str(pCreateInfo->presentMode),
250              VN_WSI_PTR(pCreateInfo->oldSwapchain));
251    }
252 
253    return vn_result(dev->instance, result);
254 }
255 
256 void
vn_DestroySwapchainKHR(VkDevice device,VkSwapchainKHR swapchain,const VkAllocationCallbacks * pAllocator)257 vn_DestroySwapchainKHR(VkDevice device,
258                        VkSwapchainKHR swapchain,
259                        const VkAllocationCallbacks *pAllocator)
260 {
261    struct vn_device *dev = vn_device_from_handle(device);
262 
263    wsi_DestroySwapchainKHR(device, swapchain, pAllocator);
264    if (VN_DEBUG(WSI))
265       vn_log(dev->instance, "swapchain %p: destroyed", VN_WSI_PTR(swapchain));
266 }
267 
268 VkResult
vn_QueuePresentKHR(VkQueue _queue,const VkPresentInfoKHR * pPresentInfo)269 vn_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
270 {
271    VN_TRACE_FUNC();
272    struct vn_queue *queue = vn_queue_from_handle(_queue);
273 
274    VkResult result =
275       wsi_common_queue_present(&queue->device->physical_device->wsi_device,
276                                vn_device_to_handle(queue->device), _queue,
277                                queue->family, pPresentInfo);
278    if (VN_DEBUG(WSI) && result != VK_SUCCESS) {
279       for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
280          const VkResult r =
281             pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
282          vn_log(queue->device->instance,
283                 "swapchain %p: presented image %d: %s",
284                 VN_WSI_PTR(pPresentInfo->pSwapchains[i]),
285                 pPresentInfo->pImageIndices[i], vk_Result_to_str(r));
286       }
287    }
288 
289    return vn_result(queue->device->instance, result);
290 }
291 
292 VkResult
vn_AcquireNextImage2KHR(VkDevice device,const VkAcquireNextImageInfoKHR * pAcquireInfo,uint32_t * pImageIndex)293 vn_AcquireNextImage2KHR(VkDevice device,
294                         const VkAcquireNextImageInfoKHR *pAcquireInfo,
295                         uint32_t *pImageIndex)
296 {
297    VN_TRACE_FUNC();
298    struct vn_device *dev = vn_device_from_handle(device);
299 
300    VkResult result = wsi_common_acquire_next_image2(
301       &dev->physical_device->wsi_device, device, pAcquireInfo, pImageIndex);
302    if (VN_DEBUG(WSI) && result != VK_SUCCESS) {
303       const int idx = result >= VK_SUCCESS ? *pImageIndex : -1;
304       vn_log(dev->instance, "swapchain %p: acquired image %d: %s",
305              VN_WSI_PTR(pAcquireInfo->swapchain), idx,
306              vk_Result_to_str(result));
307    }
308 
309    /* XXX this relies on implicit sync */
310    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
311       struct vn_semaphore *sem =
312          vn_semaphore_from_handle(pAcquireInfo->semaphore);
313       if (sem)
314          vn_semaphore_signal_wsi(dev, sem);
315 
316       struct vn_fence *fence = vn_fence_from_handle(pAcquireInfo->fence);
317       if (fence)
318          vn_fence_signal_wsi(dev, fence);
319    }
320 
321    return vn_result(dev->instance, result);
322 }