/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_wsi.h"

#include "vk_enum_to_str.h"

#include "wsi_common_entrypoints.h"

#include "vn_device.h"
#include "vn_image.h"
#include "vn_instance.h"
#include "vn_physical_device.h"
#include "vn_queue.h"

/* The common WSI support makes some assumptions about the driver.
 *
 * In wsi_device_init, it assumes VK_EXT_pci_bus_info is available.  In
 * wsi_create_native_image and wsi_create_prime_image, it assumes
 * VK_KHR_external_memory_fd and VK_EXT_external_memory_dma_buf are enabled.
 *
 * In wsi_create_native_image, if wsi_device::supports_modifiers is set and
 * the window system supports modifiers, it assumes
 * VK_EXT_image_drm_format_modifier is enabled.  Otherwise, it assumes that
 * wsi_image_create_info can be chained to VkImageCreateInfo and
 * vkGetImageSubresourceLayout can be called even when the tiling is
 * VK_IMAGE_TILING_OPTIMAL.
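 *
 * For illustration only, a driver-side lookup of that chained struct could
 * look roughly like the following, using the vk_find_struct_const helper
 * from vk_util.h (this file does not need it: vn_wsi_create_image below
 * receives the struct directly as a parameter):
 *
 *    const struct wsi_image_create_info *wsi_info =
 *       vk_find_struct_const(create_info->pNext, WSI_IMAGE_CREATE_INFO_MESA);
 *    if (wsi_info && wsi_info->scanout) {
 *       // the image may be scanned out directly by the window system
 *    }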
 *
 * Together, these assumptions let the common code export dma-bufs, with
 * explicit or implicit modifiers, to the window system.
 *
 * For venus, we use explicit modifiers when the renderer and the window
 * system support them.  Otherwise, we have to fall back to
 * VK_IMAGE_TILING_LINEAR (or trigger the prime blit path).  But the fallback
 * can be problematic when the memory is scanned out directly and special
 * requirements (e.g., alignments) must be met.
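 *
 * In the explicit-modifier case, the common code uses the standard
 * VK_EXT_image_drm_format_modifier structs.  A rough sketch of what gets
 * chained to VkImageCreateInfo (modifier_count and modifiers are placeholders
 * for data queried from the window system):
 *
 *    const VkImageDrmFormatModifierListCreateInfoEXT mod_list = {
 *       .sType =
 *          VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
 *       .drmFormatModifierCount = modifier_count,
 *       .pDrmFormatModifiers = modifiers,
 *    };
 *    // image_info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT
 *    // image_info.pNext = &mod_list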
 *
 * The common WSI support makes other assumptions about the driver to support
 * implicit fencing.  In wsi_create_native_image and wsi_create_prime_image,
 * it assumes wsi_memory_allocate_info can be chained to VkMemoryAllocateInfo.
 * In wsi_common_queue_present, it assumes wsi_memory_signal_submit_info can
 * be chained to VkSubmitInfo.  Finally, in wsi_common_acquire_next_image2, it
 * calls wsi_device::signal_semaphore_for_memory, and
 * wsi_device::signal_fence_for_memory if the driver provides them.
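 *
 * As an illustration only, the driver side of those contracts might look
 * roughly like this, with struct and field names as defined by the common
 * WSI code:
 *
 *    // in the driver's vkAllocateMemory
 *    const struct wsi_memory_allocate_info *wsi_mem_info =
 *       vk_find_struct_const(pAllocateInfo->pNext,
 *                            WSI_MEMORY_ALLOCATE_INFO_MESA);
 *    const bool implicit_sync = wsi_mem_info && wsi_mem_info->implicit_sync;
 *
 *    // in the driver's vkQueueSubmit
 *    const struct wsi_memory_signal_submit_info *wsi_signal_info =
 *       vk_find_struct_const(pSubmits[i].pNext,
 *                            WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
 *    // if present, attach an implicit out-fence to wsi_signal_info->memory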
 *
 * Some drivers use wsi_memory_allocate_info to set up implicit fencing.
 * Others use wsi_memory_signal_submit_info to set up implicit IN-fences and
 * use wsi_device::signal_*_for_memory to set up implicit OUT-fences.
 *
 * For venus, implicit fencing is broken (and there is no explicit fencing
 * support yet).  The kernel driver assumes everything is in the same fence
 * context and no synchronization is needed.  That should be fixed for
 * correctness, but even then implicit fencing is not ideal: venus requires
 * explicit fencing (and renderer-side synchronization) to work well.
 */

/* cast a WSI object to a pointer for logging */
#define VN_WSI_PTR(obj) ((const void *)(uintptr_t)(obj))

static PFN_vkVoidFunction
vn_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
   struct vn_physical_device *physical_dev =
      vn_physical_device_from_handle(physicalDevice);
   return vk_instance_get_proc_addr_unchecked(
      &physical_dev->instance->base.base, pName);
}

VkResult
vn_wsi_init(struct vn_physical_device *physical_dev)
{
   const VkAllocationCallbacks *alloc =
      &physical_dev->instance->base.base.alloc;
   VkResult result = wsi_device_init(
      &physical_dev->wsi_device, vn_physical_device_to_handle(physical_dev),
      vn_wsi_proc_addr, alloc, -1, &physical_dev->instance->dri_options,
      false);
   if (result != VK_SUCCESS)
      return result;

   if (physical_dev->base.base.supported_extensions
          .EXT_image_drm_format_modifier)
      physical_dev->wsi_device.supports_modifiers = true;

   physical_dev->base.base.wsi_device = &physical_dev->wsi_device;

   return VK_SUCCESS;
}

void
vn_wsi_fini(struct vn_physical_device *physical_dev)
{
   const VkAllocationCallbacks *alloc =
      &physical_dev->instance->base.base.alloc;
   physical_dev->base.base.wsi_device = NULL;
   wsi_device_finish(&physical_dev->wsi_device, alloc);
}

VkResult
vn_wsi_create_image(struct vn_device *dev,
                    const VkImageCreateInfo *create_info,
                    const struct wsi_image_create_info *wsi_info,
                    const VkAllocationCallbacks *alloc,
                    struct vn_image **out_img)
{
   /* TODO This is the legacy path used by wsi_create_native_image when there
    * is no modifier support.  Instead of forcing VK_IMAGE_TILING_LINEAR, we
    * should ask wsi to use wsi_create_prime_image instead.
    *
    * In fact, this is not enough when the image is truly used for scanout by
    * the host compositor.  There can be requirements we fail to meet.  We
    * should require modifier support at some point.
    */
   VkImageCreateInfo local_create_info;
   if (wsi_info->scanout) {
      local_create_info = *create_info;
      local_create_info.tiling = VK_IMAGE_TILING_LINEAR;
      create_info = &local_create_info;

      if (VN_DEBUG(WSI))
         vn_log(dev->instance, "forcing scanout image linear");
   }

   struct vn_image *img;
   VkResult result = vn_image_create(dev, create_info, alloc, &img);
   if (result != VK_SUCCESS)
      return result;

   img->is_wsi = true;
   img->is_prime_blit_src = wsi_info->prime_blit_src;

   *out_img = img;
   return VK_SUCCESS;
}

/* swapchain commands */

VkResult
vn_CreateSwapchainKHR(VkDevice device,
                      const VkSwapchainCreateInfoKHR *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkSwapchainKHR *pSwapchain)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = wsi_CreateSwapchainKHR(device, pCreateInfo,
                                            pAllocator, pSwapchain);
   if (VN_DEBUG(WSI) && result == VK_SUCCESS) {
      vn_log(dev->instance,
             "swapchain %p: created with surface %p, min count %d, size "
             "%dx%d, mode %s, old %p",
             VN_WSI_PTR(*pSwapchain), VN_WSI_PTR(pCreateInfo->surface),
             pCreateInfo->minImageCount, pCreateInfo->imageExtent.width,
             pCreateInfo->imageExtent.height,
             vk_PresentModeKHR_to_str(pCreateInfo->presentMode),
             VN_WSI_PTR(pCreateInfo->oldSwapchain));
   }

   return vn_result(dev->instance, result);
}

void
vn_DestroySwapchainKHR(VkDevice device,
                       VkSwapchainKHR swapchain,
                       const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);

   wsi_DestroySwapchainKHR(device, swapchain, pAllocator);
   if (VN_DEBUG(WSI))
      vn_log(dev->instance, "swapchain %p: destroyed", VN_WSI_PTR(swapchain));
}

VkResult
vn_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
   struct vn_queue *queue = vn_queue_from_handle(_queue);

   VkResult result =
      wsi_common_queue_present(&queue->device->physical_device->wsi_device,
                               vn_device_to_handle(queue->device), _queue,
                               queue->family, pPresentInfo);
   if (VN_DEBUG(WSI) && result != VK_SUCCESS) {
      for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
         const VkResult r =
            pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
         vn_log(queue->device->instance,
                "swapchain %p: presented image %d: %s",
                VN_WSI_PTR(pPresentInfo->pSwapchains[i]),
                pPresentInfo->pImageIndices[i], vk_Result_to_str(r));
      }
   }

   return vn_result(queue->device->instance, result);
}

VkResult
vn_AcquireNextImage2KHR(VkDevice device,
                        const VkAcquireNextImageInfoKHR *pAcquireInfo,
                        uint32_t *pImageIndex)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = wsi_common_acquire_next_image2(
      &dev->physical_device->wsi_device, device, pAcquireInfo, pImageIndex);
   if (VN_DEBUG(WSI) && result != VK_SUCCESS) {
      const int idx = result >= VK_SUCCESS ? *pImageIndex : -1;
      vn_log(dev->instance, "swapchain %p: acquired image %d: %s",
             VN_WSI_PTR(pAcquireInfo->swapchain), idx,
             vk_Result_to_str(result));
   }

   /* XXX this relies on implicit sync */
   if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
      struct vn_semaphore *sem =
         vn_semaphore_from_handle(pAcquireInfo->semaphore);
      if (sem)
         vn_semaphore_signal_wsi(dev, sem);

      struct vn_fence *fence = vn_fence_from_handle(pAcquireInfo->fence);
      if (fence)
         vn_fence_signal_wsi(dev, fence);
   }

   return vn_result(dev->instance, result);
}