/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "wsi_common_private.h"
#include "wsi_common_entrypoints.h"
#include "util/macros.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/xmlconfig.h"
#include "vk_device.h"
#include "vk_instance.h"
#include "vk_physical_device.h"
#include "vk_queue.h"
#include "vk_util.h"

#include <time.h>
#include <stdlib.h>
#include <stdio.h>

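/*
 * Initialize a driver-owned wsi_device.  This resolves the physical-device
 * entrypoints the WSI code needs through the driver-provided proc_addr
 * callback, initializes every platform backend compiled in (X11, Wayland,
 * Win32, display), and applies the MESA_VK_WSI_PRESENT_MODE override and
 * the relevant driconf options.  Drivers typically call this once per
 * physical device.
 */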
VkResult
wsi_device_init(struct wsi_device *wsi,
                VkPhysicalDevice pdevice,
                WSI_FN_GetPhysicalDeviceProcAddr proc_addr,
                const VkAllocationCallbacks *alloc,
                int display_fd,
                const struct driOptionCache *dri_options,
                bool sw_device)
{
   const char *present_mode;
   UNUSED VkResult result;

   memset(wsi, 0, sizeof(*wsi));

   wsi->instance_alloc = *alloc;
   wsi->pdevice = pdevice;
   wsi->sw = sw_device;
#define WSI_GET_CB(func) \
   PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(GetPhysicalDeviceProperties2);
   WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
   WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
#undef WSI_GET_CB

   wsi->pci_bus_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
   VkPhysicalDeviceProperties2 pdp2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
      .pNext = &wsi->pci_bus_info,
   };
   GetPhysicalDeviceProperties2(pdevice, &pdp2);

   wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
   wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;

   GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);

#define WSI_GET_CB(func) \
   wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(AllocateMemory);
   WSI_GET_CB(AllocateCommandBuffers);
   WSI_GET_CB(BindBufferMemory);
   WSI_GET_CB(BindImageMemory);
   WSI_GET_CB(BeginCommandBuffer);
   WSI_GET_CB(CmdCopyImageToBuffer);
   WSI_GET_CB(CreateBuffer);
   WSI_GET_CB(CreateCommandPool);
   WSI_GET_CB(CreateFence);
   WSI_GET_CB(CreateImage);
   WSI_GET_CB(DestroyBuffer);
   WSI_GET_CB(DestroyCommandPool);
   WSI_GET_CB(DestroyFence);
   WSI_GET_CB(DestroyImage);
   WSI_GET_CB(EndCommandBuffer);
   WSI_GET_CB(FreeMemory);
   WSI_GET_CB(FreeCommandBuffers);
   WSI_GET_CB(GetBufferMemoryRequirements);
   WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
   WSI_GET_CB(GetImageMemoryRequirements);
   WSI_GET_CB(GetImageSubresourceLayout);
   if (!wsi->sw)
      WSI_GET_CB(GetMemoryFdKHR);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties2KHR);
   WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
   WSI_GET_CB(ResetFences);
   WSI_GET_CB(QueueSubmit);
   WSI_GET_CB(WaitForFences);
   WSI_GET_CB(MapMemory);
   WSI_GET_CB(UnmapMemory);
#undef WSI_GET_CB

#ifdef VK_USE_PLATFORM_XCB_KHR
   result = wsi_x11_init_wsi(wsi, alloc, dri_options);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_win32_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   result = wsi_display_init_wsi(wsi, alloc, display_fd);
   if (result != VK_SUCCESS)
      goto fail;
#endif

   present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
   if (present_mode) {
      if (!strcmp(present_mode, "fifo")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_KHR;
      } else if (!strcmp(present_mode, "relaxed")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
      } else if (!strcmp(present_mode, "mailbox")) {
         wsi->override_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
      } else if (!strcmp(present_mode, "immediate")) {
         wsi->override_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
      } else {
         fprintf(stderr, "Invalid MESA_VK_WSI_PRESENT_MODE value!\n");
      }
   }

   if (dri_options) {
      if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
         wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
                                                     "adaptive_sync");

      if (driCheckOption(dri_options, "vk_wsi_force_bgra8_unorm_first", DRI_BOOL)) {
         wsi->force_bgra8_unorm_first =
            driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
      }
   }

   return VK_SUCCESS;
#if defined(VK_USE_PLATFORM_XCB_KHR) || \
    defined(VK_USE_PLATFORM_WAYLAND_KHR) || \
    defined(VK_USE_PLATFORM_WIN32_KHR) || \
    defined(VK_USE_PLATFORM_DISPLAY_KHR)
fail:
   wsi_device_finish(wsi, alloc);
   return result;
#endif
}
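
/*
 * Illustrative driver-side usage (a minimal sketch, not lifted from any
 * particular driver; the handle, callback, and allocator names below are
 * placeholders for whatever the driver actually has):
 *
 *    struct wsi_device wsi;
 *    VkResult result =
 *       wsi_device_init(&wsi, physical_device_handle,
 *                       driver_get_physical_device_proc_addr,
 *                       &instance_allocator,
 *                       -1,       // no master display fd
 *                       NULL,     // no driconf cache
 *                       false);   // not a software device
 *    if (result == VK_SUCCESS) {
 *       // ... expose the VK_KHR_surface/swapchain entrypoints ...
 *       wsi_device_finish(&wsi, &instance_allocator);
 *    }
 */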

void
wsi_device_finish(struct wsi_device *wsi,
                  const VkAllocationCallbacks *alloc)
{
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   wsi_win32_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   wsi_x11_finish_wsi(wsi, alloc);
#endif
}

VKAPI_ATTR void VKAPI_CALL
wsi_DestroySurfaceKHR(VkInstance _instance,
                      VkSurfaceKHR _surface,
                      const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

   if (!surface)
      return;

   vk_free2(&instance->alloc, pAllocator, surface);
}

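/*
 * Common swapchain initialization.  Platform backends call this from their
 * create_swapchain implementation: it zeroes the chain, records the owning
 * device and allocator, and creates one command pool per queue family so
 * prime-blit command buffers can later be allocated for whichever queue the
 * application presents from.
 */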
VkResult
wsi_swapchain_init(const struct wsi_device *wsi,
                   struct wsi_swapchain *chain,
                   VkDevice device,
                   const VkSwapchainCreateInfoKHR *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator)
{
   VkResult result;

   memset(chain, 0, sizeof(*chain));

   vk_object_base_init(NULL, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);

   chain->wsi = wsi;
   chain->device = device;
   chain->alloc = *pAllocator;
   chain->use_prime_blit = false;

   chain->cmd_pools =
      vk_zalloc(pAllocator, sizeof(VkCommandPool) * wsi->queue_family_count, 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!chain->cmd_pools)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
      const VkCommandPoolCreateInfo cmd_pool_info = {
         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
         .pNext = NULL,
         .flags = 0,
         .queueFamilyIndex = i,
      };
      result = wsi->CreateCommandPool(device, &cmd_pool_info, &chain->alloc,
                                      &chain->cmd_pools[i]);
      if (result != VK_SUCCESS)
         goto fail;
   }

   return VK_SUCCESS;

fail:
   wsi_swapchain_finish(chain);
   return result;
}

static bool
wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
                                        const VkSwapchainCreateInfoKHR *pCreateInfo,
                                        VkPresentModeKHR mode)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_interface *iface = wsi->wsi[surface->platform];
   VkPresentModeKHR *present_modes;
   uint32_t present_mode_count;
   bool supported = false;
   VkResult result;

   result = iface->get_present_modes(surface, &present_mode_count, NULL);
   if (result != VK_SUCCESS)
      return supported;

   present_modes = malloc(present_mode_count * sizeof(*present_modes));
   if (!present_modes)
      return supported;

   result = iface->get_present_modes(surface, &present_mode_count,
                                     present_modes);
   if (result != VK_SUCCESS)
      goto fail;

   for (uint32_t i = 0; i < present_mode_count; i++) {
      if (present_modes[i] == mode) {
         supported = true;
         break;
      }
   }

fail:
   free(present_modes);
   return supported;
}

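/*
 * Pick the present mode a new swapchain should actually use.  If
 * MESA_VK_WSI_PRESENT_MODE installed an override in wsi_device_init and the
 * surface supports that mode, the override wins; otherwise the mode
 * requested in pCreateInfo is used.
 */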
enum VkPresentModeKHR
wsi_swapchain_get_present_mode(struct wsi_device *wsi,
                               const VkSwapchainCreateInfoKHR *pCreateInfo)
{
   if (wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR)
      return pCreateInfo->presentMode;

   if (!wsi_swapchain_is_present_mode_supported(wsi, pCreateInfo,
                                                wsi->override_present_mode)) {
      fprintf(stderr, "Unsupported MESA_VK_WSI_PRESENT_MODE value!\n");
      return pCreateInfo->presentMode;
   }

   return wsi->override_present_mode;
}

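/*
 * Release the common per-chain resources set up by wsi_swapchain_init and
 * the present path: the per-image present fences, the per-queue-family
 * command pools, and the base object.  Platform backends call this from
 * their destroy path after tearing down their own images.
 */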
void
wsi_swapchain_finish(struct wsi_swapchain *chain)
{
   if (chain->fences) {
      for (unsigned i = 0; i < chain->image_count; i++)
         chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);

      vk_free(&chain->alloc, chain->fences);
   }

   for (uint32_t i = 0; i < chain->wsi->queue_family_count; i++) {
      chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
                                     &chain->alloc);
   }
   vk_free(&chain->alloc, chain->cmd_pools);

   vk_object_base_finish(&chain->base);
}

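/*
 * Free the per-image resources the common WSI code allocates for a
 * swapchain image: the prime blit command buffers (one per queue family,
 * if any), the image and its memory, and the prime linear buffer and its
 * memory.
 */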
void
wsi_destroy_image(const struct wsi_swapchain *chain,
                  struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;

   if (image->prime.blit_cmd_buffers) {
      for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
         wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
                                 1, &image->prime.blit_cmd_buffers[i]);
      }
      vk_free(&chain->alloc, image->prime.blit_cmd_buffers);
   }

   wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
   wsi->DestroyImage(chain->device, image->image, &chain->alloc);
   wsi->FreeMemory(chain->device, image->prime.memory, &chain->alloc);
   wsi->DestroyBuffer(chain->device, image->prime.buffer, &chain->alloc);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
                                       uint32_t queueFamilyIndex,
                                       VkSurfaceKHR _surface,
                                       VkBool32 *pSupported)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_support(surface, wsi_device,
                             queueFamilyIndex, pSupported);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(
   VkPhysicalDevice physicalDevice,
   VkSurfaceKHR _surface,
   VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   VkSurfaceCapabilities2KHR caps2 = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
   };

   VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);

   if (result == VK_SUCCESS)
      *pSurfaceCapabilities = caps2.surfaceCapabilities;

   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
   VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
                                   pSurfaceCapabilities);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(
   VkPhysicalDevice physicalDevice,
   VkSurfaceKHR _surface,
   VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   assert(pSurfaceCapabilities->sType ==
          VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT);

   struct wsi_surface_supported_counters counters = {
      .sType = VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA,
      .pNext = pSurfaceCapabilities->pNext,
      .supported_surface_counters = 0,
   };

   VkSurfaceCapabilities2KHR caps2 = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
      .pNext = &counters,
   };

   VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);

   if (result == VK_SUCCESS) {
      VkSurfaceCapabilities2EXT *ext_caps = pSurfaceCapabilities;
      VkSurfaceCapabilitiesKHR khr_caps = caps2.surfaceCapabilities;

      ext_caps->minImageCount = khr_caps.minImageCount;
      ext_caps->maxImageCount = khr_caps.maxImageCount;
      ext_caps->currentExtent = khr_caps.currentExtent;
      ext_caps->minImageExtent = khr_caps.minImageExtent;
      ext_caps->maxImageExtent = khr_caps.maxImageExtent;
      ext_caps->maxImageArrayLayers = khr_caps.maxImageArrayLayers;
      ext_caps->supportedTransforms = khr_caps.supportedTransforms;
      ext_caps->currentTransform = khr_caps.currentTransform;
      ext_caps->supportedCompositeAlpha = khr_caps.supportedCompositeAlpha;
      ext_caps->supportedUsageFlags = khr_caps.supportedUsageFlags;
      ext_caps->supportedSurfaceCounters = counters.supported_surface_counters;
   }

   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
                                       VkSurfaceKHR _surface,
                                       uint32_t *pSurfaceFormatCount,
                                       VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_formats(surface, wsi_device,
                             pSurfaceFormatCount, pSurfaceFormats);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
                                        const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
                                        uint32_t *pSurfaceFormatCount,
                                        VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
                              pSurfaceFormatCount, pSurfaceFormats);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
                                            VkSurfaceKHR _surface,
                                            uint32_t *pPresentModeCount,
                                            VkPresentModeKHR *pPresentModes)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_present_modes(surface, pPresentModeCount,
                                   pPresentModes);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
                                          VkSurfaceKHR _surface,
                                          uint32_t *pRectCount,
                                          VkRect2D *pRects)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_present_rectangles(surface, wsi_device,
                                        pRectCount, pRects);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateSwapchainKHR(VkDevice _device,
                       const VkSwapchainCreateInfoKHR *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkSwapchainKHR *pSwapchain)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_device *wsi_device = device->physical->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];
   const VkAllocationCallbacks *alloc;
   struct wsi_swapchain *swapchain;

   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;

   VkResult result = iface->create_swapchain(surface, _device, wsi_device,
                                             pCreateInfo, alloc,
                                             &swapchain);
   if (result != VK_SUCCESS)
      return result;

   swapchain->fences = vk_zalloc(alloc,
                                 sizeof (*swapchain->fences) * swapchain->image_count,
                                 sizeof (*swapchain->fences),
                                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!swapchain->fences) {
      swapchain->destroy(swapchain, alloc);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   *pSwapchain = wsi_swapchain_to_handle(swapchain);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
wsi_DestroySwapchainKHR(VkDevice _device,
                        VkSwapchainKHR _swapchain,
                        const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   const VkAllocationCallbacks *alloc;

   if (!swapchain)
      return;

   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;

   swapchain->destroy(swapchain, alloc);
}

VkResult
wsi_common_get_images(VkSwapchainKHR _swapchain,
                      uint32_t *pSwapchainImageCount,
                      VkImage *pSwapchainImages)
{
   VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   VK_OUTARRAY_MAKE_TYPED(VkImage, images, pSwapchainImages, pSwapchainImageCount);

   for (uint32_t i = 0; i < swapchain->image_count; i++) {
      vk_outarray_append_typed(VkImage, &images, image) {
         *image = swapchain->get_wsi_image(swapchain, i)->image;
      }
   }

   return vk_outarray_status(&images);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetSwapchainImagesKHR(VkDevice device,
                          VkSwapchainKHR swapchain,
                          uint32_t *pSwapchainImageCount,
                          VkImage *pSwapchainImages)
{
   return wsi_common_get_images(swapchain,
                                pSwapchainImageCount,
                                pSwapchainImages);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImageKHR(VkDevice _device,
                        VkSwapchainKHR swapchain,
                        uint64_t timeout,
                        VkSemaphore semaphore,
                        VkFence fence,
                        uint32_t *pImageIndex)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   const VkAcquireNextImageInfoKHR acquire_info = {
      .sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
      .swapchain = swapchain,
      .timeout = timeout,
      .semaphore = semaphore,
      .fence = fence,
      .deviceMask = 0,
   };

   return device->dispatch_table.AcquireNextImage2KHR(_device, &acquire_info,
                                                      pImageIndex);
}

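/*
 * Common implementation behind vkAcquireNextImage2KHR.  The platform backend
 * acquires the next image; on success (or VK_SUBOPTIMAL_KHR) the optional
 * driver hooks are invoked to hand memory ownership back to the application
 * and to signal the acquire semaphore/fence from the image's memory.
 */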
VkResult
wsi_common_acquire_next_image2(const struct wsi_device *wsi,
                               VkDevice device,
                               const VkAcquireNextImageInfoKHR *pAcquireInfo,
                               uint32_t *pImageIndex)
{
   VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);

   VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
                                                   pImageIndex);
   if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
      return result;

   if (wsi->set_memory_ownership) {
      VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, *pImageIndex)->memory;
      wsi->set_memory_ownership(swapchain->device, mem, true);
   }

   if (pAcquireInfo->semaphore != VK_NULL_HANDLE &&
       wsi->signal_semaphore_for_memory != NULL) {
      struct wsi_image *image =
         swapchain->get_wsi_image(swapchain, *pImageIndex);
      wsi->signal_semaphore_for_memory(device, pAcquireInfo->semaphore,
                                       image->memory);
   }

   if (pAcquireInfo->fence != VK_NULL_HANDLE &&
       wsi->signal_fence_for_memory != NULL) {
      struct wsi_image *image =
         swapchain->get_wsi_image(swapchain, *pImageIndex);
      wsi->signal_fence_for_memory(device, pAcquireInfo->fence,
                                   image->memory);
   }

   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImage2KHR(VkDevice _device,
                         const VkAcquireNextImageInfoKHR *pAcquireInfo,
                         uint32_t *pImageIndex)
{
   VK_FROM_HANDLE(vk_device, device, _device);

   return wsi_common_acquire_next_image2(device->physical->wsi_device,
                                         _device, pAcquireInfo, pImageIndex);
}

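/*
 * Common implementation behind vkQueuePresentKHR.  For each swapchain in
 * pPresentInfo this submits a batch that waits on the present semaphores
 * (for the first swapchain only), runs the prime blit command buffer when
 * needed, and signals the per-image fence used to throttle reuse of that
 * image, then hands the image to the platform backend's queue_present hook.
 * Per-swapchain results are written to pResults and the first unsuccessful
 * result becomes the overall return value.
 */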
VkResult
wsi_common_queue_present(const struct wsi_device *wsi,
                         VkDevice device,
                         VkQueue queue,
                         int queue_family_index,
                         const VkPresentInfoKHR *pPresentInfo)
{
   VkResult final_result = VK_SUCCESS;

   const VkPresentRegionsKHR *regions =
      vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);

   for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
      VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
      uint32_t image_index = pPresentInfo->pImageIndices[i];
      VkResult result;

      if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
         const VkFenceCreateInfo fence_info = {
            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
         };
         result = wsi->CreateFence(device, &fence_info,
                                   &swapchain->alloc,
                                   &swapchain->fences[image_index]);
         if (result != VK_SUCCESS)
            goto fail_present;
      } else {
         result =
            wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
                               true, ~0ull);
         if (result != VK_SUCCESS)
            goto fail_present;

         result =
            wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
         if (result != VK_SUCCESS)
            goto fail_present;
      }

      struct wsi_image *image =
         swapchain->get_wsi_image(swapchain, image_index);

      struct wsi_memory_signal_submit_info mem_signal = {
         .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
         .pNext = NULL,
         .memory = image->memory,
      };

      VkSubmitInfo submit_info = {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
         .pNext = &mem_signal,
      };

      VkPipelineStageFlags *stage_flags = NULL;
      if (i == 0) {
         /* We only need/want to wait on semaphores once.  After that, we're
          * guaranteed ordering since it all happens on the same queue.
          */
         submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
         submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;

         /* Set up the pWaitDstStageMasks */
         stage_flags = vk_alloc(&swapchain->alloc,
                                sizeof(VkPipelineStageFlags) *
                                pPresentInfo->waitSemaphoreCount,
                                8,
                                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!stage_flags) {
            result = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto fail_present;
         }
         for (uint32_t s = 0; s < pPresentInfo->waitSemaphoreCount; s++)
            stage_flags[s] = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;

         submit_info.pWaitDstStageMask = stage_flags;
      }

      if (swapchain->use_prime_blit) {
         /* If we are using prime blits, we need to perform the blit now.  The
          * command buffer is attached to the image.
          */
         submit_info.commandBufferCount = 1;
         submit_info.pCommandBuffers =
            &image->prime.blit_cmd_buffers[queue_family_index];
         mem_signal.memory = image->prime.memory;
      }

      result = wsi->QueueSubmit(queue, 1, &submit_info, swapchain->fences[image_index]);
      vk_free(&swapchain->alloc, stage_flags);
      if (result != VK_SUCCESS)
         goto fail_present;

      if (wsi->sw)
         wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
                            true, ~0ull);

      const VkPresentRegionKHR *region = NULL;
      if (regions && regions->pRegions)
         region = &regions->pRegions[i];

      result = swapchain->queue_present(swapchain, image_index, region);
      if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
         goto fail_present;

      if (wsi->set_memory_ownership) {
         VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
         wsi->set_memory_ownership(swapchain->device, mem, false);
      }

   fail_present:
      if (pPresentInfo->pResults != NULL)
         pPresentInfo->pResults[i] = result;

      /* Let the final result be our first unsuccessful result */
      if (final_result == VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
   VK_FROM_HANDLE(vk_queue, queue, _queue);

   return wsi_common_queue_present(queue->base.device->physical->wsi_device,
                                   vk_device_to_handle(queue->base.device),
                                   _queue,
                                   queue->queue_family_index,
                                   pPresentInfo);
}

uint64_t
wsi_common_get_current_time(void)
{
   return os_time_get_nano();
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,
                                         VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
{
   memset(pCapabilities->presentMask, 0,
          sizeof(pCapabilities->presentMask));
   pCapabilities->presentMask[0] = 0x1;
   pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,
                                         VkSurfaceKHR surface,
                                         VkDeviceGroupPresentModeFlagsKHR *pModes)
{
   *pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;

   return VK_SUCCESS;
}