1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "wsi_common_private.h"
25 #include "util/macros.h"
26 #include "util/os_file.h"
27 #include "util/xmlconfig.h"
28 #include "vk_util.h"
29
30 #include <time.h>
31 #include <unistd.h>
32 #include <stdlib.h>
33 #include <stdio.h>
34
/* Initialize the common WSI state for a physical device.
 *
 * Queries device properties/limits, caches all the Vulkan entry points the
 * swapchain code needs (via proc_addr), initializes each compiled-in
 * platform backend, and applies the MESA_VK_WSI_PRESENT_MODE override and
 * driconf options.
 *
 * Returns VK_SUCCESS, or the failing platform's error after tearing down
 * whatever was already initialized.
 */
VkResult
wsi_device_init(struct wsi_device *wsi,
                VkPhysicalDevice pdevice,
                WSI_FN_GetPhysicalDeviceProcAddr proc_addr,
                const VkAllocationCallbacks *alloc,
                int display_fd,
                const struct driOptionCache *dri_options,
                bool sw_device)
{
   const char *present_mode;
   /* Only assigned when at least one platform is compiled in; UNUSED
    * silences the warning for platform-less builds. */
   UNUSED VkResult result;

   memset(wsi, 0, sizeof(*wsi));

   wsi->instance_alloc = *alloc;
   wsi->pdevice = pdevice;
   wsi->sw = sw_device;

   /* Fetch the entry points needed during init as local function pointers. */
#define WSI_GET_CB(func) \
   PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(GetPhysicalDeviceProperties2);
   WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
   WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
#undef WSI_GET_CB

   /* Chain the PCI bus info query off the properties2 query. */
   wsi->pci_bus_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
   VkPhysicalDeviceProperties2 pdp2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
      .pNext = &wsi->pci_bus_info,
   };
   GetPhysicalDeviceProperties2(pdevice, &pdp2);

   wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
   /* MAX_ENUM means "no override"; possibly replaced below from the
    * MESA_VK_WSI_PRESENT_MODE environment variable. */
   wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;

   GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
   /* NULL array: count query only. */
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);

   /* Cache the device/instance entry points used by the swapchain code on
    * the wsi_device itself. */
#define WSI_GET_CB(func) \
   wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(AllocateMemory);
   WSI_GET_CB(AllocateCommandBuffers);
   WSI_GET_CB(BindBufferMemory);
   WSI_GET_CB(BindImageMemory);
   WSI_GET_CB(BeginCommandBuffer);
   WSI_GET_CB(CmdCopyImageToBuffer);
   WSI_GET_CB(CreateBuffer);
   WSI_GET_CB(CreateCommandPool);
   WSI_GET_CB(CreateFence);
   WSI_GET_CB(CreateImage);
   WSI_GET_CB(DestroyBuffer);
   WSI_GET_CB(DestroyCommandPool);
   WSI_GET_CB(DestroyFence);
   WSI_GET_CB(DestroyImage);
   WSI_GET_CB(EndCommandBuffer);
   WSI_GET_CB(FreeMemory);
   WSI_GET_CB(FreeCommandBuffers);
   WSI_GET_CB(GetBufferMemoryRequirements);
   WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
   WSI_GET_CB(GetImageMemoryRequirements);
   WSI_GET_CB(GetImageSubresourceLayout);
   /* Software devices have no FD-exportable memory, so skip the import
    * entry point there. */
   if (!wsi->sw)
      WSI_GET_CB(GetMemoryFdKHR);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties2KHR);
   WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
   WSI_GET_CB(ResetFences);
   WSI_GET_CB(QueueSubmit);
   WSI_GET_CB(WaitForFences);
   WSI_GET_CB(MapMemory);
   WSI_GET_CB(UnmapMemory);
#undef WSI_GET_CB

   /* Initialize each compiled-in platform backend; on failure, tear down
    * via wsi_device_finish() (which covers all platforms). */
#ifdef VK_USE_PLATFORM_XCB_KHR
   result = wsi_x11_init_wsi(wsi, alloc, dri_options);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   result = wsi_display_init_wsi(wsi, alloc, display_fd);
   if (result != VK_SUCCESS)
      goto fail;
#endif

   /* Optional user override of the present mode for all swapchains. */
   present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
   if (present_mode) {
      if (!strcmp(present_mode, "fifo")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_KHR;
      } else if (!strcmp(present_mode, "relaxed")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
      } else if (!strcmp(present_mode, "mailbox")) {
         wsi->override_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
      } else if (!strcmp(present_mode, "immediate")) {
         wsi->override_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
      } else {
         fprintf(stderr, "Invalid MESA_VK_WSI_PRESENT_MODE value!\n");
      }
   }

   /* Pick up driconf options the driver passed through, if present. */
   if (dri_options) {
      if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
         wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
                                                     "adaptive_sync");

      if (driCheckOption(dri_options, "vk_wsi_force_bgra8_unorm_first", DRI_BOOL)) {
         wsi->force_bgra8_unorm_first =
            driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
      }
   }

   return VK_SUCCESS;
   /* The fail label only exists (and is only reachable) when at least one
    * platform is compiled in. */
#if defined(VK_USE_PLATFORM_XCB_KHR) || \
   defined(VK_USE_PLATFORM_WAYLAND_KHR) || \
   defined(VK_USE_PLATFORM_DISPLAY_KHR)
fail:
   wsi_device_finish(wsi, alloc);
   return result;
#endif
}
161
/* Tear down the per-platform WSI backends, in the reverse order of their
 * initialization in wsi_device_init().  Also used by wsi_device_init()'s
 * failure path.
 */
void
wsi_device_finish(struct wsi_device *wsi,
                  const VkAllocationCallbacks *alloc)
{
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   wsi_x11_finish_wsi(wsi, alloc);
#endif
}
176
177 VkResult
wsi_swapchain_init(const struct wsi_device * wsi,struct wsi_swapchain * chain,VkDevice device,const VkSwapchainCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator)178 wsi_swapchain_init(const struct wsi_device *wsi,
179 struct wsi_swapchain *chain,
180 VkDevice device,
181 const VkSwapchainCreateInfoKHR *pCreateInfo,
182 const VkAllocationCallbacks *pAllocator)
183 {
184 VkResult result;
185
186 memset(chain, 0, sizeof(*chain));
187
188 vk_object_base_init(NULL, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);
189
190 chain->wsi = wsi;
191 chain->device = device;
192 chain->alloc = *pAllocator;
193 chain->use_prime_blit = false;
194
195 chain->cmd_pools =
196 vk_zalloc(pAllocator, sizeof(VkCommandPool) * wsi->queue_family_count, 8,
197 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
198 if (!chain->cmd_pools)
199 return VK_ERROR_OUT_OF_HOST_MEMORY;
200
201 for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
202 const VkCommandPoolCreateInfo cmd_pool_info = {
203 .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
204 .pNext = NULL,
205 .flags = 0,
206 .queueFamilyIndex = i,
207 };
208 result = wsi->CreateCommandPool(device, &cmd_pool_info, &chain->alloc,
209 &chain->cmd_pools[i]);
210 if (result != VK_SUCCESS)
211 goto fail;
212 }
213
214 return VK_SUCCESS;
215
216 fail:
217 wsi_swapchain_finish(chain);
218 return result;
219 }
220
221 static bool
wsi_swapchain_is_present_mode_supported(struct wsi_device * wsi,const VkSwapchainCreateInfoKHR * pCreateInfo,VkPresentModeKHR mode)222 wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
223 const VkSwapchainCreateInfoKHR *pCreateInfo,
224 VkPresentModeKHR mode)
225 {
226 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
227 struct wsi_interface *iface = wsi->wsi[surface->platform];
228 VkPresentModeKHR *present_modes;
229 uint32_t present_mode_count;
230 bool supported = false;
231 VkResult result;
232
233 result = iface->get_present_modes(surface, &present_mode_count, NULL);
234 if (result != VK_SUCCESS)
235 return supported;
236
237 present_modes = malloc(present_mode_count * sizeof(*present_modes));
238 if (!present_modes)
239 return supported;
240
241 result = iface->get_present_modes(surface, &present_mode_count,
242 present_modes);
243 if (result != VK_SUCCESS)
244 goto fail;
245
246 for (uint32_t i = 0; i < present_mode_count; i++) {
247 if (present_modes[i] == mode) {
248 supported = true;
249 break;
250 }
251 }
252
253 fail:
254 free(present_modes);
255 return supported;
256 }
257
258 enum VkPresentModeKHR
wsi_swapchain_get_present_mode(struct wsi_device * wsi,const VkSwapchainCreateInfoKHR * pCreateInfo)259 wsi_swapchain_get_present_mode(struct wsi_device *wsi,
260 const VkSwapchainCreateInfoKHR *pCreateInfo)
261 {
262 if (wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR)
263 return pCreateInfo->presentMode;
264
265 if (!wsi_swapchain_is_present_mode_supported(wsi, pCreateInfo,
266 wsi->override_present_mode)) {
267 fprintf(stderr, "Unsupported MESA_VK_WSI_PRESENT_MODE value!\n");
268 return pCreateInfo->presentMode;
269 }
270
271 return wsi->override_present_mode;
272 }
273
274 void
wsi_swapchain_finish(struct wsi_swapchain * chain)275 wsi_swapchain_finish(struct wsi_swapchain *chain)
276 {
277 if (chain->fences) {
278 for (unsigned i = 0; i < chain->image_count; i++)
279 chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);
280
281 vk_free(&chain->alloc, chain->fences);
282 }
283
284 for (uint32_t i = 0; i < chain->wsi->queue_family_count; i++) {
285 chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
286 &chain->alloc);
287 }
288 vk_free(&chain->alloc, chain->cmd_pools);
289
290 vk_object_base_finish(&chain->base);
291 }
292
293 void
wsi_destroy_image(const struct wsi_swapchain * chain,struct wsi_image * image)294 wsi_destroy_image(const struct wsi_swapchain *chain,
295 struct wsi_image *image)
296 {
297 const struct wsi_device *wsi = chain->wsi;
298
299 if (image->prime.blit_cmd_buffers) {
300 for (uint32_t i = 0; i < wsi->queue_family_count; i++) {
301 wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
302 1, &image->prime.blit_cmd_buffers[i]);
303 }
304 vk_free(&chain->alloc, image->prime.blit_cmd_buffers);
305 }
306
307 wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
308 wsi->DestroyImage(chain->device, image->image, &chain->alloc);
309 wsi->FreeMemory(chain->device, image->prime.memory, &chain->alloc);
310 wsi->DestroyBuffer(chain->device, image->prime.buffer, &chain->alloc);
311 }
312
313 VkResult
wsi_common_get_surface_support(struct wsi_device * wsi_device,uint32_t queueFamilyIndex,VkSurfaceKHR _surface,VkBool32 * pSupported)314 wsi_common_get_surface_support(struct wsi_device *wsi_device,
315 uint32_t queueFamilyIndex,
316 VkSurfaceKHR _surface,
317 VkBool32* pSupported)
318 {
319 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
320 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
321
322 return iface->get_support(surface, wsi_device,
323 queueFamilyIndex, pSupported);
324 }
325
326 VkResult
wsi_common_get_surface_capabilities(struct wsi_device * wsi_device,VkSurfaceKHR _surface,VkSurfaceCapabilitiesKHR * pSurfaceCapabilities)327 wsi_common_get_surface_capabilities(struct wsi_device *wsi_device,
328 VkSurfaceKHR _surface,
329 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
330 {
331 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
332 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
333
334 VkSurfaceCapabilities2KHR caps2 = {
335 .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
336 };
337
338 VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
339
340 if (result == VK_SUCCESS)
341 *pSurfaceCapabilities = caps2.surfaceCapabilities;
342
343 return result;
344 }
345
346 VkResult
wsi_common_get_surface_capabilities2(struct wsi_device * wsi_device,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,VkSurfaceCapabilities2KHR * pSurfaceCapabilities)347 wsi_common_get_surface_capabilities2(struct wsi_device *wsi_device,
348 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
349 VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
350 {
351 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
352 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
353
354 return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
355 pSurfaceCapabilities);
356 }
357
358 VkResult
wsi_common_get_surface_capabilities2ext(struct wsi_device * wsi_device,VkSurfaceKHR _surface,VkSurfaceCapabilities2EXT * pSurfaceCapabilities)359 wsi_common_get_surface_capabilities2ext(
360 struct wsi_device *wsi_device,
361 VkSurfaceKHR _surface,
362 VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
363 {
364 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
365 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
366
367 assert(pSurfaceCapabilities->sType ==
368 VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT);
369
370 struct wsi_surface_supported_counters counters = {
371 .sType = VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA,
372 .pNext = pSurfaceCapabilities->pNext,
373 .supported_surface_counters = 0,
374 };
375
376 VkSurfaceCapabilities2KHR caps2 = {
377 .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
378 .pNext = &counters,
379 };
380
381 VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
382
383 if (result == VK_SUCCESS) {
384 VkSurfaceCapabilities2EXT *ext_caps = pSurfaceCapabilities;
385 VkSurfaceCapabilitiesKHR khr_caps = caps2.surfaceCapabilities;
386
387 ext_caps->minImageCount = khr_caps.minImageCount;
388 ext_caps->maxImageCount = khr_caps.maxImageCount;
389 ext_caps->currentExtent = khr_caps.currentExtent;
390 ext_caps->minImageExtent = khr_caps.minImageExtent;
391 ext_caps->maxImageExtent = khr_caps.maxImageExtent;
392 ext_caps->maxImageArrayLayers = khr_caps.maxImageArrayLayers;
393 ext_caps->supportedTransforms = khr_caps.supportedTransforms;
394 ext_caps->currentTransform = khr_caps.currentTransform;
395 ext_caps->supportedCompositeAlpha = khr_caps.supportedCompositeAlpha;
396 ext_caps->supportedUsageFlags = khr_caps.supportedUsageFlags;
397 ext_caps->supportedSurfaceCounters = counters.supported_surface_counters;
398 }
399
400 return result;
401 }
402
403 VkResult
wsi_common_get_surface_formats(struct wsi_device * wsi_device,VkSurfaceKHR _surface,uint32_t * pSurfaceFormatCount,VkSurfaceFormatKHR * pSurfaceFormats)404 wsi_common_get_surface_formats(struct wsi_device *wsi_device,
405 VkSurfaceKHR _surface,
406 uint32_t *pSurfaceFormatCount,
407 VkSurfaceFormatKHR *pSurfaceFormats)
408 {
409 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
410 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
411
412 return iface->get_formats(surface, wsi_device,
413 pSurfaceFormatCount, pSurfaceFormats);
414 }
415
416 VkResult
wsi_common_get_surface_formats2(struct wsi_device * wsi_device,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,uint32_t * pSurfaceFormatCount,VkSurfaceFormat2KHR * pSurfaceFormats)417 wsi_common_get_surface_formats2(struct wsi_device *wsi_device,
418 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
419 uint32_t *pSurfaceFormatCount,
420 VkSurfaceFormat2KHR *pSurfaceFormats)
421 {
422 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
423 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
424
425 return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
426 pSurfaceFormatCount, pSurfaceFormats);
427 }
428
429 VkResult
wsi_common_get_surface_present_modes(struct wsi_device * wsi_device,VkSurfaceKHR _surface,uint32_t * pPresentModeCount,VkPresentModeKHR * pPresentModes)430 wsi_common_get_surface_present_modes(struct wsi_device *wsi_device,
431 VkSurfaceKHR _surface,
432 uint32_t *pPresentModeCount,
433 VkPresentModeKHR *pPresentModes)
434 {
435 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
436 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
437
438 return iface->get_present_modes(surface, pPresentModeCount,
439 pPresentModes);
440 }
441
442 VkResult
wsi_common_get_present_rectangles(struct wsi_device * wsi_device,VkSurfaceKHR _surface,uint32_t * pRectCount,VkRect2D * pRects)443 wsi_common_get_present_rectangles(struct wsi_device *wsi_device,
444 VkSurfaceKHR _surface,
445 uint32_t* pRectCount,
446 VkRect2D* pRects)
447 {
448 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
449 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
450
451 return iface->get_present_rectangles(surface, wsi_device,
452 pRectCount, pRects);
453 }
454
455 VkResult
wsi_common_create_swapchain(struct wsi_device * wsi,VkDevice device,const VkSwapchainCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSwapchainKHR * pSwapchain)456 wsi_common_create_swapchain(struct wsi_device *wsi,
457 VkDevice device,
458 const VkSwapchainCreateInfoKHR *pCreateInfo,
459 const VkAllocationCallbacks *pAllocator,
460 VkSwapchainKHR *pSwapchain)
461 {
462 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
463 struct wsi_interface *iface = wsi->wsi[surface->platform];
464 struct wsi_swapchain *swapchain;
465
466 VkResult result = iface->create_swapchain(surface, device, wsi,
467 pCreateInfo, pAllocator,
468 &swapchain);
469 if (result != VK_SUCCESS)
470 return result;
471
472 swapchain->fences = vk_zalloc(pAllocator,
473 sizeof (*swapchain->fences) * swapchain->image_count,
474 sizeof (*swapchain->fences),
475 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
476 if (!swapchain->fences) {
477 swapchain->destroy(swapchain, pAllocator);
478 return VK_ERROR_OUT_OF_HOST_MEMORY;
479 }
480
481 *pSwapchain = wsi_swapchain_to_handle(swapchain);
482
483 return VK_SUCCESS;
484 }
485
486 void
wsi_common_destroy_swapchain(VkDevice device,VkSwapchainKHR _swapchain,const VkAllocationCallbacks * pAllocator)487 wsi_common_destroy_swapchain(VkDevice device,
488 VkSwapchainKHR _swapchain,
489 const VkAllocationCallbacks *pAllocator)
490 {
491 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
492 if (!swapchain)
493 return;
494
495 swapchain->destroy(swapchain, pAllocator);
496 }
497
498 VkResult
wsi_common_get_images(VkSwapchainKHR _swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)499 wsi_common_get_images(VkSwapchainKHR _swapchain,
500 uint32_t *pSwapchainImageCount,
501 VkImage *pSwapchainImages)
502 {
503 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
504 VK_OUTARRAY_MAKE_TYPED(VkImage, images, pSwapchainImages, pSwapchainImageCount);
505
506 for (uint32_t i = 0; i < swapchain->image_count; i++) {
507 vk_outarray_append_typed(VkImage, &images, image) {
508 *image = swapchain->get_wsi_image(swapchain, i)->image;
509 }
510 }
511
512 return vk_outarray_status(&images);
513 }
514
515 VkResult
wsi_common_acquire_next_image2(const struct wsi_device * wsi,VkDevice device,const VkAcquireNextImageInfoKHR * pAcquireInfo,uint32_t * pImageIndex)516 wsi_common_acquire_next_image2(const struct wsi_device *wsi,
517 VkDevice device,
518 const VkAcquireNextImageInfoKHR *pAcquireInfo,
519 uint32_t *pImageIndex)
520 {
521 VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);
522
523 VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
524 pImageIndex);
525 if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
526 return result;
527
528 if (wsi->set_memory_ownership) {
529 VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, *pImageIndex)->memory;
530 wsi->set_memory_ownership(swapchain->device, mem, true);
531 }
532
533 if (pAcquireInfo->semaphore != VK_NULL_HANDLE &&
534 wsi->signal_semaphore_for_memory != NULL) {
535 struct wsi_image *image =
536 swapchain->get_wsi_image(swapchain, *pImageIndex);
537 wsi->signal_semaphore_for_memory(device, pAcquireInfo->semaphore,
538 image->memory);
539 }
540
541 if (pAcquireInfo->fence != VK_NULL_HANDLE &&
542 wsi->signal_fence_for_memory != NULL) {
543 struct wsi_image *image =
544 swapchain->get_wsi_image(swapchain, *pImageIndex);
545 wsi->signal_fence_for_memory(device, pAcquireInfo->fence,
546 image->memory);
547 }
548
549 return result;
550 }
551
/* vkQueuePresentKHR: for each swapchain in the present info, submit a
 * (possibly empty) batch that signals the image's present fence, then hand
 * the image to the platform backend's queue_present hook.
 *
 * Per the spec, a failure for one swapchain does not abort the others:
 * each result is recorded in pPresentInfo->pResults (if provided) and the
 * function returns the first unsuccessful result, or VK_SUCCESS.
 */
VkResult
wsi_common_queue_present(const struct wsi_device *wsi,
                         VkDevice device,
                         VkQueue queue,
                         int queue_family_index,
                         const VkPresentInfoKHR *pPresentInfo)
{
   VkResult final_result = VK_SUCCESS;

   const VkPresentRegionsKHR *regions =
      vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);

   for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
      VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
      uint32_t image_index = pPresentInfo->pImageIndices[i];
      VkResult result;

      /* The per-image fence is created lazily on first present; on later
       * presents of the same image, wait for and reset the previous
       * submission's fence before reusing it. */
      if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
         const VkFenceCreateInfo fence_info = {
            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
         };
         result = wsi->CreateFence(device, &fence_info,
                                   &swapchain->alloc,
                                   &swapchain->fences[image_index]);
         if (result != VK_SUCCESS)
            goto fail_present;
      } else {
         result =
            wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
                               true, ~0ull);
         if (result != VK_SUCCESS)
            goto fail_present;

         result =
            wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
         if (result != VK_SUCCESS)
            goto fail_present;
      }

      struct wsi_image *image =
         swapchain->get_wsi_image(swapchain, image_index);

      /* Mesa-private chain entry telling the driver which memory object
       * this submission presents from. */
      struct wsi_memory_signal_submit_info mem_signal = {
         .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
         .pNext = NULL,
         .memory = image->memory,
      };

      VkSubmitInfo submit_info = {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
         .pNext = &mem_signal,
      };

      VkPipelineStageFlags *stage_flags = NULL;
      if (i == 0) {
         /* We only need/want to wait on semaphores once. After that, we're
          * guaranteed ordering since it all happens on the same queue.
          */
         submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
         submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;

         /* Set up the pWaitDstStageMasks */
         stage_flags = vk_alloc(&swapchain->alloc,
                                sizeof(VkPipelineStageFlags) *
                                pPresentInfo->waitSemaphoreCount,
                                8,
                                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!stage_flags) {
            result = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto fail_present;
         }
         for (uint32_t s = 0; s < pPresentInfo->waitSemaphoreCount; s++)
            stage_flags[s] = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;

         submit_info.pWaitDstStageMask = stage_flags;
      }

      if (swapchain->use_prime_blit) {
         /* If we are using prime blits, we need to perform the blit now. The
          * command buffer is attached to the image.
          */
         submit_info.commandBufferCount = 1;
         submit_info.pCommandBuffers =
            &image->prime.blit_cmd_buffers[queue_family_index];
         /* The backend consumes the linear prime buffer, not the image. */
         mem_signal.memory = image->prime.memory;
      }

      result = wsi->QueueSubmit(queue, 1, &submit_info, swapchain->fences[image_index]);
      /* stage_flags is NULL for i > 0; vk_free(NULL) is a no-op. */
      vk_free(&swapchain->alloc, stage_flags);
      if (result != VK_SUCCESS)
         goto fail_present;

      const VkPresentRegionKHR *region = NULL;
      if (regions && regions->pRegions)
         region = &regions->pRegions[i];

      result = swapchain->queue_present(swapchain, image_index, region);
      if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
         goto fail_present;

      if (wsi->set_memory_ownership) {
         VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
         wsi->set_memory_ownership(swapchain->device, mem, false);
      }

      /* Falls through here on success too: record the per-swapchain result
       * and continue with the next swapchain. */
   fail_present:
      if (pPresentInfo->pResults != NULL)
         pPresentInfo->pResults[i] = result;

      /* Let the final result be our first unsuccessful result */
      if (final_result == VK_SUCCESS)
         final_result = result;
   }

   return final_result;
}
670
/* Return the current CLOCK_MONOTONIC time in nanoseconds. */
uint64_t
wsi_common_get_current_time(void)
{
   struct timespec ts;
   clock_gettime(CLOCK_MONOTONIC, &ts);
   return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}
678