1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "wsi_common_private.h"
25 #include "wsi_common_entrypoints.h"
26 #include "util/u_debug.h"
27 #include "util/macros.h"
28 #include "util/os_file.h"
29 #include "util/os_time.h"
30 #include "util/xmlconfig.h"
31 #include "vk_device.h"
32 #include "vk_fence.h"
33 #include "vk_format.h"
34 #include "vk_instance.h"
35 #include "vk_physical_device.h"
36 #include "vk_queue.h"
37 #include "vk_semaphore.h"
38 #include "vk_sync.h"
39 #include "vk_sync_dummy.h"
40 #include "vk_util.h"
41
42 #include <time.h>
43 #include <stdlib.h>
44 #include <stdio.h>
45
46 #ifndef _WIN32
47 #include <unistd.h>
48 #endif
49
/* Bitmask of WSI debug flags, parsed once from MESA_VK_WSI_DEBUG in
 * wsi_device_init().  Read throughout the WSI code via the WSI_DEBUG_* bits.
 */
uint64_t WSI_DEBUG;

/* Maps MESA_VK_WSI_DEBUG option names to their WSI_DEBUG_* bits; the
 * NULL entry terminates the table for parse_debug_string().
 */
static const struct debug_control debug_control[] = {
   { "buffer", WSI_DEBUG_BUFFER },
   { "sw", WSI_DEBUG_SW },
   { "noshm", WSI_DEBUG_NOSHM },
   { "linear", WSI_DEBUG_LINEAR },
   { "dxgi", WSI_DEBUG_DXGI },
   { "nowlts", WSI_DEBUG_NOWLTS },
   { NULL, },
};
61
present_false(VkPhysicalDevice pdevice,int fd)62 static bool present_false(VkPhysicalDevice pdevice, int fd) {
63 return false;
64 }
65
/* One-time initialization of a driver's wsi_device.
 *
 * Parses debug options, caches physical-device properties and per-queue-family
 * capabilities, resolves all Vulkan entry points WSI needs via proc_addr, and
 * initializes every platform backend compiled into this build.
 *
 * wsi:            zero-initialized here; must outlive all swapchains.
 * proc_addr:      driver's GetPhysicalDeviceProcAddr-style resolver.
 * alloc:          instance-scope allocator, copied into wsi->instance_alloc.
 * display_fd:     only used by the display (KMS) backend.
 * dri_options:    optional driconf cache for per-app workarounds; may be NULL.
 * device_options: driver-supplied feature toggles.
 *
 * Returns VK_SUCCESS or a backend initialization error; on error all
 * previously-initialized backends are torn down via wsi_device_finish().
 */
VkResult
wsi_device_init(struct wsi_device *wsi,
                VkPhysicalDevice pdevice,
                WSI_FN_GetPhysicalDeviceProcAddr proc_addr,
                const VkAllocationCallbacks *alloc,
                int display_fd,
                const struct driOptionCache *dri_options,
                const struct wsi_device_options *device_options)
{
   const char *present_mode;
   /* UNUSED: when no platform backend is compiled in, result is never read. */
   UNUSED VkResult result;

   WSI_DEBUG = parse_debug_string(getenv("MESA_VK_WSI_DEBUG"), debug_control);

   util_cpu_trace_init();

   memset(wsi, 0, sizeof(*wsi));

   wsi->instance_alloc = *alloc;
   wsi->pdevice = pdevice;
   wsi->supports_scanout = true;
   wsi->sw = device_options->sw_device || (WSI_DEBUG & WSI_DEBUG_SW);
   wsi->wants_linear = (WSI_DEBUG & WSI_DEBUG_LINEAR) != 0;
   wsi->x11.extra_xwayland_image = device_options->extra_xwayland_image;
   wsi->wayland.disable_timestamps = (WSI_DEBUG & WSI_DEBUG_NOWLTS) != 0;
/* Temporary macro: resolve a physical-device-level entry point into a local. */
#define WSI_GET_CB(func) \
   PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(GetPhysicalDeviceExternalSemaphoreProperties);
   WSI_GET_CB(GetPhysicalDeviceProperties2);
   WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
   WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
#undef WSI_GET_CB

   /* Chain DRM and PCI-bus property queries onto GetPhysicalDeviceProperties2
    * so backends can later match render/primary nodes to this device.
    */
   wsi->drm_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT;
   wsi->pci_bus_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
   wsi->pci_bus_info.pNext = &wsi->drm_info;
   VkPhysicalDeviceProperties2 pdp2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
      .pNext = &wsi->pci_bus_info,
   };
   GetPhysicalDeviceProperties2(pdevice, &pdp2);

   wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
   /* The limit is a VkDeviceSize; wsi stores it as 32-bit, so check it fits. */
   assert(pdp2.properties.limits.optimalBufferCopyRowPitchAlignment <= UINT32_MAX);
   wsi->optimalBufferCopyRowPitchAlignment =
      pdp2.properties.limits.optimalBufferCopyRowPitchAlignment;
   /* MAX_ENUM means "no override"; may be replaced by the env var below. */
   wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;

   GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);

   /* queue_supports_blit is a 64-bit bitmask, so at most 64 families. */
   assert(wsi->queue_family_count <= 64);
   VkQueueFamilyProperties queue_properties[64];
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, queue_properties);

   /* Record which queue families can run the copy/blit commands WSI may
    * need for buffer-blit presentation paths.
    */
   for (unsigned i = 0; i < wsi->queue_family_count; i++) {
      VkFlags req_flags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT;
      if (queue_properties[i].queueFlags & req_flags)
         wsi->queue_supports_blit |= BITFIELD64_BIT(i);
   }

   /* Probe every external-semaphore handle type (binary and timeline) for
    * exportability; the presentation paths pick from these masks later.
    */
   for (VkExternalSemaphoreHandleTypeFlags handle_type = 1;
        handle_type <= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        handle_type <<= 1) {
      VkPhysicalDeviceExternalSemaphoreInfo esi = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
         .handleType = handle_type,
      };
      VkExternalSemaphoreProperties esp = {
         .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
      };
      GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);

      if (esp.externalSemaphoreFeatures &
          VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
         wsi->semaphore_export_handle_types |= handle_type;

      /* Re-query with a timeline-type chain to test timeline export too. */
      VkSemaphoreTypeCreateInfo timeline_tci = {
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
         .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
      };
      esi.pNext = &timeline_tci;
      GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);

      if (esp.externalSemaphoreFeatures &
          VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
         wsi->timeline_semaphore_export_handle_types |= handle_type;
   }

   const struct vk_device_extension_table *supported_extensions =
      &vk_physical_device_from_handle(pdevice)->supported_extensions;
   wsi->has_import_memory_host =
      supported_extensions->EXT_external_memory_host;
   wsi->khr_present_wait =
      supported_extensions->KHR_present_id &&
      supported_extensions->KHR_present_wait;
   wsi->has_timeline_semaphore =
      supported_extensions->KHR_timeline_semaphore;

   /* We cannot expose KHR_present_wait without timeline semaphores. */
   assert(!wsi->khr_present_wait || supported_extensions->KHR_timeline_semaphore);

   list_inithead(&wsi->hotplug_fences);

/* Temporary macro: resolve a device-level entry point into the wsi struct. */
#define WSI_GET_CB(func) \
   wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(AllocateMemory);
   WSI_GET_CB(AllocateCommandBuffers);
   WSI_GET_CB(BindBufferMemory);
   WSI_GET_CB(BindImageMemory);
   WSI_GET_CB(BeginCommandBuffer);
   WSI_GET_CB(CmdPipelineBarrier);
   WSI_GET_CB(CmdCopyImage);
   WSI_GET_CB(CmdCopyImageToBuffer);
   WSI_GET_CB(CreateBuffer);
   WSI_GET_CB(CreateCommandPool);
   WSI_GET_CB(CreateFence);
   WSI_GET_CB(CreateImage);
   WSI_GET_CB(CreateSemaphore);
   WSI_GET_CB(DestroyBuffer);
   WSI_GET_CB(DestroyCommandPool);
   WSI_GET_CB(DestroyFence);
   WSI_GET_CB(DestroyImage);
   WSI_GET_CB(DestroySemaphore);
   WSI_GET_CB(EndCommandBuffer);
   WSI_GET_CB(FreeMemory);
   WSI_GET_CB(FreeCommandBuffers);
   WSI_GET_CB(GetBufferMemoryRequirements);
   WSI_GET_CB(GetFenceStatus);
   WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
   WSI_GET_CB(GetImageMemoryRequirements);
   WSI_GET_CB(GetImageSubresourceLayout);
   /* Software renderers have no dma-buf export path. */
   if (!wsi->sw)
      WSI_GET_CB(GetMemoryFdKHR);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties2);
   WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
   WSI_GET_CB(GetSemaphoreFdKHR);
   WSI_GET_CB(ResetFences);
   WSI_GET_CB(QueueSubmit);
   WSI_GET_CB(WaitForFences);
   WSI_GET_CB(MapMemory);
   WSI_GET_CB(UnmapMemory);
   /* Only resolved when present-wait (and thus timeline semaphores) exist. */
   if (wsi->khr_present_wait)
      WSI_GET_CB(WaitSemaphores);
#undef WSI_GET_CB

   /* Initialize each compiled-in platform backend; on the first failure,
    * bail to fail: which tears down everything initialized so far.
    */
#if defined(VK_USE_PLATFORM_XCB_KHR)
   result = wsi_x11_init_wsi(wsi, alloc, dri_options);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_win32_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   result = wsi_display_init_wsi(wsi, alloc, display_fd);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_METAL_EXT
   result = wsi_metal_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifndef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_headless_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

   /* Optional user override of the present mode for every swapchain. */
   present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
   if (present_mode) {
      if (!strcmp(present_mode, "fifo")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_KHR;
      } else if (!strcmp(present_mode, "relaxed")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
      } else if (!strcmp(present_mode, "mailbox")) {
         wsi->override_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
      } else if (!strcmp(present_mode, "immediate")) {
         wsi->override_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
      } else {
         fprintf(stderr, "Invalid MESA_VK_WSI_PRESENT_MODE value!\n");
      }
   }

   wsi->force_headless_swapchain =
      debug_get_bool_option("MESA_VK_WSI_HEADLESS_SWAPCHAIN", false);

   /* Apply driconf workarounds/toggles if the driver supplied a cache. */
   if (dri_options) {
      if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
         wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
                                                     "adaptive_sync");

      if (driCheckOption(dri_options, "vk_wsi_force_bgra8_unorm_first", DRI_BOOL)) {
         wsi->force_bgra8_unorm_first =
            driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
      }

      if (driCheckOption(dri_options, "vk_wsi_force_swapchain_to_current_extent", DRI_BOOL)) {
         wsi->force_swapchain_to_currentExtent =
            driQueryOptionb(dri_options, "vk_wsi_force_swapchain_to_current_extent");
      }
   }

   /* can_present_on_device is a function pointer used to determine if images
    * can be presented directly on a given device file descriptor (fd).
    * If HAVE_LIBDRM is defined, it will be initialized to a platform-specific
    * function (wsi_device_matches_drm_fd). Otherwise, it is initialized to
    * present_false to ensure that it always returns false, preventing potential
    * segmentation faults from unchecked calls.
    * Drivers for non-PCI based GPUs are expected to override this after calling
    * wsi_device_init().
    */
#ifdef HAVE_LIBDRM
   wsi->can_present_on_device = wsi_device_matches_drm_fd;
#else
   wsi->can_present_on_device = present_false;
#endif

   return VK_SUCCESS;
fail:
   wsi_device_finish(wsi, alloc);
   return result;
}
305
/* Tears down every platform backend initialized by wsi_device_init().
 *
 * Backends are finished in roughly the reverse of their initialization
 * order.  Safe to call on a partially-initialized device: each backend's
 * finish function must tolerate never having been initialized, since this
 * is also the error-unwind path of wsi_device_init().
 */
void
wsi_device_finish(struct wsi_device *wsi,
                  const VkAllocationCallbacks *alloc)
{
#ifndef VK_USE_PLATFORM_WIN32_KHR
   wsi_headless_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   wsi_win32_finish_wsi(wsi, alloc);
#endif
#if defined(VK_USE_PLATFORM_XCB_KHR)
   wsi_x11_finish_wsi(wsi, alloc);
#endif
#if defined(VK_USE_PLATFORM_METAL_EXT)
   wsi_metal_finish_wsi(wsi, alloc);
#endif
}
329
/* vkDestroySurfaceKHR entry point.
 *
 * Wayland and Win32 surfaces own extra platform resources and have their
 * own destroy paths; every other platform's surface is a plain allocation
 * freed with the instance (or caller-provided) allocator.  NULL surfaces
 * are ignored, as the Vulkan spec requires.
 */
VKAPI_ATTR void VKAPI_CALL
wsi_DestroySurfaceKHR(VkInstance _instance,
                      VkSurfaceKHR _surface,
                      const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

   if (!surface)
      return;

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   if (surface->platform == VK_ICD_WSI_PLATFORM_WAYLAND) {
      wsi_wl_surface_destroy(surface, _instance, pAllocator);
      return;
   }
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   if (surface->platform == VK_ICD_WSI_PLATFORM_WIN32) {
      wsi_win32_surface_destroy(surface, _instance, pAllocator);
      return;
   }
#endif

   vk_free2(&instance->alloc, pAllocator, surface);
}
356
/* Hands the driver's syncobj-capable DRM fd to the display (KMS) backend.
 * A no-op on builds without VK_USE_PLATFORM_DISPLAY_KHR.
 */
void
wsi_device_setup_syncobj_fd(struct wsi_device *wsi_device,
                            int fd)
{
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_setup_syncobj_fd(wsi_device, fd);
#endif
}
365
/* Determines which blit strategy (if any) a swapchain needs for the given
 * image backend: CPU and DRM images can only require a buffer blit, while
 * the DXGI backend decides per-device.  Unknown image types are a caller
 * bug (unreachable).
 */
static enum wsi_swapchain_blit_type
get_blit_type(const struct wsi_device *wsi,
              const struct wsi_base_image_params *params,
              VkDevice device)
{
   switch (params->image_type) {
   case WSI_IMAGE_TYPE_CPU: {
      const struct wsi_cpu_image_params *cpu_params =
         container_of(params, const struct wsi_cpu_image_params, base);
      return wsi_cpu_image_needs_buffer_blit(wsi, cpu_params) ?
         WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
   }
#ifdef HAVE_LIBDRM
   case WSI_IMAGE_TYPE_DRM: {
      const struct wsi_drm_image_params *drm_params =
         container_of(params, const struct wsi_drm_image_params, base);
      return wsi_drm_image_needs_buffer_blit(wsi, drm_params) ?
         WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
   }
#endif
#ifdef _WIN32
   case WSI_IMAGE_TYPE_DXGI: {
      const struct wsi_dxgi_image_params *dxgi_params =
         container_of(params, const struct wsi_dxgi_image_params, base);
      return wsi_dxgi_image_needs_blit(wsi, dxgi_params, device);
   }
#endif
   default:
      unreachable("Invalid image type");
   }
}
397
/* Fills *info with the backend-specific image configuration for the
 * swapchain being created, dispatching on params->image_type.  Returns
 * the backend's result; unknown image types are a caller bug.
 */
static VkResult
configure_image(const struct wsi_swapchain *chain,
                const VkSwapchainCreateInfoKHR *pCreateInfo,
                const struct wsi_base_image_params *params,
                struct wsi_image_info *info)
{
   info->image_type = params->image_type;
   switch (params->image_type) {
   case WSI_IMAGE_TYPE_CPU: {
      const struct wsi_cpu_image_params *cpu_params =
         container_of(params, const struct wsi_cpu_image_params, base);
      return wsi_configure_cpu_image(chain, pCreateInfo, cpu_params, info);
   }
#ifdef HAVE_LIBDRM
   case WSI_IMAGE_TYPE_DRM: {
      const struct wsi_drm_image_params *drm_params =
         container_of(params, const struct wsi_drm_image_params, base);
      return wsi_drm_configure_image(chain, pCreateInfo, drm_params, info);
   }
#endif
#ifdef _WIN32
   case WSI_IMAGE_TYPE_DXGI: {
      const struct wsi_dxgi_image_params *dxgi_params =
         container_of(params, const struct wsi_dxgi_image_params, base);
      return wsi_dxgi_configure_image(chain, pCreateInfo, dxgi_params, info);
   }
#endif
   default:
      unreachable("Invalid image type");
   }
}
429
430 VkResult
wsi_swapchain_init(const struct wsi_device * wsi,struct wsi_swapchain * chain,VkDevice _device,const VkSwapchainCreateInfoKHR * pCreateInfo,const struct wsi_base_image_params * image_params,const VkAllocationCallbacks * pAllocator)431 wsi_swapchain_init(const struct wsi_device *wsi,
432 struct wsi_swapchain *chain,
433 VkDevice _device,
434 const VkSwapchainCreateInfoKHR *pCreateInfo,
435 const struct wsi_base_image_params *image_params,
436 const VkAllocationCallbacks *pAllocator)
437 {
438 VK_FROM_HANDLE(vk_device, device, _device);
439 VkResult result;
440
441 memset(chain, 0, sizeof(*chain));
442
443 vk_object_base_init(device, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);
444
445 chain->wsi = wsi;
446 chain->device = _device;
447 chain->alloc = *pAllocator;
448 chain->blit.type = get_blit_type(wsi, image_params, _device);
449
450 chain->blit.queue = VK_NULL_HANDLE;
451 if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT && wsi->get_blit_queue)
452 chain->blit.queue = wsi->get_blit_queue(_device);
453
454 int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
455
456 chain->cmd_pools =
457 vk_zalloc(pAllocator, sizeof(VkCommandPool) * cmd_pools_count, 8,
458 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
459 if (!chain->cmd_pools)
460 return VK_ERROR_OUT_OF_HOST_MEMORY;
461
462 for (uint32_t i = 0; i < cmd_pools_count; i++) {
463 int queue_family_index = i;
464
465 if (chain->blit.queue != VK_NULL_HANDLE) {
466 VK_FROM_HANDLE(vk_queue, queue, chain->blit.queue);
467 queue_family_index = queue->queue_family_index;
468 } else {
469 /* Queues returned by get_blit_queue() might not be listed in
470 * GetPhysicalDeviceQueueFamilyProperties, so this check is skipped for those queues.
471 */
472 if (!(wsi->queue_supports_blit & BITFIELD64_BIT(queue_family_index)))
473 continue;
474 }
475
476 const VkCommandPoolCreateInfo cmd_pool_info = {
477 .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
478 .pNext = NULL,
479 .flags = 0,
480 .queueFamilyIndex = queue_family_index,
481 };
482 result = wsi->CreateCommandPool(_device, &cmd_pool_info, &chain->alloc,
483 &chain->cmd_pools[i]);
484 if (result != VK_SUCCESS)
485 goto fail;
486 }
487
488 result = configure_image(chain, pCreateInfo, image_params,
489 &chain->image_info);
490 if (result != VK_SUCCESS)
491 goto fail;
492
493 return VK_SUCCESS;
494
495 fail:
496 wsi_swapchain_finish(chain);
497 return result;
498 }
499
500 static bool
wsi_swapchain_is_present_mode_supported(struct wsi_device * wsi,const VkSwapchainCreateInfoKHR * pCreateInfo,VkPresentModeKHR mode)501 wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
502 const VkSwapchainCreateInfoKHR *pCreateInfo,
503 VkPresentModeKHR mode)
504 {
505 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
506 struct wsi_interface *iface = wsi->wsi[surface->platform];
507 VkPresentModeKHR *present_modes;
508 uint32_t present_mode_count;
509 bool supported = false;
510 VkResult result;
511
512 result = iface->get_present_modes(surface, wsi, &present_mode_count, NULL);
513 if (result != VK_SUCCESS)
514 return supported;
515
516 present_modes = malloc(present_mode_count * sizeof(*present_modes));
517 if (!present_modes)
518 return supported;
519
520 result = iface->get_present_modes(surface, wsi, &present_mode_count,
521 present_modes);
522 if (result != VK_SUCCESS)
523 goto fail;
524
525 for (uint32_t i = 0; i < present_mode_count; i++) {
526 if (present_modes[i] == mode) {
527 supported = true;
528 break;
529 }
530 }
531
532 fail:
533 free(present_modes);
534 return supported;
535 }
536
537 VkPresentModeKHR
wsi_swapchain_get_present_mode(struct wsi_device * wsi,const VkSwapchainCreateInfoKHR * pCreateInfo)538 wsi_swapchain_get_present_mode(struct wsi_device *wsi,
539 const VkSwapchainCreateInfoKHR *pCreateInfo)
540 {
541 if (wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR)
542 return pCreateInfo->presentMode;
543
544 if (!wsi_swapchain_is_present_mode_supported(wsi, pCreateInfo,
545 wsi->override_present_mode)) {
546 fprintf(stderr, "Unsupported MESA_VK_WSI_PRESENT_MODE value!\n");
547 return pCreateInfo->presentMode;
548 }
549
550 return wsi->override_present_mode;
551 }
552
553 void
wsi_swapchain_finish(struct wsi_swapchain * chain)554 wsi_swapchain_finish(struct wsi_swapchain *chain)
555 {
556 wsi_destroy_image_info(chain, &chain->image_info);
557
558 if (chain->fences) {
559 for (unsigned i = 0; i < chain->image_count; i++)
560 chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);
561
562 vk_free(&chain->alloc, chain->fences);
563 }
564 if (chain->blit.semaphores) {
565 for (unsigned i = 0; i < chain->image_count; i++)
566 chain->wsi->DestroySemaphore(chain->device, chain->blit.semaphores[i], &chain->alloc);
567
568 vk_free(&chain->alloc, chain->blit.semaphores);
569 }
570 chain->wsi->DestroySemaphore(chain->device, chain->dma_buf_semaphore,
571 &chain->alloc);
572 chain->wsi->DestroySemaphore(chain->device, chain->present_id_timeline,
573 &chain->alloc);
574
575 int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ?
576 1 : chain->wsi->queue_family_count;
577 for (uint32_t i = 0; i < cmd_pools_count; i++) {
578 if (!chain->cmd_pools[i])
579 continue;
580 chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
581 &chain->alloc);
582 }
583 vk_free(&chain->alloc, chain->cmd_pools);
584
585 vk_object_base_finish(&chain->base);
586 }
587
/* Builds the common VkImageCreateInfo (plus pNext chain) in *info for a
 * swapchain image.
 *
 * handle_types: external-memory handle types to advertise on the image, or
 *               0 for none.
 *
 * Allocates the queue-family-index array and, for mutable-format chains,
 * the view-format array; both are owned by *info and freed by
 * wsi_destroy_image_info().  Returns VK_SUCCESS or
 * VK_ERROR_OUT_OF_HOST_MEMORY (after cleaning up *info).
 */
VkResult
wsi_configure_image(const struct wsi_swapchain *chain,
                    const VkSwapchainCreateInfoKHR *pCreateInfo,
                    VkExternalMemoryHandleTypeFlags handle_types,
                    struct wsi_image_info *info)
{
   memset(info, 0, sizeof(*info));
   uint32_t queue_family_count = 1;

   if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
      queue_family_count = pCreateInfo->queueFamilyIndexCount;

   /*
    * TODO: there should be no reason to allocate this, but
    * 15331 shows that games crashed without doing this.
    */
   uint32_t *queue_family_indices =
      vk_alloc(&chain->alloc,
               sizeof(*queue_family_indices) *
               queue_family_count,
               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!queue_family_indices)
      goto err_oom;

   /* For EXCLUSIVE sharing the indices are ignored by the spec, so only
    * CONCURRENT mode copies the caller's list.
    */
   if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
      for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; i++)
         queue_family_indices[i] = pCreateInfo->pQueueFamilyIndices[i];

   info->create = (VkImageCreateInfo) {
      .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
      .flags = VK_IMAGE_CREATE_ALIAS_BIT,
      .imageType = VK_IMAGE_TYPE_2D,
      .format = pCreateInfo->imageFormat,
      .extent = {
         .width = pCreateInfo->imageExtent.width,
         .height = pCreateInfo->imageExtent.height,
         .depth = 1,
      },
      .mipLevels = 1,
      .arrayLayers = 1,
      .samples = VK_SAMPLE_COUNT_1_BIT,
      .tiling = VK_IMAGE_TILING_OPTIMAL,
      .usage = pCreateInfo->imageUsage,
      .sharingMode = pCreateInfo->imageSharingMode,
      .queueFamilyIndexCount = queue_family_count,
      .pQueueFamilyIndices = queue_family_indices,
      .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
   };

   /* Advertise external-memory handle types on the image if requested. */
   if (handle_types != 0) {
      info->ext_mem = (VkExternalMemoryImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
         .handleTypes = handle_types,
      };
      __vk_append_struct(&info->create, &info->ext_mem);
   }

   /* Mark this as a WSI image for the driver via a Mesa-private struct. */
   info->wsi = (struct wsi_image_create_info) {
      .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
   };
   __vk_append_struct(&info->create, &info->wsi);

   /* Mutable-format swapchains must carry the app's view-format list; the
    * spec requires the list to be present and to contain the image format.
    */
   if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
      info->create.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
                            VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;

      const VkImageFormatListCreateInfo *format_list_in =
         vk_find_struct_const(pCreateInfo->pNext,
                              IMAGE_FORMAT_LIST_CREATE_INFO);

      assume(format_list_in && format_list_in->viewFormatCount > 0);

      /* Deep-copy the list: the caller's pNext chain does not outlive
       * swapchain creation, but info->create does.
       */
      const uint32_t view_format_count = format_list_in->viewFormatCount;
      VkFormat *view_formats =
         vk_alloc(&chain->alloc, sizeof(VkFormat) * view_format_count,
                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!view_formats)
         goto err_oom;

      ASSERTED bool format_found = false;
      for (uint32_t i = 0; i < format_list_in->viewFormatCount; i++) {
         if (pCreateInfo->imageFormat == format_list_in->pViewFormats[i])
            format_found = true;
         view_formats[i] = format_list_in->pViewFormats[i];
      }
      assert(format_found);

      info->format_list = (VkImageFormatListCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,
         .viewFormatCount = view_format_count,
         .pViewFormats = view_formats,
      };
      __vk_append_struct(&info->create, &info->format_list);
   }

   return VK_SUCCESS;

err_oom:
   wsi_destroy_image_info(chain, info);
   return VK_ERROR_OUT_OF_HOST_MEMORY;
}
689
690 void
wsi_destroy_image_info(const struct wsi_swapchain * chain,struct wsi_image_info * info)691 wsi_destroy_image_info(const struct wsi_swapchain *chain,
692 struct wsi_image_info *info)
693 {
694 if (info->create.pQueueFamilyIndices != NULL) {
695 vk_free(&chain->alloc, (void *)info->create.pQueueFamilyIndices);
696 info->create.pQueueFamilyIndices = NULL;
697 }
698 if (info->format_list.pViewFormats != NULL) {
699 vk_free(&chain->alloc, (void *)info->format_list.pViewFormats);
700 info->format_list.pViewFormats = NULL;
701 }
702 if (info->drm_mod_list.pDrmFormatModifiers != NULL) {
703 vk_free(&chain->alloc, (void *)info->drm_mod_list.pDrmFormatModifiers);
704 info->drm_mod_list.pDrmFormatModifiers = NULL;
705 }
706 if (info->modifier_props != NULL) {
707 vk_free(&chain->alloc, info->modifier_props);
708 info->modifier_props = NULL;
709 }
710 }
711
/* Creates one swapchain image from a configured wsi_image_info:
 * VkImage -> backend memory (info->create_mem) -> bind -> optional
 * backend finish step -> optional explicit-sync primitives.
 *
 * On any failure the partially-constructed image is torn down with
 * wsi_destroy_image() and the failing result is returned.
 */
VkResult
wsi_create_image(const struct wsi_swapchain *chain,
                 const struct wsi_image_info *info,
                 struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   memset(image, 0, sizeof(*image));

#ifndef _WIN32
   /* Pre-set fds to -1 so the error path can tell "never opened" from
    * fd 0, which is valid.
    */
   image->dma_buf_fd = -1;
   for (uint32_t i = 0; i < WSI_ES_COUNT; i++)
      image->explicit_sync[i].fd = -1;
#endif

   result = wsi->CreateImage(chain->device, &info->create,
                             &chain->alloc, &image->image);
   if (result != VK_SUCCESS)
      goto fail;

   /* Backend-specific memory allocation (CPU, DRM or DXGI path). */
   result = info->create_mem(chain, info, image);
   if (result != VK_SUCCESS)
      goto fail;

   result = wsi->BindImageMemory(chain->device, image->image,
                                 image->memory, 0);
   if (result != VK_SUCCESS)
      goto fail;

   /* Optional backend hook (e.g. modifier queries, blit resources). */
   if (info->finish_create) {
      result = info->finish_create(chain, info, image);
      if (result != VK_SUCCESS)
         goto fail;
   }

   if (info->explicit_sync) {
#if HAVE_LIBDRM
      result = wsi_create_image_explicit_sync_drm(chain, image);
      if (result != VK_SUCCESS)
         goto fail;
#else
      /* Explicit sync requires DRM; without it this configuration is
       * unsupportable.
       */
      result = VK_ERROR_FEATURE_NOT_PRESENT;
      goto fail;
#endif
   }

   return VK_SUCCESS;

fail:
   wsi_destroy_image(chain, image);
   return result;
}
765
/* Destroys a swapchain image and everything wsi_create_image() (and the
 * blit paths) attached to it.  Safe on partially-constructed images:
 * VK_NULL_HANDLE members are ignored by the Destroy/Free entry points and
 * fds were pre-set to -1.
 */
void
wsi_destroy_image(const struct wsi_swapchain *chain,
                  struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;

#ifndef _WIN32
   if (image->dma_buf_fd >= 0)
      close(image->dma_buf_fd);
#endif

   if (image->explicit_sync[WSI_ES_ACQUIRE].semaphore) {
#if HAVE_LIBDRM
      wsi_destroy_image_explicit_sync_drm(chain, image);
#endif
   }

   /* Unmap whichever allocation was CPU-mapped: the blit staging memory
    * when a blit buffer exists, the image memory otherwise.
    */
   if (image->cpu_map != NULL) {
      wsi->UnmapMemory(chain->device, image->blit.buffer != VK_NULL_HANDLE ?
                                      image->blit.memory : image->memory);
   }

   if (image->blit.cmd_buffers) {
      /* Must match the pool-count logic in wsi_swapchain_init(). */
      int cmd_buffer_count =
         chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;

      for (uint32_t i = 0; i < cmd_buffer_count; i++) {
         if (!chain->cmd_pools[i])
            continue;
         wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
                                 1, &image->blit.cmd_buffers[i]);
      }
      vk_free(&chain->alloc, image->blit.cmd_buffers);
   }

   wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
   wsi->DestroyImage(chain->device, image->image, &chain->alloc);
   wsi->DestroyImage(chain->device, image->blit.image, &chain->alloc);
   wsi->FreeMemory(chain->device, image->blit.memory, &chain->alloc);
   wsi->DestroyBuffer(chain->device, image->blit.buffer, &chain->alloc);
}
807
808 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,uint32_t queueFamilyIndex,VkSurfaceKHR _surface,VkBool32 * pSupported)809 wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
810 uint32_t queueFamilyIndex,
811 VkSurfaceKHR _surface,
812 VkBool32 *pSupported)
813 {
814 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
815 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
816 struct wsi_device *wsi_device = device->wsi_device;
817 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
818
819 VkResult res = iface->get_support(surface, wsi_device,
820 queueFamilyIndex, pSupported);
821 if (res == VK_SUCCESS) {
822 bool blit = (wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)) != 0;
823 *pSupported = (bool)*pSupported && blit;
824 }
825
826 return res;
827 }
828
829 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,VkSurfaceCapabilitiesKHR * pSurfaceCapabilities)830 wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(
831 VkPhysicalDevice physicalDevice,
832 VkSurfaceKHR _surface,
833 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
834 {
835 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
836 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
837 struct wsi_device *wsi_device = device->wsi_device;
838 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
839
840 VkSurfaceCapabilities2KHR caps2 = {
841 .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
842 };
843
844 VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
845
846 if (result == VK_SUCCESS)
847 *pSurfaceCapabilities = caps2.surfaceCapabilities;
848
849 return result;
850 }
851
852 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,VkSurfaceCapabilities2KHR * pSurfaceCapabilities)853 wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(
854 VkPhysicalDevice physicalDevice,
855 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
856 VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
857 {
858 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
859 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
860 struct wsi_device *wsi_device = device->wsi_device;
861 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
862
863 return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
864 pSurfaceCapabilities);
865 }
866
867 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,VkSurfaceCapabilities2EXT * pSurfaceCapabilities)868 wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(
869 VkPhysicalDevice physicalDevice,
870 VkSurfaceKHR _surface,
871 VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
872 {
873 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
874 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
875 struct wsi_device *wsi_device = device->wsi_device;
876 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
877
878 assert(pSurfaceCapabilities->sType ==
879 VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT);
880
881 struct wsi_surface_supported_counters counters = {
882 .sType = VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA,
883 .pNext = pSurfaceCapabilities->pNext,
884 .supported_surface_counters = 0,
885 };
886
887 VkSurfaceCapabilities2KHR caps2 = {
888 .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
889 .pNext = &counters,
890 };
891
892 VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
893
894 if (result == VK_SUCCESS) {
895 VkSurfaceCapabilities2EXT *ext_caps = pSurfaceCapabilities;
896 VkSurfaceCapabilitiesKHR khr_caps = caps2.surfaceCapabilities;
897
898 ext_caps->minImageCount = khr_caps.minImageCount;
899 ext_caps->maxImageCount = khr_caps.maxImageCount;
900 ext_caps->currentExtent = khr_caps.currentExtent;
901 ext_caps->minImageExtent = khr_caps.minImageExtent;
902 ext_caps->maxImageExtent = khr_caps.maxImageExtent;
903 ext_caps->maxImageArrayLayers = khr_caps.maxImageArrayLayers;
904 ext_caps->supportedTransforms = khr_caps.supportedTransforms;
905 ext_caps->currentTransform = khr_caps.currentTransform;
906 ext_caps->supportedCompositeAlpha = khr_caps.supportedCompositeAlpha;
907 ext_caps->supportedUsageFlags = khr_caps.supportedUsageFlags;
908 ext_caps->supportedSurfaceCounters = counters.supported_surface_counters;
909 }
910
911 return result;
912 }
913
914 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pSurfaceFormatCount,VkSurfaceFormatKHR * pSurfaceFormats)915 wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
916 VkSurfaceKHR _surface,
917 uint32_t *pSurfaceFormatCount,
918 VkSurfaceFormatKHR *pSurfaceFormats)
919 {
920 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
921 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
922 struct wsi_device *wsi_device = device->wsi_device;
923 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
924
925 return iface->get_formats(surface, wsi_device,
926 pSurfaceFormatCount, pSurfaceFormats);
927 }
928
929 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,uint32_t * pSurfaceFormatCount,VkSurfaceFormat2KHR * pSurfaceFormats)930 wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
931 const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,
932 uint32_t *pSurfaceFormatCount,
933 VkSurfaceFormat2KHR *pSurfaceFormats)
934 {
935 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
936 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
937 struct wsi_device *wsi_device = device->wsi_device;
938 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
939
940 return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
941 pSurfaceFormatCount, pSurfaceFormats);
942 }
943
944 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pPresentModeCount,VkPresentModeKHR * pPresentModes)945 wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
946 VkSurfaceKHR _surface,
947 uint32_t *pPresentModeCount,
948 VkPresentModeKHR *pPresentModes)
949 {
950 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
951 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
952 struct wsi_device *wsi_device = device->wsi_device;
953 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
954
955 return iface->get_present_modes(surface, wsi_device, pPresentModeCount,
956 pPresentModes);
957 }
958
959 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pRectCount,VkRect2D * pRects)960 wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
961 VkSurfaceKHR _surface,
962 uint32_t *pRectCount,
963 VkRect2D *pRects)
964 {
965 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
966 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
967 struct wsi_device *wsi_device = device->wsi_device;
968 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
969
970 return iface->get_present_rectangles(surface, wsi_device,
971 pRectCount, pRects);
972 }
973
/* Common vkCreateSwapchainKHR entrypoint: creates the platform swapchain,
 * then allocates the per-image throttle fences and (optionally) the
 * present-wait timeline semaphore and per-image blit semaphores that the
 * shared present path relies on.  On any failure after creation, the
 * partially-built swapchain is destroyed before returning. */
VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateSwapchainKHR(VkDevice _device,
                       const VkSwapchainCreateInfoKHR *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkSwapchainKHR *pSwapchain)
{
   MESA_TRACE_FUNC();
   VK_FROM_HANDLE(vk_device, device, _device);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_device *wsi_device = device->physical->wsi_device;
   /* Debug option: force every swapchain onto the headless backend
    * regardless of the surface's real platform. */
   struct wsi_interface *iface = wsi_device->force_headless_swapchain ?
      wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] :
      wsi_device->wsi[surface->platform];
   const VkAllocationCallbacks *alloc;
   struct wsi_swapchain *swapchain;

   /* Fall back to the device allocator when the app supplied none. */
   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;

   VkSwapchainCreateInfoKHR info = *pCreateInfo;

   if (wsi_device->force_swapchain_to_currentExtent) {
      VkSurfaceCapabilities2KHR caps2 = {
         .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
      };
      /* NOTE(review): the result of this query is ignored; on failure
       * caps2.surfaceCapabilities is uninitialized — confirm backends
       * cannot fail here, or check the result. */
      iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
      info.imageExtent = caps2.surfaceCapabilities.currentExtent;
   }

   /* Ignore DEFERRED_MEMORY_ALLOCATION_BIT. Would require deep plumbing to be able to take advantage of it.
    * bool deferred_allocation = pCreateInfo->flags & VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_EXT;
    */

   VkResult result = iface->create_swapchain(surface, _device, wsi_device,
                                             &info, alloc,
                                             &swapchain);
   if (result != VK_SUCCESS)
      return result;

   /* One throttle fence per image; zero-initialized so unallocated slots
    * read as VK_NULL_HANDLE until first present. */
   swapchain->fences = vk_zalloc(alloc,
                                 sizeof (*swapchain->fences) * swapchain->image_count,
                                 sizeof (*swapchain->fences),
                                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!swapchain->fences) {
      swapchain->destroy(swapchain, alloc);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   if (wsi_device->khr_present_wait) {
      /* Timeline semaphore signaled with each present ID, so
       * vkWaitForPresentKHR can wait on it. */
      const VkSemaphoreTypeCreateInfo type_info = {
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
         .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
      };

      const VkSemaphoreCreateInfo sem_info = {
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
         .pNext = &type_info,
         .flags = 0,
      };

      /* We assume here that a driver exposing present_wait also exposes VK_KHR_timeline_semaphore. */
      result = wsi_device->CreateSemaphore(_device, &sem_info, alloc, &swapchain->present_id_timeline);
      if (result != VK_SUCCESS) {
         swapchain->destroy(swapchain, alloc);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }

   if (swapchain->blit.queue != VK_NULL_HANDLE) {
      /* Per-image semaphores ordering app-queue submits before the
       * driver-private blit queue submits. */
      swapchain->blit.semaphores = vk_zalloc(alloc,
                                             sizeof (*swapchain->blit.semaphores) * swapchain->image_count,
                                             sizeof (*swapchain->blit.semaphores),
                                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!swapchain->blit.semaphores) {
         /* Destroying VK_NULL_HANDLE here is legal if the timeline was
          * never created. */
         wsi_device->DestroySemaphore(_device, swapchain->present_id_timeline, alloc);
         swapchain->destroy(swapchain, alloc);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }

   *pSwapchain = wsi_swapchain_to_handle(swapchain);

   return VK_SUCCESS;
}
1060
1061 VKAPI_ATTR void VKAPI_CALL
wsi_DestroySwapchainKHR(VkDevice _device,VkSwapchainKHR _swapchain,const VkAllocationCallbacks * pAllocator)1062 wsi_DestroySwapchainKHR(VkDevice _device,
1063 VkSwapchainKHR _swapchain,
1064 const VkAllocationCallbacks *pAllocator)
1065 {
1066 MESA_TRACE_FUNC();
1067 VK_FROM_HANDLE(vk_device, device, _device);
1068 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1069 const VkAllocationCallbacks *alloc;
1070
1071 if (!swapchain)
1072 return;
1073
1074 if (pAllocator)
1075 alloc = pAllocator;
1076 else
1077 alloc = &device->alloc;
1078
1079 swapchain->destroy(swapchain, alloc);
1080 }
1081
1082 VKAPI_ATTR VkResult VKAPI_CALL
wsi_ReleaseSwapchainImagesEXT(VkDevice _device,const VkReleaseSwapchainImagesInfoEXT * pReleaseInfo)1083 wsi_ReleaseSwapchainImagesEXT(VkDevice _device,
1084 const VkReleaseSwapchainImagesInfoEXT *pReleaseInfo)
1085 {
1086 VK_FROM_HANDLE(wsi_swapchain, swapchain, pReleaseInfo->swapchain);
1087
1088 for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
1089 uint32_t index = pReleaseInfo->pImageIndices[i];
1090 assert(index < swapchain->image_count);
1091 struct wsi_image *image = swapchain->get_wsi_image(swapchain, index);
1092 assert(image->acquired);
1093 image->acquired = false;
1094 }
1095
1096 VkResult result = swapchain->release_images(swapchain,
1097 pReleaseInfo->imageIndexCount,
1098 pReleaseInfo->pImageIndices);
1099
1100 if (result != VK_SUCCESS)
1101 return result;
1102
1103 if (swapchain->wsi->set_memory_ownership) {
1104 for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
1105 uint32_t image_index = pReleaseInfo->pImageIndices[i];
1106 VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1107 swapchain->wsi->set_memory_ownership(swapchain->device, mem, false);
1108 }
1109 }
1110
1111 return VK_SUCCESS;
1112 }
1113
/* Shared implementation of vkGetSwapchainImagesKHR: follows the standard
 * Vulkan enumerate-or-fill protocol via the vk_outarray helpers — when
 * pSwapchainImages is NULL only the count is written; otherwise up to
 * *pSwapchainImageCount handles are written and VK_INCOMPLETE is returned
 * if the array was too small. */
VkResult
wsi_common_get_images(VkSwapchainKHR _swapchain,
                      uint32_t *pSwapchainImageCount,
                      VkImage *pSwapchainImages)
{
   VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   VK_OUTARRAY_MAKE_TYPED(VkImage, images, pSwapchainImages, pSwapchainImageCount);

   for (uint32_t i = 0; i < swapchain->image_count; i++) {
      vk_outarray_append_typed(VkImage, &images, image) {
         *image = swapchain->get_wsi_image(swapchain, i)->image;
      }
   }

   return vk_outarray_status(&images);
}
1130
1131 VkImage
wsi_common_get_image(VkSwapchainKHR _swapchain,uint32_t index)1132 wsi_common_get_image(VkSwapchainKHR _swapchain, uint32_t index)
1133 {
1134 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1135 assert(index < swapchain->image_count);
1136 return swapchain->get_wsi_image(swapchain, index)->image;
1137 }
1138
1139 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetSwapchainImagesKHR(VkDevice device,VkSwapchainKHR swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)1140 wsi_GetSwapchainImagesKHR(VkDevice device,
1141 VkSwapchainKHR swapchain,
1142 uint32_t *pSwapchainImageCount,
1143 VkImage *pSwapchainImages)
1144 {
1145 MESA_TRACE_FUNC();
1146 return wsi_common_get_images(swapchain,
1147 pSwapchainImageCount,
1148 pSwapchainImages);
1149 }
1150
1151 VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImageKHR(VkDevice _device,VkSwapchainKHR swapchain,uint64_t timeout,VkSemaphore semaphore,VkFence fence,uint32_t * pImageIndex)1152 wsi_AcquireNextImageKHR(VkDevice _device,
1153 VkSwapchainKHR swapchain,
1154 uint64_t timeout,
1155 VkSemaphore semaphore,
1156 VkFence fence,
1157 uint32_t *pImageIndex)
1158 {
1159 MESA_TRACE_FUNC();
1160 VK_FROM_HANDLE(vk_device, device, _device);
1161
1162 const VkAcquireNextImageInfoKHR acquire_info = {
1163 .sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
1164 .swapchain = swapchain,
1165 .timeout = timeout,
1166 .semaphore = semaphore,
1167 .fence = fence,
1168 .deviceMask = 0,
1169 };
1170
1171 return device->dispatch_table.AcquireNextImage2KHR(_device, &acquire_info,
1172 pImageIndex);
1173 }
1174
/* Arrange for the acquire semaphore to be signaled for `image`.
 * Preference order: DRM syncobj (explicit sync) or dma-buf implicit-sync
 * wait when libdrm is available; otherwise a driver-provided
 * signal-on-memory sync; otherwise a dummy sync that is always signaled. */
static VkResult
wsi_signal_semaphore_for_image(struct vk_device *device,
                               const struct wsi_swapchain *chain,
                               const struct wsi_image *image,
                               VkSemaphore _semaphore)
{
   /* No sync types at all means the driver cannot express this; treat as
    * trivially successful. */
   if (device->physical->supported_sync_types == NULL)
      return VK_SUCCESS;

   VK_FROM_HANDLE(vk_semaphore, semaphore, _semaphore);

   /* Drop any temporary payload from a previous acquire before installing
    * a new one. */
   vk_semaphore_reset_temporary(device, semaphore);

#ifdef HAVE_LIBDRM
   VkResult result = chain->image_info.explicit_sync ?
      wsi_create_sync_for_image_syncobj(chain, image,
                                        VK_SYNC_FEATURE_GPU_WAIT,
                                        &semaphore->temporary) :
      wsi_create_sync_for_dma_buf_wait(chain, image,
                                       VK_SYNC_FEATURE_GPU_WAIT,
                                       &semaphore->temporary);
   /* VK_ERROR_FEATURE_NOT_PRESENT means "mechanism unavailable"; fall
    * through to the portable paths below.  Anything else is final. */
   if (result != VK_ERROR_FEATURE_NOT_PRESENT)
      return result;
#endif

   if (chain->wsi->signal_semaphore_with_memory) {
      return device->create_sync_for_memory(device, image->memory,
                                            false /* signal_memory */,
                                            &semaphore->temporary);
   } else {
      /* Last resort: a dummy sync object so the semaphore is considered
       * signaled immediately. */
      return vk_sync_create(device, &vk_sync_dummy_type,
                            0 /* flags */, 0 /* initial_value */,
                            &semaphore->temporary);
   }
}
1210
/* Fence counterpart of wsi_signal_semaphore_for_image: arrange for the
 * acquire fence to be signaled for `image`, using CPU-waitable sync
 * features.  Same fallback chain: syncobj / dma-buf (libdrm), then
 * signal-on-memory, then an always-signaled dummy sync. */
static VkResult
wsi_signal_fence_for_image(struct vk_device *device,
                           const struct wsi_swapchain *chain,
                           const struct wsi_image *image,
                           VkFence _fence)
{
   /* No sync types at all means the driver cannot express this; treat as
    * trivially successful. */
   if (device->physical->supported_sync_types == NULL)
      return VK_SUCCESS;

   VK_FROM_HANDLE(vk_fence, fence, _fence);

   /* Drop any temporary payload from a previous acquire. */
   vk_fence_reset_temporary(device, fence);

#ifdef HAVE_LIBDRM
   VkResult result = chain->image_info.explicit_sync ?
      wsi_create_sync_for_image_syncobj(chain, image,
                                        VK_SYNC_FEATURE_CPU_WAIT,
                                        &fence->temporary) :
      wsi_create_sync_for_dma_buf_wait(chain, image,
                                       VK_SYNC_FEATURE_CPU_WAIT,
                                       &fence->temporary);
   /* VK_ERROR_FEATURE_NOT_PRESENT selects the portable fallbacks below;
    * any other result (success or failure) is final. */
   if (result != VK_ERROR_FEATURE_NOT_PRESENT)
      return result;
#endif

   if (chain->wsi->signal_fence_with_memory) {
      return device->create_sync_for_memory(device, image->memory,
                                            false /* signal_memory */,
                                            &fence->temporary);
   } else {
      /* Last resort: dummy sync so the fence reads as signaled. */
      return vk_sync_create(device, &vk_sync_dummy_type,
                            0 /* flags */, 0 /* initial_value */,
                            &fence->temporary);
   }
}
1246
/* Shared implementation of vkAcquireNextImage2KHR: acquires an image from
 * the platform backend, marks it acquired, and installs temporary payloads
 * on the caller's semaphore/fence so they signal when the image is ready.
 * Returns the backend's result (which may be VK_SUBOPTIMAL_KHR) unless a
 * signaling step fails, in which case that error wins. */
VkResult
wsi_common_acquire_next_image2(const struct wsi_device *wsi,
                               VkDevice _device,
                               const VkAcquireNextImageInfoKHR *pAcquireInfo,
                               uint32_t *pImageIndex)
{
   VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);
   VK_FROM_HANDLE(vk_device, device, _device);

   VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
                                                   pImageIndex);
   /* SUBOPTIMAL still delivers a valid image, so continue; any other
    * non-success result is returned as-is. */
   if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
      return result;
   struct wsi_image *image =
      swapchain->get_wsi_image(swapchain, *pImageIndex);

   image->acquired = true;

   if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
      VkResult signal_result =
         wsi_signal_semaphore_for_image(device, swapchain, image,
                                        pAcquireInfo->semaphore);
      if (signal_result != VK_SUCCESS)
         return signal_result;
   }

   if (pAcquireInfo->fence != VK_NULL_HANDLE) {
      VkResult signal_result =
         wsi_signal_fence_for_image(device, swapchain, image,
                                    pAcquireInfo->fence);
      if (signal_result != VK_SUCCESS)
         return signal_result;
   }

   /* Hand memory ownership to the application side for drivers that
    * track it. */
   if (wsi->set_memory_ownership)
      wsi->set_memory_ownership(swapchain->device, image->memory, true);

   /* Preserve VK_SUBOPTIMAL_KHR from the backend. */
   return result;
}
1286
1287 VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImage2KHR(VkDevice _device,const VkAcquireNextImageInfoKHR * pAcquireInfo,uint32_t * pImageIndex)1288 wsi_AcquireNextImage2KHR(VkDevice _device,
1289 const VkAcquireNextImageInfoKHR *pAcquireInfo,
1290 uint32_t *pImageIndex)
1291 {
1292 MESA_TRACE_FUNC();
1293 VK_FROM_HANDLE(vk_device, device, _device);
1294
1295 return wsi_common_acquire_next_image2(device->physical->wsi_device,
1296 _device, pAcquireInfo, pImageIndex);
1297 }
1298
wsi_signal_present_id_timeline(struct wsi_swapchain * swapchain,VkQueue queue,uint64_t present_id,VkFence present_fence)1299 static VkResult wsi_signal_present_id_timeline(struct wsi_swapchain *swapchain,
1300 VkQueue queue, uint64_t present_id,
1301 VkFence present_fence)
1302 {
1303 assert(swapchain->present_id_timeline || present_fence);
1304
1305 const VkTimelineSemaphoreSubmitInfo timeline_info = {
1306 .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
1307 .pSignalSemaphoreValues = &present_id,
1308 .signalSemaphoreValueCount = 1,
1309 };
1310
1311 const VkSubmitInfo submit_info = {
1312 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1313 .pNext = &timeline_info,
1314 .signalSemaphoreCount = 1,
1315 .pSignalSemaphores = &swapchain->present_id_timeline,
1316 };
1317
1318 uint32_t submit_count = present_id ? 1 : 0;
1319 return swapchain->wsi->QueueSubmit(queue, submit_count, &submit_info, present_fence);
1320 }
1321
1322 static VkResult
handle_trace(VkQueue queue,struct vk_device * device,uint32_t current_frame)1323 handle_trace(VkQueue queue, struct vk_device *device, uint32_t current_frame)
1324 {
1325 struct vk_instance *instance = device->physical->instance;
1326 if (!instance->trace_mode)
1327 return VK_SUCCESS;
1328
1329 simple_mtx_lock(&device->trace_mtx);
1330
1331 bool frame_trigger = device->current_frame == instance->trace_frame;
1332
1333 bool file_trigger = false;
1334 #ifndef _WIN32
1335 if (instance->trace_trigger_file && access(instance->trace_trigger_file, W_OK) == 0) {
1336 if (unlink(instance->trace_trigger_file) == 0) {
1337 file_trigger = true;
1338 } else {
1339 /* Do not enable tracing if we cannot remove the file,
1340 * because by then we'll trace every frame ... */
1341 fprintf(stderr, "Could not remove trace trigger file, ignoring\n");
1342 }
1343 }
1344 #endif
1345
1346 VkResult result = VK_SUCCESS;
1347 if (frame_trigger || file_trigger || device->trace_hotkey_trigger)
1348 result = device->capture_trace(queue);
1349
1350 device->trace_hotkey_trigger = false;
1351
1352 simple_mtx_unlock(&device->trace_mtx);
1353
1354 return result;
1355 }
1356
1357 VkResult
wsi_common_queue_present(const struct wsi_device * wsi,VkDevice device,VkQueue queue,int queue_family_index,const VkPresentInfoKHR * pPresentInfo)1358 wsi_common_queue_present(const struct wsi_device *wsi,
1359 VkDevice device,
1360 VkQueue queue,
1361 int queue_family_index,
1362 const VkPresentInfoKHR *pPresentInfo)
1363 {
1364 struct vk_device *dev = vk_device_from_handle(device);
1365 uint32_t current_frame = p_atomic_fetch_add(&dev->current_frame, 1);
1366 VkResult final_result = handle_trace(queue, dev, current_frame);
1367
1368 STACK_ARRAY(VkPipelineStageFlags, stage_flags,
1369 MAX2(1, pPresentInfo->waitSemaphoreCount));
1370 for (uint32_t s = 0; s < MAX2(1, pPresentInfo->waitSemaphoreCount); s++)
1371 stage_flags[s] = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1372
1373 const VkPresentRegionsKHR *regions =
1374 vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
1375 const VkPresentIdKHR *present_ids =
1376 vk_find_struct_const(pPresentInfo->pNext, PRESENT_ID_KHR);
1377 const VkSwapchainPresentFenceInfoEXT *present_fence_info =
1378 vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_FENCE_INFO_EXT);
1379 const VkSwapchainPresentModeInfoEXT *present_mode_info =
1380 vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_MODE_INFO_EXT);
1381
1382 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
1383 VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
1384 uint32_t image_index = pPresentInfo->pImageIndices[i];
1385 VkResult result;
1386
1387 /* Update the present mode for this present and any subsequent present.
1388 * Only update the present mode when MESA_VK_WSI_PRESENT_MODE is not used.
1389 * We should also turn any VkSwapchainPresentModesCreateInfoEXT into a nop,
1390 * but none of the WSI backends use that currently. */
1391 if (present_mode_info && present_mode_info->pPresentModes &&
1392 swapchain->set_present_mode && wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR) {
1393 swapchain->set_present_mode(swapchain, present_mode_info->pPresentModes[i]);
1394 }
1395
1396 if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
1397 const VkFenceCreateInfo fence_info = {
1398 .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
1399 .pNext = NULL,
1400 .flags = VK_FENCE_CREATE_SIGNALED_BIT,
1401 };
1402 result = wsi->CreateFence(device, &fence_info,
1403 &swapchain->alloc,
1404 &swapchain->fences[image_index]);
1405 if (result != VK_SUCCESS)
1406 goto fail_present;
1407
1408 if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT &&
1409 swapchain->blit.queue != VK_NULL_HANDLE) {
1410 const VkSemaphoreCreateInfo sem_info = {
1411 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
1412 .pNext = NULL,
1413 .flags = 0,
1414 };
1415 result = wsi->CreateSemaphore(device, &sem_info,
1416 &swapchain->alloc,
1417 &swapchain->blit.semaphores[image_index]);
1418 if (result != VK_SUCCESS)
1419 goto fail_present;
1420 }
1421 } else {
1422 MESA_TRACE_SCOPE("throttle");
1423 result =
1424 wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1425 true, ~0ull);
1426 if (result != VK_SUCCESS)
1427 goto fail_present;
1428 }
1429
1430 result = wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
1431 if (result != VK_SUCCESS)
1432 goto fail_present;
1433
1434 VkTimelineSemaphoreSubmitInfo timeline_submit_info = {
1435 .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
1436 };
1437
1438 VkSubmitInfo submit_info = {
1439 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1440 };
1441
1442 if (i == 0) {
1443 /* We only need/want to wait on semaphores once. After that, we're
1444 * guaranteed ordering since it all happens on the same queue.
1445 */
1446 submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
1447 submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;
1448 submit_info.pWaitDstStageMask = stage_flags;
1449 }
1450
1451 struct wsi_image *image =
1452 swapchain->get_wsi_image(swapchain, image_index);
1453
1454 VkQueue submit_queue = queue;
1455 if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
1456 if (swapchain->blit.queue == VK_NULL_HANDLE) {
1457 submit_info.commandBufferCount = 1;
1458 submit_info.pCommandBuffers =
1459 &image->blit.cmd_buffers[queue_family_index];
1460 } else {
1461 /* If we are using a blit using the driver's private queue, then
1462 * do an empty submit signalling a semaphore, and then submit the
1463 * blit waiting on that. This ensures proper queue ordering of
1464 * vkQueueSubmit() calls.
1465 */
1466 submit_info.signalSemaphoreCount = 1;
1467 submit_info.pSignalSemaphores =
1468 &swapchain->blit.semaphores[image_index];
1469
1470 result = wsi->QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
1471 if (result != VK_SUCCESS)
1472 goto fail_present;
1473
1474 /* Now prepare the blit submit. It needs to then wait on the
1475 * semaphore we signaled above.
1476 */
1477 submit_queue = swapchain->blit.queue;
1478 submit_info.waitSemaphoreCount = 1;
1479 submit_info.pWaitSemaphores = submit_info.pSignalSemaphores;
1480 submit_info.signalSemaphoreCount = 0;
1481 submit_info.pSignalSemaphores = NULL;
1482 submit_info.commandBufferCount = 1;
1483 submit_info.pCommandBuffers = &image->blit.cmd_buffers[0];
1484 submit_info.pWaitDstStageMask = stage_flags;
1485 }
1486 }
1487
1488 VkFence fence = swapchain->fences[image_index];
1489
1490 struct wsi_memory_signal_submit_info mem_signal;
1491 bool has_signal_dma_buf = false;
1492 bool explicit_sync = swapchain->image_info.explicit_sync;
1493 if (explicit_sync) {
1494 /* We will signal this acquire value ourselves when GPU work is done. */
1495 image->explicit_sync[WSI_ES_ACQUIRE].timeline++;
1496 /* The compositor will signal this value when it is done with the image. */
1497 image->explicit_sync[WSI_ES_RELEASE].timeline++;
1498
1499 timeline_submit_info.signalSemaphoreValueCount = 1;
1500 timeline_submit_info.pSignalSemaphoreValues = &image->explicit_sync[WSI_ES_ACQUIRE].timeline;
1501
1502 assert(submit_info.signalSemaphoreCount == 0);
1503 submit_info.signalSemaphoreCount = 1;
1504 submit_info.pSignalSemaphores = &image->explicit_sync[WSI_ES_ACQUIRE].semaphore;
1505 __vk_append_struct(&submit_info, &timeline_submit_info);
1506 } else {
1507 #ifdef HAVE_LIBDRM
1508 result = wsi_prepare_signal_dma_buf_from_semaphore(swapchain, image);
1509 if (result == VK_SUCCESS) {
1510 assert(submit_info.signalSemaphoreCount == 0);
1511 submit_info.signalSemaphoreCount = 1;
1512 submit_info.pSignalSemaphores = &swapchain->dma_buf_semaphore;
1513 has_signal_dma_buf = true;
1514 } else if (result == VK_ERROR_FEATURE_NOT_PRESENT) {
1515 result = VK_SUCCESS;
1516 has_signal_dma_buf = false;
1517 } else {
1518 goto fail_present;
1519 }
1520 #endif
1521
1522 if (!has_signal_dma_buf) {
1523 /* If we don't have dma-buf signaling, signal the memory object by
1524 * chaining wsi_memory_signal_submit_info into VkSubmitInfo.
1525 */
1526 result = VK_SUCCESS;
1527 has_signal_dma_buf = false;
1528 mem_signal = (struct wsi_memory_signal_submit_info) {
1529 .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
1530 .memory = image->memory,
1531 };
1532 __vk_append_struct(&submit_info, &mem_signal);
1533 }
1534 }
1535
1536 result = wsi->QueueSubmit(submit_queue, 1, &submit_info, fence);
1537 if (result != VK_SUCCESS)
1538 goto fail_present;
1539
1540 /* The app can only submit images they have acquired. */
1541 assert(image->acquired);
1542 image->acquired = false;
1543 image->present_serial = ++swapchain->present_serial;
1544
1545 if (!explicit_sync) {
1546 #ifdef HAVE_LIBDRM
1547 if (has_signal_dma_buf) {
1548 result = wsi_signal_dma_buf_from_semaphore(swapchain, image);
1549 if (result != VK_SUCCESS)
1550 goto fail_present;
1551 }
1552 #else
1553 assert(!has_signal_dma_buf);
1554 #endif
1555 }
1556
1557 if (wsi->sw)
1558 wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1559 true, ~0ull);
1560
1561 const VkPresentRegionKHR *region = NULL;
1562 if (regions && regions->pRegions)
1563 region = ®ions->pRegions[i];
1564
1565 uint64_t present_id = 0;
1566 if (present_ids && present_ids->pPresentIds)
1567 present_id = present_ids->pPresentIds[i];
1568 VkFence present_fence = VK_NULL_HANDLE;
1569 if (present_fence_info && present_fence_info->pFences)
1570 present_fence = present_fence_info->pFences[i];
1571
1572 if (present_id || present_fence) {
1573 result = wsi_signal_present_id_timeline(swapchain, queue, present_id, present_fence);
1574 if (result != VK_SUCCESS)
1575 goto fail_present;
1576 }
1577
1578 result = swapchain->queue_present(swapchain, image_index, present_id, region);
1579 if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
1580 goto fail_present;
1581
1582 if (wsi->set_memory_ownership) {
1583 VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1584 wsi->set_memory_ownership(swapchain->device, mem, false);
1585 }
1586
1587 fail_present:
1588 if (pPresentInfo->pResults != NULL)
1589 pPresentInfo->pResults[i] = result;
1590
1591 /* Let the final result be our first unsuccessful result */
1592 if (final_result == VK_SUCCESS)
1593 final_result = result;
1594 }
1595
1596 STACK_ARRAY_FINISH(stage_flags);
1597
1598 return final_result;
1599 }
1600
1601 VKAPI_ATTR VkResult VKAPI_CALL
wsi_QueuePresentKHR(VkQueue _queue,const VkPresentInfoKHR * pPresentInfo)1602 wsi_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
1603 {
1604 MESA_TRACE_FUNC();
1605 VK_FROM_HANDLE(vk_queue, queue, _queue);
1606
1607 return wsi_common_queue_present(queue->base.device->physical->wsi_device,
1608 vk_device_to_handle(queue->base.device),
1609 _queue,
1610 queue->queue_family_index,
1611 pPresentInfo);
1612 }
1613
1614 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,VkDeviceGroupPresentCapabilitiesKHR * pCapabilities)1615 wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,
1616 VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
1617 {
1618 memset(pCapabilities->presentMask, 0,
1619 sizeof(pCapabilities->presentMask));
1620 pCapabilities->presentMask[0] = 0x1;
1621 pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
1622
1623 return VK_SUCCESS;
1624 }
1625
1626 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,VkSurfaceKHR surface,VkDeviceGroupPresentModeFlagsKHR * pModes)1627 wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,
1628 VkSurfaceKHR surface,
1629 VkDeviceGroupPresentModeFlagsKHR *pModes)
1630 {
1631 *pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
1632
1633 return VK_SUCCESS;
1634 }
1635
1636 bool
wsi_common_vk_instance_supports_present_wait(const struct vk_instance * instance)1637 wsi_common_vk_instance_supports_present_wait(const struct vk_instance *instance)
1638 {
1639 /* We can only expose KHR_present_wait and KHR_present_id
1640 * if we are guaranteed support on all potential VkSurfaceKHR objects. */
1641 if (instance->enabled_extensions.KHR_win32_surface ||
1642 instance->enabled_extensions.KHR_android_surface) {
1643 return false;
1644 }
1645
1646 return true;
1647 }
1648
/* Create a VkImage for the app to bind to swapchain memory
 * (VK_EXT_swapchain_maintenance1-style flow).  In debug builds, validate
 * that the app's create info is compatible with the swapchain's own image
 * create info; the actual image is always created from the swapchain's
 * stored info, not the caller's. */
VkResult
wsi_common_create_swapchain_image(const struct wsi_device *wsi,
                                  const VkImageCreateInfo *pCreateInfo,
                                  VkSwapchainKHR _swapchain,
                                  VkImage *pImage)
{
   VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);

#ifndef NDEBUG
   const VkImageCreateInfo *swcInfo = &chain->image_info.create;
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->imageType == swcInfo->imageType);
   assert(pCreateInfo->format == swcInfo->format);
   assert(pCreateInfo->extent.width == swcInfo->extent.width);
   assert(pCreateInfo->extent.height == swcInfo->extent.height);
   assert(pCreateInfo->extent.depth == swcInfo->extent.depth);
   assert(pCreateInfo->mipLevels == swcInfo->mipLevels);
   assert(pCreateInfo->arrayLayers == swcInfo->arrayLayers);
   assert(pCreateInfo->samples == swcInfo->samples);
   assert(pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL);
   /* Usage must be a subset of what the swapchain images were created with. */
   assert(!(pCreateInfo->usage & ~swcInfo->usage));

   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO: {
         /* Every view format the app requests must already be in the
          * swapchain's format list. */
         const VkImageFormatListCreateInfo *iflci =
            (const VkImageFormatListCreateInfo *)ext;
         const VkImageFormatListCreateInfo *swc_iflci =
            &chain->image_info.format_list;

         for (uint32_t i = 0; i < iflci->viewFormatCount; i++) {
            bool found = false;
            for (uint32_t j = 0; j < swc_iflci->viewFormatCount; j++) {
               if (iflci->pViewFormats[i] == swc_iflci->pViewFormats[j]) {
                  found = true;
                  break;
               }
            }
            assert(found);
         }
         break;
      }

      case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
         break;

      default:
         assert(!"Unsupported image create extension");
      }
   }
#endif

   /* Always create from the swapchain's canonical create info. */
   return wsi->CreateImage(chain->device, &chain->image_info.create,
                           &chain->alloc, pImage);
}
1704
1705 VkResult
wsi_common_bind_swapchain_image(const struct wsi_device * wsi,VkImage vk_image,VkSwapchainKHR _swapchain,uint32_t image_idx)1706 wsi_common_bind_swapchain_image(const struct wsi_device *wsi,
1707 VkImage vk_image,
1708 VkSwapchainKHR _swapchain,
1709 uint32_t image_idx)
1710 {
1711 VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
1712 struct wsi_image *image = chain->get_wsi_image(chain, image_idx);
1713
1714 return wsi->BindImageMemory(chain->device, vk_image, image->memory, 0);
1715 }
1716
1717 VkResult
wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain * chain,uint64_t present_id,uint64_t timeout)1718 wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain *chain,
1719 uint64_t present_id, uint64_t timeout)
1720 {
1721 assert(chain->present_id_timeline);
1722 const VkSemaphoreWaitInfo wait_info = {
1723 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
1724 .semaphoreCount = 1,
1725 .pSemaphores = &chain->present_id_timeline,
1726 .pValues = &present_id,
1727 };
1728
1729 return chain->wsi->WaitSemaphores(chain->device, &wait_info, timeout);
1730 }
1731
1732 uint32_t
wsi_select_memory_type(const struct wsi_device * wsi,VkMemoryPropertyFlags req_props,VkMemoryPropertyFlags deny_props,uint32_t type_bits)1733 wsi_select_memory_type(const struct wsi_device *wsi,
1734 VkMemoryPropertyFlags req_props,
1735 VkMemoryPropertyFlags deny_props,
1736 uint32_t type_bits)
1737 {
1738 assert(type_bits != 0);
1739
1740 VkMemoryPropertyFlags common_props = ~0;
1741 u_foreach_bit(t, type_bits) {
1742 const VkMemoryType type = wsi->memory_props.memoryTypes[t];
1743
1744 common_props &= type.propertyFlags;
1745
1746 if (deny_props & type.propertyFlags)
1747 continue;
1748
1749 if (!(req_props & ~type.propertyFlags))
1750 return t;
1751 }
1752
1753 if ((deny_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
1754 (common_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
1755 /* If they asked for non-device-local and all the types are device-local
1756 * (this is commonly true for UMA platforms), try again without denying
1757 * device-local types
1758 */
1759 deny_props &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1760 return wsi_select_memory_type(wsi, req_props, deny_props, type_bits);
1761 }
1762
1763 unreachable("No memory type found");
1764 }
1765
1766 uint32_t
wsi_select_device_memory_type(const struct wsi_device * wsi,uint32_t type_bits)1767 wsi_select_device_memory_type(const struct wsi_device *wsi,
1768 uint32_t type_bits)
1769 {
1770 return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
1771 0 /* deny_props */, type_bits);
1772 }
1773
1774 static uint32_t
wsi_select_host_memory_type(const struct wsi_device * wsi,uint32_t type_bits)1775 wsi_select_host_memory_type(const struct wsi_device *wsi,
1776 uint32_t type_bits)
1777 {
1778 return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
1779 0 /* deny_props */, type_bits);
1780 }
1781
/* Create the linear staging buffer and the memory allocations backing a
 * buffer-blit swapchain image.
 *
 * Produces:
 *  - image->blit.buffer: a TRANSFER_DST buffer of info->linear_size bytes,
 *    optionally exportable via handle_types,
 *  - image->blit.memory: dedicated memory for that buffer, either imported
 *    from a host pointer supplied by info->alloc_shm or freshly allocated
 *    (and exported when handle_types != 0),
 *  - image->memory: dedicated memory bound-ready for image->image itself.
 *
 * On success the image is described to the window system as a single
 * linear plane covering the buffer.
 *
 * NOTE(review): the error paths here return without destroying objects
 * created earlier in the function; presumably the caller's image teardown
 * cleans them up — confirm against the swapchain destruction path.
 */
VkResult
wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
                               const struct wsi_image_info *info,
                               struct wsi_image *image,
                               VkExternalMemoryHandleTypeFlags handle_types)
{
   assert(chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);

   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   const VkExternalMemoryBufferCreateInfo buffer_external_info = {
      .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
      .pNext = NULL,
      .handleTypes = handle_types,
   };
   const VkBufferCreateInfo buffer_info = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .pNext = &buffer_external_info,
      .size = info->linear_size,
      .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   };
   result = wsi->CreateBuffer(chain->device, &buffer_info,
                              &chain->alloc, &image->blit.buffer);
   if (result != VK_SUCCESS)
      return result;

   VkMemoryRequirements reqs;
   wsi->GetBufferMemoryRequirements(chain->device, image->blit.buffer, &reqs);
   /* linear_size was sized/aligned up front, so it must satisfy the
    * buffer's actual requirements.
    */
   assert(reqs.size <= info->linear_size);

   /* Only DRM images without explicit sync rely on kernel-side implicit
    * synchronization of the buffer memory.
    */
   struct wsi_memory_allocate_info memory_wsi_info = {
      .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
      .pNext = NULL,
      .implicit_sync = info->image_type == WSI_IMAGE_TYPE_DRM &&
                       !info->explicit_sync,
   };
   VkMemoryDedicatedAllocateInfo buf_mem_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = &memory_wsi_info,
      .image = VK_NULL_HANDLE,
      .buffer = image->blit.buffer,
   };
   VkMemoryAllocateInfo buf_mem_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &buf_mem_dedicated_info,
      .allocationSize = info->linear_size,
      .memoryTypeIndex =
         info->select_blit_dst_memory_type(wsi, reqs.memoryTypeBits),
   };

   /* If the platform supplies its own shared-memory allocator, import that
    * host pointer instead of exporting anything.
    */
   void *sw_host_ptr = NULL;
   if (info->alloc_shm)
      sw_host_ptr = info->alloc_shm(image, info->linear_size);

   /* Both structs must outlive AllocateMemory since they are chained into
    * buf_mem_info.pNext below.
    */
   VkExportMemoryAllocateInfo memory_export_info;
   VkImportMemoryHostPointerInfoEXT host_ptr_info;
   if (sw_host_ptr != NULL) {
      host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
         .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
         .pHostPointer = sw_host_ptr,
         .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      };
      __vk_append_struct(&buf_mem_info, &host_ptr_info);
   } else if (handle_types != 0) {
      memory_export_info = (VkExportMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
         .handleTypes = handle_types,
      };
      __vk_append_struct(&buf_mem_info, &memory_export_info);
   }

   result = wsi->AllocateMemory(chain->device, &buf_mem_info,
                                &chain->alloc, &image->blit.memory);
   if (result != VK_SUCCESS)
      return result;

   result = wsi->BindBufferMemory(chain->device, image->blit.buffer,
                                  image->blit.memory, 0);
   if (result != VK_SUCCESS)
      return result;

   /* Now allocate dedicated memory for the presentable image itself
    * (reqs is reused for the image's requirements).
    */
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = NULL,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   const VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         info->select_image_memory_type(wsi, reqs.memoryTypeBits),
   };

   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   /* The window system sees a single linear plane backed by the buffer. */
   image->num_planes = 1;
   image->sizes[0] = info->linear_size;
   image->row_pitches[0] = info->linear_stride;
   image->offsets[0] = 0;

   return VK_SUCCESS;
}
1893
1894 VkResult
wsi_finish_create_blit_context(const struct wsi_swapchain * chain,const struct wsi_image_info * info,struct wsi_image * image)1895 wsi_finish_create_blit_context(const struct wsi_swapchain *chain,
1896 const struct wsi_image_info *info,
1897 struct wsi_image *image)
1898 {
1899 const struct wsi_device *wsi = chain->wsi;
1900 VkResult result;
1901
1902 int cmd_buffer_count =
1903 chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
1904 image->blit.cmd_buffers =
1905 vk_zalloc(&chain->alloc,
1906 sizeof(VkCommandBuffer) * cmd_buffer_count, 8,
1907 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1908 if (!image->blit.cmd_buffers)
1909 return VK_ERROR_OUT_OF_HOST_MEMORY;
1910
1911 for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1912 if (!chain->cmd_pools[i])
1913 continue;
1914
1915 const VkCommandBufferAllocateInfo cmd_buffer_info = {
1916 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
1917 .pNext = NULL,
1918 .commandPool = chain->cmd_pools[i],
1919 .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
1920 .commandBufferCount = 1,
1921 };
1922 result = wsi->AllocateCommandBuffers(chain->device, &cmd_buffer_info,
1923 &image->blit.cmd_buffers[i]);
1924 if (result != VK_SUCCESS)
1925 return result;
1926
1927 const VkCommandBufferBeginInfo begin_info = {
1928 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
1929 };
1930 wsi->BeginCommandBuffer(image->blit.cmd_buffers[i], &begin_info);
1931
1932 VkImageMemoryBarrier img_mem_barriers[] = {
1933 {
1934 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1935 .pNext = NULL,
1936 .srcAccessMask = 0,
1937 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
1938 .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1939 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1940 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1941 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1942 .image = image->image,
1943 .subresourceRange = {
1944 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1945 .baseMipLevel = 0,
1946 .levelCount = 1,
1947 .baseArrayLayer = 0,
1948 .layerCount = 1,
1949 },
1950 },
1951 {
1952 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1953 .pNext = NULL,
1954 .srcAccessMask = 0,
1955 .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
1956 .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1957 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1958 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1959 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1960 .image = image->blit.image,
1961 .subresourceRange = {
1962 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1963 .baseMipLevel = 0,
1964 .levelCount = 1,
1965 .baseArrayLayer = 0,
1966 .layerCount = 1,
1967 },
1968 },
1969 };
1970 uint32_t img_mem_barrier_count =
1971 chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT ? 1 : 2;
1972 wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
1973 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1974 VK_PIPELINE_STAGE_TRANSFER_BIT,
1975 0,
1976 0, NULL,
1977 0, NULL,
1978 1, img_mem_barriers);
1979
1980 if (chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT) {
1981 struct VkBufferImageCopy buffer_image_copy = {
1982 .bufferOffset = 0,
1983 .bufferRowLength = info->linear_stride /
1984 vk_format_get_blocksize(info->create.format),
1985 .bufferImageHeight = 0,
1986 .imageSubresource = {
1987 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1988 .mipLevel = 0,
1989 .baseArrayLayer = 0,
1990 .layerCount = 1,
1991 },
1992 .imageOffset = { .x = 0, .y = 0, .z = 0 },
1993 .imageExtent = info->create.extent,
1994 };
1995 wsi->CmdCopyImageToBuffer(image->blit.cmd_buffers[i],
1996 image->image,
1997 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1998 image->blit.buffer,
1999 1, &buffer_image_copy);
2000 } else {
2001 struct VkImageCopy image_copy = {
2002 .srcSubresource = {
2003 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
2004 .mipLevel = 0,
2005 .baseArrayLayer = 0,
2006 .layerCount = 1,
2007 },
2008 .srcOffset = { .x = 0, .y = 0, .z = 0 },
2009 .dstSubresource = {
2010 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
2011 .mipLevel = 0,
2012 .baseArrayLayer = 0,
2013 .layerCount = 1,
2014 },
2015 .dstOffset = { .x = 0, .y = 0, .z = 0 },
2016 .extent = info->create.extent,
2017 };
2018
2019 wsi->CmdCopyImage(image->blit.cmd_buffers[i],
2020 image->image,
2021 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2022 image->blit.image,
2023 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2024 1, &image_copy);
2025 }
2026
2027 img_mem_barriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
2028 img_mem_barriers[0].dstAccessMask = 0;
2029 img_mem_barriers[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
2030 img_mem_barriers[0].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
2031 img_mem_barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
2032 img_mem_barriers[1].dstAccessMask = 0;
2033 img_mem_barriers[1].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
2034 img_mem_barriers[1].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
2035 wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
2036 VK_PIPELINE_STAGE_TRANSFER_BIT,
2037 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
2038 0,
2039 0, NULL,
2040 0, NULL,
2041 img_mem_barrier_count, img_mem_barriers);
2042
2043 result = wsi->EndCommandBuffer(image->blit.cmd_buffers[i]);
2044 if (result != VK_SUCCESS)
2045 return result;
2046 }
2047
2048 return VK_SUCCESS;
2049 }
2050
2051 void
wsi_configure_buffer_image(UNUSED const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,uint32_t stride_align,uint32_t size_align,struct wsi_image_info * info)2052 wsi_configure_buffer_image(UNUSED const struct wsi_swapchain *chain,
2053 const VkSwapchainCreateInfoKHR *pCreateInfo,
2054 uint32_t stride_align, uint32_t size_align,
2055 struct wsi_image_info *info)
2056 {
2057 const struct wsi_device *wsi = chain->wsi;
2058
2059 assert(util_is_power_of_two_nonzero(stride_align));
2060 assert(util_is_power_of_two_nonzero(size_align));
2061
2062 info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2063 info->wsi.blit_src = true;
2064
2065 const uint32_t cpp = vk_format_get_blocksize(pCreateInfo->imageFormat);
2066 info->linear_stride = pCreateInfo->imageExtent.width * cpp;
2067 info->linear_stride = align(info->linear_stride, stride_align);
2068
2069 /* Since we can pick the stride to be whatever we want, also align to the
2070 * device's optimalBufferCopyRowPitchAlignment so we get efficient copies.
2071 */
2072 assert(wsi->optimalBufferCopyRowPitchAlignment > 0);
2073 info->linear_stride = align(info->linear_stride,
2074 wsi->optimalBufferCopyRowPitchAlignment);
2075
2076 info->linear_size = (uint64_t)info->linear_stride *
2077 pCreateInfo->imageExtent.height;
2078 info->linear_size = align64(info->linear_size, size_align);
2079
2080 info->finish_create = wsi_finish_create_blit_context;
2081 }
2082
2083 void
wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain * chain,struct wsi_image_info * info)2084 wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain *chain,
2085 struct wsi_image_info *info)
2086 {
2087 info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2088 info->wsi.blit_src = true;
2089 info->finish_create = wsi_finish_create_blit_context;
2090 }
2091
/* Allocate host-coherent memory for a linear (no-blit) CPU-presentable
 * image, bind-ready mapping included: on success image->cpu_map points at
 * the pixel data and the single-plane size/pitch/offset fields are filled
 * in from the image's subresource layout.
 *
 * NOTE(review): error paths return without freeing image->memory;
 * presumably the caller's image teardown handles that — confirm.
 */
static VkResult
wsi_create_cpu_linear_image_mem(const struct wsi_swapchain *chain,
                                const struct wsi_image_info *info,
                                struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   VkMemoryRequirements reqs;
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   VkSubresourceLayout layout;
   wsi->GetImageSubresourceLayout(chain->device, image->image,
                                  &(VkImageSubresource) {
                                     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                                     .mipLevel = 0,
                                     .arrayLayer = 0,
                                  }, &layout);
   /* The single color plane is expected to start at the very beginning of
    * the allocation.
    */
   assert(layout.offset == 0);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         wsi_select_host_memory_type(wsi, reqs.memoryTypeBits),
   };

   /* If the platform supplies a shared-memory allocator, import its host
    * pointer instead of letting the driver allocate.
    */
   void *sw_host_ptr = NULL;
   if (info->alloc_shm)
      sw_host_ptr = info->alloc_shm(image, layout.size);

   /* Must outlive AllocateMemory — chained into memory_info.pNext. */
   VkImportMemoryHostPointerInfoEXT host_ptr_info;
   if (sw_host_ptr != NULL) {
      host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
         .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
         .pHostPointer = sw_host_ptr,
         .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      };
      __vk_append_struct(&memory_info, &host_ptr_info);
   }

   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   /* Keep the image persistently mapped for CPU access. */
   result = wsi->MapMemory(chain->device, image->memory,
                           0, VK_WHOLE_SIZE, 0, &image->cpu_map);
   if (result != VK_SUCCESS)
      return result;

   image->num_planes = 1;
   image->sizes[0] = reqs.size;
   image->row_pitches[0] = layout.rowPitch;
   image->offsets[0] = 0;

   return VK_SUCCESS;
}
2156
2157 static VkResult
wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain * chain,const struct wsi_image_info * info,struct wsi_image * image)2158 wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain *chain,
2159 const struct wsi_image_info *info,
2160 struct wsi_image *image)
2161 {
2162 VkResult result;
2163
2164 result = wsi_create_buffer_blit_context(chain, info, image, 0);
2165 if (result != VK_SUCCESS)
2166 return result;
2167
2168 result = chain->wsi->MapMemory(chain->device, image->blit.memory,
2169 0, VK_WHOLE_SIZE, 0, &image->cpu_map);
2170 if (result != VK_SUCCESS)
2171 return result;
2172
2173 return VK_SUCCESS;
2174 }
2175
2176 bool
wsi_cpu_image_needs_buffer_blit(const struct wsi_device * wsi,const struct wsi_cpu_image_params * params)2177 wsi_cpu_image_needs_buffer_blit(const struct wsi_device *wsi,
2178 const struct wsi_cpu_image_params *params)
2179 {
2180 if (WSI_DEBUG & WSI_DEBUG_BUFFER)
2181 return true;
2182
2183 if (wsi->wants_linear)
2184 return false;
2185
2186 return true;
2187 }
2188
2189 VkResult
wsi_configure_cpu_image(const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,const struct wsi_cpu_image_params * params,struct wsi_image_info * info)2190 wsi_configure_cpu_image(const struct wsi_swapchain *chain,
2191 const VkSwapchainCreateInfoKHR *pCreateInfo,
2192 const struct wsi_cpu_image_params *params,
2193 struct wsi_image_info *info)
2194 {
2195 assert(params->base.image_type == WSI_IMAGE_TYPE_CPU);
2196 assert(chain->blit.type == WSI_SWAPCHAIN_NO_BLIT ||
2197 chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);
2198
2199 VkExternalMemoryHandleTypeFlags handle_types = 0;
2200 if (params->alloc_shm && chain->blit.type != WSI_SWAPCHAIN_NO_BLIT)
2201 handle_types = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
2202
2203 VkResult result = wsi_configure_image(chain, pCreateInfo,
2204 handle_types, info);
2205 if (result != VK_SUCCESS)
2206 return result;
2207
2208 if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
2209 wsi_configure_buffer_image(chain, pCreateInfo,
2210 1 /* stride_align */,
2211 1 /* size_align */,
2212 info);
2213
2214 info->select_blit_dst_memory_type = wsi_select_host_memory_type;
2215 info->select_image_memory_type = wsi_select_device_memory_type;
2216 info->create_mem = wsi_create_cpu_buffer_image_mem;
2217 } else {
2218 /* Force the image to be linear */
2219 info->create.tiling = VK_IMAGE_TILING_LINEAR;
2220
2221 info->create_mem = wsi_create_cpu_linear_image_mem;
2222 }
2223
2224 info->alloc_shm = params->alloc_shm;
2225
2226 return VK_SUCCESS;
2227 }
2228
2229 VKAPI_ATTR VkResult VKAPI_CALL
wsi_WaitForPresentKHR(VkDevice device,VkSwapchainKHR _swapchain,uint64_t presentId,uint64_t timeout)2230 wsi_WaitForPresentKHR(VkDevice device, VkSwapchainKHR _swapchain,
2231 uint64_t presentId, uint64_t timeout)
2232 {
2233 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
2234 assert(swapchain->wait_for_present);
2235 return swapchain->wait_for_present(swapchain, presentId, timeout);
2236 }
2237
2238 VkImageUsageFlags
wsi_caps_get_image_usage(void)2239 wsi_caps_get_image_usage(void)
2240 {
2241 return VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2242 VK_IMAGE_USAGE_SAMPLED_BIT |
2243 VK_IMAGE_USAGE_TRANSFER_DST_BIT |
2244 VK_IMAGE_USAGE_STORAGE_BIT |
2245 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2246 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
2247 }
2248
2249 bool
wsi_device_supports_explicit_sync(struct wsi_device * device)2250 wsi_device_supports_explicit_sync(struct wsi_device *device)
2251 {
2252 return !device->sw && device->has_timeline_semaphore &&
2253 (device->timeline_semaphore_export_handle_types &
2254 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
2255 }
2256