1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "wsi_common_private.h"
25 #include "wsi_common_entrypoints.h"
26 #include "util/u_debug.h"
27 #include "util/macros.h"
28 #include "util/os_file.h"
29 #include "util/os_time.h"
30 #include "util/xmlconfig.h"
31 #include "vk_device.h"
32 #include "vk_fence.h"
33 #include "vk_format.h"
34 #include "vk_instance.h"
35 #include "vk_physical_device.h"
36 #include "vk_queue.h"
37 #include "vk_semaphore.h"
38 #include "vk_sync.h"
39 #include "vk_sync_dummy.h"
40 #include "vk_util.h"
41
42 #include <time.h>
43 #include <stdlib.h>
44 #include <stdio.h>
45
46 #ifndef _WIN32
47 #include <unistd.h>
48 #endif
49
/* Bitmask of WSI_DEBUG_* flags, parsed from the MESA_VK_WSI_DEBUG
 * environment variable in wsi_device_init().
 */
uint64_t WSI_DEBUG;

/* Maps MESA_VK_WSI_DEBUG tokens to their WSI_DEBUG_* flag bits. */
static const struct debug_control debug_control[] = {
   { "buffer", WSI_DEBUG_BUFFER },
   { "sw", WSI_DEBUG_SW },
   { "noshm", WSI_DEBUG_NOSHM },
   { "linear", WSI_DEBUG_LINEAR },
   { "dxgi", WSI_DEBUG_DXGI },
   { NULL, },
};
60
/*
 * Initialize the driver-independent WSI state for a physical device.
 *
 * All Vulkan entrypoints WSI needs are fetched through proc_addr, so this
 * code never links directly against a specific driver.  display_fd is only
 * consumed by the display backend; dri_options feeds the X11 backend and
 * the driconf handling at the end.  On a platform-backend failure,
 * everything brought up so far is torn down again via wsi_device_finish()
 * and that backend's error is returned.
 */
VkResult
wsi_device_init(struct wsi_device *wsi,
                VkPhysicalDevice pdevice,
                WSI_FN_GetPhysicalDeviceProcAddr proc_addr,
                const VkAllocationCallbacks *alloc,
                int display_fd,
                const struct driOptionCache *dri_options,
                const struct wsi_device_options *device_options)
{
   const char *present_mode;
   UNUSED VkResult result;

   /* Comma-separated flags, e.g. MESA_VK_WSI_DEBUG=sw,linear. */
   WSI_DEBUG = parse_debug_string(getenv("MESA_VK_WSI_DEBUG"), debug_control);

   util_cpu_trace_init();

   memset(wsi, 0, sizeof(*wsi));

   wsi->instance_alloc = *alloc;
   wsi->pdevice = pdevice;
   wsi->supports_scanout = true;
   wsi->sw = device_options->sw_device || (WSI_DEBUG & WSI_DEBUG_SW);
   wsi->wants_linear = (WSI_DEBUG & WSI_DEBUG_LINEAR) != 0;
   wsi->x11.extra_xwayland_image = device_options->extra_xwayland_image;
   /* Fetch the physical-device-level entrypoints needed right here as
    * function-local pointers.
    */
#define WSI_GET_CB(func) \
   PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(GetPhysicalDeviceExternalSemaphoreProperties);
   WSI_GET_CB(GetPhysicalDeviceProperties2);
   WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
   WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
#undef WSI_GET_CB

   /* Query core limits along with the DRM and PCI bus properties by
    * chaining both structs off the properties2 query.
    */
   wsi->drm_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT;
   wsi->pci_bus_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
   wsi->pci_bus_info.pNext = &wsi->drm_info;
   VkPhysicalDeviceProperties2 pdp2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
      .pNext = &wsi->pci_bus_info,
   };
   GetPhysicalDeviceProperties2(pdevice, &pdp2);

   wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
   assert(pdp2.properties.limits.optimalBufferCopyRowPitchAlignment <= UINT32_MAX);
   wsi->optimalBufferCopyRowPitchAlignment =
      pdp2.properties.limits.optimalBufferCopyRowPitchAlignment;
   /* MAX_ENUM means "no override"; may be replaced below from the
    * MESA_VK_WSI_PRESENT_MODE environment variable.
    */
   wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;

   GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);

   /* queue_supports_blit is a 64-bit bitmask indexed by queue family. */
   assert(wsi->queue_family_count <= 64);
   VkQueueFamilyProperties queue_properties[64];
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, queue_properties);

   for (unsigned i = 0; i < wsi->queue_family_count; i++) {
      /* Any of these capabilities marks the family as usable for the
       * copies WSI may need at present time.
       */
      VkFlags req_flags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT;
      if (queue_properties[i].queueFlags & req_flags)
         wsi->queue_supports_blit |= BITFIELD64_BIT(i);
   }

   /* Record which external semaphore handle types the device can export,
    * walking every single-bit type up to SYNC_FD.
    */
   for (VkExternalSemaphoreHandleTypeFlags handle_type = 1;
        handle_type <= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        handle_type <<= 1) {
      const VkPhysicalDeviceExternalSemaphoreInfo esi = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
         .handleType = handle_type,
      };
      VkExternalSemaphoreProperties esp = {
         .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
      };
      GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);

      if (esp.externalSemaphoreFeatures &
          VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
         wsi->semaphore_export_handle_types |= handle_type;
   }

   const struct vk_device_extension_table *supported_extensions =
      &vk_physical_device_from_handle(pdevice)->supported_extensions;
   wsi->has_import_memory_host =
      supported_extensions->EXT_external_memory_host;
   wsi->khr_present_wait =
      supported_extensions->KHR_present_id &&
      supported_extensions->KHR_present_wait;

   /* We cannot expose KHR_present_wait without timeline semaphores. */
   assert(!wsi->khr_present_wait || supported_extensions->KHR_timeline_semaphore);

   list_inithead(&wsi->hotplug_fences);

   /* Fetch the entrypoints used later by the swapchain code and stash them
    * on the wsi_device itself (note: same macro name, different expansion
    * than above).
    */
#define WSI_GET_CB(func) \
   wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(AllocateMemory);
   WSI_GET_CB(AllocateCommandBuffers);
   WSI_GET_CB(BindBufferMemory);
   WSI_GET_CB(BindImageMemory);
   WSI_GET_CB(BeginCommandBuffer);
   WSI_GET_CB(CmdPipelineBarrier);
   WSI_GET_CB(CmdCopyImage);
   WSI_GET_CB(CmdCopyImageToBuffer);
   WSI_GET_CB(CreateBuffer);
   WSI_GET_CB(CreateCommandPool);
   WSI_GET_CB(CreateFence);
   WSI_GET_CB(CreateImage);
   WSI_GET_CB(CreateSemaphore);
   WSI_GET_CB(DestroyBuffer);
   WSI_GET_CB(DestroyCommandPool);
   WSI_GET_CB(DestroyFence);
   WSI_GET_CB(DestroyImage);
   WSI_GET_CB(DestroySemaphore);
   WSI_GET_CB(EndCommandBuffer);
   WSI_GET_CB(FreeMemory);
   WSI_GET_CB(FreeCommandBuffers);
   WSI_GET_CB(GetBufferMemoryRequirements);
   WSI_GET_CB(GetFenceStatus);
   WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
   WSI_GET_CB(GetImageMemoryRequirements);
   WSI_GET_CB(GetImageSubresourceLayout);
   if (!wsi->sw)
      WSI_GET_CB(GetMemoryFdKHR);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties2);
   WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
   WSI_GET_CB(GetSemaphoreFdKHR);
   WSI_GET_CB(ResetFences);
   WSI_GET_CB(QueueSubmit);
   WSI_GET_CB(WaitForFences);
   WSI_GET_CB(MapMemory);
   WSI_GET_CB(UnmapMemory);
   if (wsi->khr_present_wait)
      WSI_GET_CB(WaitSemaphores);
#undef WSI_GET_CB

   /* Bring up every compiled-in platform backend; on the first failure,
    * tear the already-initialized ones back down and bail.
    */
#ifdef VK_USE_PLATFORM_XCB_KHR
   result = wsi_x11_init_wsi(wsi, alloc, dri_options);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_win32_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   result = wsi_display_init_wsi(wsi, alloc, display_fd);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifndef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_headless_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

   /* Optional user override of the presentation mode for all swapchains. */
   present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
   if (present_mode) {
      if (!strcmp(present_mode, "fifo")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_KHR;
      } else if (!strcmp(present_mode, "relaxed")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
      } else if (!strcmp(present_mode, "mailbox")) {
         wsi->override_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
      } else if (!strcmp(present_mode, "immediate")) {
         wsi->override_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
      } else {
         fprintf(stderr, "Invalid MESA_VK_WSI_PRESENT_MODE value!\n");
      }
   }

   wsi->force_headless_swapchain =
      debug_get_bool_option("MESA_VK_WSI_HEADLESS_SWAPCHAIN", false);

   /* Per-application driconf knobs; each option is checked for existence
    * before being queried.
    */
   if (dri_options) {
      if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
         wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
                                                     "adaptive_sync");

      if (driCheckOption(dri_options, "vk_wsi_force_bgra8_unorm_first", DRI_BOOL)) {
         wsi->force_bgra8_unorm_first =
            driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
      }

      if (driCheckOption(dri_options, "vk_wsi_force_swapchain_to_current_extent", DRI_BOOL)) {
         wsi->force_swapchain_to_currentExtent =
            driQueryOptionb(dri_options, "vk_wsi_force_swapchain_to_current_extent");
      }
   }

   return VK_SUCCESS;
fail:
   wsi_device_finish(wsi, alloc);
   return result;
}
265
/*
 * Tear down everything wsi_device_init() set up, finishing the platform
 * backends in the reverse order they were initialized.  Also used as the
 * failure-path cleanup of wsi_device_init(), so each backend's finish must
 * tolerate a backend that never came up.
 */
void
wsi_device_finish(struct wsi_device *wsi,
                  const VkAllocationCallbacks *alloc)
{
#ifndef VK_USE_PLATFORM_WIN32_KHR
   wsi_headless_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   wsi_win32_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   wsi_x11_finish_wsi(wsi, alloc);
#endif
}
286
/*
 * Common vkDestroySurfaceKHR implementation.  Wayland and Win32 surfaces
 * own extra state and are routed to their platform destructors; every
 * other surface type is a plain allocation freed here.  NULL surfaces are
 * a no-op, as the spec requires.
 */
VKAPI_ATTR void VKAPI_CALL
wsi_DestroySurfaceKHR(VkInstance _instance,
                      VkSurfaceKHR _surface,
                      const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

   if (!surface)
      return;

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   if (surface->platform == VK_ICD_WSI_PLATFORM_WAYLAND) {
      wsi_wl_surface_destroy(surface, _instance, pAllocator);
      return;
   }
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   if (surface->platform == VK_ICD_WSI_PLATFORM_WIN32) {
      wsi_win32_surface_destroy(surface, _instance, pAllocator);
      return;
   }
#endif

   /* Free with pAllocator when given, else the instance allocator the
    * surface was created with.
    */
   vk_free2(&instance->alloc, pAllocator, surface);
}
313
/*
 * Hand the driver's syncobj device fd to the backends that need it.
 * Currently only the display backend consumes it; on other builds this is
 * a no-op.
 */
void
wsi_device_setup_syncobj_fd(struct wsi_device *wsi_device,
                            int fd)
{
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_setup_syncobj_fd(wsi_device, fd);
#endif
}
322
/*
 * Decide whether presenting images of the given backend type requires an
 * intermediate blit on this device, and of which kind, by delegating to
 * the backend-specific "needs blit" predicate.
 */
static enum wsi_swapchain_blit_type
get_blit_type(const struct wsi_device *wsi,
              const struct wsi_base_image_params *params,
              VkDevice device)
{
   switch (params->image_type) {
   case WSI_IMAGE_TYPE_CPU: {
      const struct wsi_cpu_image_params *cpu_params =
         container_of(params, const struct wsi_cpu_image_params, base);
      return wsi_cpu_image_needs_buffer_blit(wsi, cpu_params) ?
         WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
   }
#ifdef HAVE_LIBDRM
   case WSI_IMAGE_TYPE_DRM: {
      const struct wsi_drm_image_params *drm_params =
         container_of(params, const struct wsi_drm_image_params, base);
      return wsi_drm_image_needs_buffer_blit(wsi, drm_params) ?
         WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
   }
#endif
#ifdef _WIN32
   case WSI_IMAGE_TYPE_DXGI: {
      /* DXGI reports the blit type itself rather than a yes/no answer. */
      const struct wsi_dxgi_image_params *dxgi_params =
         container_of(params, const struct wsi_dxgi_image_params, base);
      return wsi_dxgi_image_needs_blit(wsi, dxgi_params, device);
   }
#endif
   default:
      unreachable("Invalid image type");
   }
}
354
/*
 * Dispatch to the backend-specific wsi_image_info setup for the
 * swapchain's image type.  On success, *info describes how to create every
 * image in the swapchain.
 */
static VkResult
configure_image(const struct wsi_swapchain *chain,
                const VkSwapchainCreateInfoKHR *pCreateInfo,
                const struct wsi_base_image_params *params,
                struct wsi_image_info *info)
{
   switch (params->image_type) {
   case WSI_IMAGE_TYPE_CPU: {
      const struct wsi_cpu_image_params *cpu_params =
         container_of(params, const struct wsi_cpu_image_params, base);
      return wsi_configure_cpu_image(chain, pCreateInfo, cpu_params, info);
   }
#ifdef HAVE_LIBDRM
   case WSI_IMAGE_TYPE_DRM: {
      const struct wsi_drm_image_params *drm_params =
         container_of(params, const struct wsi_drm_image_params, base);
      return wsi_drm_configure_image(chain, pCreateInfo, drm_params, info);
   }
#endif
#ifdef _WIN32
   case WSI_IMAGE_TYPE_DXGI: {
      const struct wsi_dxgi_image_params *dxgi_params =
         container_of(params, const struct wsi_dxgi_image_params, base);
      return wsi_dxgi_configure_image(chain, pCreateInfo, dxgi_params, info);
   }
#endif
   default:
      unreachable("Invalid image type");
   }
}
385
386 #if defined(HAVE_PTHREAD) && !defined(_WIN32)
387 bool
wsi_init_pthread_cond_monotonic(pthread_cond_t * cond)388 wsi_init_pthread_cond_monotonic(pthread_cond_t *cond)
389 {
390 pthread_condattr_t condattr;
391 bool ret = false;
392
393 if (pthread_condattr_init(&condattr) != 0)
394 goto fail_attr_init;
395
396 if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0)
397 goto fail_attr_set;
398
399 if (pthread_cond_init(cond, &condattr) != 0)
400 goto fail_cond_init;
401
402 ret = true;
403
404 fail_cond_init:
405 fail_attr_set:
406 pthread_condattr_destroy(&condattr);
407 fail_attr_init:
408 return ret;
409 }
410 #endif
411
/*
 * Initialize the driver-independent swapchain core: the object base, the
 * blit strategy for the image type, and the command pools, then let the
 * backend fill in image_info via configure_image().  On failure everything
 * created so far is released with wsi_swapchain_finish().
 */
VkResult
wsi_swapchain_init(const struct wsi_device *wsi,
                   struct wsi_swapchain *chain,
                   VkDevice _device,
                   const VkSwapchainCreateInfoKHR *pCreateInfo,
                   const struct wsi_base_image_params *image_params,
                   const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_device, device, _device);
   VkResult result;

   /* Zeroing first makes wsi_swapchain_finish() safe on partial init. */
   memset(chain, 0, sizeof(*chain));

   vk_object_base_init(device, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);

   chain->wsi = wsi;
   chain->device = _device;
   chain->alloc = *pAllocator;
   chain->blit.type = get_blit_type(wsi, image_params, _device);

   chain->blit.queue = VK_NULL_HANDLE;
   if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT && wsi->get_blit_queue)
      chain->blit.queue = wsi->get_blit_queue(_device);

   /* With a dedicated blit queue a single pool suffices; otherwise keep
    * one pool per queue family so presents can be recorded on any of them.
    */
   int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;

   chain->cmd_pools =
      vk_zalloc(pAllocator, sizeof(VkCommandPool) * cmd_pools_count, 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!chain->cmd_pools)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0; i < cmd_pools_count; i++) {
      int queue_family_index = i;

      if (chain->blit.queue != VK_NULL_HANDLE) {
         VK_FROM_HANDLE(vk_queue, queue, chain->blit.queue);
         queue_family_index = queue->queue_family_index;
      } else {
         /* Queues returned by get_blit_queue() might not be listed in
          * GetPhysicalDeviceQueueFamilyProperties, so this check is skipped for those queues.
          */
         if (!(wsi->queue_supports_blit & BITFIELD64_BIT(queue_family_index)))
            continue;
      }

      const VkCommandPoolCreateInfo cmd_pool_info = {
         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
         .pNext = NULL,
         .flags = 0,
         .queueFamilyIndex = queue_family_index,
      };
      result = wsi->CreateCommandPool(_device, &cmd_pool_info, &chain->alloc,
                                      &chain->cmd_pools[i]);
      if (result != VK_SUCCESS)
         goto fail;
   }

   result = configure_image(chain, pCreateInfo, image_params,
                            &chain->image_info);
   if (result != VK_SUCCESS)
      goto fail;

   return VK_SUCCESS;

fail:
   wsi_swapchain_finish(chain);
   return result;
}
481
/*
 * Ask the surface's platform backend whether it supports the given present
 * mode, using the usual two-call enumeration pattern.  Returns false on
 * any query or allocation failure.  Note the "fail" label is also reached
 * on the success path; it merely frees the temporary mode list.
 */
static bool
wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
                                        const VkSwapchainCreateInfoKHR *pCreateInfo,
                                        VkPresentModeKHR mode)
{
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_interface *iface = wsi->wsi[surface->platform];
   VkPresentModeKHR *present_modes;
   uint32_t present_mode_count;
   bool supported = false;
   VkResult result;

   /* First call: get the count only. */
   result = iface->get_present_modes(surface, wsi, &present_mode_count, NULL);
   if (result != VK_SUCCESS)
      return supported;

   present_modes = malloc(present_mode_count * sizeof(*present_modes));
   if (!present_modes)
      return supported;

   /* Second call: fill the list. */
   result = iface->get_present_modes(surface, wsi, &present_mode_count,
                                     present_modes);
   if (result != VK_SUCCESS)
      goto fail;

   for (uint32_t i = 0; i < present_mode_count; i++) {
      if (present_modes[i] == mode) {
         supported = true;
         break;
      }
   }

fail:
   free(present_modes);
   return supported;
}
518
/*
 * Return the present mode a new swapchain should actually use: the
 * MESA_VK_WSI_PRESENT_MODE override when one was set at device init and
 * the surface supports it, otherwise the application's requested mode.
 */
enum VkPresentModeKHR
wsi_swapchain_get_present_mode(struct wsi_device *wsi,
                               const VkSwapchainCreateInfoKHR *pCreateInfo)
{
   /* MAX_ENUM is the "no override" sentinel set in wsi_device_init(). */
   if (wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR)
      return pCreateInfo->presentMode;

   if (!wsi_swapchain_is_present_mode_supported(wsi, pCreateInfo,
                                                wsi->override_present_mode)) {
      fprintf(stderr, "Unsupported MESA_VK_WSI_PRESENT_MODE value!\n");
      return pCreateInfo->presentMode;
   }

   return wsi->override_present_mode;
}
534
/*
 * Counterpart of wsi_swapchain_init(); also serves as its failure-path
 * cleanup.  Destroys the per-image fences and blit semaphores (when
 * allocated), the chain-level semaphores, and every command pool, then
 * finishes the object base.  Safe on a partially initialized chain since
 * wsi_swapchain_init() zeroes the struct before anything else.
 */
void
wsi_swapchain_finish(struct wsi_swapchain *chain)
{
   wsi_destroy_image_info(chain, &chain->image_info);

   if (chain->fences) {
      for (unsigned i = 0; i < chain->image_count; i++)
         chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);

      vk_free(&chain->alloc, chain->fences);
   }
   if (chain->blit.semaphores) {
      for (unsigned i = 0; i < chain->image_count; i++)
         chain->wsi->DestroySemaphore(chain->device, chain->blit.semaphores[i], &chain->alloc);

      vk_free(&chain->alloc, chain->blit.semaphores);
   }
   chain->wsi->DestroySemaphore(chain->device, chain->dma_buf_semaphore,
                                &chain->alloc);
   chain->wsi->DestroySemaphore(chain->device, chain->present_id_timeline,
                                &chain->alloc);

   /* Must mirror the pool-count logic in wsi_swapchain_init(). */
   int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ?
      1 : chain->wsi->queue_family_count;
   for (uint32_t i = 0; i < cmd_pools_count; i++) {
      /* Pools for non-blit-capable families were never created. */
      if (!chain->cmd_pools[i])
         continue;
      chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
                                     &chain->alloc);
   }
   vk_free(&chain->alloc, chain->cmd_pools);

   vk_object_base_finish(&chain->base);
}
569
/*
 * Fill *info with the generic VkImageCreateInfo (plus chained external
 * memory, WSI, and mutable-format structs) shared by every image in the
 * swapchain; backend code further specializes the result.  handle_types,
 * when non-zero, is advertised through a chained
 * VkExternalMemoryImageCreateInfo.  Cleans up after itself and returns
 * VK_ERROR_OUT_OF_HOST_MEMORY on allocation failure.
 */
VkResult
wsi_configure_image(const struct wsi_swapchain *chain,
                    const VkSwapchainCreateInfoKHR *pCreateInfo,
                    VkExternalMemoryHandleTypeFlags handle_types,
                    struct wsi_image_info *info)
{
   memset(info, 0, sizeof(*info));
   uint32_t queue_family_count = 1;

   if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
      queue_family_count = pCreateInfo->queueFamilyIndexCount;

   /*
    * TODO: there should be no reason to allocate this, but
    * 15331 shows that games crashed without doing this.
    */
   uint32_t *queue_family_indices =
      vk_alloc(&chain->alloc,
               sizeof(*queue_family_indices) *
               queue_family_count,
               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!queue_family_indices)
      goto err_oom;

   if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
      for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; i++)
         queue_family_indices[i] = pCreateInfo->pQueueFamilyIndices[i];

   info->create = (VkImageCreateInfo) {
      .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
      .flags = VK_IMAGE_CREATE_ALIAS_BIT,
      .imageType = VK_IMAGE_TYPE_2D,
      .format = pCreateInfo->imageFormat,
      .extent = {
         .width = pCreateInfo->imageExtent.width,
         .height = pCreateInfo->imageExtent.height,
         .depth = 1,
      },
      .mipLevels = 1,
      .arrayLayers = 1,
      .samples = VK_SAMPLE_COUNT_1_BIT,
      .tiling = VK_IMAGE_TILING_OPTIMAL,
      .usage = pCreateInfo->imageUsage,
      .sharingMode = pCreateInfo->imageSharingMode,
      .queueFamilyIndexCount = queue_family_count,
      .pQueueFamilyIndices = queue_family_indices,
      .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
   };

   if (handle_types != 0) {
      info->ext_mem = (VkExternalMemoryImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
         .handleTypes = handle_types,
      };
      __vk_append_struct(&info->create, &info->ext_mem);
   }

   /* Marks the image as a WSI image for the driver. */
   info->wsi = (struct wsi_image_create_info) {
      .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
   };
   __vk_append_struct(&info->create, &info->wsi);

   if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
      info->create.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
                            VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;

      /* A mutable-format swapchain must come with a view-format list;
       * that precondition is only checked via assume() here — presumably
       * validated earlier by the app/validation layers.
       */
      const VkImageFormatListCreateInfo *format_list_in =
         vk_find_struct_const(pCreateInfo->pNext,
                              IMAGE_FORMAT_LIST_CREATE_INFO);

      assume(format_list_in && format_list_in->viewFormatCount > 0);

      /* Copy the caller's list; it only needs to live for the create call
       * on the caller's side, but we reuse it for every image.
       */
      const uint32_t view_format_count = format_list_in->viewFormatCount;
      VkFormat *view_formats =
         vk_alloc(&chain->alloc, sizeof(VkFormat) * view_format_count,
                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!view_formats)
         goto err_oom;

      /* Debug-only sanity check: the swapchain's own format must be in
       * the view-format list.
       */
      ASSERTED bool format_found = false;
      for (uint32_t i = 0; i < format_list_in->viewFormatCount; i++) {
         if (pCreateInfo->imageFormat == format_list_in->pViewFormats[i])
            format_found = true;
         view_formats[i] = format_list_in->pViewFormats[i];
      }
      assert(format_found);

      info->format_list = (VkImageFormatListCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,
         .viewFormatCount = view_format_count,
         .pViewFormats = view_formats,
      };
      __vk_append_struct(&info->create, &info->format_list);
   }

   return VK_SUCCESS;

err_oom:
   wsi_destroy_image_info(chain, info);
   return VK_ERROR_OUT_OF_HOST_MEMORY;
}
671
672 void
wsi_destroy_image_info(const struct wsi_swapchain * chain,struct wsi_image_info * info)673 wsi_destroy_image_info(const struct wsi_swapchain *chain,
674 struct wsi_image_info *info)
675 {
676 if (info->create.pQueueFamilyIndices != NULL) {
677 vk_free(&chain->alloc, (void *)info->create.pQueueFamilyIndices);
678 info->create.pQueueFamilyIndices = NULL;
679 }
680 if (info->format_list.pViewFormats != NULL) {
681 vk_free(&chain->alloc, (void *)info->format_list.pViewFormats);
682 info->format_list.pViewFormats = NULL;
683 }
684 if (info->drm_mod_list.pDrmFormatModifiers != NULL) {
685 vk_free(&chain->alloc, (void *)info->drm_mod_list.pDrmFormatModifiers);
686 info->drm_mod_list.pDrmFormatModifiers = NULL;
687 }
688 if (info->modifier_props != NULL) {
689 vk_free(&chain->alloc, info->modifier_props);
690 info->modifier_props = NULL;
691 }
692 }
693
/*
 * Create one swapchain image as described by info: create the VkImage,
 * allocate its memory via the backend's create_mem hook, bind them, then
 * run the optional finish_create hook.  On failure any partially created
 * pieces are cleaned up with wsi_destroy_image() before returning the
 * error.
 */
VkResult
wsi_create_image(const struct wsi_swapchain *chain,
                 const struct wsi_image_info *info,
                 struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   memset(image, 0, sizeof(*image));

#ifndef _WIN32
   /* -1 marks "no dma-buf fd" for wsi_destroy_image(). */
   image->dma_buf_fd = -1;
#endif

   result = wsi->CreateImage(chain->device, &info->create,
                             &chain->alloc, &image->image);
   if (result != VK_SUCCESS)
      goto fail;

   result = info->create_mem(chain, info, image);
   if (result != VK_SUCCESS)
      goto fail;

   result = wsi->BindImageMemory(chain->device, image->image,
                                 image->memory, 0);
   if (result != VK_SUCCESS)
      goto fail;

   if (info->finish_create) {
      result = info->finish_create(chain, info, image);
      if (result != VK_SUCCESS)
         goto fail;
   }

   return VK_SUCCESS;

fail:
   wsi_destroy_image(chain, image);
   return result;
}
734
/*
 * Destroy a (possibly partially created) swapchain image: close its
 * dma-buf fd if one was exported, unmap any CPU mapping, free the
 * per-family blit command buffers, and release the image/memory plus the
 * blit staging resources.  Relies on wsi_create_image() having zeroed the
 * struct so untouched handles are VK_NULL_HANDLE.
 */
void
wsi_destroy_image(const struct wsi_swapchain *chain,
                  struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;

#ifndef _WIN32
   if (image->dma_buf_fd >= 0)
      close(image->dma_buf_fd);
#endif

   if (image->cpu_map != NULL) {
      /* The mapping lives on the blit buffer's memory when one exists,
       * otherwise on the image's own memory.
       */
      wsi->UnmapMemory(chain->device, image->blit.buffer != VK_NULL_HANDLE ?
                                      image->blit.memory : image->memory);
   }

   if (image->blit.cmd_buffers) {
      /* Must mirror the pool-count logic in wsi_swapchain_init(). */
      int cmd_buffer_count =
         chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;

      for (uint32_t i = 0; i < cmd_buffer_count; i++) {
         if (!chain->cmd_pools[i])
            continue;
         wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
                                 1, &image->blit.cmd_buffers[i]);
      }
      vk_free(&chain->alloc, image->blit.cmd_buffers);
   }

   wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
   wsi->DestroyImage(chain->device, image->image, &chain->alloc);
   wsi->DestroyImage(chain->device, image->blit.image, &chain->alloc);
   wsi->FreeMemory(chain->device, image->blit.memory, &chain->alloc);
   wsi->DestroyBuffer(chain->device, image->blit.buffer, &chain->alloc);
}
770
771 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,uint32_t queueFamilyIndex,VkSurfaceKHR _surface,VkBool32 * pSupported)772 wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
773 uint32_t queueFamilyIndex,
774 VkSurfaceKHR _surface,
775 VkBool32 *pSupported)
776 {
777 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
778 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
779 struct wsi_device *wsi_device = device->wsi_device;
780 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
781
782 VkResult res = iface->get_support(surface, wsi_device,
783 queueFamilyIndex, pSupported);
784 if (res == VK_SUCCESS) {
785 bool blit = (wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)) != 0;
786 *pSupported = (bool)*pSupported && blit;
787 }
788
789 return res;
790 }
791
792 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,VkSurfaceCapabilitiesKHR * pSurfaceCapabilities)793 wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(
794 VkPhysicalDevice physicalDevice,
795 VkSurfaceKHR _surface,
796 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
797 {
798 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
799 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
800 struct wsi_device *wsi_device = device->wsi_device;
801 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
802
803 VkSurfaceCapabilities2KHR caps2 = {
804 .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
805 };
806
807 VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
808
809 if (result == VK_SUCCESS)
810 *pSurfaceCapabilities = caps2.surfaceCapabilities;
811
812 return result;
813 }
814
815 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,VkSurfaceCapabilities2KHR * pSurfaceCapabilities)816 wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(
817 VkPhysicalDevice physicalDevice,
818 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
819 VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
820 {
821 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
822 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
823 struct wsi_device *wsi_device = device->wsi_device;
824 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
825
826 return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
827 pSurfaceCapabilities);
828 }
829
/*
 * Common vkGetPhysicalDeviceSurfaceCapabilities2EXT, implemented on top of
 * the capabilities2 path: a wsi_surface_supported_counters struct is
 * chained in so the backend can report surface counters, and on success
 * the KHR capabilities are copied field by field into the EXT struct.
 */
VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(
   VkPhysicalDevice physicalDevice,
   VkSurfaceKHR _surface,
   VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   assert(pSurfaceCapabilities->sType ==
          VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT);

   /* The caller's pNext chain is re-attached behind the counters struct so
    * backends still see any extension structs the app chained.
    */
   struct wsi_surface_supported_counters counters = {
      .sType = VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA,
      .pNext = pSurfaceCapabilities->pNext,
      .supported_surface_counters = 0,
   };

   VkSurfaceCapabilities2KHR caps2 = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
      .pNext = &counters,
   };

   VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);

   if (result == VK_SUCCESS) {
      VkSurfaceCapabilities2EXT *ext_caps = pSurfaceCapabilities;
      VkSurfaceCapabilitiesKHR khr_caps = caps2.surfaceCapabilities;

      ext_caps->minImageCount = khr_caps.minImageCount;
      ext_caps->maxImageCount = khr_caps.maxImageCount;
      ext_caps->currentExtent = khr_caps.currentExtent;
      ext_caps->minImageExtent = khr_caps.minImageExtent;
      ext_caps->maxImageExtent = khr_caps.maxImageExtent;
      ext_caps->maxImageArrayLayers = khr_caps.maxImageArrayLayers;
      ext_caps->supportedTransforms = khr_caps.supportedTransforms;
      ext_caps->currentTransform = khr_caps.currentTransform;
      ext_caps->supportedCompositeAlpha = khr_caps.supportedCompositeAlpha;
      ext_caps->supportedUsageFlags = khr_caps.supportedUsageFlags;
      ext_caps->supportedSurfaceCounters = counters.supported_surface_counters;
   }

   return result;
}
876
877 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pSurfaceFormatCount,VkSurfaceFormatKHR * pSurfaceFormats)878 wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
879 VkSurfaceKHR _surface,
880 uint32_t *pSurfaceFormatCount,
881 VkSurfaceFormatKHR *pSurfaceFormats)
882 {
883 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
884 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
885 struct wsi_device *wsi_device = device->wsi_device;
886 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
887
888 return iface->get_formats(surface, wsi_device,
889 pSurfaceFormatCount, pSurfaceFormats);
890 }
891
892 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,uint32_t * pSurfaceFormatCount,VkSurfaceFormat2KHR * pSurfaceFormats)893 wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
894 const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,
895 uint32_t *pSurfaceFormatCount,
896 VkSurfaceFormat2KHR *pSurfaceFormats)
897 {
898 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
899 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
900 struct wsi_device *wsi_device = device->wsi_device;
901 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
902
903 return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
904 pSurfaceFormatCount, pSurfaceFormats);
905 }
906
907 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pPresentModeCount,VkPresentModeKHR * pPresentModes)908 wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
909 VkSurfaceKHR _surface,
910 uint32_t *pPresentModeCount,
911 VkPresentModeKHR *pPresentModes)
912 {
913 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
914 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
915 struct wsi_device *wsi_device = device->wsi_device;
916 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
917
918 return iface->get_present_modes(surface, wsi_device, pPresentModeCount,
919 pPresentModes);
920 }
921
922 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pRectCount,VkRect2D * pRects)923 wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
924 VkSurfaceKHR _surface,
925 uint32_t *pRectCount,
926 VkRect2D *pRects)
927 {
928 VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
929 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
930 struct wsi_device *wsi_device = device->wsi_device;
931 struct wsi_interface *iface = wsi_device->wsi[surface->platform];
932
933 return iface->get_present_rectangles(surface, wsi_device,
934 pRectCount, pRects);
935 }
936
VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateSwapchainKHR(VkDevice _device,
                       const VkSwapchainCreateInfoKHR *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkSwapchainKHR *pSwapchain)
{
   /* Create a swapchain through the surface's platform backend, then
    * allocate the common per-image throttle-fence array, the optional
    * present-id timeline semaphore, and the optional blit semaphores.
    * On any failure the partially-built swapchain is destroyed and an
    * error is returned without touching *pSwapchain.
    */
   MESA_TRACE_FUNC();
   VK_FROM_HANDLE(vk_device, device, _device);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_device *wsi_device = device->physical->wsi_device;
   /* force_headless_swapchain (debug option) reroutes every swapchain to
    * the headless backend regardless of the surface's real platform. */
   struct wsi_interface *iface = wsi_device->force_headless_swapchain ?
      wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] :
      wsi_device->wsi[surface->platform];
   const VkAllocationCallbacks *alloc;
   struct wsi_swapchain *swapchain;

   /* Use the caller's allocator when provided, else the device's. */
   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;

   /* Local copy so the extent override below doesn't mutate caller data. */
   VkSwapchainCreateInfoKHR info = *pCreateInfo;

   if (wsi_device->force_swapchain_to_currentExtent) {
      /* Debug option: ignore the requested extent and use whatever the
       * surface currently reports. */
      VkSurfaceCapabilities2KHR caps2 = {
         .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
      };
      iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
      info.imageExtent = caps2.surfaceCapabilities.currentExtent;
   }

   /* Ignore DEFERRED_MEMORY_ALLOCATION_BIT. Would require deep plumbing to be able to take advantage of it.
    * bool deferred_allocation = pCreateInfo->flags & VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_EXT;
    */

   VkResult result = iface->create_swapchain(surface, _device, wsi_device,
                                             &info, alloc,
                                             &swapchain);
   if (result != VK_SUCCESS)
      return result;

   /* One fence per image, used to throttle re-presents of the same image. */
   swapchain->fences = vk_zalloc(alloc,
                                 sizeof (*swapchain->fences) * swapchain->image_count,
                                 sizeof (*swapchain->fences),
                                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!swapchain->fences) {
      swapchain->destroy(swapchain, alloc);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   if (wsi_device->khr_present_wait) {
      /* Timeline semaphore that present ids are signaled on; waited on by
       * vkWaitForPresentKHR. */
      const VkSemaphoreTypeCreateInfo type_info = {
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
         .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
      };

      const VkSemaphoreCreateInfo sem_info = {
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
         .pNext = &type_info,
         .flags = 0,
      };

      /* We assume here that a driver exposing present_wait also exposes VK_KHR_timeline_semaphore. */
      result = wsi_device->CreateSemaphore(_device, &sem_info, alloc, &swapchain->present_id_timeline);
      if (result != VK_SUCCESS) {
         swapchain->destroy(swapchain, alloc);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }

   if (swapchain->blit.queue != VK_NULL_HANDLE) {
      /* Per-image semaphores ordering the app submit against the blit
       * submit on the driver's private blit queue. */
      swapchain->blit.semaphores = vk_zalloc(alloc,
                                             sizeof (*swapchain->blit.semaphores) * swapchain->image_count,
                                             sizeof (*swapchain->blit.semaphores),
                                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!swapchain->blit.semaphores) {
         /* DestroySemaphore on VK_NULL_HANDLE is a no-op when present_wait
          * was disabled and no timeline was created. */
         wsi_device->DestroySemaphore(_device, swapchain->present_id_timeline, alloc);
         swapchain->destroy(swapchain, alloc);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }

   *pSwapchain = wsi_swapchain_to_handle(swapchain);

   return VK_SUCCESS;
}
1023
1024 VKAPI_ATTR void VKAPI_CALL
wsi_DestroySwapchainKHR(VkDevice _device,VkSwapchainKHR _swapchain,const VkAllocationCallbacks * pAllocator)1025 wsi_DestroySwapchainKHR(VkDevice _device,
1026 VkSwapchainKHR _swapchain,
1027 const VkAllocationCallbacks *pAllocator)
1028 {
1029 MESA_TRACE_FUNC();
1030 VK_FROM_HANDLE(vk_device, device, _device);
1031 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1032 const VkAllocationCallbacks *alloc;
1033
1034 if (!swapchain)
1035 return;
1036
1037 if (pAllocator)
1038 alloc = pAllocator;
1039 else
1040 alloc = &device->alloc;
1041
1042 swapchain->destroy(swapchain, alloc);
1043 }
1044
1045 VKAPI_ATTR VkResult VKAPI_CALL
wsi_ReleaseSwapchainImagesEXT(VkDevice _device,const VkReleaseSwapchainImagesInfoEXT * pReleaseInfo)1046 wsi_ReleaseSwapchainImagesEXT(VkDevice _device,
1047 const VkReleaseSwapchainImagesInfoEXT *pReleaseInfo)
1048 {
1049 VK_FROM_HANDLE(wsi_swapchain, swapchain, pReleaseInfo->swapchain);
1050 VkResult result = swapchain->release_images(swapchain,
1051 pReleaseInfo->imageIndexCount,
1052 pReleaseInfo->pImageIndices);
1053
1054 if (result != VK_SUCCESS)
1055 return result;
1056
1057 if (swapchain->wsi->set_memory_ownership) {
1058 for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
1059 uint32_t image_index = pReleaseInfo->pImageIndices[i];
1060 VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1061 swapchain->wsi->set_memory_ownership(swapchain->device, mem, false);
1062 }
1063 }
1064
1065 return VK_SUCCESS;
1066 }
1067
1068 VkResult
wsi_common_get_images(VkSwapchainKHR _swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)1069 wsi_common_get_images(VkSwapchainKHR _swapchain,
1070 uint32_t *pSwapchainImageCount,
1071 VkImage *pSwapchainImages)
1072 {
1073 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1074 VK_OUTARRAY_MAKE_TYPED(VkImage, images, pSwapchainImages, pSwapchainImageCount);
1075
1076 for (uint32_t i = 0; i < swapchain->image_count; i++) {
1077 vk_outarray_append_typed(VkImage, &images, image) {
1078 *image = swapchain->get_wsi_image(swapchain, i)->image;
1079 }
1080 }
1081
1082 return vk_outarray_status(&images);
1083 }
1084
1085 VkImage
wsi_common_get_image(VkSwapchainKHR _swapchain,uint32_t index)1086 wsi_common_get_image(VkSwapchainKHR _swapchain, uint32_t index)
1087 {
1088 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1089 assert(index < swapchain->image_count);
1090 return swapchain->get_wsi_image(swapchain, index)->image;
1091 }
1092
1093 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetSwapchainImagesKHR(VkDevice device,VkSwapchainKHR swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)1094 wsi_GetSwapchainImagesKHR(VkDevice device,
1095 VkSwapchainKHR swapchain,
1096 uint32_t *pSwapchainImageCount,
1097 VkImage *pSwapchainImages)
1098 {
1099 MESA_TRACE_FUNC();
1100 return wsi_common_get_images(swapchain,
1101 pSwapchainImageCount,
1102 pSwapchainImages);
1103 }
1104
1105 VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImageKHR(VkDevice _device,VkSwapchainKHR swapchain,uint64_t timeout,VkSemaphore semaphore,VkFence fence,uint32_t * pImageIndex)1106 wsi_AcquireNextImageKHR(VkDevice _device,
1107 VkSwapchainKHR swapchain,
1108 uint64_t timeout,
1109 VkSemaphore semaphore,
1110 VkFence fence,
1111 uint32_t *pImageIndex)
1112 {
1113 MESA_TRACE_FUNC();
1114 VK_FROM_HANDLE(vk_device, device, _device);
1115
1116 const VkAcquireNextImageInfoKHR acquire_info = {
1117 .sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
1118 .swapchain = swapchain,
1119 .timeout = timeout,
1120 .semaphore = semaphore,
1121 .fence = fence,
1122 .deviceMask = 0,
1123 };
1124
1125 return device->dispatch_table.AcquireNextImage2KHR(_device, &acquire_info,
1126 pImageIndex);
1127 }
1128
1129 static VkResult
wsi_signal_semaphore_for_image(struct vk_device * device,const struct wsi_swapchain * chain,const struct wsi_image * image,VkSemaphore _semaphore)1130 wsi_signal_semaphore_for_image(struct vk_device *device,
1131 const struct wsi_swapchain *chain,
1132 const struct wsi_image *image,
1133 VkSemaphore _semaphore)
1134 {
1135 if (device->physical->supported_sync_types == NULL)
1136 return VK_SUCCESS;
1137
1138 VK_FROM_HANDLE(vk_semaphore, semaphore, _semaphore);
1139
1140 vk_semaphore_reset_temporary(device, semaphore);
1141
1142 #ifdef HAVE_LIBDRM
1143 VkResult result = wsi_create_sync_for_dma_buf_wait(chain, image,
1144 VK_SYNC_FEATURE_GPU_WAIT,
1145 &semaphore->temporary);
1146 if (result != VK_ERROR_FEATURE_NOT_PRESENT)
1147 return result;
1148 #endif
1149
1150 if (chain->wsi->signal_semaphore_with_memory) {
1151 return device->create_sync_for_memory(device, image->memory,
1152 false /* signal_memory */,
1153 &semaphore->temporary);
1154 } else {
1155 return vk_sync_create(device, &vk_sync_dummy_type,
1156 0 /* flags */, 0 /* initial_value */,
1157 &semaphore->temporary);
1158 }
1159 }
1160
1161 static VkResult
wsi_signal_fence_for_image(struct vk_device * device,const struct wsi_swapchain * chain,const struct wsi_image * image,VkFence _fence)1162 wsi_signal_fence_for_image(struct vk_device *device,
1163 const struct wsi_swapchain *chain,
1164 const struct wsi_image *image,
1165 VkFence _fence)
1166 {
1167 if (device->physical->supported_sync_types == NULL)
1168 return VK_SUCCESS;
1169
1170 VK_FROM_HANDLE(vk_fence, fence, _fence);
1171
1172 vk_fence_reset_temporary(device, fence);
1173
1174 #ifdef HAVE_LIBDRM
1175 VkResult result = wsi_create_sync_for_dma_buf_wait(chain, image,
1176 VK_SYNC_FEATURE_CPU_WAIT,
1177 &fence->temporary);
1178 if (result != VK_ERROR_FEATURE_NOT_PRESENT)
1179 return result;
1180 #endif
1181
1182 if (chain->wsi->signal_fence_with_memory) {
1183 return device->create_sync_for_memory(device, image->memory,
1184 false /* signal_memory */,
1185 &fence->temporary);
1186 } else {
1187 return vk_sync_create(device, &vk_sync_dummy_type,
1188 0 /* flags */, 0 /* initial_value */,
1189 &fence->temporary);
1190 }
1191 }
1192
VkResult
wsi_common_acquire_next_image2(const struct wsi_device *wsi,
                               VkDevice _device,
                               const VkAcquireNextImageInfoKHR *pAcquireInfo,
                               uint32_t *pImageIndex)
{
   /* Common vkAcquireNextImage2KHR: ask the backend for an image, then
    * arm the caller's semaphore/fence to signal when the presentation
    * engine is done with it.  Returns the backend's result (which may be
    * VK_SUBOPTIMAL_KHR) unless arming a sync object fails first.
    */
   VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);
   VK_FROM_HANDLE(vk_device, device, _device);

   /* VK_SUBOPTIMAL_KHR is a success code: keep going and return it at
    * the end so the app still gets its image. */
   VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
                                                   pImageIndex);
   if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
      return result;
   struct wsi_image *image =
      swapchain->get_wsi_image(swapchain, *pImageIndex);

   if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
      VkResult signal_result =
         wsi_signal_semaphore_for_image(device, swapchain, image,
                                        pAcquireInfo->semaphore);
      if (signal_result != VK_SUCCESS)
         return signal_result;
   }

   if (pAcquireInfo->fence != VK_NULL_HANDLE) {
      VkResult signal_result =
         wsi_signal_fence_for_image(device, swapchain, image,
                                    pAcquireInfo->fence);
      if (signal_result != VK_SUCCESS)
         return signal_result;
   }

   /* The acquired image's memory now belongs to the application. */
   if (wsi->set_memory_ownership)
      wsi->set_memory_ownership(swapchain->device, image->memory, true);

   return result;
}
1230
1231 VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImage2KHR(VkDevice _device,const VkAcquireNextImageInfoKHR * pAcquireInfo,uint32_t * pImageIndex)1232 wsi_AcquireNextImage2KHR(VkDevice _device,
1233 const VkAcquireNextImageInfoKHR *pAcquireInfo,
1234 uint32_t *pImageIndex)
1235 {
1236 MESA_TRACE_FUNC();
1237 VK_FROM_HANDLE(vk_device, device, _device);
1238
1239 return wsi_common_acquire_next_image2(device->physical->wsi_device,
1240 _device, pAcquireInfo, pImageIndex);
1241 }
1242
wsi_signal_present_id_timeline(struct wsi_swapchain * swapchain,VkQueue queue,uint64_t present_id,VkFence present_fence)1243 static VkResult wsi_signal_present_id_timeline(struct wsi_swapchain *swapchain,
1244 VkQueue queue, uint64_t present_id,
1245 VkFence present_fence)
1246 {
1247 assert(swapchain->present_id_timeline || present_fence);
1248
1249 const VkTimelineSemaphoreSubmitInfo timeline_info = {
1250 .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
1251 .pSignalSemaphoreValues = &present_id,
1252 .signalSemaphoreValueCount = 1,
1253 };
1254
1255 const VkSubmitInfo submit_info = {
1256 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1257 .pNext = &timeline_info,
1258 .signalSemaphoreCount = 1,
1259 .pSignalSemaphores = &swapchain->present_id_timeline,
1260 };
1261
1262 uint32_t submit_count = present_id ? 1 : 0;
1263 return swapchain->wsi->QueueSubmit(queue, submit_count, &submit_info, present_fence);
1264 }
1265
/* Check the per-frame, trigger-file, and hotkey trace triggers and capture
 * a GPU trace of the given queue if any of them fired.  Returns the result
 * of the capture, or VK_SUCCESS when tracing is disabled or not triggered.
 * Serialized against other queues via device->trace_mtx.
 */
static VkResult
handle_trace(VkQueue queue, struct vk_device *device)
{
   struct vk_instance *instance = device->physical->instance;
   if (!instance->trace_mode)
      return VK_SUCCESS;

   simple_mtx_lock(&device->trace_mtx);

   /* Trigger once when the configured frame number is reached; stop
    * counting frames after that point. */
   bool frame_trigger = device->current_frame == instance->trace_frame;
   if (device->current_frame <= instance->trace_frame)
      device->current_frame++;

   bool file_trigger = false;
#ifndef _WIN32
   /* A writable trigger file requests a one-shot capture; delete it so the
    * next frame does not re-trigger. */
   if (instance->trace_trigger_file && access(instance->trace_trigger_file, W_OK) == 0) {
      if (unlink(instance->trace_trigger_file) == 0) {
         file_trigger = true;
      } else {
         /* Do not enable tracing if we cannot remove the file,
          * because by then we'll trace every frame ... */
         fprintf(stderr, "Could not remove trace trigger file, ignoring\n");
      }
   }
#endif

   VkResult result = VK_SUCCESS;
   if (frame_trigger || file_trigger || device->trace_hotkey_trigger)
      result = device->capture_trace(queue);

   /* The hotkey trigger is one-shot as well. */
   device->trace_hotkey_trigger = false;

   simple_mtx_unlock(&device->trace_mtx);

   return result;
}
1302
1303 VkResult
wsi_common_queue_present(const struct wsi_device * wsi,VkDevice device,VkQueue queue,int queue_family_index,const VkPresentInfoKHR * pPresentInfo)1304 wsi_common_queue_present(const struct wsi_device *wsi,
1305 VkDevice device,
1306 VkQueue queue,
1307 int queue_family_index,
1308 const VkPresentInfoKHR *pPresentInfo)
1309 {
1310 VkResult final_result = handle_trace(queue, vk_device_from_handle(device));
1311
1312 STACK_ARRAY(VkPipelineStageFlags, stage_flags,
1313 MAX2(1, pPresentInfo->waitSemaphoreCount));
1314 for (uint32_t s = 0; s < MAX2(1, pPresentInfo->waitSemaphoreCount); s++)
1315 stage_flags[s] = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1316
1317 const VkPresentRegionsKHR *regions =
1318 vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
1319 const VkPresentIdKHR *present_ids =
1320 vk_find_struct_const(pPresentInfo->pNext, PRESENT_ID_KHR);
1321 const VkSwapchainPresentFenceInfoEXT *present_fence_info =
1322 vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_FENCE_INFO_EXT);
1323 const VkSwapchainPresentModeInfoEXT *present_mode_info =
1324 vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_MODE_INFO_EXT);
1325
1326 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
1327 VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
1328 uint32_t image_index = pPresentInfo->pImageIndices[i];
1329 VkResult result;
1330
1331 /* Update the present mode for this present and any subsequent present. */
1332 if (present_mode_info && present_mode_info->pPresentModes && swapchain->set_present_mode)
1333 swapchain->set_present_mode(swapchain, present_mode_info->pPresentModes[i]);
1334
1335 if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
1336 const VkFenceCreateInfo fence_info = {
1337 .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
1338 .pNext = NULL,
1339 .flags = VK_FENCE_CREATE_SIGNALED_BIT,
1340 };
1341 result = wsi->CreateFence(device, &fence_info,
1342 &swapchain->alloc,
1343 &swapchain->fences[image_index]);
1344 if (result != VK_SUCCESS)
1345 goto fail_present;
1346
1347 if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT &&
1348 swapchain->blit.queue != VK_NULL_HANDLE) {
1349 const VkSemaphoreCreateInfo sem_info = {
1350 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
1351 .pNext = NULL,
1352 .flags = 0,
1353 };
1354 result = wsi->CreateSemaphore(device, &sem_info,
1355 &swapchain->alloc,
1356 &swapchain->blit.semaphores[image_index]);
1357 if (result != VK_SUCCESS)
1358 goto fail_present;
1359 }
1360 } else {
1361 MESA_TRACE_SCOPE("throttle");
1362 result =
1363 wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1364 true, ~0ull);
1365 if (result != VK_SUCCESS)
1366 goto fail_present;
1367 }
1368
1369 result = wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
1370 if (result != VK_SUCCESS)
1371 goto fail_present;
1372
1373 VkSubmitInfo submit_info = {
1374 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1375 };
1376
1377 if (i == 0) {
1378 /* We only need/want to wait on semaphores once. After that, we're
1379 * guaranteed ordering since it all happens on the same queue.
1380 */
1381 submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
1382 submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;
1383 submit_info.pWaitDstStageMask = stage_flags;
1384 }
1385
1386 struct wsi_image *image =
1387 swapchain->get_wsi_image(swapchain, image_index);
1388
1389 VkQueue submit_queue = queue;
1390 if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
1391 if (swapchain->blit.queue == VK_NULL_HANDLE) {
1392 submit_info.commandBufferCount = 1;
1393 submit_info.pCommandBuffers =
1394 &image->blit.cmd_buffers[queue_family_index];
1395 } else {
1396 /* If we are using a blit using the driver's private queue, then
1397 * do an empty submit signalling a semaphore, and then submit the
1398 * blit waiting on that. This ensures proper queue ordering of
1399 * vkQueueSubmit() calls.
1400 */
1401 submit_info.signalSemaphoreCount = 1;
1402 submit_info.pSignalSemaphores =
1403 &swapchain->blit.semaphores[image_index];
1404
1405 result = wsi->QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
1406 if (result != VK_SUCCESS)
1407 goto fail_present;
1408
1409 /* Now prepare the blit submit. It needs to then wait on the
1410 * semaphore we signaled above.
1411 */
1412 submit_queue = swapchain->blit.queue;
1413 submit_info.waitSemaphoreCount = 1;
1414 submit_info.pWaitSemaphores = submit_info.pSignalSemaphores;
1415 submit_info.signalSemaphoreCount = 0;
1416 submit_info.pSignalSemaphores = NULL;
1417 submit_info.commandBufferCount = 1;
1418 submit_info.pCommandBuffers = &image->blit.cmd_buffers[0];
1419 submit_info.pWaitDstStageMask = stage_flags;
1420 }
1421 }
1422
1423 VkFence fence = swapchain->fences[image_index];
1424
1425 bool has_signal_dma_buf = false;
1426 #ifdef HAVE_LIBDRM
1427 result = wsi_prepare_signal_dma_buf_from_semaphore(swapchain, image);
1428 if (result == VK_SUCCESS) {
1429 assert(submit_info.signalSemaphoreCount == 0);
1430 submit_info.signalSemaphoreCount = 1;
1431 submit_info.pSignalSemaphores = &swapchain->dma_buf_semaphore;
1432 has_signal_dma_buf = true;
1433 } else if (result == VK_ERROR_FEATURE_NOT_PRESENT) {
1434 result = VK_SUCCESS;
1435 has_signal_dma_buf = false;
1436 } else {
1437 goto fail_present;
1438 }
1439 #endif
1440
1441 struct wsi_memory_signal_submit_info mem_signal;
1442 if (!has_signal_dma_buf) {
1443 /* If we don't have dma-buf signaling, signal the memory object by
1444 * chaining wsi_memory_signal_submit_info into VkSubmitInfo.
1445 */
1446 result = VK_SUCCESS;
1447 has_signal_dma_buf = false;
1448 mem_signal = (struct wsi_memory_signal_submit_info) {
1449 .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
1450 .memory = image->memory,
1451 };
1452 __vk_append_struct(&submit_info, &mem_signal);
1453 }
1454
1455 result = wsi->QueueSubmit(submit_queue, 1, &submit_info, fence);
1456 if (result != VK_SUCCESS)
1457 goto fail_present;
1458
1459 #ifdef HAVE_LIBDRM
1460 if (has_signal_dma_buf) {
1461 result = wsi_signal_dma_buf_from_semaphore(swapchain, image);
1462 if (result != VK_SUCCESS)
1463 goto fail_present;
1464 }
1465 #else
1466 assert(!has_signal_dma_buf);
1467 #endif
1468
1469 if (wsi->sw)
1470 wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1471 true, ~0ull);
1472
1473 const VkPresentRegionKHR *region = NULL;
1474 if (regions && regions->pRegions)
1475 region = ®ions->pRegions[i];
1476
1477 uint64_t present_id = 0;
1478 if (present_ids && present_ids->pPresentIds)
1479 present_id = present_ids->pPresentIds[i];
1480 VkFence present_fence = VK_NULL_HANDLE;
1481 if (present_fence_info && present_fence_info->pFences)
1482 present_fence = present_fence_info->pFences[i];
1483
1484 if (present_id || present_fence) {
1485 result = wsi_signal_present_id_timeline(swapchain, queue, present_id, present_fence);
1486 if (result != VK_SUCCESS)
1487 goto fail_present;
1488 }
1489
1490 result = swapchain->queue_present(swapchain, image_index, present_id, region);
1491 if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
1492 goto fail_present;
1493
1494 if (wsi->set_memory_ownership) {
1495 VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1496 wsi->set_memory_ownership(swapchain->device, mem, false);
1497 }
1498
1499 fail_present:
1500 if (pPresentInfo->pResults != NULL)
1501 pPresentInfo->pResults[i] = result;
1502
1503 /* Let the final result be our first unsuccessful result */
1504 if (final_result == VK_SUCCESS)
1505 final_result = result;
1506 }
1507
1508 STACK_ARRAY_FINISH(stage_flags);
1509
1510 return final_result;
1511 }
1512
1513 VKAPI_ATTR VkResult VKAPI_CALL
wsi_QueuePresentKHR(VkQueue _queue,const VkPresentInfoKHR * pPresentInfo)1514 wsi_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
1515 {
1516 MESA_TRACE_FUNC();
1517 VK_FROM_HANDLE(vk_queue, queue, _queue);
1518
1519 return wsi_common_queue_present(queue->base.device->physical->wsi_device,
1520 vk_device_to_handle(queue->base.device),
1521 _queue,
1522 queue->queue_family_index,
1523 pPresentInfo);
1524 }
1525
1526 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,VkDeviceGroupPresentCapabilitiesKHR * pCapabilities)1527 wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,
1528 VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
1529 {
1530 memset(pCapabilities->presentMask, 0,
1531 sizeof(pCapabilities->presentMask));
1532 pCapabilities->presentMask[0] = 0x1;
1533 pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
1534
1535 return VK_SUCCESS;
1536 }
1537
1538 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,VkSurfaceKHR surface,VkDeviceGroupPresentModeFlagsKHR * pModes)1539 wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,
1540 VkSurfaceKHR surface,
1541 VkDeviceGroupPresentModeFlagsKHR *pModes)
1542 {
1543 *pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
1544
1545 return VK_SUCCESS;
1546 }
1547
1548 bool
wsi_common_vk_instance_supports_present_wait(const struct vk_instance * instance)1549 wsi_common_vk_instance_supports_present_wait(const struct vk_instance *instance)
1550 {
1551 /* We can only expose KHR_present_wait and KHR_present_id
1552 * if we are guaranteed support on all potential VkSurfaceKHR objects. */
1553 if (instance->enabled_extensions.KHR_win32_surface ||
1554 instance->enabled_extensions.KHR_android_surface) {
1555 return false;
1556 }
1557
1558 return true;
1559 }
1560
VkResult
wsi_common_create_swapchain_image(const struct wsi_device *wsi,
                                  const VkImageCreateInfo *pCreateInfo,
                                  VkSwapchainKHR _swapchain,
                                  VkImage *pImage)
{
   /* Create an image for VK_KHR_swapchain's image-from-swapchain path.
    * The application's create info is only validated (debug builds) against
    * the swapchain's canonical create info, which is what actually gets
    * used for the image, guaranteeing it is compatible with the
    * swapchain's own images.
    */
   VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);

#ifndef NDEBUG
   /* Debug-only sanity checks: the app-provided info must be a subset of
    * what the swapchain was created with. */
   const VkImageCreateInfo *swcInfo = &chain->image_info.create;
   assert(pCreateInfo->flags == 0);
   assert(pCreateInfo->imageType == swcInfo->imageType);
   assert(pCreateInfo->format == swcInfo->format);
   assert(pCreateInfo->extent.width == swcInfo->extent.width);
   assert(pCreateInfo->extent.height == swcInfo->extent.height);
   assert(pCreateInfo->extent.depth == swcInfo->extent.depth);
   assert(pCreateInfo->mipLevels == swcInfo->mipLevels);
   assert(pCreateInfo->arrayLayers == swcInfo->arrayLayers);
   assert(pCreateInfo->samples == swcInfo->samples);
   assert(pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL);
   assert(!(pCreateInfo->usage & ~swcInfo->usage));

   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO: {
         /* Every requested view format must appear in the swapchain's own
          * format list. */
         const VkImageFormatListCreateInfo *iflci =
            (const VkImageFormatListCreateInfo *)ext;
         const VkImageFormatListCreateInfo *swc_iflci =
            &chain->image_info.format_list;

         for (uint32_t i = 0; i < iflci->viewFormatCount; i++) {
            bool found = false;
            for (uint32_t j = 0; j < swc_iflci->viewFormatCount; j++) {
               if (iflci->pViewFormats[i] == swc_iflci->pViewFormats[j]) {
                  found = true;
                  break;
               }
            }
            assert(found);
         }
         break;
      }

      case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
         break;

      default:
         assert(!"Unsupported image create extension");
      }
   }
#endif

   /* Create with the swapchain's info, not the caller's. */
   return wsi->CreateImage(chain->device, &chain->image_info.create,
                           &chain->alloc, pImage);
}
1616
1617 VkResult
wsi_common_bind_swapchain_image(const struct wsi_device * wsi,VkImage vk_image,VkSwapchainKHR _swapchain,uint32_t image_idx)1618 wsi_common_bind_swapchain_image(const struct wsi_device *wsi,
1619 VkImage vk_image,
1620 VkSwapchainKHR _swapchain,
1621 uint32_t image_idx)
1622 {
1623 VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
1624 struct wsi_image *image = chain->get_wsi_image(chain, image_idx);
1625
1626 return wsi->BindImageMemory(chain->device, vk_image, image->memory, 0);
1627 }
1628
1629 VkResult
wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain * chain,uint64_t present_id,uint64_t timeout)1630 wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain *chain,
1631 uint64_t present_id, uint64_t timeout)
1632 {
1633 assert(chain->present_id_timeline);
1634 const VkSemaphoreWaitInfo wait_info = {
1635 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
1636 .semaphoreCount = 1,
1637 .pSemaphores = &chain->present_id_timeline,
1638 .pValues = &present_id,
1639 };
1640
1641 return chain->wsi->WaitSemaphores(chain->device, &wait_info, timeout);
1642 }
1643
1644 uint32_t
wsi_select_memory_type(const struct wsi_device * wsi,VkMemoryPropertyFlags req_props,VkMemoryPropertyFlags deny_props,uint32_t type_bits)1645 wsi_select_memory_type(const struct wsi_device *wsi,
1646 VkMemoryPropertyFlags req_props,
1647 VkMemoryPropertyFlags deny_props,
1648 uint32_t type_bits)
1649 {
1650 assert(type_bits != 0);
1651
1652 VkMemoryPropertyFlags common_props = ~0;
1653 u_foreach_bit(t, type_bits) {
1654 const VkMemoryType type = wsi->memory_props.memoryTypes[t];
1655
1656 common_props &= type.propertyFlags;
1657
1658 if (deny_props & type.propertyFlags)
1659 continue;
1660
1661 if (!(req_props & ~type.propertyFlags))
1662 return t;
1663 }
1664
1665 if ((deny_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
1666 (common_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
1667 /* If they asked for non-device-local and all the types are device-local
1668 * (this is commonly true for UMA platforms), try again without denying
1669 * device-local types
1670 */
1671 deny_props &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1672 return wsi_select_memory_type(wsi, req_props, deny_props, type_bits);
1673 }
1674
1675 unreachable("No memory type found");
1676 }
1677
1678 uint32_t
wsi_select_device_memory_type(const struct wsi_device * wsi,uint32_t type_bits)1679 wsi_select_device_memory_type(const struct wsi_device *wsi,
1680 uint32_t type_bits)
1681 {
1682 return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
1683 0 /* deny_props */, type_bits);
1684 }
1685
1686 static uint32_t
wsi_select_host_memory_type(const struct wsi_device * wsi,uint32_t type_bits)1687 wsi_select_host_memory_type(const struct wsi_device *wsi,
1688 uint32_t type_bits)
1689 {
1690 return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
1691 0 /* deny_props */, type_bits);
1692 }
1693
/**
 * Set up the buffer-blit resources for one swapchain image.
 *
 * Creates a linear VkBuffer of info->linear_size that the presentable image
 * is copied into at present time, allocates dedicated memory for that buffer
 * (optionally exportable as handle_types, or imported from a winsys-provided
 * shared-memory host pointer when info->alloc_shm is set), binds it, and
 * finally allocates dedicated memory for the presentable image itself.
 *
 * implicit_sync is forwarded to the driver via
 * VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA so it can enable implicit
 * synchronization on the buffer memory.
 *
 * Nothing is destroyed on failure; partially-created objects are released
 * when the caller tears down the wsi_image.
 */
VkResult
wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
                               const struct wsi_image_info *info,
                               struct wsi_image *image,
                               VkExternalMemoryHandleTypeFlags handle_types,
                               bool implicit_sync)
{
   assert(chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);

   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   /* The blit destination: a linear buffer the winsys can consume. */
   const VkExternalMemoryBufferCreateInfo buffer_external_info = {
      .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
      .pNext = NULL,
      .handleTypes = handle_types,
   };
   const VkBufferCreateInfo buffer_info = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .pNext = &buffer_external_info,
      .size = info->linear_size,
      .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   };
   result = wsi->CreateBuffer(chain->device, &buffer_info,
                              &chain->alloc, &image->blit.buffer);
   if (result != VK_SUCCESS)
      return result;

   VkMemoryRequirements reqs;
   wsi->GetBufferMemoryRequirements(chain->device, image->blit.buffer, &reqs);
   /* linear_size was computed with enough alignment padding (see
    * wsi_configure_buffer_image) to satisfy the driver's size requirement. */
   assert(reqs.size <= info->linear_size);

   struct wsi_memory_allocate_info memory_wsi_info = {
      .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
      .pNext = NULL,
      .implicit_sync = implicit_sync,
   };
   VkMemoryDedicatedAllocateInfo buf_mem_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = &memory_wsi_info,
      .image = VK_NULL_HANDLE,
      .buffer = image->blit.buffer,
   };
   VkMemoryAllocateInfo buf_mem_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &buf_mem_dedicated_info,
      .allocationSize = info->linear_size,
      .memoryTypeIndex =
         info->select_blit_dst_memory_type(wsi, reqs.memoryTypeBits),
   };

   /* Software winsyses may hand us a shared-memory region to import as the
    * backing store instead of exporting a fresh allocation. */
   void *sw_host_ptr = NULL;
   if (info->alloc_shm)
      sw_host_ptr = info->alloc_shm(image, info->linear_size);

   /* Declared at function scope: these are chained into buf_mem_info's
    * pNext list and must stay alive until AllocateMemory below. */
   VkExportMemoryAllocateInfo memory_export_info;
   VkImportMemoryHostPointerInfoEXT host_ptr_info;
   if (sw_host_ptr != NULL) {
      host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
         .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
         .pHostPointer = sw_host_ptr,
         .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      };
      __vk_append_struct(&buf_mem_info, &host_ptr_info);
   } else if (handle_types != 0) {
      memory_export_info = (VkExportMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
         .handleTypes = handle_types,
      };
      __vk_append_struct(&buf_mem_info, &memory_export_info);
   }

   result = wsi->AllocateMemory(chain->device, &buf_mem_info,
                                &chain->alloc, &image->blit.memory);
   if (result != VK_SUCCESS)
      return result;

   result = wsi->BindBufferMemory(chain->device, image->blit.buffer,
                                  image->blit.memory, 0);
   if (result != VK_SUCCESS)
      return result;

   /* Now the memory backing the presentable image itself; only the buffer
    * is shared with the winsys, so the image may use whatever memory type
    * info->select_image_memory_type picks. */
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = NULL,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   const VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         info->select_image_memory_type(wsi, reqs.memoryTypeBits),
   };

   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   /* The winsys consumes the blit buffer as a single linear plane. */
   image->num_planes = 1;
   image->sizes[0] = info->linear_size;
   image->row_pitches[0] = info->linear_stride;
   image->offsets[0] = 0;

   return VK_SUCCESS;
}
1805
1806 VkResult
wsi_finish_create_blit_context(const struct wsi_swapchain * chain,const struct wsi_image_info * info,struct wsi_image * image)1807 wsi_finish_create_blit_context(const struct wsi_swapchain *chain,
1808 const struct wsi_image_info *info,
1809 struct wsi_image *image)
1810 {
1811 const struct wsi_device *wsi = chain->wsi;
1812 VkResult result;
1813
1814 int cmd_buffer_count =
1815 chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
1816 image->blit.cmd_buffers =
1817 vk_zalloc(&chain->alloc,
1818 sizeof(VkCommandBuffer) * cmd_buffer_count, 8,
1819 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1820 if (!image->blit.cmd_buffers)
1821 return VK_ERROR_OUT_OF_HOST_MEMORY;
1822
1823 for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1824 if (!chain->cmd_pools[i])
1825 continue;
1826
1827 const VkCommandBufferAllocateInfo cmd_buffer_info = {
1828 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
1829 .pNext = NULL,
1830 .commandPool = chain->cmd_pools[i],
1831 .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
1832 .commandBufferCount = 1,
1833 };
1834 result = wsi->AllocateCommandBuffers(chain->device, &cmd_buffer_info,
1835 &image->blit.cmd_buffers[i]);
1836 if (result != VK_SUCCESS)
1837 return result;
1838
1839 const VkCommandBufferBeginInfo begin_info = {
1840 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
1841 };
1842 wsi->BeginCommandBuffer(image->blit.cmd_buffers[i], &begin_info);
1843
1844 VkImageMemoryBarrier img_mem_barriers[] = {
1845 {
1846 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1847 .pNext = NULL,
1848 .srcAccessMask = 0,
1849 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
1850 .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1851 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1852 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1853 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1854 .image = image->image,
1855 .subresourceRange = {
1856 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1857 .baseMipLevel = 0,
1858 .levelCount = 1,
1859 .baseArrayLayer = 0,
1860 .layerCount = 1,
1861 },
1862 },
1863 {
1864 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1865 .pNext = NULL,
1866 .srcAccessMask = 0,
1867 .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
1868 .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1869 .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1870 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1871 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1872 .image = image->blit.image,
1873 .subresourceRange = {
1874 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1875 .baseMipLevel = 0,
1876 .levelCount = 1,
1877 .baseArrayLayer = 0,
1878 .layerCount = 1,
1879 },
1880 },
1881 };
1882 uint32_t img_mem_barrier_count =
1883 chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT ? 1 : 2;
1884 wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
1885 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1886 VK_PIPELINE_STAGE_TRANSFER_BIT,
1887 0,
1888 0, NULL,
1889 0, NULL,
1890 1, img_mem_barriers);
1891
1892 if (chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT) {
1893 struct VkBufferImageCopy buffer_image_copy = {
1894 .bufferOffset = 0,
1895 .bufferRowLength = info->linear_stride /
1896 vk_format_get_blocksize(info->create.format),
1897 .bufferImageHeight = 0,
1898 .imageSubresource = {
1899 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1900 .mipLevel = 0,
1901 .baseArrayLayer = 0,
1902 .layerCount = 1,
1903 },
1904 .imageOffset = { .x = 0, .y = 0, .z = 0 },
1905 .imageExtent = info->create.extent,
1906 };
1907 wsi->CmdCopyImageToBuffer(image->blit.cmd_buffers[i],
1908 image->image,
1909 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1910 image->blit.buffer,
1911 1, &buffer_image_copy);
1912 } else {
1913 struct VkImageCopy image_copy = {
1914 .srcSubresource = {
1915 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1916 .mipLevel = 0,
1917 .baseArrayLayer = 0,
1918 .layerCount = 1,
1919 },
1920 .srcOffset = { .x = 0, .y = 0, .z = 0 },
1921 .dstSubresource = {
1922 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1923 .mipLevel = 0,
1924 .baseArrayLayer = 0,
1925 .layerCount = 1,
1926 },
1927 .dstOffset = { .x = 0, .y = 0, .z = 0 },
1928 .extent = info->create.extent,
1929 };
1930
1931 wsi->CmdCopyImage(image->blit.cmd_buffers[i],
1932 image->image,
1933 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1934 image->blit.image,
1935 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1936 1, &image_copy);
1937 }
1938
1939 img_mem_barriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
1940 img_mem_barriers[0].dstAccessMask = 0;
1941 img_mem_barriers[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
1942 img_mem_barriers[0].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1943 img_mem_barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1944 img_mem_barriers[1].dstAccessMask = 0;
1945 img_mem_barriers[1].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1946 img_mem_barriers[1].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1947 wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
1948 VK_PIPELINE_STAGE_TRANSFER_BIT,
1949 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1950 0,
1951 0, NULL,
1952 0, NULL,
1953 img_mem_barrier_count, img_mem_barriers);
1954
1955 result = wsi->EndCommandBuffer(image->blit.cmd_buffers[i]);
1956 if (result != VK_SUCCESS)
1957 return result;
1958 }
1959
1960 return VK_SUCCESS;
1961 }
1962
1963 void
wsi_configure_buffer_image(UNUSED const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,uint32_t stride_align,uint32_t size_align,struct wsi_image_info * info)1964 wsi_configure_buffer_image(UNUSED const struct wsi_swapchain *chain,
1965 const VkSwapchainCreateInfoKHR *pCreateInfo,
1966 uint32_t stride_align, uint32_t size_align,
1967 struct wsi_image_info *info)
1968 {
1969 const struct wsi_device *wsi = chain->wsi;
1970
1971 assert(util_is_power_of_two_nonzero(stride_align));
1972 assert(util_is_power_of_two_nonzero(size_align));
1973
1974 info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1975 info->wsi.blit_src = true;
1976
1977 const uint32_t cpp = vk_format_get_blocksize(pCreateInfo->imageFormat);
1978 info->linear_stride = pCreateInfo->imageExtent.width * cpp;
1979 info->linear_stride = align(info->linear_stride, stride_align);
1980
1981 /* Since we can pick the stride to be whatever we want, also align to the
1982 * device's optimalBufferCopyRowPitchAlignment so we get efficient copies.
1983 */
1984 assert(wsi->optimalBufferCopyRowPitchAlignment > 0);
1985 info->linear_stride = align(info->linear_stride,
1986 wsi->optimalBufferCopyRowPitchAlignment);
1987
1988 info->linear_size = (uint64_t)info->linear_stride *
1989 pCreateInfo->imageExtent.height;
1990 info->linear_size = align64(info->linear_size, size_align);
1991
1992 info->finish_create = wsi_finish_create_blit_context;
1993 }
1994
1995 void
wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain * chain,struct wsi_image_info * info)1996 wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain *chain,
1997 struct wsi_image_info *info)
1998 {
1999 info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2000 info->wsi.blit_src = true;
2001 info->finish_create = wsi_finish_create_blit_context;
2002 }
2003
/**
 * Allocate and persistently map host-visible memory for a linearly-tiled
 * swapchain image, so a software winsys can read the pixels directly
 * through image->cpu_map.
 *
 * Used on the no-blit path (the image was created with
 * VK_IMAGE_TILING_LINEAR by wsi_configure_cpu_image). When the winsys
 * supplies a shared-memory allocator (info->alloc_shm), the region it
 * returns is imported as the backing host pointer.
 */
static VkResult
wsi_create_cpu_linear_image_mem(const struct wsi_swapchain *chain,
                                const struct wsi_image_info *info,
                                struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   VkMemoryRequirements reqs;
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   /* Query the linear layout so we can report the row pitch below. */
   VkSubresourceLayout layout;
   wsi->GetImageSubresourceLayout(chain->device, image->image,
                                  &(VkImageSubresource) {
                                     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                                     .mipLevel = 0,
                                     .arrayLayer = 0,
                                  }, &layout);
   /* The winsys reads from offset 0 of the mapping. */
   assert(layout.offset == 0);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         wsi_select_host_memory_type(wsi, reqs.memoryTypeBits),
   };

   void *sw_host_ptr = NULL;
   if (info->alloc_shm)
      sw_host_ptr = info->alloc_shm(image, layout.size);

   /* Must stay alive until AllocateMemory: chained into memory_info. */
   VkImportMemoryHostPointerInfoEXT host_ptr_info;
   if (sw_host_ptr != NULL) {
      host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
         .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
         .pHostPointer = sw_host_ptr,
         .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      };
      __vk_append_struct(&memory_info, &host_ptr_info);
   }

   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   /* Map once for the swapchain's lifetime; unmapped at image teardown. */
   result = wsi->MapMemory(chain->device, image->memory,
                           0, VK_WHOLE_SIZE, 0, &image->cpu_map);
   if (result != VK_SUCCESS)
      return result;

   image->num_planes = 1;
   image->sizes[0] = reqs.size;
   image->row_pitches[0] = layout.rowPitch;
   image->offsets[0] = 0;

   return VK_SUCCESS;
}
2068
2069 static VkResult
wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain * chain,const struct wsi_image_info * info,struct wsi_image * image)2070 wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain *chain,
2071 const struct wsi_image_info *info,
2072 struct wsi_image *image)
2073 {
2074 VkResult result;
2075
2076 result = wsi_create_buffer_blit_context(chain, info, image, 0,
2077 false /* implicit_sync */);
2078 if (result != VK_SUCCESS)
2079 return result;
2080
2081 result = chain->wsi->MapMemory(chain->device, image->blit.memory,
2082 0, VK_WHOLE_SIZE, 0, &image->cpu_map);
2083 if (result != VK_SUCCESS)
2084 return result;
2085
2086 return VK_SUCCESS;
2087 }
2088
2089 bool
wsi_cpu_image_needs_buffer_blit(const struct wsi_device * wsi,const struct wsi_cpu_image_params * params)2090 wsi_cpu_image_needs_buffer_blit(const struct wsi_device *wsi,
2091 const struct wsi_cpu_image_params *params)
2092 {
2093 if (WSI_DEBUG & WSI_DEBUG_BUFFER)
2094 return true;
2095
2096 if (wsi->wants_linear)
2097 return false;
2098
2099 return true;
2100 }
2101
2102 VkResult
wsi_configure_cpu_image(const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,const struct wsi_cpu_image_params * params,struct wsi_image_info * info)2103 wsi_configure_cpu_image(const struct wsi_swapchain *chain,
2104 const VkSwapchainCreateInfoKHR *pCreateInfo,
2105 const struct wsi_cpu_image_params *params,
2106 struct wsi_image_info *info)
2107 {
2108 assert(params->base.image_type == WSI_IMAGE_TYPE_CPU);
2109 assert(chain->blit.type == WSI_SWAPCHAIN_NO_BLIT ||
2110 chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);
2111
2112 VkExternalMemoryHandleTypeFlags handle_types = 0;
2113 if (params->alloc_shm && chain->blit.type != WSI_SWAPCHAIN_NO_BLIT)
2114 handle_types = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
2115
2116 VkResult result = wsi_configure_image(chain, pCreateInfo,
2117 handle_types, info);
2118 if (result != VK_SUCCESS)
2119 return result;
2120
2121 if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
2122 wsi_configure_buffer_image(chain, pCreateInfo,
2123 1 /* stride_align */,
2124 1 /* size_align */,
2125 info);
2126
2127 info->select_blit_dst_memory_type = wsi_select_host_memory_type;
2128 info->select_image_memory_type = wsi_select_device_memory_type;
2129 info->create_mem = wsi_create_cpu_buffer_image_mem;
2130 } else {
2131 /* Force the image to be linear */
2132 info->create.tiling = VK_IMAGE_TILING_LINEAR;
2133
2134 info->create_mem = wsi_create_cpu_linear_image_mem;
2135 }
2136
2137 info->alloc_shm = params->alloc_shm;
2138
2139 return VK_SUCCESS;
2140 }
2141
2142 VKAPI_ATTR VkResult VKAPI_CALL
wsi_WaitForPresentKHR(VkDevice device,VkSwapchainKHR _swapchain,uint64_t presentId,uint64_t timeout)2143 wsi_WaitForPresentKHR(VkDevice device, VkSwapchainKHR _swapchain,
2144 uint64_t presentId, uint64_t timeout)
2145 {
2146 VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
2147 assert(swapchain->wait_for_present);
2148 return swapchain->wait_for_present(swapchain, presentId, timeout);
2149 }
2150
2151 VkImageUsageFlags
wsi_caps_get_image_usage(void)2152 wsi_caps_get_image_usage(void)
2153 {
2154 return VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2155 VK_IMAGE_USAGE_SAMPLED_BIT |
2156 VK_IMAGE_USAGE_TRANSFER_DST_BIT |
2157 VK_IMAGE_USAGE_STORAGE_BIT |
2158 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
2159 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
2160 }
2161