1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "wsi_common_private.h"
25 #include "wsi_common_entrypoints.h"
26 #include "util/debug.h"
27 #include "util/macros.h"
28 #include "util/os_file.h"
29 #include "util/xmlconfig.h"
30 #include "vk_device.h"
31 #include "vk_fence.h"
32 #include "vk_format.h"
33 #include "vk_instance.h"
34 #include "vk_physical_device.h"
35 #include "vk_queue.h"
36 #include "vk_semaphore.h"
37 #include "vk_sync.h"
38 #include "vk_sync_dummy.h"
39 #include "vk_util.h"
40 
41 #include <time.h>
42 #include <stdlib.h>
43 #include <stdio.h>
44 
45 #ifndef _WIN32
46 #include <unistd.h>
47 #endif
48 
49 uint64_t WSI_DEBUG;
50 
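/* Flags parsed from the comma-separated MESA_VK_WSI_DEBUG environment
 * variable (see wsi_device_init below); each token sets one WSI_DEBUG_* bit.
 */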
51 static const struct debug_control debug_control[] = {
52    { "buffer",       WSI_DEBUG_BUFFER },
53    { "sw",           WSI_DEBUG_SW },
54    { "noshm",        WSI_DEBUG_NOSHM },
55    { "linear",       WSI_DEBUG_LINEAR },
56    { NULL, },
57 };
58 
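/* Illustrative sketch (not part of this file): a driver would typically call
 * wsi_device_init() once per physical device, roughly along the lines of
 *
 *    VkResult res = wsi_device_init(&pdev->wsi_device,
 *                                   physical_device_handle,
 *                                   driver_get_physical_device_proc_addr,
 *                                   &instance->alloc,
 *                                   -1, NULL, false);
 *
 * where pdev, physical_device_handle and driver_get_physical_device_proc_addr
 * are hypothetical driver-side names.
 */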
59 VkResult
60 wsi_device_init(struct wsi_device *wsi,
61                 VkPhysicalDevice pdevice,
62                 WSI_FN_GetPhysicalDeviceProcAddr proc_addr,
63                 const VkAllocationCallbacks *alloc,
64                 int display_fd,
65                 const struct driOptionCache *dri_options,
66                 bool sw_device)
67 {
68    const char *present_mode;
69    UNUSED VkResult result;
70 
71    WSI_DEBUG = parse_debug_string(getenv("MESA_VK_WSI_DEBUG"), debug_control);
72 
73    memset(wsi, 0, sizeof(*wsi));
74 
75    wsi->instance_alloc = *alloc;
76    wsi->pdevice = pdevice;
77    wsi->sw = sw_device || (WSI_DEBUG & WSI_DEBUG_SW);
78    wsi->wants_linear = (WSI_DEBUG & WSI_DEBUG_LINEAR) != 0;
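   /* WSI_GET_CB declares a local PFN_vk* pointer resolved through the
    * driver-provided proc_addr callback; the same macro is redefined further
    * down to store the resolved entrypoints in the wsi_device itself.
    */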
79 #define WSI_GET_CB(func) \
80    PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
81    WSI_GET_CB(GetPhysicalDeviceExternalSemaphoreProperties);
82    WSI_GET_CB(GetPhysicalDeviceProperties2);
83    WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
84    WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
85 #undef WSI_GET_CB
86 
87    wsi->pci_bus_info.sType =
88       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
89    VkPhysicalDeviceProperties2 pdp2 = {
90       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
91       .pNext = &wsi->pci_bus_info,
92    };
93    GetPhysicalDeviceProperties2(pdevice, &pdp2);
94 
95    wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
96    assert(pdp2.properties.limits.optimalBufferCopyRowPitchAlignment <= UINT32_MAX);
97    wsi->optimalBufferCopyRowPitchAlignment =
98       pdp2.properties.limits.optimalBufferCopyRowPitchAlignment;
99    wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;
100 
101    GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
102    GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);
103 
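   /* Probe each external semaphore handle type up to SYNC_FD and record the
    * exportable ones in wsi->semaphore_export_handle_types.
    */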
104    for (VkExternalSemaphoreHandleTypeFlags handle_type = 1;
105         handle_type <= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
106         handle_type <<= 1) {
107       const VkPhysicalDeviceExternalSemaphoreInfo esi = {
108          .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
109          .handleType = handle_type,
110       };
111       VkExternalSemaphoreProperties esp = {
112          .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
113       };
114       GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);
115 
116       if (esp.externalSemaphoreFeatures &
117           VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
118          wsi->semaphore_export_handle_types |= handle_type;
119    }
120 
121    const struct vk_device_extension_table *supported_extensions =
122       &vk_physical_device_from_handle(pdevice)->supported_extensions;
123    wsi->has_import_memory_host =
124       supported_extensions->EXT_external_memory_host;
125 
126    list_inithead(&wsi->hotplug_fences);
127 
128 #define WSI_GET_CB(func) \
129    wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
130    WSI_GET_CB(AllocateMemory);
131    WSI_GET_CB(AllocateCommandBuffers);
132    WSI_GET_CB(BindBufferMemory);
133    WSI_GET_CB(BindImageMemory);
134    WSI_GET_CB(BeginCommandBuffer);
135    WSI_GET_CB(CmdPipelineBarrier);
136    WSI_GET_CB(CmdCopyImageToBuffer);
137    WSI_GET_CB(CreateBuffer);
138    WSI_GET_CB(CreateCommandPool);
139    WSI_GET_CB(CreateFence);
140    WSI_GET_CB(CreateImage);
141    WSI_GET_CB(CreateSemaphore);
142    WSI_GET_CB(DestroyBuffer);
143    WSI_GET_CB(DestroyCommandPool);
144    WSI_GET_CB(DestroyFence);
145    WSI_GET_CB(DestroyImage);
146    WSI_GET_CB(DestroySemaphore);
147    WSI_GET_CB(EndCommandBuffer);
148    WSI_GET_CB(FreeMemory);
149    WSI_GET_CB(FreeCommandBuffers);
150    WSI_GET_CB(GetBufferMemoryRequirements);
151    WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
152    WSI_GET_CB(GetImageMemoryRequirements);
153    WSI_GET_CB(GetImageSubresourceLayout);
154    if (!wsi->sw)
155       WSI_GET_CB(GetMemoryFdKHR);
156    WSI_GET_CB(GetPhysicalDeviceFormatProperties);
157    WSI_GET_CB(GetPhysicalDeviceFormatProperties2KHR);
158    WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
159    WSI_GET_CB(GetSemaphoreFdKHR);
160    WSI_GET_CB(ResetFences);
161    WSI_GET_CB(QueueSubmit);
162    WSI_GET_CB(WaitForFences);
163    WSI_GET_CB(MapMemory);
164    WSI_GET_CB(UnmapMemory);
165 #undef WSI_GET_CB
166 
167 #ifdef VK_USE_PLATFORM_XCB_KHR
168    result = wsi_x11_init_wsi(wsi, alloc, dri_options);
169    if (result != VK_SUCCESS)
170       goto fail;
171 #endif
172 
173 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
174    result = wsi_wl_init_wsi(wsi, alloc, pdevice);
175    if (result != VK_SUCCESS)
176       goto fail;
177 #endif
178 
179 #ifdef VK_USE_PLATFORM_WIN32_KHR
180    result = wsi_win32_init_wsi(wsi, alloc, pdevice);
181    if (result != VK_SUCCESS)
182       goto fail;
183 #endif
184 
185 #ifdef VK_USE_PLATFORM_DISPLAY_KHR
186    result = wsi_display_init_wsi(wsi, alloc, display_fd);
187    if (result != VK_SUCCESS)
188       goto fail;
189 #endif
190 
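   /* MESA_VK_WSI_PRESENT_MODE accepts "fifo", "relaxed", "mailbox" or
    * "immediate" and overrides the present mode requested by the application
    * (see wsi_swapchain_get_present_mode).
    */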
191    present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
192    if (present_mode) {
193       if (!strcmp(present_mode, "fifo")) {
194          wsi->override_present_mode = VK_PRESENT_MODE_FIFO_KHR;
195       } else if (!strcmp(present_mode, "relaxed")) {
196          wsi->override_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
197       } else if (!strcmp(present_mode, "mailbox")) {
198          wsi->override_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
199       } else if (!strcmp(present_mode, "immediate")) {
200          wsi->override_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
201       } else {
202          fprintf(stderr, "Invalid MESA_VK_WSI_PRESENT_MODE value!\n");
203       }
204    }
205 
206    if (dri_options) {
207       if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
208          wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
209                                                      "adaptive_sync");
210 
211       if (driCheckOption(dri_options, "vk_wsi_force_bgra8_unorm_first",  DRI_BOOL)) {
212          wsi->force_bgra8_unorm_first =
213             driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
214       }
215    }
216 
217    return VK_SUCCESS;
218 #if defined(VK_USE_PLATFORM_XCB_KHR) || \
219    defined(VK_USE_PLATFORM_WAYLAND_KHR) || \
220    defined(VK_USE_PLATFORM_WIN32_KHR) || \
221    defined(VK_USE_PLATFORM_DISPLAY_KHR)
222 fail:
223    wsi_device_finish(wsi, alloc);
224    return result;
225 #endif
226 }
227 
228 void
229 wsi_device_finish(struct wsi_device *wsi,
230                   const VkAllocationCallbacks *alloc)
231 {
232 #ifdef VK_USE_PLATFORM_DISPLAY_KHR
233    wsi_display_finish_wsi(wsi, alloc);
234 #endif
235 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
236    wsi_wl_finish_wsi(wsi, alloc);
237 #endif
238 #ifdef VK_USE_PLATFORM_WIN32_KHR
239    wsi_win32_finish_wsi(wsi, alloc);
240 #endif
241 #ifdef VK_USE_PLATFORM_XCB_KHR
242    wsi_x11_finish_wsi(wsi, alloc);
243 #endif
244 }
245 
246 VKAPI_ATTR void VKAPI_CALL
247 wsi_DestroySurfaceKHR(VkInstance _instance,
248                       VkSurfaceKHR _surface,
249                       const VkAllocationCallbacks *pAllocator)
250 {
251    VK_FROM_HANDLE(vk_instance, instance, _instance);
252    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
253 
254    if (!surface)
255       return;
256 
257    vk_free2(&instance->alloc, pAllocator, surface);
258 }
259 
260 void
261 wsi_device_setup_syncobj_fd(struct wsi_device *wsi_device,
262                             int fd)
263 {
264 #ifdef VK_USE_PLATFORM_DISPLAY_KHR
265    wsi_display_setup_syncobj_fd(wsi_device, fd);
266 #endif
267 }
268 
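/* Common swapchain setup shared by the platform backends: it initializes the
 * base object, decides whether the buffer-blit path is used, and creates one
 * command pool per queue family (or a single pool when a dedicated blit
 * queue is available).
 */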
269 VkResult
270 wsi_swapchain_init(const struct wsi_device *wsi,
271                    struct wsi_swapchain *chain,
272                    VkDevice _device,
273                    const VkSwapchainCreateInfoKHR *pCreateInfo,
274                    const VkAllocationCallbacks *pAllocator,
275                    bool use_buffer_blit)
276 {
277    VK_FROM_HANDLE(vk_device, device, _device);
278    VkResult result;
279 
280    memset(chain, 0, sizeof(*chain));
281 
282    vk_object_base_init(device, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);
283 
284    chain->wsi = wsi;
285    chain->device = _device;
286    chain->alloc = *pAllocator;
287 
288    chain->use_buffer_blit = use_buffer_blit || (WSI_DEBUG & WSI_DEBUG_BUFFER);
289    if (wsi->sw && !wsi->wants_linear)
290       chain->use_buffer_blit = true;
291 
292    chain->buffer_blit_queue = VK_NULL_HANDLE;
293    if (use_buffer_blit && wsi->get_buffer_blit_queue)
294       chain->buffer_blit_queue = wsi->get_buffer_blit_queue(_device);
295 
296    int cmd_pools_count = chain->buffer_blit_queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
297 
298    chain->cmd_pools =
299       vk_zalloc(pAllocator, sizeof(VkCommandPool) * cmd_pools_count, 8,
300                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
301    if (!chain->cmd_pools)
302       return VK_ERROR_OUT_OF_HOST_MEMORY;
303 
304    for (uint32_t i = 0; i < cmd_pools_count; i++) {
305       int queue_family_index = i;
306 
307       if (chain->buffer_blit_queue != VK_NULL_HANDLE) {
308          VK_FROM_HANDLE(vk_queue, queue, chain->buffer_blit_queue);
309          queue_family_index = queue->queue_family_index;
310       }
311       const VkCommandPoolCreateInfo cmd_pool_info = {
312          .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
313          .pNext = NULL,
314          .flags = 0,
315          .queueFamilyIndex = queue_family_index,
316       };
317       result = wsi->CreateCommandPool(_device, &cmd_pool_info, &chain->alloc,
318                                       &chain->cmd_pools[i]);
319       if (result != VK_SUCCESS)
320          goto fail;
321    }
322 
323    return VK_SUCCESS;
324 
325 fail:
326    wsi_swapchain_finish(chain);
327    return result;
328 }
329 
330 static bool
331 wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
332                                         const VkSwapchainCreateInfoKHR *pCreateInfo,
333                                         VkPresentModeKHR mode)
334 {
335       ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
336       struct wsi_interface *iface = wsi->wsi[surface->platform];
337       VkPresentModeKHR *present_modes;
338       uint32_t present_mode_count;
339       bool supported = false;
340       VkResult result;
341 
342       result = iface->get_present_modes(surface, &present_mode_count, NULL);
343       if (result != VK_SUCCESS)
344          return supported;
345 
346       present_modes = malloc(present_mode_count * sizeof(*present_modes));
347       if (!present_modes)
348          return supported;
349 
350       result = iface->get_present_modes(surface, &present_mode_count,
351                                         present_modes);
352       if (result != VK_SUCCESS)
353          goto fail;
354 
355       for (uint32_t i = 0; i < present_mode_count; i++) {
356          if (present_modes[i] == mode) {
357             supported = true;
358             break;
359          }
360       }
361 
362 fail:
363       free(present_modes);
364       return supported;
365 }
366 
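/* Returns the MESA_VK_WSI_PRESENT_MODE override when the surface supports
 * it, otherwise the mode the application asked for.
 */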
367 enum VkPresentModeKHR
368 wsi_swapchain_get_present_mode(struct wsi_device *wsi,
369                                const VkSwapchainCreateInfoKHR *pCreateInfo)
370 {
371    if (wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR)
372       return pCreateInfo->presentMode;
373 
374    if (!wsi_swapchain_is_present_mode_supported(wsi, pCreateInfo,
375                                                 wsi->override_present_mode)) {
376       fprintf(stderr, "Unsupported MESA_VK_WSI_PRESENT_MODE value!\n");
377       return pCreateInfo->presentMode;
378    }
379 
380    return wsi->override_present_mode;
381 }
382 
383 void
384 wsi_swapchain_finish(struct wsi_swapchain *chain)
385 {
386    if (chain->fences) {
387       for (unsigned i = 0; i < chain->image_count; i++)
388          chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);
389 
390       vk_free(&chain->alloc, chain->fences);
391    }
392    if (chain->buffer_blit_semaphores) {
393       for (unsigned i = 0; i < chain->image_count; i++)
394          chain->wsi->DestroySemaphore(chain->device, chain->buffer_blit_semaphores[i], &chain->alloc);
395 
396       vk_free(&chain->alloc, chain->buffer_blit_semaphores);
397    }
398    chain->wsi->DestroySemaphore(chain->device, chain->dma_buf_semaphore,
399                                 &chain->alloc);
400 
401    int cmd_pools_count = chain->buffer_blit_queue != VK_NULL_HANDLE ?
402       1 : chain->wsi->queue_family_count;
403    for (uint32_t i = 0; i < cmd_pools_count; i++) {
404       chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
405                                      &chain->alloc);
406    }
407    vk_free(&chain->alloc, chain->cmd_pools);
408 
409    vk_object_base_finish(&chain->base);
410 }
411 
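/* Fills *info with the VkImageCreateInfo (and its pNext chain) shared by all
 * images of a swapchain, duplicating the queue family list for CONCURRENT
 * sharing and the view-format list for mutable-format swapchains.
 */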
412 VkResult
413 wsi_configure_image(const struct wsi_swapchain *chain,
414                     const VkSwapchainCreateInfoKHR *pCreateInfo,
415                     VkExternalMemoryHandleTypeFlags handle_types,
416                     struct wsi_image_info *info)
417 {
418    memset(info, 0, sizeof(*info));
419    uint32_t queue_family_count = 1;
420 
421    if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
422       queue_family_count = pCreateInfo->queueFamilyIndexCount;
423 
424    /*
425     * TODO: there should be no reason to allocate this, but
426     * 15331 shows that games crashed without doing this.
427     */
428    uint32_t *queue_family_indices =
429       vk_alloc(&chain->alloc,
430                sizeof(*queue_family_indices) *
431                queue_family_count,
432                8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
433    if (!queue_family_indices)
434       goto err_oom;
435 
436    if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
437       for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; i++)
438          queue_family_indices[i] = pCreateInfo->pQueueFamilyIndices[i];
439 
440    info->create = (VkImageCreateInfo) {
441       .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
442       .flags = VK_IMAGE_CREATE_ALIAS_BIT,
443       .imageType = VK_IMAGE_TYPE_2D,
444       .format = pCreateInfo->imageFormat,
445       .extent = {
446          .width = pCreateInfo->imageExtent.width,
447          .height = pCreateInfo->imageExtent.height,
448          .depth = 1,
449       },
450       .mipLevels = 1,
451       .arrayLayers = 1,
452       .samples = VK_SAMPLE_COUNT_1_BIT,
453       .tiling = VK_IMAGE_TILING_OPTIMAL,
454       .usage = pCreateInfo->imageUsage,
455       .sharingMode = pCreateInfo->imageSharingMode,
456       .queueFamilyIndexCount = queue_family_count,
457       .pQueueFamilyIndices = queue_family_indices,
458       .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
459    };
460 
461    if (handle_types != 0) {
462       info->ext_mem = (VkExternalMemoryImageCreateInfo) {
463          .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
464          .handleTypes = handle_types,
465       };
466       __vk_append_struct(&info->create, &info->ext_mem);
467    }
468 
469    info->wsi = (struct wsi_image_create_info) {
470       .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
471    };
472    __vk_append_struct(&info->create, &info->wsi);
473 
474    if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
475       info->create.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
476                             VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;
477 
478       const VkImageFormatListCreateInfo *format_list_in =
479          vk_find_struct_const(pCreateInfo->pNext,
480                               IMAGE_FORMAT_LIST_CREATE_INFO);
481 
482       assume(format_list_in && format_list_in->viewFormatCount > 0);
483 
484       const uint32_t view_format_count = format_list_in->viewFormatCount;
485       VkFormat *view_formats =
486          vk_alloc(&chain->alloc, sizeof(VkFormat) * view_format_count,
487                   8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
488       if (!view_formats)
489          goto err_oom;
490 
491       ASSERTED bool format_found = false;
492       for (uint32_t i = 0; i < format_list_in->viewFormatCount; i++) {
493          if (pCreateInfo->imageFormat == format_list_in->pViewFormats[i])
494             format_found = true;
495          view_formats[i] = format_list_in->pViewFormats[i];
496       }
497       assert(format_found);
498 
499       info->format_list = (VkImageFormatListCreateInfo) {
500          .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,
501          .viewFormatCount = view_format_count,
502          .pViewFormats = view_formats,
503       };
504       __vk_append_struct(&info->create, &info->format_list);
505    }
506 
507    return VK_SUCCESS;
508 
509 err_oom:
510    wsi_destroy_image_info(chain, info);
511    return VK_ERROR_OUT_OF_HOST_MEMORY;
512 }
513 
514 void
515 wsi_destroy_image_info(const struct wsi_swapchain *chain,
516                        struct wsi_image_info *info)
517 {
518    vk_free(&chain->alloc, (void *)info->create.pQueueFamilyIndices);
519    vk_free(&chain->alloc, (void *)info->format_list.pViewFormats);
520    vk_free(&chain->alloc, (void *)info->drm_mod_list.pDrmFormatModifiers);
521    vk_free(&chain->alloc, info->modifier_props);
522 }
523 
524 VkResult
525 wsi_create_image(const struct wsi_swapchain *chain,
526                  const struct wsi_image_info *info,
527                  struct wsi_image *image)
528 {
529    const struct wsi_device *wsi = chain->wsi;
530    VkResult result;
531 
532    memset(image, 0, sizeof(*image));
533 
534 #ifndef _WIN32
535    image->dma_buf_fd = -1;
536 #endif
537 
538    result = wsi->CreateImage(chain->device, &info->create,
539                              &chain->alloc, &image->image);
540    if (result != VK_SUCCESS)
541       goto fail;
542 
543    result = info->create_mem(chain, info, image);
544    if (result != VK_SUCCESS)
545       goto fail;
546 
547    result = wsi->BindImageMemory(chain->device, image->image,
548                                  image->memory, 0);
549    if (result != VK_SUCCESS)
550       goto fail;
551 
552    if (info->finish_create) {
553       result = info->finish_create(chain, info, image);
554       if (result != VK_SUCCESS)
555          goto fail;
556    }
557 
558    return VK_SUCCESS;
559 
560 fail:
561    wsi_destroy_image(chain, image);
562    return result;
563 }
564 
565 void
566 wsi_destroy_image(const struct wsi_swapchain *chain,
567                   struct wsi_image *image)
568 {
569    const struct wsi_device *wsi = chain->wsi;
570 
571 #ifndef _WIN32
572    if (image->dma_buf_fd >= 0)
573       close(image->dma_buf_fd);
574 #endif
575 
576    if (image->cpu_map != NULL) {
577       wsi->UnmapMemory(chain->device, image->buffer.buffer != VK_NULL_HANDLE ?
578                                       image->buffer.memory : image->memory);
579    }
580 
581    if (image->buffer.blit_cmd_buffers) {
582       int cmd_buffer_count =
583          chain->buffer_blit_queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
584 
585       for (uint32_t i = 0; i < cmd_buffer_count; i++) {
586          wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
587                                  1, &image->buffer.blit_cmd_buffers[i]);
588       }
589       vk_free(&chain->alloc, image->buffer.blit_cmd_buffers);
590    }
591 
592    wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
593    wsi->DestroyImage(chain->device, image->image, &chain->alloc);
594    wsi->FreeMemory(chain->device, image->buffer.memory, &chain->alloc);
595    wsi->DestroyBuffer(chain->device, image->buffer.buffer, &chain->alloc);
596 }
597 
598 VKAPI_ATTR VkResult VKAPI_CALL
599 wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
600                                        uint32_t queueFamilyIndex,
601                                        VkSurfaceKHR _surface,
602                                        VkBool32 *pSupported)
603 {
604    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
605    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
606    struct wsi_device *wsi_device = device->wsi_device;
607    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
608 
609    return iface->get_support(surface, wsi_device,
610                              queueFamilyIndex, pSupported);
611 }
612 
613 VKAPI_ATTR VkResult VKAPI_CALL
614 wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(
615    VkPhysicalDevice physicalDevice,
616    VkSurfaceKHR _surface,
617    VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
618 {
619    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
620    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
621    struct wsi_device *wsi_device = device->wsi_device;
622    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
623 
624    VkSurfaceCapabilities2KHR caps2 = {
625       .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
626    };
627 
628    VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
629 
630    if (result == VK_SUCCESS)
631       *pSurfaceCapabilities = caps2.surfaceCapabilities;
632 
633    return result;
634 }
635 
636 VKAPI_ATTR VkResult VKAPI_CALL
637 wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(
638    VkPhysicalDevice physicalDevice,
639    const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
640    VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
641 {
642    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
643    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
644    struct wsi_device *wsi_device = device->wsi_device;
645    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
646 
647    return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
648                                    pSurfaceCapabilities);
649 }
650 
651 VKAPI_ATTR VkResult VKAPI_CALL
652 wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(
653    VkPhysicalDevice physicalDevice,
654    VkSurfaceKHR _surface,
655    VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
656 {
657    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
658    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
659    struct wsi_device *wsi_device = device->wsi_device;
660    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
661 
662    assert(pSurfaceCapabilities->sType ==
663           VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT);
664 
665    struct wsi_surface_supported_counters counters = {
666       .sType = VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA,
667       .pNext = pSurfaceCapabilities->pNext,
668       .supported_surface_counters = 0,
669    };
670 
671    VkSurfaceCapabilities2KHR caps2 = {
672       .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
673       .pNext = &counters,
674    };
675 
676    VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
677 
678    if (result == VK_SUCCESS) {
679       VkSurfaceCapabilities2EXT *ext_caps = pSurfaceCapabilities;
680       VkSurfaceCapabilitiesKHR khr_caps = caps2.surfaceCapabilities;
681 
682       ext_caps->minImageCount = khr_caps.minImageCount;
683       ext_caps->maxImageCount = khr_caps.maxImageCount;
684       ext_caps->currentExtent = khr_caps.currentExtent;
685       ext_caps->minImageExtent = khr_caps.minImageExtent;
686       ext_caps->maxImageExtent = khr_caps.maxImageExtent;
687       ext_caps->maxImageArrayLayers = khr_caps.maxImageArrayLayers;
688       ext_caps->supportedTransforms = khr_caps.supportedTransforms;
689       ext_caps->currentTransform = khr_caps.currentTransform;
690       ext_caps->supportedCompositeAlpha = khr_caps.supportedCompositeAlpha;
691       ext_caps->supportedUsageFlags = khr_caps.supportedUsageFlags;
692       ext_caps->supportedSurfaceCounters = counters.supported_surface_counters;
693    }
694 
695    return result;
696 }
697 
698 VKAPI_ATTR VkResult VKAPI_CALL
699 wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
700                                        VkSurfaceKHR _surface,
701                                        uint32_t *pSurfaceFormatCount,
702                                        VkSurfaceFormatKHR *pSurfaceFormats)
703 {
704    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
705    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
706    struct wsi_device *wsi_device = device->wsi_device;
707    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
708 
709    return iface->get_formats(surface, wsi_device,
710                              pSurfaceFormatCount, pSurfaceFormats);
711 }
712 
713 VKAPI_ATTR VkResult VKAPI_CALL
714 wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
715                                         const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,
716                                         uint32_t *pSurfaceFormatCount,
717                                         VkSurfaceFormat2KHR *pSurfaceFormats)
718 {
719    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
720    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
721    struct wsi_device *wsi_device = device->wsi_device;
722    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
723 
724    return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
725                               pSurfaceFormatCount, pSurfaceFormats);
726 }
727 
728 VKAPI_ATTR VkResult VKAPI_CALL
729 wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
730                                             VkSurfaceKHR _surface,
731                                             uint32_t *pPresentModeCount,
732                                             VkPresentModeKHR *pPresentModes)
733 {
734    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
735    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
736    struct wsi_device *wsi_device = device->wsi_device;
737    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
738 
739    return iface->get_present_modes(surface, pPresentModeCount,
740                                    pPresentModes);
741 }
742 
743 VKAPI_ATTR VkResult VKAPI_CALL
744 wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
745                                           VkSurfaceKHR _surface,
746                                           uint32_t *pRectCount,
747                                           VkRect2D *pRects)
748 {
749    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
750    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
751    struct wsi_device *wsi_device = device->wsi_device;
752    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
753 
754    return iface->get_present_rectangles(surface, wsi_device,
755                                         pRectCount, pRects);
756 }
757 
758 VKAPI_ATTR VkResult VKAPI_CALL
759 wsi_CreateSwapchainKHR(VkDevice _device,
760                        const VkSwapchainCreateInfoKHR *pCreateInfo,
761                        const VkAllocationCallbacks *pAllocator,
762                        VkSwapchainKHR *pSwapchain)
763 {
764    VK_FROM_HANDLE(vk_device, device, _device);
765    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
766    struct wsi_device *wsi_device = device->physical->wsi_device;
767    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
768    const VkAllocationCallbacks *alloc;
769    struct wsi_swapchain *swapchain;
770 
771    if (pAllocator)
772      alloc = pAllocator;
773    else
774      alloc = &device->alloc;
775 
776    VkResult result = iface->create_swapchain(surface, _device, wsi_device,
777                                              pCreateInfo, alloc,
778                                              &swapchain);
779    if (result != VK_SUCCESS)
780       return result;
781 
782    swapchain->fences = vk_zalloc(alloc,
783                                  sizeof (*swapchain->fences) * swapchain->image_count,
784                                  sizeof (*swapchain->fences),
785                                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
786    if (!swapchain->fences) {
787       swapchain->destroy(swapchain, alloc);
788       return VK_ERROR_OUT_OF_HOST_MEMORY;
789    }
790 
791    if (swapchain->buffer_blit_queue != VK_NULL_HANDLE) {
792       swapchain->buffer_blit_semaphores = vk_zalloc(alloc,
793                                          sizeof (*swapchain->buffer_blit_semaphores) * swapchain->image_count,
794                                          sizeof (*swapchain->buffer_blit_semaphores),
795                                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
796       if (!swapchain->buffer_blit_semaphores) {
797          swapchain->destroy(swapchain, alloc);
798          return VK_ERROR_OUT_OF_HOST_MEMORY;
799       }
800    }
801 
802    *pSwapchain = wsi_swapchain_to_handle(swapchain);
803 
804    return VK_SUCCESS;
805 }
806 
807 VKAPI_ATTR void VKAPI_CALL
808 wsi_DestroySwapchainKHR(VkDevice _device,
809                         VkSwapchainKHR _swapchain,
810                         const VkAllocationCallbacks *pAllocator)
811 {
812    VK_FROM_HANDLE(vk_device, device, _device);
813    VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
814    const VkAllocationCallbacks *alloc;
815 
816    if (!swapchain)
817       return;
818 
819    if (pAllocator)
820      alloc = pAllocator;
821    else
822      alloc = &device->alloc;
823 
824    swapchain->destroy(swapchain, alloc);
825 }
826 
827 VkResult
828 wsi_common_get_images(VkSwapchainKHR _swapchain,
829                       uint32_t *pSwapchainImageCount,
830                       VkImage *pSwapchainImages)
831 {
832    VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
833    VK_OUTARRAY_MAKE_TYPED(VkImage, images, pSwapchainImages, pSwapchainImageCount);
834 
835    for (uint32_t i = 0; i < swapchain->image_count; i++) {
836       vk_outarray_append_typed(VkImage, &images, image) {
837          *image = swapchain->get_wsi_image(swapchain, i)->image;
838       }
839    }
840 
841    return vk_outarray_status(&images);
842 }
843 
844 VkImage
845 wsi_common_get_image(VkSwapchainKHR _swapchain, uint32_t index)
846 {
847    VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
848    assert(index < swapchain->image_count);
849    return swapchain->get_wsi_image(swapchain, index)->image;
850 }
851 
852 VKAPI_ATTR VkResult VKAPI_CALL
853 wsi_GetSwapchainImagesKHR(VkDevice device,
854                           VkSwapchainKHR swapchain,
855                           uint32_t *pSwapchainImageCount,
856                           VkImage *pSwapchainImages)
857 {
858    return wsi_common_get_images(swapchain,
859                                 pSwapchainImageCount,
860                                 pSwapchainImages);
861 }
862 
863 VKAPI_ATTR VkResult VKAPI_CALL
864 wsi_AcquireNextImageKHR(VkDevice _device,
865                         VkSwapchainKHR swapchain,
866                         uint64_t timeout,
867                         VkSemaphore semaphore,
868                         VkFence fence,
869                         uint32_t *pImageIndex)
870 {
871    VK_FROM_HANDLE(vk_device, device, _device);
872 
873    const VkAcquireNextImageInfoKHR acquire_info = {
874       .sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
875       .swapchain = swapchain,
876       .timeout = timeout,
877       .semaphore = semaphore,
878       .fence = fence,
879       .deviceMask = 0,
880    };
881 
882    return device->dispatch_table.AcquireNextImage2KHR(_device, &acquire_info,
883                                                       pImageIndex);
884 }
885 
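/* On acquire, the application's semaphore is signaled by re-targeting its
 * temporary payload: preferably a sync object created from the image's
 * dma-buf, otherwise a memory-based sync, otherwise a no-op dummy sync.
 */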
886 static VkResult
887 wsi_signal_semaphore_for_image(struct vk_device *device,
888                                const struct wsi_swapchain *chain,
889                                const struct wsi_image *image,
890                                VkSemaphore _semaphore)
891 {
892    if (device->physical->supported_sync_types == NULL)
893       return VK_SUCCESS;
894 
895    VK_FROM_HANDLE(vk_semaphore, semaphore, _semaphore);
896 
897    vk_semaphore_reset_temporary(device, semaphore);
898 
899 #ifdef HAVE_LIBDRM
900    VkResult result = wsi_create_sync_for_dma_buf_wait(chain, image,
901                                                       VK_SYNC_FEATURE_GPU_WAIT,
902                                                       &semaphore->temporary);
903    if (result != VK_ERROR_FEATURE_NOT_PRESENT)
904       return result;
905 #endif
906 
907    if (chain->wsi->signal_semaphore_with_memory) {
908       return device->create_sync_for_memory(device, image->memory,
909                                             false /* signal_memory */,
910                                             &semaphore->temporary);
911    } else {
912       return vk_sync_create(device, &vk_sync_dummy_type,
913                             0 /* flags */, 0 /* initial_value */,
914                             &semaphore->temporary);
915    }
916 }
917 
918 static VkResult
919 wsi_signal_fence_for_image(struct vk_device *device,
920                            const struct wsi_swapchain *chain,
921                            const struct wsi_image *image,
922                            VkFence _fence)
923 {
924    if (device->physical->supported_sync_types == NULL)
925       return VK_SUCCESS;
926 
927    VK_FROM_HANDLE(vk_fence, fence, _fence);
928 
929    vk_fence_reset_temporary(device, fence);
930 
931 #ifdef HAVE_LIBDRM
932    VkResult result = wsi_create_sync_for_dma_buf_wait(chain, image,
933                                                       VK_SYNC_FEATURE_CPU_WAIT,
934                                                       &fence->temporary);
935    if (result != VK_ERROR_FEATURE_NOT_PRESENT)
936       return result;
937 #endif
938 
939    if (chain->wsi->signal_fence_with_memory) {
940       return device->create_sync_for_memory(device, image->memory,
941                                             false /* signal_memory */,
942                                             &fence->temporary);
943    } else {
944       return vk_sync_create(device, &vk_sync_dummy_type,
945                             0 /* flags */, 0 /* initial_value */,
946                             &fence->temporary);
947    }
948 }
949 
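/* Acquires the next image from the platform backend, then arms the
 * application's semaphore and/or fence for that image and, if the driver
 * tracks it, marks the image memory as application-owned.
 */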
950 VkResult
951 wsi_common_acquire_next_image2(const struct wsi_device *wsi,
952                                VkDevice _device,
953                                const VkAcquireNextImageInfoKHR *pAcquireInfo,
954                                uint32_t *pImageIndex)
955 {
956    VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);
957    VK_FROM_HANDLE(vk_device, device, _device);
958 
959    VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
960                                                    pImageIndex);
961    if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
962       return result;
963    struct wsi_image *image =
964       swapchain->get_wsi_image(swapchain, *pImageIndex);
965 
966    if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
967       VkResult signal_result =
968          wsi_signal_semaphore_for_image(device, swapchain, image,
969                                         pAcquireInfo->semaphore);
970       if (signal_result != VK_SUCCESS)
971          return signal_result;
972    }
973 
974    if (pAcquireInfo->fence != VK_NULL_HANDLE) {
975       VkResult signal_result =
976          wsi_signal_fence_for_image(device, swapchain, image,
977                                     pAcquireInfo->fence);
978       if (signal_result != VK_SUCCESS)
979          return signal_result;
980    }
981 
982    if (wsi->set_memory_ownership)
983       wsi->set_memory_ownership(swapchain->device, image->memory, true);
984 
985    return result;
986 }
987 
988 VKAPI_ATTR VkResult VKAPI_CALL
989 wsi_AcquireNextImage2KHR(VkDevice _device,
990                          const VkAcquireNextImageInfoKHR *pAcquireInfo,
991                          uint32_t *pImageIndex)
992 {
993    VK_FROM_HANDLE(vk_device, device, _device);
994 
995    return wsi_common_acquire_next_image2(device->physical->wsi_device,
996                                          _device, pAcquireInfo, pImageIndex);
997 }
998 
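/* Present path: for each swapchain, (re)use a per-image fence, submit an
 * optionally blitting VkSubmitInfo that waits on the application's
 * semaphores only for the first swapchain, then hand the image to the
 * platform's queue_present hook.
 */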
999 VkResult
1000 wsi_common_queue_present(const struct wsi_device *wsi,
1001                          VkDevice device,
1002                          VkQueue queue,
1003                          int queue_family_index,
1004                          const VkPresentInfoKHR *pPresentInfo)
1005 {
1006    VkResult final_result = VK_SUCCESS;
1007 
1008    STACK_ARRAY(VkPipelineStageFlags, stage_flags,
1009                MAX2(1, pPresentInfo->waitSemaphoreCount));
1010    for (uint32_t s = 0; s < MAX2(1, pPresentInfo->waitSemaphoreCount); s++)
1011       stage_flags[s] = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1012 
1013    const VkPresentRegionsKHR *regions =
1014       vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
1015 
1016    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
1017       VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
1018       uint32_t image_index = pPresentInfo->pImageIndices[i];
1019       VkResult result;
1020 
1021       if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
1022          const VkFenceCreateInfo fence_info = {
1023             .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
1024             .pNext = NULL,
1025             .flags = VK_FENCE_CREATE_SIGNALED_BIT,
1026          };
1027          result = wsi->CreateFence(device, &fence_info,
1028                                    &swapchain->alloc,
1029                                    &swapchain->fences[image_index]);
1030          if (result != VK_SUCCESS)
1031             goto fail_present;
1032 
1033          if (swapchain->use_buffer_blit && swapchain->buffer_blit_queue != VK_NULL_HANDLE) {
1034             const VkSemaphoreCreateInfo sem_info = {
1035                .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
1036                .pNext = NULL,
1037                .flags = 0,
1038             };
1039             result = wsi->CreateSemaphore(device, &sem_info,
1040                                           &swapchain->alloc,
1041                                           &swapchain->buffer_blit_semaphores[image_index]);
1042             if (result != VK_SUCCESS)
1043                goto fail_present;
1044          }
1045       } else {
1046          result =
1047             wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1048                                true, ~0ull);
1049          if (result != VK_SUCCESS)
1050             goto fail_present;
1051       }
1052 
1053       result = wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
1054       if (result != VK_SUCCESS)
1055          goto fail_present;
1056 
1057       VkSubmitInfo submit_info = {
1058          .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1059       };
1060 
1061       if (i == 0) {
1062          /* We only need/want to wait on semaphores once.  After that, we're
1063           * guaranteed ordering since it all happens on the same queue.
1064           */
1065          submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
1066          submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;
1067          submit_info.pWaitDstStageMask = stage_flags;
1068       }
1069 
1070       struct wsi_image *image =
1071          swapchain->get_wsi_image(swapchain, image_index);
1072 
1073       VkQueue submit_queue = queue;
1074       if (swapchain->use_buffer_blit) {
1075          if (swapchain->buffer_blit_queue == VK_NULL_HANDLE) {
1076             submit_info.commandBufferCount = 1;
1077             submit_info.pCommandBuffers =
1078                &image->buffer.blit_cmd_buffers[queue_family_index];
1079          } else {
1080             /* If we are using a blit using the driver's private queue, then
1081              * do an empty submit signalling a semaphore, and then submit the
1082              * blit waiting on that.  This ensures proper queue ordering of
1083              * vkQueueSubmit() calls.
1084              */
1085             submit_info.signalSemaphoreCount = 1;
1086             submit_info.pSignalSemaphores =
1087                &swapchain->buffer_blit_semaphores[image_index];
1088 
1089             result = wsi->QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
1090             if (result != VK_SUCCESS)
1091                goto fail_present;
1092 
1093             /* Now prepare the blit submit.  It needs to then wait on the
1094              * semaphore we signaled above.
1095              */
1096             submit_queue = swapchain->buffer_blit_queue;
1097             submit_info.waitSemaphoreCount = 1;
1098             submit_info.pWaitSemaphores = submit_info.pSignalSemaphores;
1099             submit_info.signalSemaphoreCount = 0;
1100             submit_info.pSignalSemaphores = NULL;
1101             submit_info.commandBufferCount = 1;
1102             submit_info.pCommandBuffers = &image->buffer.blit_cmd_buffers[0];
1103             submit_info.pWaitDstStageMask = stage_flags;
1104          }
1105       }
1106 
1107       VkFence fence = swapchain->fences[image_index];
1108 
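      /* With libdrm, try to signal the image's dma-buf explicitly: attach
       * swapchain->dma_buf_semaphore to this submit and hand its payload to
       * the dma-buf after QueueSubmit.  If the driver cannot do that, fall
       * back to the implicit wsi_memory_signal_submit_info path below.
       */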
1109       bool has_signal_dma_buf = false;
1110 #ifdef HAVE_LIBDRM
1111       result = wsi_prepare_signal_dma_buf_from_semaphore(swapchain, image);
1112       if (result == VK_SUCCESS) {
1113          assert(submit_info.signalSemaphoreCount == 0);
1114          submit_info.signalSemaphoreCount = 1;
1115          submit_info.pSignalSemaphores = &swapchain->dma_buf_semaphore;
1116          has_signal_dma_buf = true;
1117       } else if (result == VK_ERROR_FEATURE_NOT_PRESENT) {
1118          result = VK_SUCCESS;
1119          has_signal_dma_buf = false;
1120       } else {
1121          goto fail_present;
1122       }
1123 #endif
1124 
1125       struct wsi_memory_signal_submit_info mem_signal;
1126       if (!has_signal_dma_buf) {
1127          /* If we don't have dma-buf signaling, signal the memory object by
1128           * chaining wsi_memory_signal_submit_info into VkSubmitInfo.
1129           */
1130          result = VK_SUCCESS;
1131          has_signal_dma_buf = false;
1132          mem_signal = (struct wsi_memory_signal_submit_info) {
1133             .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
1134             .memory = image->memory,
1135          };
1136          __vk_append_struct(&submit_info, &mem_signal);
1137       }
1138 
1139       result = wsi->QueueSubmit(submit_queue, 1, &submit_info, fence);
1140       if (result != VK_SUCCESS)
1141          goto fail_present;
1142 
1143 #ifdef HAVE_LIBDRM
1144       if (has_signal_dma_buf) {
1145          result = wsi_signal_dma_buf_from_semaphore(swapchain, image);
1146          if (result != VK_SUCCESS)
1147             goto fail_present;
1148       }
1149 #else
1150       assert(!has_signal_dma_buf);
1151 #endif
1152 
1153       if (wsi->sw)
1154          wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1155                             true, ~0ull);
1156 
1157       const VkPresentRegionKHR *region = NULL;
1158       if (regions && regions->pRegions)
1159          region = &regions->pRegions[i];
1160 
1161       result = swapchain->queue_present(swapchain, image_index, region);
1162       if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
1163          goto fail_present;
1164 
1165       if (wsi->set_memory_ownership) {
1166          VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1167          wsi->set_memory_ownership(swapchain->device, mem, false);
1168       }
1169 
1170    fail_present:
1171       if (pPresentInfo->pResults != NULL)
1172          pPresentInfo->pResults[i] = result;
1173 
1174       /* Let the final result be our first unsuccessful result */
1175       if (final_result == VK_SUCCESS)
1176          final_result = result;
1177    }
1178 
1179    STACK_ARRAY_FINISH(stage_flags);
1180 
1181    return final_result;
1182 }
1183 
1184 VKAPI_ATTR VkResult VKAPI_CALL
1185 wsi_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
1186 {
1187    VK_FROM_HANDLE(vk_queue, queue, _queue);
1188 
1189    return wsi_common_queue_present(queue->base.device->physical->wsi_device,
1190                                    vk_device_to_handle(queue->base.device),
1191                                    _queue,
1192                                    queue->queue_family_index,
1193                                    pPresentInfo);
1194 }
1195 
1196 VKAPI_ATTR VkResult VKAPI_CALL
1197 wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,
1198                                          VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
1199 {
1200    memset(pCapabilities->presentMask, 0,
1201           sizeof(pCapabilities->presentMask));
1202    pCapabilities->presentMask[0] = 0x1;
1203    pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
1204 
1205    return VK_SUCCESS;
1206 }
1207 
1208 VKAPI_ATTR VkResult VKAPI_CALL
1209 wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,
1210                                          VkSurfaceKHR surface,
1211                                          VkDeviceGroupPresentModeFlagsKHR *pModes)
1212 {
1213    *pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
1214 
1215    return VK_SUCCESS;
1216 }
1217 
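/* Used for images created with VkImageSwapchainCreateInfoKHR: validates (in
 * debug builds) that the application's create info matches the swapchain's,
 * then creates the image from the cached chain->image_info.
 */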
1218 VkResult
1219 wsi_common_create_swapchain_image(const struct wsi_device *wsi,
1220                                   const VkImageCreateInfo *pCreateInfo,
1221                                   VkSwapchainKHR _swapchain,
1222                                   VkImage *pImage)
1223 {
1224    VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
1225 
1226 #ifndef NDEBUG
1227    const VkImageCreateInfo *swcInfo = &chain->image_info.create;
1228    assert(pCreateInfo->flags == 0);
1229    assert(pCreateInfo->imageType == swcInfo->imageType);
1230    assert(pCreateInfo->format == swcInfo->format);
1231    assert(pCreateInfo->extent.width == swcInfo->extent.width);
1232    assert(pCreateInfo->extent.height == swcInfo->extent.height);
1233    assert(pCreateInfo->extent.depth == swcInfo->extent.depth);
1234    assert(pCreateInfo->mipLevels == swcInfo->mipLevels);
1235    assert(pCreateInfo->arrayLayers == swcInfo->arrayLayers);
1236    assert(pCreateInfo->samples == swcInfo->samples);
1237    assert(pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL);
1238    assert(!(pCreateInfo->usage & ~swcInfo->usage));
1239 
1240    vk_foreach_struct_const(ext, pCreateInfo->pNext) {
1241       switch (ext->sType) {
1242       case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO: {
1243          const VkImageFormatListCreateInfo *iflci =
1244             (const VkImageFormatListCreateInfo *)ext;
1245          const VkImageFormatListCreateInfo *swc_iflci =
1246             &chain->image_info.format_list;
1247 
1248          for (uint32_t i = 0; i < iflci->viewFormatCount; i++) {
1249             bool found = false;
1250             for (uint32_t j = 0; j < swc_iflci->viewFormatCount; j++) {
1251                if (iflci->pViewFormats[i] == swc_iflci->pViewFormats[j]) {
1252                   found = true;
1253                   break;
1254                }
1255             }
1256             assert(found);
1257          }
1258          break;
1259       }
1260 
1261       case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
1262          break;
1263 
1264       default:
1265          assert(!"Unsupported image create extension");
1266       }
1267    }
1268 #endif
1269 
1270    return wsi->CreateImage(chain->device, &chain->image_info.create,
1271                            &chain->alloc, pImage);
1272 }
1273 
1274 VkResult
1275 wsi_common_bind_swapchain_image(const struct wsi_device *wsi,
1276                                 VkImage vk_image,
1277                                 VkSwapchainKHR _swapchain,
1278                                 uint32_t image_idx)
1279 {
1280    VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
1281    struct wsi_image *image = chain->get_wsi_image(chain, image_idx);
1282 
1283    return wsi->BindImageMemory(chain->device, vk_image, image->memory, 0);
1284 }
1285 
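/* Picks a memory type whose flags contain req_props and none of deny_props.
 * Typical use, as in wsi_select_device_memory_type() below:
 *
 *    uint32_t type = wsi_select_memory_type(wsi,
 *                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
 *                                           0, reqs.memoryTypeBits);
 */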
1286 uint32_t
1287 wsi_select_memory_type(const struct wsi_device *wsi,
1288                        VkMemoryPropertyFlags req_props,
1289                        VkMemoryPropertyFlags deny_props,
1290                        uint32_t type_bits)
1291 {
1292    assert(type_bits != 0);
1293 
1294    VkMemoryPropertyFlags common_props = ~0;
1295    u_foreach_bit(t, type_bits) {
1296       const VkMemoryType type = wsi->memory_props.memoryTypes[t];
1297 
1298       common_props &= type.propertyFlags;
1299 
1300       if (deny_props & type.propertyFlags)
1301          continue;
1302 
1303       if (!(req_props & ~type.propertyFlags))
1304          return t;
1305    }
1306 
1307    if ((deny_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
1308        (common_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
1309       /* If they asked for non-device-local and all the types are device-local
1310        * (this is commonly true for UMA platforms), try again without denying
1311        * device-local types
1312        */
1313       deny_props &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1314       return wsi_select_memory_type(wsi, req_props, deny_props, type_bits);
1315    }
1316 
1317    unreachable("No memory type found");
1318 }
1319 
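/* Any device-local memory type from type_bits. */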
1320 uint32_t
1321 wsi_select_device_memory_type(const struct wsi_device *wsi,
1322                               uint32_t type_bits)
1323 {
1324    return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
1325                                  0 /* deny_props */, type_bits);
1326 }
1327 
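/* Any host-coherent memory type from type_bits; used below for
 * CPU-mapped swapchain memory.
 */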
1328 static uint32_t
1329 wsi_select_host_memory_type(const struct wsi_device *wsi,
1330                             uint32_t type_bits)
1331 {
1332    return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
1333                                  0 /* deny_props */, type_bits);
1334 }
1335 
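/* Allocate the memory a blit-to-buffer image needs: a linear VkBuffer that
 * serves as the copy destination recorded in wsi_finish_create_buffer_image()
 * below (optionally exportable, or imported from a host pointer supplied by
 * alloc_shm), plus dedicated memory for the VkImage the application renders
 * into.
 */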
1336 VkResult
1337 wsi_create_buffer_image_mem(const struct wsi_swapchain *chain,
1338                             const struct wsi_image_info *info,
1339                             struct wsi_image *image,
1340                             VkExternalMemoryHandleTypeFlags handle_types,
1341                             bool implicit_sync)
1342 {
1343    const struct wsi_device *wsi = chain->wsi;
1344    VkResult result;
1345 
1346    const VkExternalMemoryBufferCreateInfo buffer_external_info = {
1347       .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
1348       .pNext = NULL,
1349       .handleTypes = handle_types,
1350    };
1351    const VkBufferCreateInfo buffer_info = {
1352       .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
1353       .pNext = &buffer_external_info,
1354       .size = info->linear_size,
1355       .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
1356       .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
1357    };
1358    result = wsi->CreateBuffer(chain->device, &buffer_info,
1359                               &chain->alloc, &image->buffer.buffer);
1360    if (result != VK_SUCCESS)
1361       return result;
1362 
1363    VkMemoryRequirements reqs;
1364    wsi->GetBufferMemoryRequirements(chain->device, image->buffer.buffer, &reqs);
1365    assert(reqs.size <= info->linear_size);
1366 
1367    struct wsi_memory_allocate_info memory_wsi_info = {
1368       .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
1369       .pNext = NULL,
1370       .implicit_sync = implicit_sync,
1371    };
1372    VkMemoryDedicatedAllocateInfo buf_mem_dedicated_info = {
1373       .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
1374       .pNext = &memory_wsi_info,
1375       .image = VK_NULL_HANDLE,
1376       .buffer = image->buffer.buffer,
1377    };
1378    VkMemoryAllocateInfo buf_mem_info = {
1379       .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
1380       .pNext = &buf_mem_dedicated_info,
1381       .allocationSize = info->linear_size,
1382       .memoryTypeIndex =
1383          info->select_buffer_memory_type(wsi, reqs.memoryTypeBits),
1384    };
1385 
1386    void *sw_host_ptr = NULL;
1387    if (info->alloc_shm)
1388       sw_host_ptr = info->alloc_shm(image, info->linear_size);
1389 
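   /* Either import the host pointer supplied by alloc_shm, or make the
    * allocation exportable if external handle types were requested.
    */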
1390    VkExportMemoryAllocateInfo memory_export_info;
1391    VkImportMemoryHostPointerInfoEXT host_ptr_info;
1392    if (sw_host_ptr != NULL) {
1393       host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
1394          .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
1395          .pHostPointer = sw_host_ptr,
1396          .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
1397       };
1398       __vk_append_struct(&buf_mem_info, &host_ptr_info);
1399    } else if (handle_types != 0) {
1400       memory_export_info = (VkExportMemoryAllocateInfo) {
1401          .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
1402          .handleTypes = handle_types,
1403       };
1404       __vk_append_struct(&buf_mem_info, &memory_export_info);
1405    }
1406 
1407    result = wsi->AllocateMemory(chain->device, &buf_mem_info,
1408                                 &chain->alloc, &image->buffer.memory);
1409    if (result != VK_SUCCESS)
1410       return result;
1411 
1412    result = wsi->BindBufferMemory(chain->device, image->buffer.buffer,
1413                                   image->buffer.memory, 0);
1414    if (result != VK_SUCCESS)
1415       return result;
1416 
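   /* The linear buffer is bound; now allocate dedicated memory for the
    * image itself using the caller-provided memory-type selector.
    */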
1417    wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
1418 
1419    const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
1420       .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
1421       .pNext = NULL,
1422       .image = image->image,
1423       .buffer = VK_NULL_HANDLE,
1424    };
1425    const VkMemoryAllocateInfo memory_info = {
1426       .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
1427       .pNext = &memory_dedicated_info,
1428       .allocationSize = reqs.size,
1429       .memoryTypeIndex =
1430          info->select_image_memory_type(wsi, reqs.memoryTypeBits),
1431    };
1432 
1433    result = wsi->AllocateMemory(chain->device, &memory_info,
1434                                 &chain->alloc, &image->memory);
1435    if (result != VK_SUCCESS)
1436       return result;
1437 
1438    image->num_planes = 1;
1439    image->sizes[0] = info->linear_size;
1440    image->row_pitches[0] = info->linear_stride;
1441    image->offsets[0] = 0;
1442 
1443    return VK_SUCCESS;
1444 }
1445 
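/* Record one image-to-buffer blit command buffer per queue family (or a
 * single one when a dedicated blit queue is available).  Each command buffer
 * transitions the image PRESENT_SRC -> TRANSFER_SRC, copies it into the
 * linear buffer, and transitions it back to PRESENT_SRC.
 */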
1446 VkResult
1447 wsi_finish_create_buffer_image(const struct wsi_swapchain *chain,
1448                                const struct wsi_image_info *info,
1449                                struct wsi_image *image)
1450 {
1451    const struct wsi_device *wsi = chain->wsi;
1452    VkResult result;
1453 
1454    uint32_t cmd_buffer_count =
1455       chain->buffer_blit_queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
1456    image->buffer.blit_cmd_buffers =
1457       vk_zalloc(&chain->alloc,
1458                 sizeof(VkCommandBuffer) * cmd_buffer_count, 8,
1459                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1460    if (!image->buffer.blit_cmd_buffers)
1461       return VK_ERROR_OUT_OF_HOST_MEMORY;
1462 
1463    for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1464       const VkCommandBufferAllocateInfo cmd_buffer_info = {
1465          .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
1466          .pNext = NULL,
1467          .commandPool = chain->cmd_pools[i],
1468          .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
1469          .commandBufferCount = 1,
1470       };
1471       result = wsi->AllocateCommandBuffers(chain->device, &cmd_buffer_info,
1472                                            &image->buffer.blit_cmd_buffers[i]);
1473       if (result != VK_SUCCESS)
1474          return result;
1475 
1476       const VkCommandBufferBeginInfo begin_info = {
1477          .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
1478       };
1479       wsi->BeginCommandBuffer(image->buffer.blit_cmd_buffers[i], &begin_info);
1480 
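      /* Make the presentable image a valid transfer source for the copy
       * below.
       */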
1481       VkImageMemoryBarrier img_mem_barrier = {
1482          .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1483          .pNext = NULL,
1484          .srcAccessMask = 0,
1485          .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
1486          .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1487          .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1488          .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1489          .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1490          .image = image->image,
1491          .subresourceRange = {
1492             .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1493             .baseMipLevel = 0,
1494             .levelCount = 1,
1495             .baseArrayLayer = 0,
1496             .layerCount = 1,
1497          },
1498       };
1499       wsi->CmdPipelineBarrier(image->buffer.blit_cmd_buffers[i],
1500                               VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1501                               VK_PIPELINE_STAGE_TRANSFER_BIT,
1502                               0,
1503                               0, NULL,
1504                               0, NULL,
1505                               1, &img_mem_barrier);
1506 
1507       VkBufferImageCopy buffer_image_copy = {
1508          .bufferOffset = 0,
1509          .bufferRowLength = info->linear_stride /
1510                             vk_format_get_blocksize(info->create.format),
1511          .bufferImageHeight = 0,
1512          .imageSubresource = {
1513             .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1514             .mipLevel = 0,
1515             .baseArrayLayer = 0,
1516             .layerCount = 1,
1517          },
1518          .imageOffset = { .x = 0, .y = 0, .z = 0 },
1519          .imageExtent = info->create.extent,
1520       };
1521       wsi->CmdCopyImageToBuffer(image->buffer.blit_cmd_buffers[i],
1522                                 image->image,
1523                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1524                                 image->buffer.buffer,
1525                                 1, &buffer_image_copy);
1526 
1527       img_mem_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
1528       img_mem_barrier.dstAccessMask = 0;
1529       img_mem_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
1530       img_mem_barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1531       wsi->CmdPipelineBarrier(image->buffer.blit_cmd_buffers[i],
1532                               VK_PIPELINE_STAGE_TRANSFER_BIT,
1533                               VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1534                               0,
1535                               0, NULL,
1536                               0, NULL,
1537                               1, &img_mem_barrier);
1538 
1539       result = wsi->EndCommandBuffer(image->buffer.blit_cmd_buffers[i]);
1540       if (result != VK_SUCCESS)
1541          return result;
1542    }
1543 
1544    return VK_SUCCESS;
1545 }
1546 
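/* Set up info for an image presented through the buffer-blit path: add
 * TRANSFER_SRC usage and compute a linear stride and size from the
 * swapchain extent, aligned to the caller's requirements and to the
 * device's optimalBufferCopyRowPitchAlignment.
 */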
1547 VkResult
1548 wsi_configure_buffer_image(const struct wsi_swapchain *chain,
1549                            const VkSwapchainCreateInfoKHR *pCreateInfo,
1550                            uint32_t stride_align, uint32_t size_align,
1551                            struct wsi_image_info *info)
1552 {
1553    const struct wsi_device *wsi = chain->wsi;
1554 
1555    assert(util_is_power_of_two_nonzero(stride_align));
1556    assert(util_is_power_of_two_nonzero(size_align));
1557 
1558    VkResult result = wsi_configure_image(chain, pCreateInfo,
1559                                          0 /* handle_types */, info);
1560    if (result != VK_SUCCESS)
1561       return result;
1562 
1563    info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1564    info->wsi.buffer_blit_src = true;
1565 
1566    const uint32_t cpp = vk_format_get_blocksize(pCreateInfo->imageFormat);
1567    info->linear_stride = pCreateInfo->imageExtent.width * cpp;
1568    info->linear_stride = ALIGN_POT(info->linear_stride, stride_align);
1569 
1570    /* Since we can pick the stride to be whatever we want, also align to the
1571     * device's optimalBufferCopyRowPitchAlignment so we get efficient copies.
1572     */
1573    assert(wsi->optimalBufferCopyRowPitchAlignment > 0);
1574    info->linear_stride = ALIGN_POT(info->linear_stride,
1575                                    wsi->optimalBufferCopyRowPitchAlignment);
1576 
1577    info->linear_size = info->linear_stride * pCreateInfo->imageExtent.height;
1578    info->linear_size = ALIGN_POT(info->linear_size, size_align);
1579 
1580    info->finish_create = wsi_finish_create_buffer_image;
1581 
1582    return VK_SUCCESS;
1583 }
1584 
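/* Software path without a blit: allocate host-coherent dedicated memory for
 * the linear image (or import it from alloc_shm) and keep it persistently
 * mapped in image->cpu_map.
 */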
1585 static VkResult
1586 wsi_create_cpu_linear_image_mem(const struct wsi_swapchain *chain,
1587                                 const struct wsi_image_info *info,
1588                                 struct wsi_image *image)
1589 {
1590    const struct wsi_device *wsi = chain->wsi;
1591    VkResult result;
1592 
1593    VkMemoryRequirements reqs;
1594    wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);
1595 
1596    VkSubresourceLayout layout;
1597    wsi->GetImageSubresourceLayout(chain->device, image->image,
1598                                   &(VkImageSubresource) {
1599                                      .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1600                                      .mipLevel = 0,
1601                                      .arrayLayer = 0,
1602                                   }, &layout);
1603    assert(layout.offset == 0);
1604 
1605    const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
1606       .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
1607       .image = image->image,
1608       .buffer = VK_NULL_HANDLE,
1609    };
1610    VkMemoryAllocateInfo memory_info = {
1611       .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
1612       .pNext = &memory_dedicated_info,
1613       .allocationSize = reqs.size,
1614       .memoryTypeIndex =
1615          wsi_select_host_memory_type(wsi, reqs.memoryTypeBits),
1616    };
1617 
1618    void *sw_host_ptr = NULL;
1619    if (info->alloc_shm)
1620       sw_host_ptr = info->alloc_shm(image, layout.size);
1621 
1622    VkImportMemoryHostPointerInfoEXT host_ptr_info;
1623    if (sw_host_ptr != NULL) {
1624       host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
1625          .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
1626          .pHostPointer = sw_host_ptr,
1627          .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
1628       };
1629       __vk_append_struct(&memory_info, &host_ptr_info);
1630    }
1631 
1632    result = wsi->AllocateMemory(chain->device, &memory_info,
1633                                 &chain->alloc, &image->memory);
1634    if (result != VK_SUCCESS)
1635       return result;
1636 
1637    result = wsi->MapMemory(chain->device, image->memory,
1638                            0, VK_WHOLE_SIZE, 0, &image->cpu_map);
1639    if (result != VK_SUCCESS)
1640       return result;
1641 
1642    image->num_planes = 1;
1643    image->sizes[0] = reqs.size;
1644    image->row_pitches[0] = layout.rowPitch;
1645    image->offsets[0] = 0;
1646 
1647    return VK_SUCCESS;
1648 }
1649 
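/* Software path with a blit: reuse the buffer-image allocation above and map
 * the staging buffer so its contents can be read on the CPU.
 */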
1650 static VkResult
1651 wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain *chain,
1652                                 const struct wsi_image_info *info,
1653                                 struct wsi_image *image)
1654 {
1655    VkResult result;
1656 
1657    result = wsi_create_buffer_image_mem(chain, info, image, 0,
1658                                         false /* implicit_sync */);
1659    if (result != VK_SUCCESS)
1660       return result;
1661 
1662    result = chain->wsi->MapMemory(chain->device, image->buffer.memory,
1663                                   0, VK_WHOLE_SIZE, 0, &image->cpu_map);
1664    if (result != VK_SUCCESS)
1665       return result;
1666 
1667    return VK_SUCCESS;
1668 }
1669 
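/* Entry point for software (CPU-accessible) swapchain images.  Depending on
 * chain->use_buffer_blit this configures either the blit-to-buffer variant
 * or a directly mapped linear image; alloc_shm, when non-NULL, supplies the
 * host memory that backs the allocation.
 */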
1670 VkResult
1671 wsi_configure_cpu_image(const struct wsi_swapchain *chain,
1672                         const VkSwapchainCreateInfoKHR *pCreateInfo,
1673                         uint8_t *(alloc_shm)(struct wsi_image *image,
1674                                              unsigned size),
1675                         struct wsi_image_info *info)
1676 {
1677    const VkExternalMemoryHandleTypeFlags handle_types =
1678       alloc_shm ? VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT : 0;
1679 
1680    if (chain->use_buffer_blit) {
1681       VkResult result = wsi_configure_buffer_image(chain, pCreateInfo,
1682                                                    1 /* stride_align */,
1683                                                    1 /* size_align */,
1684                                                    info);
1685       if (result != VK_SUCCESS)
1686          return result;
1687 
1688       info->select_buffer_memory_type = wsi_select_host_memory_type;
1689       info->select_image_memory_type = wsi_select_device_memory_type;
1690       info->create_mem = wsi_create_cpu_buffer_image_mem;
1691    } else {
1692       VkResult result = wsi_configure_image(chain, pCreateInfo,
1693                                             handle_types, info);
1694       if (result != VK_SUCCESS)
1695          return result;
1696 
1697       /* Force the image to be linear */
1698       info->create.tiling = VK_IMAGE_TILING_LINEAR;
1699 
1700       info->create_mem = wsi_create_cpu_linear_image_mem;
1701    }
1702 
1703    info->alloc_shm = alloc_shm;
1704 
1705    return VK_SUCCESS;
1706 }
1707