1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
26 #define XK_MISCELLANY
27 #define XK_LATIN1
28 #include <X11/keysymdef.h>
29 #include <xcb/xcb.h>
30 #ifdef XCB_KEYSYMS_AVAILABLE
31 #include <xcb/xcb_keysyms.h>
32 #endif
33 #include <xcb/dri3.h>
34 #include <xcb/present.h>
35 #include <xcb/shm.h>
36 
37 #include "util/macros.h"
38 #include <stdatomic.h>
39 #include <stdlib.h>
40 #include <stdio.h>
41 #include <unistd.h>
42 #include <errno.h>
43 #include <string.h>
44 #include <fcntl.h>
45 #include <xf86drm.h>
46 #include "drm-uapi/drm_fourcc.h"
47 #include "util/hash_table.h"
48 #include "util/mesa-blake3.h"
49 #include "util/os_file.h"
50 #include "util/os_time.h"
51 #include "util/u_debug.h"
52 #include "util/u_thread.h"
53 #include "util/xmlconfig.h"
54 #include "util/timespec.h"
55 
56 #include "vk_format.h"
57 #include "vk_instance.h"
58 #include "vk_physical_device.h"
59 #include "vk_device.h"
60 #include "vk_util.h"
61 #include "vk_enum_to_str.h"
62 #include "wsi_common_entrypoints.h"
63 #include "wsi_common_private.h"
64 #include "wsi_common_queue.h"
65 
66 #ifdef HAVE_SYS_SHM_H
67 #include <sys/ipc.h>
68 #include <sys/shm.h>
69 #endif
70 
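/* Editorial note: the following are local fallback definitions so the file
 * still builds against older xcb headers that lack the Present extension's
 * tearing-control (ASYNC_MAY_TEAR) additions. */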
71 #ifndef XCB_PRESENT_OPTION_ASYNC_MAY_TEAR
72 #define XCB_PRESENT_OPTION_ASYNC_MAY_TEAR 16
73 #endif
74 #ifndef XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR
75 #define XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR 8
76 #endif
77 
78 struct wsi_x11_connection {
79    bool has_dri3;
80    bool has_dri3_modifiers;
81    bool has_present;
82    bool is_proprietary_x11;
83    bool is_xwayland;
84    bool has_mit_shm;
85    bool has_xfixes;
86 };
87 
88 struct wsi_x11 {
89    struct wsi_interface base;
90 
91    pthread_mutex_t                              mutex;
92    /* Hash table of xcb_connection -> wsi_x11_connection mappings */
93    struct hash_table *connections;
94 };
95 
96 struct wsi_x11_vk_surface {
97    union {
98       VkIcdSurfaceXlib xlib;
99       VkIcdSurfaceXcb xcb;
100    };
101    bool has_alpha;
102 };
103 
104 /**
105  * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
106  */
107 static int
108 wsi_dri3_open(xcb_connection_t *conn,
109 	      xcb_window_t root,
110 	      uint32_t provider)
111 {
112    xcb_dri3_open_cookie_t       cookie;
113    xcb_dri3_open_reply_t        *reply;
114    int                          fd;
115 
116    cookie = xcb_dri3_open(conn,
117                           root,
118                           provider);
119 
120    reply = xcb_dri3_open_reply(conn, cookie, NULL);
121    if (!reply)
122       return -1;
123 
124    /* According to the DRI3 extension, nfd must equal one. */
125    if (reply->nfd != 1) {
126       free(reply);
127       return -1;
128    }
129 
130    fd = xcb_dri3_open_reply_fds(conn, reply)[0];
131    free(reply);
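   /* Mark the fd close-on-exec so it is not leaked to child processes. */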
132    fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
133 
134    return fd;
135 }
136 
137 /**
138  * Checks compatibility of the device wsi_dev with the device the X server
139  * provides via DRI3.
140  *
141  * This returns true when no device could be retrieved from the X server, or
142  * when the device reported by the X server is the same device as wsi_dev.
143  */
144 static bool
145 wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
146                               xcb_connection_t *conn)
147 {
148    xcb_screen_iterator_t screen_iter =
149       xcb_setup_roots_iterator(xcb_get_setup(conn));
150    xcb_screen_t *screen = screen_iter.data;
151 
152    /* Open the DRI3 device from the X server. If we do not retrieve one we
153     * assume our local device is compatible.
154     */
155    int dri3_fd = wsi_dri3_open(conn, screen->root, None);
156    if (dri3_fd == -1)
157       return true;
158 
159    bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);
160 
161    close(dri3_fd);
162 
163    return match;
164 }
165 
166 static bool
167 wsi_x11_detect_xwayland(xcb_connection_t *conn,
168                         xcb_query_extension_reply_t *randr_reply,
169                         xcb_query_extension_reply_t *xwl_reply)
170 {
171    /* Newer Xwayland exposes an X11 extension we can check for */
172    if (xwl_reply && xwl_reply->present)
173       return true;
174 
175    /* Older Xwayland uses the word "XWAYLAND" in the RandR output names */
176    if (!randr_reply || !randr_reply->present)
177       return false;
178 
179    xcb_randr_query_version_cookie_t ver_cookie =
180       xcb_randr_query_version_unchecked(conn, 1, 3);
181    xcb_randr_query_version_reply_t *ver_reply =
182       xcb_randr_query_version_reply(conn, ver_cookie, NULL);
183    bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
184                                        ver_reply->minor_version >= 3);
185    free(ver_reply);
186 
187    if (!has_randr_v1_3)
188       return false;
189 
190    const xcb_setup_t *setup = xcb_get_setup(conn);
191    xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);
192 
193    xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
194       xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
195    xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
196       xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);
197 
198    if (!gsr_reply || gsr_reply->num_outputs == 0) {
199       free(gsr_reply);
200       return false;
201    }
202 
203    xcb_randr_output_t *randr_outputs =
204       xcb_randr_get_screen_resources_current_outputs(gsr_reply);
205    xcb_randr_get_output_info_cookie_t goi_cookie =
206       xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
207    free(gsr_reply);
208 
209    xcb_randr_get_output_info_reply_t *goi_reply =
210       xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
211    if (!goi_reply) {
212       return false;
213    }
214 
215    char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
216    bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
217    free(goi_reply);
218 
219    return is_xwayland;
220 }
221 
222 static struct wsi_x11_connection *
223 wsi_x11_connection_create(struct wsi_device *wsi_dev,
224                           xcb_connection_t *conn)
225 {
226    xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
227                                 amd_cookie, nv_cookie, shm_cookie, sync_cookie,
228                                 xfixes_cookie, xwl_cookie;
229    xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
230                                *amd_reply, *nv_reply, *shm_reply = NULL,
231                                *xfixes_reply, *xwl_reply;
232    bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
233                     wsi_dev->has_import_memory_host;
234    bool has_dri3_v1_2 = false;
235    bool has_present_v1_2 = false;
236 
237    struct wsi_x11_connection *wsi_conn =
238       vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
239                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
240    if (!wsi_conn)
241       return NULL;
242 
243    sync_cookie = xcb_query_extension(conn, 4, "SYNC");
244    dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
245    pres_cookie = xcb_query_extension(conn, 7, "Present");
246    randr_cookie = xcb_query_extension(conn, 5, "RANDR");
247    xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");
248    xwl_cookie = xcb_query_extension(conn, 8, "XWAYLAND");
249 
250    if (wants_shm)
251       shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");
252 
253    /* We try to be nice to users and emit a warning if they try to use a
254     * Vulkan application on a system without DRI3 enabled.  However, this ends
255     * up spewing the warning when a user has, for example, both Intel
256     * integrated graphics and a discrete card with proprietary drivers, and is
257     * running on the discrete card with the proprietary DDX.  In this case, we
258     * really don't want to print the warning because it just confuses users.
259     * As a heuristic to detect this case, we check for a couple of proprietary
260     * X11 extensions.
261     */
262    amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
263    nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");
264 
265    xcb_discard_reply(conn, sync_cookie.sequence);
266    dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
267    pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
268    randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
269    amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
270    nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
271    xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
272    xwl_reply = xcb_query_extension_reply(conn, xwl_cookie, NULL);
273    if (wants_shm)
274       shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
275    if (!dri3_reply || !pres_reply || !xfixes_reply) {
276       free(dri3_reply);
277       free(pres_reply);
278       free(xfixes_reply);
279       free(xwl_reply);
280       free(randr_reply);
281       free(amd_reply);
282       free(nv_reply);
283       if (wants_shm)
284          free(shm_reply);
285       vk_free(&wsi_dev->instance_alloc, wsi_conn);
286       return NULL;
287    }
288 
289    wsi_conn->has_dri3 = dri3_reply->present != 0;
290 #ifdef HAVE_DRI3_MODIFIERS
291    if (wsi_conn->has_dri3) {
292       xcb_dri3_query_version_cookie_t ver_cookie;
293       xcb_dri3_query_version_reply_t *ver_reply;
294 
295       ver_cookie = xcb_dri3_query_version(conn, 1, 2);
296       ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
297       has_dri3_v1_2 = ver_reply != NULL &&
298          (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
299       free(ver_reply);
300    }
301 #endif
302 
303    wsi_conn->has_present = pres_reply->present != 0;
304 #ifdef HAVE_DRI3_MODIFIERS
305    if (wsi_conn->has_present) {
306       xcb_present_query_version_cookie_t ver_cookie;
307       xcb_present_query_version_reply_t *ver_reply;
308 
309       ver_cookie = xcb_present_query_version(conn, 1, 2);
310       ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
311       has_present_v1_2 = ver_reply != NULL &&
312          (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
313       free(ver_reply);
314    }
315 #endif
316 
317    wsi_conn->has_xfixes = xfixes_reply->present != 0;
318    if (wsi_conn->has_xfixes) {
319       xcb_xfixes_query_version_cookie_t ver_cookie;
320       xcb_xfixes_query_version_reply_t *ver_reply;
321 
322       ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
323       ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
324       wsi_conn->has_xfixes = (ver_reply->major_version >= 2);
325       free(ver_reply);
326    }
327 
328    wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn, randr_reply,
329                                                    xwl_reply);
330 
331    wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
332    wsi_conn->is_proprietary_x11 = false;
333    if (amd_reply && amd_reply->present)
334       wsi_conn->is_proprietary_x11 = true;
335    if (nv_reply && nv_reply->present)
336       wsi_conn->is_proprietary_x11 = true;
337 
338    wsi_conn->has_mit_shm = false;
339    if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
340       bool has_mit_shm = shm_reply->present != 0;
341 
342       xcb_shm_query_version_cookie_t ver_cookie;
343       xcb_shm_query_version_reply_t *ver_reply;
344 
345       ver_cookie = xcb_shm_query_version(conn);
346       ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);
347 
348       has_mit_shm = ver_reply->shared_pixmaps;
349       free(ver_reply);
350       xcb_void_cookie_t cookie;
351       xcb_generic_error_t *error;
352 
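      /* Probe MIT-SHM by detaching a bogus segment: any error other than
       * BadRequest means the server actually implements the SHM requests. */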
353       if (has_mit_shm) {
354          cookie = xcb_shm_detach_checked(conn, 0);
355          if ((error = xcb_request_check(conn, cookie))) {
356             if (error->error_code != BadRequest)
357                wsi_conn->has_mit_shm = true;
358             free(error);
359          }
360       }
361    }
362 
363    free(dri3_reply);
364    free(pres_reply);
365    free(randr_reply);
366    free(xwl_reply);
367    free(amd_reply);
368    free(nv_reply);
369    free(xfixes_reply);
370    if (wants_shm)
371       free(shm_reply);
372 
373    return wsi_conn;
374 }
375 
376 static void
377 wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
378                            struct wsi_x11_connection *conn)
379 {
380    vk_free(&wsi_dev->instance_alloc, conn);
381 }
382 
383 static bool
384 wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
385 {
386   if (wsi_conn->has_dri3)
387     return true;
388   if (!wsi_conn->is_proprietary_x11) {
389     fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
390                     "Note: you can probably enable DRI3 in your Xorg config\n");
391   }
392   return false;
393 }
394 
395 /**
396  * Get internal struct representing an xcb_connection_t.
397  *
398  * This may allocate the struct, but the caller does not own it. It is
399  * deleted in wsi_x11_finish_wsi via the hash table it is inserted into.
400  *
401  * If the allocation fails NULL is returned.
402  */
403 static struct wsi_x11_connection *
404 wsi_x11_get_connection(struct wsi_device *wsi_dev,
405                        xcb_connection_t *conn)
406 {
407    struct wsi_x11 *wsi =
408       (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];
409 
410    pthread_mutex_lock(&wsi->mutex);
411 
412    struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
413    if (!entry) {
414       /* We're about to make a bunch of blocking calls.  Let's drop the
415        * mutex for now so we don't block up too badly.
416        */
417       pthread_mutex_unlock(&wsi->mutex);
418 
419       struct wsi_x11_connection *wsi_conn =
420          wsi_x11_connection_create(wsi_dev, conn);
421       if (!wsi_conn)
422          return NULL;
423 
424       pthread_mutex_lock(&wsi->mutex);
425 
426       entry = _mesa_hash_table_search(wsi->connections, conn);
427       if (entry) {
428          /* Oops, someone raced us to it */
429          wsi_x11_connection_destroy(wsi_dev, wsi_conn);
430       } else {
431          entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
432       }
433    }
434 
435    pthread_mutex_unlock(&wsi->mutex);
436 
437    return entry->data;
438 }
439 
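/* Candidate swapchain formats; get_sorted_vk_formats() filters and orders
 * these against the channel widths of the window's X visual. */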
440 static const VkFormat formats[] = {
441    VK_FORMAT_R5G6B5_UNORM_PACK16,
442    VK_FORMAT_B8G8R8A8_SRGB,
443    VK_FORMAT_B8G8R8A8_UNORM,
444    VK_FORMAT_A2R10G10B10_UNORM_PACK32,
445 };
446 
447 static const VkPresentModeKHR present_modes[] = {
448    VK_PRESENT_MODE_IMMEDIATE_KHR,
449    VK_PRESENT_MODE_MAILBOX_KHR,
450    VK_PRESENT_MODE_FIFO_KHR,
451    VK_PRESENT_MODE_FIFO_RELAXED_KHR,
452 };
453 
454 static xcb_screen_t *
455 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
456 {
457    xcb_screen_iterator_t screen_iter =
458       xcb_setup_roots_iterator(xcb_get_setup(conn));
459 
460    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
461       if (screen_iter.data->root == root)
462          return screen_iter.data;
463    }
464 
465    return NULL;
466 }
467 
468 static xcb_visualtype_t *
469 screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
470                       unsigned *depth)
471 {
472    xcb_depth_iterator_t depth_iter =
473       xcb_screen_allowed_depths_iterator(screen);
474 
475    for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
476       xcb_visualtype_iterator_t visual_iter =
477          xcb_depth_visuals_iterator (depth_iter.data);
478 
479       for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
480          if (visual_iter.data->visual_id == visual_id) {
481             if (depth)
482                *depth = depth_iter.data->depth;
483             return visual_iter.data;
484          }
485       }
486    }
487 
488    return NULL;
489 }
490 
491 static xcb_visualtype_t *
492 connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
493 {
494    xcb_screen_iterator_t screen_iter =
495       xcb_setup_roots_iterator(xcb_get_setup(conn));
496 
497    /* For this we have to iterate over all of the screens which is rather
498     * annoying.  Fortunately, there is probably only 1.
499     */
500    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
501       xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
502                                                        visual_id, NULL);
503       if (visual)
504          return visual;
505    }
506 
507    return NULL;
508 }
509 
510 static xcb_visualtype_t *
511 get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
512                           unsigned *depth, xcb_visualtype_t **rootvis)
513 {
514    xcb_query_tree_cookie_t tree_cookie;
515    xcb_get_window_attributes_cookie_t attrib_cookie;
516    xcb_query_tree_reply_t *tree;
517    xcb_get_window_attributes_reply_t *attrib;
518 
519    tree_cookie = xcb_query_tree(conn, window);
520    attrib_cookie = xcb_get_window_attributes(conn, window);
521 
522    tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
523    attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
524    if (attrib == NULL || tree == NULL) {
525       free(attrib);
526       free(tree);
527       return NULL;
528    }
529 
530    xcb_window_t root = tree->root;
531    xcb_visualid_t visual_id = attrib->visual;
532    free(attrib);
533    free(tree);
534 
535    xcb_screen_t *screen = get_screen_for_root(conn, root);
536    if (screen == NULL)
537       return NULL;
538 
539    if (rootvis)
540       *rootvis = screen_get_visualtype(screen, screen->root_visual, depth);
541    return screen_get_visualtype(screen, visual_id, depth);
542 }
543 
544 static bool
545 visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
546 {
547    uint32_t rgb_mask = visual->red_mask |
548                        visual->green_mask |
549                        visual->blue_mask;
550 
551    uint32_t all_mask = 0xffffffff >> (32 - depth);
552 
553    /* Do we have bits left over after RGB? */
554    return (all_mask & ~rgb_mask) != 0;
555 }
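/* Worked example (typical masks assumed): a depth-32 TrueColor visual with
 * red_mask 0x00ff0000, green_mask 0x0000ff00 and blue_mask 0x000000ff leaves
 * 0xff000000 uncovered, so it is treated as having alpha; the same masks at
 * depth 24 leave no bits over, so no alpha. */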
556 
557 static bool
558 visual_supported(xcb_visualtype_t *visual)
559 {
560    if (!visual)
561       return false;
562 
563    return visual->_class == XCB_VISUAL_CLASS_TRUE_COLOR ||
564           visual->_class == XCB_VISUAL_CLASS_DIRECT_COLOR;
565 }
566 
567 VKAPI_ATTR VkBool32 VKAPI_CALL
568 wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
569                                                uint32_t queueFamilyIndex,
570                                                xcb_connection_t *connection,
571                                                xcb_visualid_t visual_id)
572 {
573    VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
574    struct wsi_device *wsi_device = pdevice->wsi_device;
575    if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
576       return false;
577 
578    struct wsi_x11_connection *wsi_conn =
579       wsi_x11_get_connection(wsi_device, connection);
580 
581    if (!wsi_conn)
582       return false;
583 
584    if (!wsi_device->sw) {
585       if (!wsi_x11_check_for_dri3(wsi_conn))
586          return false;
587    }
588 
589    if (!visual_supported(connection_get_visualtype(connection, visual_id)))
590       return false;
591 
592    return true;
593 }
594 
595 VKAPI_ATTR VkBool32 VKAPI_CALL
596 wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
597                                                 uint32_t queueFamilyIndex,
598                                                 Display *dpy,
599                                                 VisualID visualID)
600 {
601    return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
602                                                          queueFamilyIndex,
603                                                          XGetXCBConnection(dpy),
604                                                          visualID);
605 }
606 
607 static xcb_connection_t*
608 x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
609 {
610    if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
611       return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
612    else
613       return ((VkIcdSurfaceXcb *)icd_surface)->connection;
614 }
615 
616 static xcb_window_t
617 x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
618 {
619    if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
620       return ((VkIcdSurfaceXlib *)icd_surface)->window;
621    else
622       return ((VkIcdSurfaceXcb *)icd_surface)->window;
623 }
624 
625 static VkResult
626 x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
627                         struct wsi_device *wsi_device,
628                         uint32_t queueFamilyIndex,
629                         VkBool32* pSupported)
630 {
631    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
632    xcb_window_t window = x11_surface_get_window(icd_surface);
633 
634    struct wsi_x11_connection *wsi_conn =
635       wsi_x11_get_connection(wsi_device, conn);
636    if (!wsi_conn)
637       return VK_ERROR_OUT_OF_HOST_MEMORY;
638 
639    if (!wsi_device->sw) {
640       if (!wsi_x11_check_for_dri3(wsi_conn)) {
641          *pSupported = false;
642          return VK_SUCCESS;
643       }
644    }
645 
646    if (!visual_supported(get_visualtype_for_window(conn, window, NULL, NULL))) {
647       *pSupported = false;
648       return VK_SUCCESS;
649    }
650 
651    *pSupported = true;
652    return VK_SUCCESS;
653 }
654 
655 static uint32_t
656 x11_get_min_image_count(const struct wsi_device *wsi_device, bool is_xwayland)
657 {
658    if (wsi_device->x11.override_minImageCount)
659       return wsi_device->x11.override_minImageCount;
660 
661    /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
662     * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
663     * the render latency is CPU duration + GPU duration.
664     *
665     * This means that with scanout from pageflipping we need 3 frames to run
666     * full speed:
667     * 1) CPU rendering work
668     * 2) GPU rendering work
669     * 3) scanout
670     *
671     * Once we have a nonblocking acquire that returns a semaphore we can merge
672     * 1 and 3. Hence the ideal implementation needs only 2 images, but games
673     * cannot tell that we currently do not have an ideal implementation and
674     * that they hence need to allocate 3 images. So let us do it for them.
675     *
676     * This is a tradeoff as it uses more memory than needed for non-fullscreen
677     * and non-performance intensive applications.
678     *
679     * For Xwayland Venus reports four images as described in
680     *   wsi_wl_surface_get_capabilities
681     */
682    return is_xwayland && wsi_device->x11.extra_xwayland_image ? 4 : 3;
683 }
684 
685 static unsigned
686 x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
687                                          struct wsi_x11_connection *wsi_conn,
688                                          VkPresentModeKHR present_mode);
689 
690 static VkResult
691 x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
692                              struct wsi_device *wsi_device,
693                              const VkSurfacePresentModeEXT *present_mode,
694                              VkSurfaceCapabilitiesKHR *caps)
695 {
696    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
697    xcb_window_t window = x11_surface_get_window(icd_surface);
698    struct wsi_x11_vk_surface *surface = (struct wsi_x11_vk_surface*)icd_surface;
699    struct wsi_x11_connection *wsi_conn =
700       wsi_x11_get_connection(wsi_device, conn);
701    xcb_get_geometry_cookie_t geom_cookie;
702    xcb_generic_error_t *err;
703    xcb_get_geometry_reply_t *geom;
704 
705    geom_cookie = xcb_get_geometry(conn, window);
706 
707    geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
708    if (!geom)
709       return VK_ERROR_SURFACE_LOST_KHR;
710    {
711       VkExtent2D extent = { geom->width, geom->height };
712       caps->currentExtent = extent;
713       caps->minImageExtent = extent;
714       caps->maxImageExtent = extent;
715    }
716    free(err);
717    free(geom);
718 
719    if (surface->has_alpha) {
720       caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
721                                       VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
722    } else {
723       caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
724                                       VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
725    }
726 
727    if (present_mode) {
728       caps->minImageCount = x11_get_min_image_count_for_present_mode(wsi_device, wsi_conn, present_mode->presentMode);
729    } else {
730       caps->minImageCount = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
731    }
732 
733    /* There is no real maximum */
734    caps->maxImageCount = 0;
735 
736    caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
737    caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
738    caps->maxImageArrayLayers = 1;
739    caps->supportedUsageFlags = wsi_caps_get_image_usage();
740 
741    VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
742    if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
743       caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
744 
745    return VK_SUCCESS;
746 }
747 
748 static VkResult
749 x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
750                               struct wsi_device *wsi_device,
751                               const void *info_next,
752                               VkSurfaceCapabilities2KHR *caps)
753 {
754    assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
755 
756    const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
757 
758    VkResult result =
759       x11_surface_get_capabilities(icd_surface, wsi_device, present_mode,
760                                    &caps->surfaceCapabilities);
761 
762    if (result != VK_SUCCESS)
763       return result;
764 
765    vk_foreach_struct(ext, caps->pNext) {
766       switch (ext->sType) {
767       case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
768          VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
769          protected->supportsProtected = VK_FALSE;
770          break;
771       }
772 
773       case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
774          /* Unsupported. */
775          VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
776          scaling->supportedPresentScaling = 0;
777          scaling->supportedPresentGravityX = 0;
778          scaling->supportedPresentGravityY = 0;
779          scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
780          scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
781          break;
782       }
783 
784       case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
785          /* All present modes are compatible with each other. */
786          VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
787          if (compat->pPresentModes) {
788             assert(present_mode);
789             VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
790             /* Must always return queried present mode even when truncating. */
791             vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
792                *mode = present_mode->presentMode;
793             }
794 
795             for (uint32_t i = 0; i < ARRAY_SIZE(present_modes); i++) {
796                if (present_modes[i] != present_mode->presentMode) {
797                   vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
798                      *mode = present_modes[i];
799                   }
800                }
801             }
802          } else {
803             if (!present_mode)
804                wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
805                                        "without a VkSurfacePresentModeEXT set. This is an "
806                                        "application bug.\n");
807 
808             compat->presentModeCount = ARRAY_SIZE(present_modes);
809          }
810          break;
811       }
812 
813       default:
814          /* Ignored */
815          break;
816       }
817    }
818 
819    return result;
820 }
821 
822 static int
823 format_get_component_bits(VkFormat format, int comp)
824 {
825    return vk_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, comp);
826 }
827 
828 static bool
829 rgb_component_bits_are_equal(VkFormat format, const xcb_visualtype_t* type)
830 {
831    return format_get_component_bits(format, 0) == util_bitcount(type->red_mask) &&
832           format_get_component_bits(format, 1) == util_bitcount(type->green_mask) &&
833           format_get_component_bits(format, 2) == util_bitcount(type->blue_mask);
834 }
835 
836 static bool
837 get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
838                       VkFormat *sorted_formats, unsigned *count)
839 {
840    xcb_connection_t *conn = x11_surface_get_connection(surface);
841    xcb_window_t window = x11_surface_get_window(surface);
842    xcb_visualtype_t *rootvis = NULL;
843    xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL, &rootvis);
844 
845    if (!visual)
846       return false;
847 
848    /* use the root window's visual to set the default */
849    *count = 0;
850    for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
851       if (rgb_component_bits_are_equal(formats[i], rootvis))
852          sorted_formats[(*count)++] = formats[i];
853    }
854 
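   /* Then append formats matching the window's own visual, skipping any that
    * were already added for the root visual. */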
855    for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
856       for (unsigned j = 0; j < *count; j++)
857          if (formats[i] == sorted_formats[j])
858             goto next_format;
859       if (rgb_component_bits_are_equal(formats[i], visual))
860          sorted_formats[(*count)++] = formats[i];
861 next_format:;
862    }
863 
864    if (wsi_device->force_bgra8_unorm_first) {
865       for (unsigned i = 0; i < *count; i++) {
866          if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
867             sorted_formats[i] = sorted_formats[0];
868             sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
869             break;
870          }
871       }
872    }
873 
874    return true;
875 }
876 
877 static VkResult
878 x11_surface_get_formats(VkIcdSurfaceBase *surface,
879                         struct wsi_device *wsi_device,
880                         uint32_t *pSurfaceFormatCount,
881                         VkSurfaceFormatKHR *pSurfaceFormats)
882 {
883    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
884                           pSurfaceFormats, pSurfaceFormatCount);
885 
886    unsigned count;
887    VkFormat sorted_formats[ARRAY_SIZE(formats)];
888    if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
889       return VK_ERROR_SURFACE_LOST_KHR;
890 
891    for (unsigned i = 0; i < count; i++) {
892       vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
893          f->format = sorted_formats[i];
894          f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
895       }
896    }
897 
898    return vk_outarray_status(&out);
899 }
900 
901 static VkResult
902 x11_surface_get_formats2(VkIcdSurfaceBase *surface,
903                         struct wsi_device *wsi_device,
904                         const void *info_next,
905                         uint32_t *pSurfaceFormatCount,
906                         VkSurfaceFormat2KHR *pSurfaceFormats)
907 {
908    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
909                           pSurfaceFormats, pSurfaceFormatCount);
910 
911    unsigned count;
912    VkFormat sorted_formats[ARRAY_SIZE(formats)];
913    if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
914       return VK_ERROR_SURFACE_LOST_KHR;
915 
916    for (unsigned i = 0; i < count; i++) {
917       vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
918          assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
919          f->surfaceFormat.format = sorted_formats[i];
920          f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
921       }
922    }
923 
924    return vk_outarray_status(&out);
925 }
926 
927 static VkResult
928 x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
929                               struct wsi_device *wsi_device,
930                               uint32_t *pPresentModeCount,
931                               VkPresentModeKHR *pPresentModes)
932 {
933    if (pPresentModes == NULL) {
934       *pPresentModeCount = ARRAY_SIZE(present_modes);
935       return VK_SUCCESS;
936    }
937 
938    *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
939    typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
940 
941    return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
942       VK_INCOMPLETE : VK_SUCCESS;
943 }
944 
945 static VkResult
946 x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
947                                    struct wsi_device *wsi_device,
948                                    uint32_t* pRectCount,
949                                    VkRect2D* pRects)
950 {
951    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
952    xcb_window_t window = x11_surface_get_window(icd_surface);
953    VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
954 
955    vk_outarray_append_typed(VkRect2D, &out, rect) {
956       xcb_generic_error_t *err = NULL;
957       xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
958       xcb_get_geometry_reply_t *geom =
959          xcb_get_geometry_reply(conn, geom_cookie, &err);
960       free(err);
961       if (geom) {
962          *rect = (VkRect2D) {
963             .offset = { 0, 0 },
964             .extent = { geom->width, geom->height },
965          };
966       }
967       free(geom);
968       if (!geom)
969           return VK_ERROR_SURFACE_LOST_KHR;
970    }
971 
972    return vk_outarray_status(&out);
973 }
974 
975 VKAPI_ATTR VkResult VKAPI_CALL
976 wsi_CreateXcbSurfaceKHR(VkInstance _instance,
977                         const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
978                         const VkAllocationCallbacks *pAllocator,
979                         VkSurfaceKHR *pSurface)
980 {
981    VK_FROM_HANDLE(vk_instance, instance, _instance);
982    struct wsi_x11_vk_surface *surface;
983 
984    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
985 
986    unsigned visual_depth;
987    xcb_visualtype_t *visual =
988       get_visualtype_for_window(pCreateInfo->connection, pCreateInfo->window, &visual_depth, NULL);
989    if (!visual)
990       return VK_ERROR_OUT_OF_HOST_MEMORY;
991 
992    surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
993                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
994    if (surface == NULL)
995       return VK_ERROR_OUT_OF_HOST_MEMORY;
996 
997    surface->xcb.base.platform = VK_ICD_WSI_PLATFORM_XCB;
998    surface->xcb.connection = pCreateInfo->connection;
999    surface->xcb.window = pCreateInfo->window;
1000 
1001    surface->has_alpha = visual_has_alpha(visual, visual_depth);
1002 
1003    *pSurface = VkIcdSurfaceBase_to_handle(&surface->xcb.base);
1004    return VK_SUCCESS;
1005 }
1006 
1007 VKAPI_ATTR VkResult VKAPI_CALL
1008 wsi_CreateXlibSurfaceKHR(VkInstance _instance,
1009                          const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
1010                          const VkAllocationCallbacks *pAllocator,
1011                          VkSurfaceKHR *pSurface)
1012 {
1013    VK_FROM_HANDLE(vk_instance, instance, _instance);
1014    struct wsi_x11_vk_surface *surface;
1015 
1016    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
1017 
1018    unsigned visual_depth;
1019    xcb_visualtype_t *visual =
1020       get_visualtype_for_window(XGetXCBConnection(pCreateInfo->dpy), pCreateInfo->window, &visual_depth, NULL);
1021    if (!visual)
1022       return VK_ERROR_OUT_OF_HOST_MEMORY;
1023 
1024    surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
1025                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1026    if (surface == NULL)
1027       return VK_ERROR_OUT_OF_HOST_MEMORY;
1028 
1029    surface->xlib.base.platform = VK_ICD_WSI_PLATFORM_XLIB;
1030    surface->xlib.dpy = pCreateInfo->dpy;
1031    surface->xlib.window = pCreateInfo->window;
1032 
1033    surface->has_alpha = visual_has_alpha(visual, visual_depth);
1034 
1035    *pSurface = VkIcdSurfaceBase_to_handle(&surface->xlib.base);
1036    return VK_SUCCESS;
1037 }
1038 
1039 struct x11_image_pending_completion {
1040    uint32_t serial;
1041    uint64_t signal_present_id;
1042 };
1043 
1044 struct x11_image {
1045    struct wsi_image                          base;
1046    xcb_pixmap_t                              pixmap;
1047    xcb_xfixes_region_t                       update_region; /* long lived XID */
1048    xcb_xfixes_region_t                       update_area;   /* the above or None */
1049    struct xshmfence *                        shm_fence;
1050    uint32_t                                  sync_fence;
1051    xcb_shm_seg_t                             shmseg;
1052    int                                       shmid;
1053    uint8_t *                                 shmaddr;
1054    uint64_t                                  present_id;
1055    VkPresentModeKHR                          present_mode;
1056 
1057    /* In IMMEDIATE and MAILBOX modes, we can have multiple pending presentations per image.
1058     * We need to keep track of them when considering present ID. */
1059 
1060    /* This is arbitrarily chosen. With IMMEDIATE on a 3 deep swapchain,
1061     * we allow up to 48 outstanding presentations per vblank, which is more than enough
1062     * for any reasonable application. */
1063 #define X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS 16
1064    uint32_t                                  present_queued_count;
1065    struct x11_image_pending_completion       pending_completions[X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS];
1066 };
1067 
1068 struct x11_swapchain {
1069    struct wsi_swapchain                        base;
1070 
1071    bool                                         has_dri3_modifiers;
1072    bool                                         has_mit_shm;
1073    bool                                         has_async_may_tear;
1074 
1075    xcb_connection_t *                           conn;
1076    xcb_window_t                                 window;
1077    xcb_gc_t                                     gc;
1078    uint32_t                                     depth;
1079    VkExtent2D                                   extent;
1080 
1081    blake3_hash                                  dri3_modifier_hash;
1082 
1083    xcb_present_event_t                          event_id;
1084    xcb_special_event_t *                        special_event;
1085    uint64_t                                     send_sbc;
1086    uint64_t                                     last_present_msc;
1087    uint32_t                                     stamp;
1088    uint32_t                                     sent_image_count;
1089 
1090    atomic_int                                   status;
1091    bool                                         copy_is_suboptimal;
1092    struct wsi_queue                             present_queue;
1093    struct wsi_queue                             acquire_queue;
1094    pthread_t                                    queue_manager;
1095    pthread_t                                    event_manager;
1096 
1097    /* Used for communicating between event_manager and queue_manager.
1098     * Lock is also taken when reading and writing status.
1099     * When reading status in application threads,
1100     * x11_swapchain_read_status_atomic can be used as a wrapper function. */
1101    pthread_mutex_t                              thread_state_lock;
1102    pthread_cond_t                               thread_state_cond;
1103 
1104    /* Lock and condition variable for present wait.
1105     * Signalled by event thread and waited on by callers to PresentWaitKHR. */
1106    pthread_mutex_t                              present_progress_mutex;
1107    pthread_cond_t                               present_progress_cond;
1108    uint64_t                                     present_id;
1109    VkResult                                     present_progress_error;
1110 
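   /* Trailing flexible array of per-image state; one entry per swapchain
    * image (base.image_count). */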
1111    struct x11_image                             images[0];
1112 };
1113 VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
1114                                VK_OBJECT_TYPE_SWAPCHAIN_KHR)
1115 
1116 static void x11_present_complete(struct x11_swapchain *swapchain,
1117                                  struct x11_image *image, uint32_t index)
1118 {
1119    uint64_t signal_present_id = image->pending_completions[index].signal_present_id;
1120    if (signal_present_id) {
1121       pthread_mutex_lock(&swapchain->present_progress_mutex);
1122       if (signal_present_id > swapchain->present_id) {
1123          swapchain->present_id = signal_present_id;
1124          pthread_cond_broadcast(&swapchain->present_progress_cond);
1125       }
1126       pthread_mutex_unlock(&swapchain->present_progress_mutex);
1127    }
1128 
1129    image->present_queued_count--;
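   /* Drop pending entry 'index' by shifting any later entries down. */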
1130    if (image->present_queued_count) {
1131       memmove(image->pending_completions + index,
1132               image->pending_completions + index + 1,
1133               (image->present_queued_count - index) *
1134               sizeof(image->pending_completions[0]));
1135    }
1136 
1137    pthread_cond_signal(&swapchain->thread_state_cond);
1138 }
1139 
1140 static void x11_notify_pending_present(struct x11_swapchain *swapchain,
1141                                        struct x11_image *image)
1142 {
1143    pthread_cond_signal(&swapchain->thread_state_cond);
1144 }
1145 
1146 /* It is assumed that thread_state_lock is taken when calling this function. */
1147 static void x11_swapchain_notify_error(struct x11_swapchain *swapchain, VkResult result)
1148 {
1149    pthread_mutex_lock(&swapchain->present_progress_mutex);
1150    swapchain->present_id = UINT64_MAX;
1151    swapchain->present_progress_error = result;
1152    pthread_cond_broadcast(&swapchain->present_progress_cond);
1153    pthread_mutex_unlock(&swapchain->present_progress_mutex);
1154    pthread_cond_broadcast(&swapchain->thread_state_cond);
1155 }
1156 
1157 /**
1158  * Update the swapchain status with the result of an operation, and return
1159  * the combined status. The chain status will eventually be returned from
1160  * AcquireNextImage and QueuePresent.
1161  *
1162  * We make sure to 'stick' more pessimistic statuses: an out-of-date error
1163  * is permanent once seen, and every subsequent call will return this. If
1164  * this has not been seen, success will be returned.
1165  *
1166  * It is assumed that thread_state_lock is taken when calling this function.
1167  */
1168 static VkResult
1169 _x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
1170                       const char *file, int line)
1171 {
1172    if (result < 0)
1173       x11_swapchain_notify_error(chain, result);
1174 
1175    /* Prioritise returning existing errors for consistency. */
1176    if (chain->status < 0)
1177       return chain->status;
1178 
1179    /* If we have a new error, mark it as permanent on the chain and return. */
1180    if (result < 0) {
1181 #ifndef NDEBUG
1182       fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
1183               file, line, vk_Result_to_str(result));
1184 #endif
1185       chain->status = result;
1186       return result;
1187    }
1188 
1189    /* Return temporary errors, but don't persist them. */
1190    if (result == VK_TIMEOUT || result == VK_NOT_READY)
1191       return result;
1192 
1193    /* Suboptimal isn't an error, but is a status which sticks to the swapchain
1194     * and is always returned rather than success.
1195     */
1196    if (result == VK_SUBOPTIMAL_KHR) {
1197 #ifndef NDEBUG
1198       if (chain->status != VK_SUBOPTIMAL_KHR) {
1199          fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
1200                  file, line, vk_Result_to_str(result));
1201       }
1202 #endif
1203       chain->status = result;
1204       return result;
1205    }
1206 
1207    /* No changes, so return the last status. */
1208    return chain->status;
1209 }
1210 #define x11_swapchain_result(chain, result) \
1211    _x11_swapchain_result(chain, result, __FILE__, __LINE__)
1212 
1213 static struct wsi_image *
1214 x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
1215 {
1216    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1217    return &chain->images[image_index].base;
1218 }
1219 
1220 static bool
1221 wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain);
1222 
1223 /* XXX this belongs in presentproto */
1224 #ifndef PresentWindowDestroyed
1225 #define PresentWindowDestroyed (1 << 0)
1226 #endif
1227 /**
1228  * Process an X11 Present event. Does not update chain->status.
1229  */
1230 static VkResult
1231 x11_handle_dri3_present_event(struct x11_swapchain *chain,
1232                               xcb_present_generic_event_t *event)
1233 {
1234    switch (event->evtype) {
1235    case XCB_PRESENT_CONFIGURE_NOTIFY: {
1236       xcb_present_configure_notify_event_t *config = (void *) event;
1237       if (config->pixmap_flags & PresentWindowDestroyed)
1238          return VK_ERROR_SURFACE_LOST_KHR;
1239 
1240       struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
1241       if (!wsi_device->x11.ignore_suboptimal) {
1242          if (config->width != chain->extent.width ||
1243              config->height != chain->extent.height)
1244             return VK_SUBOPTIMAL_KHR;
1245       }
1246 
1247       break;
1248    }
1249 
1250    case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
1251       xcb_present_idle_notify_event_t *idle = (void *) event;
1252 
1253       for (unsigned i = 0; i < chain->base.image_count; i++) {
1254          if (chain->images[i].pixmap == idle->pixmap) {
1255             chain->sent_image_count--;
1256             assert(chain->sent_image_count >= 0);
1257             wsi_queue_push(&chain->acquire_queue, i);
1258             break;
1259          }
1260       }
1261 
1262       break;
1263    }
1264 
1265    case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
1266       xcb_present_complete_notify_event_t *complete = (void *) event;
1267       if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
1268          unsigned i, j;
1269          for (i = 0; i < chain->base.image_count; i++) {
1270             struct x11_image *image = &chain->images[i];
1271             for (j = 0; j < image->present_queued_count; j++) {
1272                if (image->pending_completions[j].serial == complete->serial) {
1273                   x11_present_complete(chain, image, j);
1274                }
1275             }
1276          }
1277          chain->last_present_msc = complete->msc;
1278       }
1279 
1280       VkResult result = VK_SUCCESS;
1281 
1282       struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
1283       if (wsi_device->x11.ignore_suboptimal)
1284          return result;
1285 
1286       switch (complete->mode) {
1287       case XCB_PRESENT_COMPLETE_MODE_COPY:
1288          if (chain->copy_is_suboptimal)
1289             result = VK_SUBOPTIMAL_KHR;
1290          break;
1291       case XCB_PRESENT_COMPLETE_MODE_FLIP:
1292          /* If we ever go from flipping to copying, the odds are very likely
1293           * that we could reallocate in a more optimal way if we didn't have
1294           * to care about scanout, so we always do this.
1295           */
1296          chain->copy_is_suboptimal = true;
1297          break;
1298 #ifdef HAVE_DRI3_MODIFIERS
1299       case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
1300          /* The winsys is now trying to flip directly and cannot due to our
1301           * configuration. Request the user reallocate.
1302           */
1303 
1304          /* Sometimes, this complete mode is spurious, and a false positive.
1305           * Xwayland may report SUBOPTIMAL_COPY even if there are no changes in the modifiers.
1306           * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26616 for more details. */
1307          if (chain->status == VK_SUCCESS &&
1308              wsi_x11_swapchain_query_dri3_modifiers_changed(chain)) {
1309             result = VK_SUBOPTIMAL_KHR;
1310          }
1311          break;
1312 #endif
1313       default:
1314          break;
1315       }
1316 
1317       return result;
1318    }
1319 
1320    default:
1321       break;
1322    }
1323 
1324    return VK_SUCCESS;
1325 }
1326 
1327 /**
1328  * Send image to X server via Present extension.
1329  */
1330 static VkResult
1331 x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
1332                         uint64_t target_msc, VkPresentModeKHR present_mode)
1333 {
1334    struct x11_image *image = &chain->images[image_index];
1335 
1336    assert(image_index < chain->base.image_count);
1337 
1338    uint32_t options = XCB_PRESENT_OPTION_NONE;
1339 
1340    int64_t divisor = 0;
1341    int64_t remainder = 0;
1342 
1343    struct wsi_x11_connection *wsi_conn =
1344       wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1345    if (!wsi_conn)
1346       return VK_ERROR_OUT_OF_HOST_MEMORY;
1347 
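   /* Allow the present to happen immediately instead of waiting for a
    * vblank/MSC boundary for IMMEDIATE and FIFO_RELAXED, and for MAILBOX
    * when running under Xwayland. */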
1348    if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
1349        (present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
1350         wsi_conn->is_xwayland) ||
1351        present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
1352       options |= XCB_PRESENT_OPTION_ASYNC;
1353 
1354    if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR
1355       && chain->has_async_may_tear)
1356       options |= XCB_PRESENT_OPTION_ASYNC_MAY_TEAR;
1357 
1358 #ifdef HAVE_DRI3_MODIFIERS
1359    if (chain->has_dri3_modifiers)
1360       options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1361 #endif
1362 
1363    xshmfence_reset(image->shm_fence);
1364 
1365    ++chain->sent_image_count;
1366    assert(chain->sent_image_count <= chain->base.image_count);
1367 
1368    ++chain->send_sbc;
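   /* The low 32 bits of the send SBC serve as the Present serial; the
    * COMPLETE_NOTIFY handler matches completions back to this entry by it. */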
1369    uint32_t serial = (uint32_t)chain->send_sbc;
1370 
1371    assert(image->present_queued_count < ARRAY_SIZE(image->pending_completions));
1372    image->pending_completions[image->present_queued_count++] =
1373       (struct x11_image_pending_completion) {
1374          .signal_present_id = image->present_id,
1375          .serial = serial,
1376       };
1377 
1378    xcb_present_pixmap(chain->conn,
1379                       chain->window,
1380                       image->pixmap,
1381                       serial,
1382                       0,                            /* valid */
1383                       image->update_area,           /* update */
1384                       0,                            /* x_off */
1385                       0,                            /* y_off */
1386                       XCB_NONE,                     /* target_crtc */
1387                       XCB_NONE,
1388                       image->sync_fence,
1389                       options,
1390                       target_msc,
1391                       divisor,
1392                       remainder, 0, NULL);
1393    xcb_flush(chain->conn);
1394    return x11_swapchain_result(chain, VK_SUCCESS);
1395 }
1396 
1397 /**
1398  * Send image to X server unaccelerated (software drivers).
1399  */
1400 static VkResult
1401 x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
1402                       uint64_t target_msc)
1403 {
1404    struct x11_image *image = &chain->images[image_index];
1405 
1406    /* Kick off the geometry query before submitting the frame for improved async performance.
1407     * In this _sw() mode we may be facing a network round-trip delay, not just UNIX socket delay. */
1408    xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);
1409 
1410    xcb_void_cookie_t cookie;
1411    void *myptr = image->base.cpu_map;
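   /* xcb_get_maximum_request_length() reports the limit in 4-byte units, so the
    * request size computed below is shifted down by 2 to compare in the same
    * units. */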
1412    size_t hdr_len = sizeof(xcb_put_image_request_t);
1413    int stride_b = image->base.row_pitches[0];
1414    size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
1415    uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);
1416 
1417    if (size < max_req_len) {
1418       cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1419                              chain->window,
1420                              chain->gc,
1421                              image->base.row_pitches[0] / 4,
1422                              chain->extent.height,
1423                              0,0,0,chain->depth,
1424                              image->base.row_pitches[0] * chain->extent.height,
1425                              image->base.cpu_map);
1426       xcb_discard_reply(chain->conn, cookie.sequence);
1427    } else {
1428       int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
1429       int y_start = 0;
1430       int y_todo = chain->extent.height;
1431       while (y_todo) {
1432          int this_lines = MIN2(num_lines, y_todo);
1433          cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1434                                 chain->window,
1435                                 chain->gc,
1436                                 image->base.row_pitches[0] / 4,
1437                                 this_lines,
1438                                 0,y_start,0,chain->depth,
1439                                 this_lines * stride_b,
1440                                 (const uint8_t *)myptr + (y_start * stride_b));
1441          xcb_discard_reply(chain->conn, cookie.sequence);
1442          y_start += this_lines;
1443          y_todo -= this_lines;
1444       }
1445    }
1446 
1447    xcb_flush(chain->conn);
1448 
1449    /* There is no queued present in this path.
1450     * Let the application acquire again immediately, but check the geometry reply first so
1451     * we can report SUBOPTIMAL on resize. */
1452    xcb_generic_error_t *err;
1453 
1454    xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
1455    VkResult result = VK_SUCCESS;
1456    if (geom) {
1457       if (chain->extent.width != geom->width ||
1458           chain->extent.height != geom->height)
1459          result = VK_SUBOPTIMAL_KHR;
1460    } else {
1461       result = VK_ERROR_SURFACE_LOST_KHR;
1462    }
1463    free(err);
1464    free(geom);
1465 
1466    wsi_queue_push(&chain->acquire_queue, image_index);
1467    return result;
1468 }
1469 
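/**
 * Trace-capture hotkey helper: when the instance runs in trace mode, poll the
 * server keymap and set device->trace_hotkey_trigger on a fresh F1 press
 * (edge-triggered via chain->base.capture_key_pressed).
 */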
1470 static void
1471 x11_capture_trace(struct x11_swapchain *chain)
1472 {
1473 #ifdef XCB_KEYSYMS_AVAILABLE
1474    VK_FROM_HANDLE(vk_device, device, chain->base.device);
1475    if (!device->physical->instance->trace_mode)
1476       return;
1477 
1478    xcb_query_keymap_cookie_t keys_cookie = xcb_query_keymap(chain->conn);
1479 
1480    xcb_generic_error_t *error = NULL;
1481    xcb_query_keymap_reply_t *keys = xcb_query_keymap_reply(chain->conn, keys_cookie, &error);
1482    if (error) {
1483       free(error);
1484       return;
1485    }
1486 
1487    xcb_key_symbols_t *key_symbols = xcb_key_symbols_alloc(chain->conn);
1488    xcb_keycode_t *keycodes = xcb_key_symbols_get_keycode(key_symbols, XK_F1);
1489    if (keycodes) {
1490       xcb_keycode_t keycode = keycodes[0];
1491       free(keycodes);
1492 
1493       simple_mtx_lock(&device->trace_mtx);
1494       bool capture_key_pressed = keys->keys[keycode / 8] & (1u << (keycode % 8));
1495       device->trace_hotkey_trigger = capture_key_pressed && (capture_key_pressed != chain->base.capture_key_pressed);
1496       chain->base.capture_key_pressed = capture_key_pressed;
1497       simple_mtx_unlock(&device->trace_mtx);
1498    }
1499 
1500    xcb_key_symbols_free(key_symbols);
1501    free(keys);
1502 #endif
1503 }
1504 
1505 /* A trivial helper that makes it obvious at the call site
1506  * when we intend to access chain->status outside the thread lock. */
1507 static VkResult x11_swapchain_read_status_atomic(struct x11_swapchain *chain)
1508 {
1509    return chain->status;
1510 }
1511 
1512 /**
1513  * Decides whether an early wait on buffer fences is required before buffer submission.
1514  * That is needed for mailbox mode: otherwise the latest image in the queue might not be fully rendered at
1515  * present time, which could lead to missing a frame. This is an Xorg issue.
1516  *
1517  * On Wayland compositors this used to be a problem as well, but is not anymore,
1518  * and this check assumes that Mesa is running on a reasonable compositor.
1519  * The wait behavior can be forced by setting the 'vk_xwayland_wait_ready' DRIConf option to true.
1520  * Some drivers (e.g. Venus) may still want to require wait_ready by default,
1521  * so the option is kept around for now.
1522  *
1523  * On Xwayland, we don't know at this point whether the tearing protocol is or can be used,
1524  * so we have to make the MAILBOX assumption for IMMEDIATE.
1525  */
1526 static bool
1527 x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
1528                           struct wsi_x11_connection *wsi_conn,
1529                           VkPresentModeKHR present_mode)
1530 {
1531    if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
1532       return false;
1533    }
1534 
1535    switch (present_mode) {
1536       case VK_PRESENT_MODE_MAILBOX_KHR:
1537          return true;
1538       case VK_PRESENT_MODE_IMMEDIATE_KHR:
1539          return wsi_conn->is_xwayland;
1540       default:
1541          return false;
1542    }
1543 }
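/* A reading aid for the above, not new policy:
 *
 *    present mode    | Xorg    | Xwayland + wait_ready | Xwayland, no wait_ready
 *    MAILBOX         | wait    | wait                  | no wait
 *    IMMEDIATE       | no wait | wait                  | no wait
 *    FIFO and others | no wait | no wait               | no wait
 */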
1544 
1545 /* This matches Wayland. */
1546 #define X11_SWAPCHAIN_MAILBOX_IMAGES 4
1547 
1548 static bool
1549 x11_requires_mailbox_image_count(const struct wsi_device *device,
1550                                  struct wsi_x11_connection *wsi_conn,
1551                                  VkPresentModeKHR present_mode)
1552 {
1553    /* If we're resorting to waiting for fences, we're assuming a MAILBOX-like model,
1554     * and we should allocate accordingly.
1555     *
1556     * One potential concern here is IMMEDIATE mode on Wayland.
1557     * This situation could arise:
1558     * - Fullscreen FLIP mode
1559     * - Compositor does not support tearing protocol (we cannot know this here)
1560     *
1561     * With 3 images, during the window between latch and flip, there is only one image left to the app,
1562     * so peak FPS may not be reached if the window between latch and flip is large,
1563     * but tests on contemporary compositors suggest this effect is minor.
1564     * Frame rate in the thousands can easily be reached.
1565     *
1566     * There are pragmatic reasons to expose 3 images for IMMEDIATE on Xwl:
1567     * - minImageCount is not intended as a tool to tune performance; its intent is to signal forward progress.
1568     *   Our X11 and WL implementations do so partly for pragmatic reasons, since sync acquire interacts poorly with 2 images.
1569     *   A jump from 3 to 4 is at best a minor improvement which only affects applications
1570     *   running at extremely high frame rates, way beyond the monitor refresh rate.
1571     *   On the other hand, lowering minImageCount to 2 would break the fundamental idea of MAILBOX
1572     *   (and IMMEDIATE without tear), since FPS > refresh rate would not be possible.
1573     *
1574     * - Several games developed for other platforms and other Linux WSI implementations
1575     *   do not expect that image counts arbitrarily change when changing present mode,
1576     *   and will crash when Mesa does so.
1577     *   There are several games using the strict_image_count drirc to work around this,
1578     *   and it would be good to be friendlier in the first place, so we don't have to work around more games.
1579     *   IMMEDIATE is a common presentation mode on those platforms, but MAILBOX is more Wayland-centric in nature,
1580     *   so increasing image count for that mode is more reasonable.
1581     *
1582     * - IMMEDIATE expects tearing, and when tearing, 3 images are more than enough.
1583     *
1584     * - With EXT_swapchain_maintenance1, toggling between FIFO / IMMEDIATE (used extensively by D3D layering)
1585     *   would require application to allocate >3 images which is unfortunate for memory usage,
1586     *   and potentially disastrous for latency unless KHR_present_wait is used.
1587     */
1588    return x11_needs_wait_for_fences(device, wsi_conn, present_mode) ||
1589           present_mode == VK_PRESENT_MODE_MAILBOX_KHR;
1590 }
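/* Rough illustration, assuming a base minimum of 3 images from
 * x11_get_min_image_count(): any mode for which the helper above returns true
 * ends up with MAX2(3, X11_SWAPCHAIN_MAILBOX_IMAGES) == 4 images in
 * x11_get_min_image_count_for_present_mode() further below. */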
1591 
1592 /**
1593  * Send image to the X server for presentation at target_msc.
1594  */
1595 static VkResult
1596 x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
1597                    uint64_t target_msc, VkPresentModeKHR present_mode)
1598 {
1599    x11_capture_trace(chain);
1600 
1601    VkResult result;
1602    if (chain->base.wsi->sw && !chain->has_mit_shm)
1603       result = x11_present_to_x11_sw(chain, image_index, target_msc);
1604    else
1605       result = x11_present_to_x11_dri3(chain, image_index, target_msc, present_mode);
1606 
1607    if (result < 0)
1608       x11_swapchain_notify_error(chain, result);
1609    else
1610       x11_notify_pending_present(chain, &chain->images[image_index]);
1611 
1612    return result;
1613 }
1614 
1615 static VkResult
1616 x11_release_images(struct wsi_swapchain *wsi_chain,
1617                    uint32_t count, const uint32_t *indices)
1618 {
1619    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1620    if (chain->status == VK_ERROR_SURFACE_LOST_KHR)
1621       return chain->status;
1622 
1623    for (uint32_t i = 0; i < count; i++) {
1624       uint32_t index = indices[i];
1625       assert(index < chain->base.image_count);
1626       wsi_queue_push(&chain->acquire_queue, index);
1627    }
1628 
1629    return VK_SUCCESS;
1630 }
1631 
1632 static void
1633 x11_set_present_mode(struct wsi_swapchain *wsi_chain,
1634                      VkPresentModeKHR mode)
1635 {
1636    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1637    chain->base.present_mode = mode;
1638 }
1639 
1640 /**
1641  * Acquire a ready-to-use image from the swapchain.
1642  *
1643  * This usually means that the image is not waiting on presentation and that it
1644  * has been released by the X server to be used again by the consumer.
1645  */
1646 static VkResult
1647 x11_acquire_next_image(struct wsi_swapchain *anv_chain,
1648                        const VkAcquireNextImageInfoKHR *info,
1649                        uint32_t *image_index)
1650 {
1651    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1652    uint64_t timeout = info->timeout;
1653 
1654    /* If the swapchain is in an error state, don't go any further. */
1655    VkResult result = x11_swapchain_read_status_atomic(chain);
1656    if (result < 0)
1657       return result;
1658 
1659    result = wsi_queue_pull(&chain->acquire_queue,
1660                            image_index, timeout);
1661 
1662    if (result == VK_TIMEOUT)
1663       return info->timeout ? VK_TIMEOUT : VK_NOT_READY;
1664 
1665    if (result < 0) {
1666       pthread_mutex_lock(&chain->thread_state_lock);
1667       result = x11_swapchain_result(chain, result);
1668       pthread_mutex_unlock(&chain->thread_state_lock);
1669    } else {
1670       result = x11_swapchain_read_status_atomic(chain);
1671    }
1672 
1673    if (result < 0)
1674       return result;
1675 
1676    assert(*image_index < chain->base.image_count);
1677    if (chain->images[*image_index].shm_fence)
1678       xshmfence_await(chain->images[*image_index].shm_fence);
1679 
1680    return result;
1681 }
1682 
1683 #define MAX_DAMAGE_RECTS 64
1684 
1685 /**
1686  * Queue a new presentation of an image that was previously acquired by the
1687  * consumer.
1688  *
1689  * Note that in immediate presentation mode this does not really queue the
1690  * presentation but directly asks the X server to show it.
1691  */
1692 static VkResult
1693 x11_queue_present(struct wsi_swapchain *anv_chain,
1694                   uint32_t image_index,
1695                   uint64_t present_id,
1696                   const VkPresentRegionKHR *damage)
1697 {
1698    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1699    xcb_xfixes_region_t update_area = 0;
1700 
1701    /* If the swapchain is in an error state, don't go any further. */
1702    VkResult status = x11_swapchain_read_status_atomic(chain);
1703    if (status < 0)
1704       return status;
1705 
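   /* If the app supplied a usable damage list, upload it into this image's
    * XFixes region so only that area is presented. Otherwise update_area stays
    * 0 (XCB_NONE), which Present treats as the whole window having changed;
    * damage lists larger than MAX_DAMAGE_RECTS take the same full-window
    * fallback. */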
1706    if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
1707       damage->rectangleCount <= MAX_DAMAGE_RECTS) {
1708       xcb_rectangle_t rects[MAX_DAMAGE_RECTS];
1709 
1710       update_area = chain->images[image_index].update_region;
1711       for (unsigned i = 0; i < damage->rectangleCount; i++) {
1712          const VkRectLayerKHR *rect = &damage->pRectangles[i];
1713          assert(rect->layer == 0);
1714          rects[i].x = rect->offset.x;
1715          rects[i].y = rect->offset.y;
1716          rects[i].width = rect->extent.width;
1717          rects[i].height = rect->extent.height;
1718       }
1719       xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
1720    }
1721    chain->images[image_index].update_area = update_area;
1722    chain->images[image_index].present_id = present_id;
1723    /* With EXT_swapchain_maintenance1, the present mode can change per present. */
1724    chain->images[image_index].present_mode = chain->base.present_mode;
1725 
1726    wsi_queue_push(&chain->present_queue, image_index);
1727    return x11_swapchain_read_status_atomic(chain);
1728 }
1729 
1730 /**
1731  * The number of images that are not owned by X11:
1732  *  (1) owned by the app,
1733  *  (2) waiting for the app to take ownership through an acquire, or
1734  *  (3) in the present queue waiting for the FIFO thread to present to X11.
1735  */
1736 static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
1737 {
1738    return chain->base.image_count - chain->sent_image_count;
1739 }
1740 
1741 /* This thread is responsible for pumping Present extension replies.
1742  * This is done in a separate thread from the X11 presentation thread
1743  * so that non-blocking modes like IMMEDIATE and MAILBOX can be supported.
1744  * Frame completion events can happen at any time, and we need to handle
1745  * them as soon as they come in to have a quality implementation.
1746  * The presentation thread may go to sleep waiting for new presentation requests to come in,
1747  * and it cannot wait for both X events and application requests at the same time.
1748  * If we only cared about FIFO, this thread wouldn't be very useful.
1749  * An earlier implementation of X11 WSI had a single FIFO thread that blocked on X events after presenting.
1750  * For IMMEDIATE and MAILBOX, the application thread pumped the event queue, which caused a lot of pain
1751  * when trying to deal with present wait.
1752  */
1753 static void *
1754 x11_manage_event_queue(void *state)
1755 {
1756    struct x11_swapchain *chain = state;
1757    u_thread_setname("WSI swapchain event");
1758 
1759    /* While there is an outstanding IDLE we should wait for it.
1760     * In FLIP modes at most one image will not be driver owned eventually.
1761     * In BLIT modes, we expect that all images will eventually be driver owned,
1762     * but we don't know which mode is being used. */
1763    unsigned forward_progress_guaranteed_acquired_images = chain->base.image_count - 1;
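   /* Worked example: with a 4-image swapchain this is 3. Whenever fewer than 3
    * images are driver-owned, X holds at least two of them, and since only the
    * on-screen image can be retained indefinitely in FLIP mode, an IDLE event
    * must eventually arrive and blocking on the special event queue below is
    * safe. */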
1764 
1765    pthread_mutex_lock(&chain->thread_state_lock);
1766 
1767    while (chain->status >= 0) {
1768       /* This thread should only go to sleep waiting for X events when we know there are pending events.
1769        * We expect COMPLETION events when there is at least one image marked as present_queued.
1770        * We also expect IDLE events, but we only consider waiting for them when all images are busy
1771        * and the application has fewer than N images acquired. */
1772 
1773       bool assume_forward_progress = false;
1774 
1775       for (uint32_t i = 0; i < chain->base.image_count; i++) {
1776          if (chain->images[i].present_queued_count != 0) {
1777             /* We must pump through a present wait and unblock FIFO thread if using FIFO mode. */
1778             assume_forward_progress = true;
1779             break;
1780          }
1781       }
1782 
1783       if (!assume_forward_progress) {
1784          /* If true, the application expects an acquire (IDLE) to happen in finite time. */
1785          assume_forward_progress = x11_driver_owned_images(chain) <
1786                                    forward_progress_guaranteed_acquired_images;
1787       }
1788 
1789       if (assume_forward_progress) {
1790          /* Only yield lock when blocking on X11 event. */
1791          pthread_mutex_unlock(&chain->thread_state_lock);
1792          xcb_generic_event_t *event =
1793                xcb_wait_for_special_event(chain->conn, chain->special_event);
1794          pthread_mutex_lock(&chain->thread_state_lock);
1795 
1796          /* Re-check status since we dropped the lock while waiting for X. */
1797          VkResult result = chain->status;
1798 
1799          if (result >= 0) {
1800             if (event) {
1801                /* The queue thread will be woken up if anything interesting happened in the handler.
1802                 * The queue thread blocks on:
1803                 * - Presentation events completing
1804                 * - Presentation requests from application
1805                 * - WaitForFence workaround if applicable */
1806                result = x11_handle_dri3_present_event(chain, (void *) event);
1807             } else {
1808                result = VK_ERROR_SURFACE_LOST_KHR;
1809             }
1810          }
1811 
1812          /* Updates chain->status and wakes up threads as necessary on error. */
1813          x11_swapchain_result(chain, result);
1814          free(event);
1815       } else {
1816          /* Nothing important to do, go to sleep until queue thread wakes us up. */
1817          pthread_cond_wait(&chain->thread_state_cond, &chain->thread_state_lock);
1818       }
1819    }
1820 
1821    pthread_mutex_unlock(&chain->thread_state_lock);
1822    return NULL;
1823 }
1824 
1825 /**
1826  * Presentation thread.
1827  *
1828  * Runs in a separate thread that blocks on and reacts to images queued on the
1829  * present queue.
1830  *
1831  * This must be a thread since we have to block in two cases:
1832  * - FIFO:
1833  *     We must wait for the previous presentation to complete
1834  *     in some way so we can compute the target MSC.
1835  * - WaitForFence workaround:
1836  *     In some cases, we need to wait for the image to finish rendering before submitting it to X.
1837  */
1838 static void *
1839 x11_manage_present_queue(void *state)
1840 {
1841    struct x11_swapchain *chain = state;
1842    struct wsi_x11_connection *wsi_conn =
1843          wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1844    VkResult result = VK_SUCCESS;
1845 
1846    u_thread_setname("WSI swapchain queue");
1847 
1848    uint64_t target_msc = 0;
1849 
1850    while (x11_swapchain_read_status_atomic(chain) >= 0) {
1851       uint32_t image_index = 0;
1852       {
1853          MESA_TRACE_SCOPE("pull present queue");
1854          result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
1855          assert(result != VK_TIMEOUT);
1856       }
1857 
1858       /* The status can change underneath us if the swapchain is destroyed
1859        * from another thread. */
1860       if (result >= 0)
1861          result = x11_swapchain_read_status_atomic(chain);
1862       if (result < 0)
1863          break;
1864 
1865       VkPresentModeKHR present_mode = chain->images[image_index].present_mode;
1866 
1867       if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
1868                                     present_mode)) {
1869          MESA_TRACE_SCOPE("wait fence");
1870          result = chain->base.wsi->WaitForFences(chain->base.device, 1,
1871                                                  &chain->base.fences[image_index],
1872                                                  true, UINT64_MAX);
1873          if (result != VK_SUCCESS) {
1874             result = VK_ERROR_OUT_OF_DATE_KHR;
1875             break;
1876          }
1877       }
1878 
1879       pthread_mutex_lock(&chain->thread_state_lock);
1880 
1881       /* In IMMEDIATE and MAILBOX modes, there is a risk that we have exhausted the presentation queue,
1882        * since IDLE could return multiple times before observing a COMPLETE. */
1883       while (chain->status >= 0 &&
1884              chain->images[image_index].present_queued_count ==
1885              ARRAY_SIZE(chain->images[image_index].pending_completions)) {
1886          pthread_cond_wait(&chain->thread_state_cond, &chain->thread_state_lock);
1887       }
1888 
1889       if (chain->status < 0) {
1890          pthread_mutex_unlock(&chain->thread_state_lock);
1891          break;
1892       }
1893 
1894       result = x11_present_to_x11(chain, image_index, target_msc, present_mode);
1895 
1896       if (result < 0) {
1897          pthread_mutex_unlock(&chain->thread_state_lock);
1898          break;
1899       }
1900 
1901       if (present_mode == VK_PRESENT_MODE_FIFO_KHR ||
1902           present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
1903          MESA_TRACE_SCOPE("wait present");
1904 
1905          while (chain->status >= 0 && chain->images[image_index].present_queued_count != 0) {
1906             /* In FIFO mode, we need to make sure we observe a COMPLETE before queueing up
1907              * another present. */
1908             pthread_cond_wait(&chain->thread_state_cond, &chain->thread_state_lock);
1909          }
1910 
1911          /* If the next present is not FIFO, we still need to ensure we don't override that
1912           * present. If it is FIFO, we need to ensure the target MSC is larger than that of the COMPLETED frame. */
1913          target_msc = chain->last_present_msc + 1;
1914       }
1915 
1916       pthread_mutex_unlock(&chain->thread_state_lock);
1917    }
1918 
1919    pthread_mutex_lock(&chain->thread_state_lock);
1920    x11_swapchain_result(chain, result);
1921    wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
1922    pthread_mutex_unlock(&chain->thread_state_lock);
1923 
1924    return NULL;
1925 }
1926 
1927 static uint8_t *
1928 alloc_shm(struct wsi_image *imagew, unsigned size)
1929 {
1930 #ifdef HAVE_SYS_SHM_H
1931    struct x11_image *image = (struct x11_image *)imagew;
1932    image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
1933    if (image->shmid < 0)
1934       return NULL;
1935 
1936    uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
1937    /* mark the segment immediately for deletion to avoid leaks */
1938    shmctl(image->shmid, IPC_RMID, 0);
1939 
1940    if (addr == (uint8_t *) -1)
1941       return NULL;
1942 
1943    image->shmaddr = addr;
1944    return addr;
1945 #else
1946    return NULL;
1947 #endif
1948 }
1949 
1950 static VkResult
1951 x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
1952                const VkSwapchainCreateInfoKHR *pCreateInfo,
1953                const VkAllocationCallbacks* pAllocator,
1954                struct x11_image *image)
1955 {
1956    xcb_void_cookie_t cookie;
1957    xcb_generic_error_t *error = NULL;
1958    VkResult result;
1959    uint32_t bpp = 32;
1960    int fence_fd;
1961 
1962    result = wsi_create_image(&chain->base, &chain->base.image_info,
1963                              &image->base);
1964    if (result != VK_SUCCESS)
1965       return result;
1966 
1967    image->update_region = xcb_generate_id(chain->conn);
1968    xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);
1969 
1970    if (chain->base.wsi->sw) {
1971       if (!chain->has_mit_shm) {
1972          return VK_SUCCESS;
1973       }
1974 
1975       image->shmseg = xcb_generate_id(chain->conn);
1976 
1977       xcb_shm_attach(chain->conn,
1978                      image->shmseg,
1979                      image->shmid,
1980                      0);
1981       image->pixmap = xcb_generate_id(chain->conn);
1982       cookie = xcb_shm_create_pixmap_checked(chain->conn,
1983                                              image->pixmap,
1984                                              chain->window,
1985                                              image->base.row_pitches[0] / 4,
1986                                              pCreateInfo->imageExtent.height,
1987                                              chain->depth,
1988                                              image->shmseg, 0);
1989       xcb_discard_reply(chain->conn, cookie.sequence);
1990       goto out_fence;
1991    }
1992    image->pixmap = xcb_generate_id(chain->conn);
1993 
1994 #ifdef HAVE_DRI3_MODIFIERS
1995    if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
1996       /* If the image has a modifier, we must have DRI3 v1.2. */
1997       assert(chain->has_dri3_modifiers);
1998 
1999       /* XCB requires one file descriptor per plane, but we only have a single dma-buf, so duplicate it */
2000       int fds[4] = { -1, -1, -1, -1 };
2001       for (int i = 0; i < image->base.num_planes; i++) {
2002          fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
2003          if (fds[i] == -1) {
2004             for (int j = 0; j < i; j++)
2005                close(fds[j]);
2006 
2007             return VK_ERROR_OUT_OF_HOST_MEMORY;
2008          }
2009       }
2010 
2011       cookie =
2012          xcb_dri3_pixmap_from_buffers_checked(chain->conn,
2013                                               image->pixmap,
2014                                               chain->window,
2015                                               image->base.num_planes,
2016                                               pCreateInfo->imageExtent.width,
2017                                               pCreateInfo->imageExtent.height,
2018                                               image->base.row_pitches[0],
2019                                               image->base.offsets[0],
2020                                               image->base.row_pitches[1],
2021                                               image->base.offsets[1],
2022                                               image->base.row_pitches[2],
2023                                               image->base.offsets[2],
2024                                               image->base.row_pitches[3],
2025                                               image->base.offsets[3],
2026                                               chain->depth, bpp,
2027                                               image->base.drm_modifier,
2028                                               fds);
2029    } else
2030 #endif
2031    {
2032       /* Without passing modifiers, we can't have multi-plane RGB images. */
2033       assert(image->base.num_planes == 1);
2034 
2035       /* XCB will take ownership of the FD we pass it. */
2036       int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
2037       if (fd == -1)
2038          return VK_ERROR_OUT_OF_HOST_MEMORY;
2039 
2040       cookie =
2041          xcb_dri3_pixmap_from_buffer_checked(chain->conn,
2042                                              image->pixmap,
2043                                              chain->window,
2044                                              image->base.sizes[0],
2045                                              pCreateInfo->imageExtent.width,
2046                                              pCreateInfo->imageExtent.height,
2047                                              image->base.row_pitches[0],
2048                                              chain->depth, bpp, fd);
2049    }
2050 
2051    error = xcb_request_check(chain->conn, cookie);
2052    if (error != NULL) {
2053       free(error);
2054       goto fail_image;
2055    }
2056 
2057 out_fence:
2058    fence_fd = xshmfence_alloc_shm();
2059    if (fence_fd < 0)
2060       goto fail_pixmap;
2061 
2062    image->shm_fence = xshmfence_map_shm(fence_fd);
2063    if (image->shm_fence == NULL)
2064       goto fail_shmfence_alloc;
2065 
2066    image->sync_fence = xcb_generate_id(chain->conn);
2067    xcb_dri3_fence_from_fd(chain->conn,
2068                           image->pixmap,
2069                           image->sync_fence,
2070                           false,
2071                           fence_fd);
2072 
2073    xshmfence_trigger(image->shm_fence);
2074 
2075    return VK_SUCCESS;
2076 
2077 fail_shmfence_alloc:
2078    close(fence_fd);
2079 
2080 fail_pixmap:
2081    cookie = xcb_free_pixmap(chain->conn, image->pixmap);
2082    xcb_discard_reply(chain->conn, cookie.sequence);
2083 
2084 fail_image:
2085    wsi_destroy_image(&chain->base, &image->base);
2086 
2087    return VK_ERROR_INITIALIZATION_FAILED;
2088 }
2089 
2090 static void
2091 x11_image_finish(struct x11_swapchain *chain,
2092                  const VkAllocationCallbacks* pAllocator,
2093                  struct x11_image *image)
2094 {
2095    xcb_void_cookie_t cookie;
2096 
2097    if (!chain->base.wsi->sw || chain->has_mit_shm) {
2098       cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
2099       xcb_discard_reply(chain->conn, cookie.sequence);
2100       xshmfence_unmap_shm(image->shm_fence);
2101 
2102       cookie = xcb_free_pixmap(chain->conn, image->pixmap);
2103       xcb_discard_reply(chain->conn, cookie.sequence);
2104 
2105       cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
2106       xcb_discard_reply(chain->conn, cookie.sequence);
2107    }
2108 
2109    wsi_destroy_image(&chain->base, &image->base);
2110 #ifdef HAVE_SYS_SHM_H
2111    if (image->shmaddr)
2112       shmdt(image->shmaddr);
2113 #endif
2114 }
2115 
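/**
 * Hash the advertised DRM format modifier lists for this swapchain's window.
 * The hash is recorded at swapchain creation and recomputed in
 * wsi_x11_swapchain_query_dri3_modifiers_changed(), so that a SUBOPTIMAL_COPY
 * completion only surfaces VK_SUBOPTIMAL_KHR when the modifiers really changed.
 */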
2116 static void
2117 wsi_x11_recompute_dri3_modifier_hash(blake3_hash *hash, const struct wsi_drm_image_params *params)
2118 {
2119    mesa_blake3 ctx;
2120    _mesa_blake3_init(&ctx);
2121    _mesa_blake3_update(&ctx, &params->num_modifier_lists, sizeof(params->num_modifier_lists));
2122    for (uint32_t i = 0; i < params->num_modifier_lists; i++) {
2123       _mesa_blake3_update(&ctx, &i, sizeof(i));
2124       _mesa_blake3_update(&ctx, params->modifiers[i],
2125                           params->num_modifiers[i] * sizeof(*params->modifiers[i]));
2126    }
2127    _mesa_blake3_update(&ctx, &params->same_gpu, sizeof(params->same_gpu));
2128    _mesa_blake3_final(&ctx, *hash);
2129 }
2130 
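/**
 * Query the DRI3 modifiers supported for the given window and its screen.
 * Fills in up to two tranches (window modifiers first, then screen modifiers),
 * each allocated with pAllocator; *num_tranches_in is left at 0 when modifiers
 * are unsupported or the query yields nothing.
 */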
2131 static void
2132 wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
2133                            xcb_connection_t *conn, xcb_window_t window,
2134                            uint8_t depth, uint8_t bpp,
2135                            uint64_t **modifiers_in, uint32_t *num_modifiers_in,
2136                            uint32_t *num_tranches_in,
2137                            const VkAllocationCallbacks *pAllocator)
2138 {
2139    if (!wsi_conn->has_dri3_modifiers)
2140       goto out;
2141 
2142 #ifdef HAVE_DRI3_MODIFIERS
2143    xcb_generic_error_t *error = NULL;
2144    xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
2145       xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
2146    xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
2147       xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
2148    free(error);
2149 
2150    if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
2151                       mod_reply->num_screen_modifiers == 0)) {
2152       free(mod_reply);
2153       goto out;
2154    }
2155 
2156    uint32_t n = 0;
2157    uint32_t counts[2];
2158    uint64_t *modifiers[2];
2159 
2160    if (mod_reply->num_window_modifiers) {
2161       counts[n] = mod_reply->num_window_modifiers;
2162       modifiers[n] = vk_alloc(pAllocator,
2163                               counts[n] * sizeof(uint64_t),
2164                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2165       if (!modifiers[n]) {
2166          free(mod_reply);
2167          goto out;
2168       }
2169 
2170       memcpy(modifiers[n],
2171              xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
2172              counts[n] * sizeof(uint64_t));
2173       n++;
2174    }
2175 
2176    if (mod_reply->num_screen_modifiers) {
2177       counts[n] = mod_reply->num_screen_modifiers;
2178       modifiers[n] = vk_alloc(pAllocator,
2179                               counts[n] * sizeof(uint64_t),
2180                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2181       if (!modifiers[n]) {
2182          if (n > 0)
2183             vk_free(pAllocator, modifiers[0]);
2184          free(mod_reply);
2185          goto out;
2186       }
2187 
2188       memcpy(modifiers[n],
2189              xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
2190              counts[n] * sizeof(uint64_t));
2191       n++;
2192    }
2193 
2194    for (int i = 0; i < n; i++) {
2195       modifiers_in[i] = modifiers[i];
2196       num_modifiers_in[i] = counts[i];
2197    }
2198    *num_tranches_in = n;
2199 
2200    free(mod_reply);
2201    return;
2202 #endif
2203 out:
2204    *num_tranches_in = 0;
2205 }
2206 
2207 static bool
2208 wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain)
2209 {
2210    const struct wsi_device *wsi_device = chain->base.wsi;
2211 
2212    if (wsi_device->sw || !wsi_device->supports_modifiers)
2213       return false;
2214 
2215    struct wsi_drm_image_params drm_image_params;
2216    uint64_t *modifiers[2] = {NULL, NULL};
2217    uint32_t num_modifiers[2] = {0, 0};
2218 
2219    struct wsi_x11_connection *wsi_conn =
2220          wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
2221 
2222    xcb_get_geometry_reply_t *geometry =
2223          xcb_get_geometry_reply(chain->conn, xcb_get_geometry(chain->conn, chain->window), NULL);
2224    if (geometry == NULL)
2225       return false;
2226    uint32_t bit_depth = geometry->depth;
2227    free(geometry);
2228 
2229    drm_image_params = (struct wsi_drm_image_params){
2230       .base.image_type = WSI_IMAGE_TYPE_DRM,
2231       .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, chain->conn),
2232    };
2233 
2234    wsi_x11_get_dri3_modifiers(wsi_conn, chain->conn, chain->window, bit_depth, 32,
2235                               modifiers, num_modifiers,
2236                               &drm_image_params.num_modifier_lists,
2237                               &wsi_device->instance_alloc);
2238 
2239    drm_image_params.num_modifiers = num_modifiers;
2240    drm_image_params.modifiers = (const uint64_t **)modifiers;
2241 
2242    blake3_hash hash;
2243    wsi_x11_recompute_dri3_modifier_hash(&hash, &drm_image_params);
2244 
2245    for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2246       vk_free(&wsi_device->instance_alloc, modifiers[i]);
2247 
2248    return memcmp(hash, chain->dri3_modifier_hash, sizeof(hash)) != 0;
2249 }
2250 
2251 static VkResult
2252 x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
2253                       const VkAllocationCallbacks *pAllocator)
2254 {
2255    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
2256    xcb_void_cookie_t cookie;
2257 
2258    pthread_mutex_lock(&chain->thread_state_lock);
2259    chain->status = VK_ERROR_OUT_OF_DATE_KHR;
2260    pthread_cond_broadcast(&chain->thread_state_cond);
2261    pthread_mutex_unlock(&chain->thread_state_lock);
2262 
2263    /* Push a UINT32_MAX to wake up the manager */
2264    wsi_queue_push(&chain->present_queue, UINT32_MAX);
2265    pthread_join(chain->queue_manager, NULL);
2266    pthread_join(chain->event_manager, NULL);
2267 
2268    wsi_queue_destroy(&chain->acquire_queue);
2269    wsi_queue_destroy(&chain->present_queue);
2270 
2271    for (uint32_t i = 0; i < chain->base.image_count; i++)
2272       x11_image_finish(chain, pAllocator, &chain->images[i]);
2273 
2274    xcb_unregister_for_special_event(chain->conn, chain->special_event);
2275    cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
2276                                              chain->window,
2277                                              XCB_PRESENT_EVENT_MASK_NO_EVENT);
2278    xcb_discard_reply(chain->conn, cookie.sequence);
2279 
2280    pthread_mutex_destroy(&chain->present_progress_mutex);
2281    pthread_cond_destroy(&chain->present_progress_cond);
2282    pthread_mutex_destroy(&chain->thread_state_lock);
2283    pthread_cond_destroy(&chain->thread_state_cond);
2284 
2285    wsi_swapchain_finish(&chain->base);
2286 
2287    vk_free(pAllocator, chain);
2288 
2289    return VK_SUCCESS;
2290 }
2291 
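/**
 * Toggle the _VARIABLE_REFRESH property on the swapchain's window. Xorg
 * drivers that implement adaptive sync (e.g. the amdgpu DDX) read this
 * property to decide whether variable refresh may be engaged for the window.
 */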
2292 static void
2293 wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
2294                                    xcb_drawable_t drawable,
2295                                    uint32_t state)
2296 {
2297    static char const name[] = "_VARIABLE_REFRESH";
2298    xcb_intern_atom_cookie_t cookie;
2299    xcb_intern_atom_reply_t* reply;
2300    xcb_void_cookie_t check;
2301 
2302    cookie = xcb_intern_atom(conn, 0, strlen(name), name);
2303    reply = xcb_intern_atom_reply(conn, cookie, NULL);
2304    if (reply == NULL)
2305       return;
2306 
2307    if (state)
2308       check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
2309                                           drawable, reply->atom,
2310                                           XCB_ATOM_CARDINAL, 32, 1, &state);
2311    else
2312       check = xcb_delete_property_checked(conn, drawable, reply->atom);
2313 
2314    xcb_discard_reply(conn, check.sequence);
2315    free(reply);
2316 }
2317 
2318 static VkResult x11_wait_for_present(struct wsi_swapchain *wsi_chain,
2319                                      uint64_t waitValue,
2320                                      uint64_t timeout)
2321 {
2322    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
2323    struct timespec abs_timespec;
2324    uint64_t abs_timeout = 0;
2325    if (timeout != 0)
2326       abs_timeout = os_time_get_absolute_timeout(timeout);
2327 
2328    /* Need to observe that the swapchain semaphore has been unsignalled,
2329     * as this is guaranteed when a present is complete. */
2330    VkResult result = wsi_swapchain_wait_for_present_semaphore(
2331          &chain->base, waitValue, timeout);
2332    if (result != VK_SUCCESS)
2333       return result;
2334 
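   /* Second stage: chain->present_id is advanced as COMPLETE notifications are
    * processed, so this wait resolves once the requested presentation ID has
    * completed (or the swapchain recorded an error in present_progress_error). */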
2335    timespec_from_nsec(&abs_timespec, abs_timeout);
2336 
2337    pthread_mutex_lock(&chain->present_progress_mutex);
2338    while (chain->present_id < waitValue) {
2339       int ret = pthread_cond_timedwait(&chain->present_progress_cond,
2340                                        &chain->present_progress_mutex,
2341                                        &abs_timespec);
2342       if (ret == ETIMEDOUT) {
2343          result = VK_TIMEOUT;
2344          break;
2345       }
2346       if (ret) {
2347          result = VK_ERROR_DEVICE_LOST;
2348          break;
2349       }
2350    }
2351    if (result == VK_SUCCESS && chain->present_progress_error)
2352       result = chain->present_progress_error;
2353    pthread_mutex_unlock(&chain->present_progress_mutex);
2354    return result;
2355 }
2356 
2357 static unsigned
2358 x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
2359                                          struct wsi_x11_connection *wsi_conn,
2360                                          VkPresentModeKHR present_mode)
2361 {
2362    uint32_t min_image_count = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
2363    if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode))
2364       return MAX2(min_image_count, X11_SWAPCHAIN_MAILBOX_IMAGES);
2365    else
2366       return min_image_count;
2367 }
2368 
2369 /**
2370  * Create the swapchain.
2371  *
2372  * Supports immediate, fifo and mailbox presentation mode.
2373  *
2374  */
2375 static VkResult
2376 x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
2377                              VkDevice device,
2378                              struct wsi_device *wsi_device,
2379                              const VkSwapchainCreateInfoKHR *pCreateInfo,
2380                              const VkAllocationCallbacks* pAllocator,
2381                              struct wsi_swapchain **swapchain_out)
2382 {
2383    struct x11_swapchain *chain;
2384    xcb_void_cookie_t cookie;
2385    VkResult result;
2386    VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
2387 
2388    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
2389 
2390    /* Get xcb connection from the icd_surface and from that our internal struct
2391     * representing it.
2392     */
2393    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
2394    struct wsi_x11_connection *wsi_conn =
2395       wsi_x11_get_connection(wsi_device, conn);
2396    if (!wsi_conn)
2397       return VK_ERROR_OUT_OF_HOST_MEMORY;
2398 
2399    /* Get number of images in our swapchain. This count depends on:
2400     * - requested minimal image count
2401     * - device characteristics
2402     * - presentation mode.
2403     */
2404    unsigned num_images = pCreateInfo->minImageCount;
2405    if (!wsi_device->x11.strict_imageCount) {
2406       if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode) ||
2407           wsi_device->x11.ensure_minImageCount) {
2408          unsigned present_mode_images = x11_get_min_image_count_for_present_mode(
2409                wsi_device, wsi_conn, pCreateInfo->presentMode);
2410          num_images = MAX2(num_images, present_mode_images);
2411       }
2412    }
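   /* Example of the effect: an app requesting minImageCount == 2 with MAILBOX
    * ends up with at least X11_SWAPCHAIN_MAILBOX_IMAGES (4) images here, unless
    * the vk_x11_strict_image_count drirc option pins the count to the request. */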
2413 
2414    /* Check that we have a window up-front. It is an error to not have one. */
2415    xcb_window_t window = x11_surface_get_window(icd_surface);
2416 
2417    /* Get the geometry of that window. The swapchain's bit depth is taken from it, and the
2418     * chain's image extents should match it to allow performance-optimizing flips.
2419     */
2420    xcb_get_geometry_reply_t *geometry =
2421       xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
2422    if (geometry == NULL)
2423       return VK_ERROR_SURFACE_LOST_KHR;
2424    const uint32_t bit_depth = geometry->depth;
2425    const uint16_t cur_width = geometry->width;
2426    const uint16_t cur_height = geometry->height;
2427    free(geometry);
2428 
2429    /* Allocate the actual swapchain. The size depends on image count. */
2430    size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
2431    chain = vk_zalloc(pAllocator, size, 8,
2432                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2433    if (chain == NULL)
2434       return VK_ERROR_OUT_OF_HOST_MEMORY;
2435 
2436    int ret = pthread_mutex_init(&chain->present_progress_mutex, NULL);
2437    if (ret != 0) {
2438       vk_free(pAllocator, chain);
2439       return VK_ERROR_OUT_OF_HOST_MEMORY;
2440    }
2441 
2442    ret = pthread_mutex_init(&chain->thread_state_lock, NULL);
2443    if (ret != 0) {
2444       pthread_mutex_destroy(&chain->present_progress_mutex);
2445       vk_free(pAllocator, chain);
2446       return VK_ERROR_OUT_OF_HOST_MEMORY;
2447    }
2448 
2449    ret = pthread_cond_init(&chain->thread_state_cond, NULL);
2450    if (ret != 0) {
2451       pthread_mutex_destroy(&chain->present_progress_mutex);
2452       pthread_mutex_destroy(&chain->thread_state_lock);
2453       vk_free(pAllocator, chain);
2454       return VK_ERROR_OUT_OF_HOST_MEMORY;
2455    }
2456 
2457    bool bret = wsi_init_pthread_cond_monotonic(&chain->present_progress_cond);
2458    if (!bret) {
2459       pthread_mutex_destroy(&chain->present_progress_mutex);
2460       pthread_mutex_destroy(&chain->thread_state_lock);
2461       pthread_cond_destroy(&chain->thread_state_cond);
2462       vk_free(pAllocator, chain);
2463       return VK_ERROR_OUT_OF_HOST_MEMORY;
2464    }
2465 
2466    struct wsi_base_image_params *image_params = NULL;
2467    struct wsi_cpu_image_params cpu_image_params;
2468    struct wsi_drm_image_params drm_image_params;
2469    uint64_t *modifiers[2] = {NULL, NULL};
2470    uint32_t num_modifiers[2] = {0, 0};
2471    if (wsi_device->sw) {
2472       cpu_image_params = (struct wsi_cpu_image_params) {
2473          .base.image_type = WSI_IMAGE_TYPE_CPU,
2474          .alloc_shm = wsi_conn->has_mit_shm ? &alloc_shm : NULL,
2475       };
2476       image_params = &cpu_image_params.base;
2477    } else {
2478       drm_image_params = (struct wsi_drm_image_params) {
2479          .base.image_type = WSI_IMAGE_TYPE_DRM,
2480          .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, conn),
2481       };
2482       if (wsi_device->supports_modifiers) {
2483          wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, bit_depth, 32,
2484                                     modifiers, num_modifiers,
2485                                     &drm_image_params.num_modifier_lists,
2486                                     pAllocator);
2487          drm_image_params.num_modifiers = num_modifiers;
2488          drm_image_params.modifiers = (const uint64_t **)modifiers;
2489 
2490          wsi_x11_recompute_dri3_modifier_hash(&chain->dri3_modifier_hash, &drm_image_params);
2491       }
2492       image_params = &drm_image_params.base;
2493    }
2494 
2495    result = wsi_swapchain_init(wsi_device, &chain->base, device, pCreateInfo,
2496                                image_params, pAllocator);
2497 
2498    for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2499       vk_free(pAllocator, modifiers[i]);
2500 
2501    if (result != VK_SUCCESS)
2502       goto fail_alloc;
2503 
2504    chain->base.destroy = x11_swapchain_destroy;
2505    chain->base.get_wsi_image = x11_get_wsi_image;
2506    chain->base.acquire_next_image = x11_acquire_next_image;
2507    chain->base.queue_present = x11_queue_present;
2508    chain->base.wait_for_present = x11_wait_for_present;
2509    chain->base.release_images = x11_release_images;
2510    chain->base.set_present_mode = x11_set_present_mode;
2511    chain->base.present_mode = present_mode;
2512    chain->base.image_count = num_images;
2513    chain->conn = conn;
2514    chain->window = window;
2515    chain->depth = bit_depth;
2516    chain->extent = pCreateInfo->imageExtent;
2517    chain->send_sbc = 0;
2518    chain->sent_image_count = 0;
2519    chain->last_present_msc = 0;
2520    chain->status = VK_SUCCESS;
2521    chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
2522    chain->has_mit_shm = wsi_conn->has_mit_shm;
2523 
2524    xcb_present_query_capabilities_cookie_t present_query_cookie;
2525    xcb_present_query_capabilities_reply_t *present_query_reply;
2526    present_query_cookie = xcb_present_query_capabilities(conn, chain->window);
2527    present_query_reply = xcb_present_query_capabilities_reply(conn, present_query_cookie, NULL);
2528    if (present_query_reply) {
2529       chain->has_async_may_tear = present_query_reply->capabilities & XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR;
2530       free(present_query_reply);
2531    }
2532 
2533    /* When images in the swapchain don't fit the window, X can still present them, but it won't
2534     * happen by flip, only by copy. So this is a suboptimal copy: if the client changed the
2535     * chain extents, X might be able to flip again.
2536     */
2537    if (!wsi_device->x11.ignore_suboptimal) {
2538       if (chain->extent.width != cur_width || chain->extent.height != cur_height)
2539          chain->status = VK_SUBOPTIMAL_KHR;
2540    }
2541 
2542    /* On a new swapchain this helper variable is set to false. It only has an effect once we have
2543     * done at least one flip and then gone back to copying afterwards. It is presumed
2544     * that in this case there is a high likelihood X could do flips again if the client reallocates a
2545     * new swapchain.
2546     *
2547     * Note that we used to inherit this property from 'pCreateInfo->oldSwapchain'. But when it
2548     * was true, and when the next present was completed with copying, we would return
2549     * VK_SUBOPTIMAL_KHR and hint the app to reallocate again for no good reason. If all following
2550     * presents on the surface were completed with copying because of some surface state change, we
2551     * would always return VK_SUBOPTIMAL_KHR no matter how many times the app had reallocated.
2552     *
2553     * Note also that it is questionable in general whether that mechanism is really useful. It is not
2554     * clear why, on a change from flipping to copying, we can assume a reallocation has a high chance
2555     * of making flips work again per se. In other words, it is not clear why there is a need for
2556     * another way to inform clients about suboptimal copies besides forwarding the
2557     * 'PresentOptionSuboptimal' complete mode.
2558     */
2559    chain->copy_is_suboptimal = false;
2560 
2561    /* For our swapchain we need to listen to the following Present extension events:
2562     * - Configure: Window dimensions changed. Images in the swapchain might need
2563     *              to be reallocated.
2564     * - Complete: An image from our swapchain was presented on the output.
2565     * - Idle: An image from our swapchain is not anymore accessed by the X
2566     *         server and can be reused.
2567     */
2568    chain->event_id = xcb_generate_id(chain->conn);
2569    xcb_present_select_input(chain->conn, chain->event_id, chain->window,
2570                             XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
2571                             XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
2572                             XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
2573 
2574    /* Create an XCB special event queue to hold Present events outside of the usual
2575     * application event queue.
2576     */
2577    chain->special_event =
2578       xcb_register_for_special_xge(chain->conn, &xcb_present_id,
2579                                    chain->event_id, NULL);
2580 
2581    /* Create the graphics context. */
2582    chain->gc = xcb_generate_id(chain->conn);
2583    if (!chain->gc) {
2584       /* FINISHME: Choose a better error. */
2585       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2586       goto fail_register;
2587    }
2588 
2589    cookie = xcb_create_gc(chain->conn,
2590                           chain->gc,
2591                           chain->window,
2592                           XCB_GC_GRAPHICS_EXPOSURES,
2593                           (uint32_t []) { 0 });
2594    xcb_discard_reply(chain->conn, cookie.sequence);
2595 
2596    uint32_t image = 0;
2597    for (; image < chain->base.image_count; image++) {
2598       result = x11_image_init(device, chain, pCreateInfo, pAllocator,
2599                               &chain->images[image]);
2600       if (result != VK_SUCCESS)
2601          goto fail_init_images;
2602    }
2603 
2604    /* The queues have a length of base.image_count + 1 because we will
2605     * occasionally use UINT32_MAX to signal the other thread that an error
2606     * has occurred and we don't want an overflow.
2607     */
2608    ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
2609    if (ret) {
2610       goto fail_init_images;
2611    }
2612 
2613    ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
2614    if (ret) {
2615       wsi_queue_destroy(&chain->present_queue);
2616       goto fail_init_images;
2617    }
2618 
2619    for (unsigned i = 0; i < chain->base.image_count; i++)
2620       wsi_queue_push(&chain->acquire_queue, i);
2621 
2622    ret = pthread_create(&chain->queue_manager, NULL,
2623                         x11_manage_present_queue, chain);
2624    if (ret)
2625       goto fail_init_fifo_queue;
2626 
2627    ret = pthread_create(&chain->event_manager, NULL,
2628                         x11_manage_event_queue, chain);
2629    if (ret)
2630       goto fail_init_event_queue;
2631 
2632    /* It is safe to set it here as only one swapchain can be associated with
2633     * the window, and swapchain creation does the association. At this point
2634     * we know the creation is going to succeed. */
2635    wsi_x11_set_adaptive_sync_property(conn, window,
2636                                       wsi_device->enable_adaptive_sync);
2637 
2638    *swapchain_out = &chain->base;
2639 
2640    return VK_SUCCESS;
2641 
2642 fail_init_event_queue:
2643    /* Push a UINT32_MAX to wake up the manager */
2644    wsi_queue_push(&chain->present_queue, UINT32_MAX);
2645    pthread_join(chain->queue_manager, NULL);
2646 
2647 fail_init_fifo_queue:
2648    wsi_queue_destroy(&chain->present_queue);
2649    wsi_queue_destroy(&chain->acquire_queue);
2650 
2651 fail_init_images:
2652    for (uint32_t j = 0; j < image; j++)
2653       x11_image_finish(chain, pAllocator, &chain->images[j]);
2654 
2655 fail_register:
2656    xcb_unregister_for_special_event(chain->conn, chain->special_event);
2657 
2658    wsi_swapchain_finish(&chain->base);
2659 
2660 fail_alloc:
2661    vk_free(pAllocator, chain);
2662 
2663    return result;
2664 }
2665 
2666 VkResult
2667 wsi_x11_init_wsi(struct wsi_device *wsi_device,
2668                  const VkAllocationCallbacks *alloc,
2669                  const struct driOptionCache *dri_options)
2670 {
2671    struct wsi_x11 *wsi;
2672    VkResult result;
2673 
2674    wsi = vk_alloc(alloc, sizeof(*wsi), 8,
2675                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2676    if (!wsi) {
2677       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2678       goto fail;
2679    }
2680 
2681    int ret = pthread_mutex_init(&wsi->mutex, NULL);
2682    if (ret != 0) {
2683       if (ret == ENOMEM) {
2684          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2685       } else {
2686          /* FINISHME: Choose a better error. */
2687          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2688       }
2689 
2690       goto fail_alloc;
2691    }
2692 
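   /* Per-connection state is cached in this pointer-keyed table (guarded by
    * wsi->mutex) so that the X server's capabilities only have to be probed
    * once per xcb_connection_t.
    */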
2693    wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2694                                               _mesa_key_pointer_equal);
2695    if (!wsi->connections) {
2696       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2697       goto fail_mutex;
2698    }
2699 
2700    if (dri_options) {
2701       if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
2702          wsi_device->x11.override_minImageCount =
2703             driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
2704       }
2705       if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
2706          wsi_device->x11.strict_imageCount =
2707             driQueryOptionb(dri_options, "vk_x11_strict_image_count");
2708       }
2709       if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
2710          wsi_device->x11.ensure_minImageCount =
2711             driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
2712       }
2713       wsi_device->x11.xwaylandWaitReady = true;
2714       if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
2715          wsi_device->x11.xwaylandWaitReady =
2716             driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
2717       }
2718 
2719       if (driCheckOption(dri_options, "vk_x11_ignore_suboptimal", DRI_BOOL)) {
2720          wsi_device->x11.ignore_suboptimal =
2721             driQueryOptionb(dri_options, "vk_x11_ignore_suboptimal");
2722       }
2723    }
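   /* For reference, a minimal drirc fragment exercising these options might
    * look like the following (the application name/executable are only
    * examples):
    *
    *    <driconf>
    *      <device>
    *        <application name="example" executable="example">
    *          <option name="vk_x11_override_min_image_count" value="4" />
    *          <option name="vk_x11_strict_image_count" value="true" />
    *          <option name="vk_xwayland_wait_ready" value="false" />
    *        </application>
    *      </device>
    *    </driconf>
    */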
2724 
2725    wsi->base.get_support = x11_surface_get_support;
2726    wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
2727    wsi->base.get_formats = x11_surface_get_formats;
2728    wsi->base.get_formats2 = x11_surface_get_formats2;
2729    wsi->base.get_present_modes = x11_surface_get_present_modes;
2730    wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
2731    wsi->base.create_swapchain = x11_surface_create_swapchain;
2732 
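   /* The same implementation backs both surface types below; Xlib surfaces
    * are expected to be bridged to XCB (via XGetXCBConnection) before any of
    * these entry points touch the connection.
    */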
2733    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
2734    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;
2735 
2736    return VK_SUCCESS;
2737 
2738 fail_mutex:
2739    pthread_mutex_destroy(&wsi->mutex);
2740 fail_alloc:
2741    vk_free(alloc, wsi);
2742 fail:
2743    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
2744    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;
2745 
2746    return result;
2747 }
2748 
2749 void
2750 wsi_x11_finish_wsi(struct wsi_device *wsi_device,
2751                    const VkAllocationCallbacks *alloc)
2752 {
2753    struct wsi_x11 *wsi =
2754       (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];
2755 
2756    if (wsi) {
2757       hash_table_foreach(wsi->connections, entry)
2758          wsi_x11_connection_destroy(wsi_device, entry->data);
2759 
2760       _mesa_hash_table_destroy(wsi->connections, NULL);
2761 
2762       pthread_mutex_destroy(&wsi->mutex);
2763 
2764       vk_free(alloc, wsi);
2765    }
2766 }
2767