1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
26 #define XK_MISCELLANY
27 #define XK_LATIN1
28 #include <X11/keysymdef.h>
29 #include <xcb/xcb.h>
30 #ifdef XCB_KEYSYMS_AVAILABLE
31 #include <xcb/xcb_keysyms.h>
32 #endif
33 #include <xcb/dri3.h>
34 #include <xcb/present.h>
35 #include <xcb/shm.h>
36 
37 #include "util/macros.h"
38 #include <stdatomic.h>
39 #include <stdlib.h>
40 #include <stdio.h>
41 #include <unistd.h>
42 #include <errno.h>
43 #include <string.h>
44 #include <fcntl.h>
45 #include "drm-uapi/drm_fourcc.h"
46 #include "util/libdrm.h"
47 #include "util/cnd_monotonic.h"
48 #include "util/hash_table.h"
49 #include "util/mesa-blake3.h"
50 #include "util/os_file.h"
51 #include "util/os_time.h"
52 #include "util/u_debug.h"
53 #include "util/u_thread.h"
54 #include "util/xmlconfig.h"
55 #include "util/timespec.h"
56 
57 #include "vk_format.h"
58 #include "vk_instance.h"
59 #include "vk_physical_device.h"
60 #include "vk_device.h"
61 #include "vk_util.h"
62 #include "vk_enum_to_str.h"
63 #include "wsi_common_entrypoints.h"
64 #include "wsi_common_private.h"
65 #include "wsi_common_queue.h"
66 
67 #ifdef HAVE_SYS_SHM_H
68 #include <sys/ipc.h>
69 #include <sys/shm.h>
70 #endif
71 
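/* Fallback definitions for xcb headers that do not yet expose the Present
 * async-may-tear option and capability. */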
72 #ifndef XCB_PRESENT_OPTION_ASYNC_MAY_TEAR
73 #define XCB_PRESENT_OPTION_ASYNC_MAY_TEAR 16
74 #endif
75 #ifndef XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR
76 #define XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR 8
77 #endif
78 
79 #define MAX_DAMAGE_RECTS 64
80 
81 struct wsi_x11_connection {
82    bool has_dri3;
83    bool has_dri3_modifiers;
84    bool has_dri3_explicit_sync;
85    bool has_present;
86    bool is_proprietary_x11;
87    bool is_xwayland;
88    bool has_mit_shm;
89    bool has_xfixes;
90 };
91 
92 struct wsi_x11 {
93    struct wsi_interface base;
94 
95    mtx_t mutex;
96    /* Hash table of xcb_connection -> wsi_x11_connection mappings */
97    struct hash_table *connections;
98 };
99 
100 struct wsi_x11_vk_surface {
101    union {
102       VkIcdSurfaceXlib xlib;
103       VkIcdSurfaceXcb xcb;
104    };
105    bool has_alpha;
106 };
107 #ifdef HAVE_X11_DRM
108 /**
109  * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
110  */
111 static int
112 wsi_dri3_open(xcb_connection_t *conn,
113 	      xcb_window_t root,
114 	      uint32_t provider)
115 {
116    xcb_dri3_open_cookie_t       cookie;
117    xcb_dri3_open_reply_t        *reply;
118    int                          fd;
119 
120    cookie = xcb_dri3_open(conn,
121                           root,
122                           provider);
123 
124    reply = xcb_dri3_open_reply(conn, cookie, NULL);
125    if (!reply)
126       return -1;
127 
128    /* According to the DRI3 extension, nfd must equal one. */
129    if (reply->nfd != 1) {
130       free(reply);
131       return -1;
132    }
133 
134    fd = xcb_dri3_open_reply_fds(conn, reply)[0];
135    free(reply);
136    fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
137 
138    return fd;
139 }
140 /**
141  * Checks compatibility of the device wsi_dev with the device the X server
142  * provides via DRI3.
143  *
144  * This returns true when no device could be retrieved from the X server or when
145  * the information for the X server device indicates that it is the same device.
146  */
147 static bool
148 wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
149                               xcb_connection_t *conn)
150 {
151    xcb_screen_iterator_t screen_iter =
152       xcb_setup_roots_iterator(xcb_get_setup(conn));
153    xcb_screen_t *screen = screen_iter.data;
154 
155    /* Open the DRI3 device from the X server. If we do not retrieve one we
156     * assume our local device is compatible.
157     */
158    int dri3_fd = wsi_dri3_open(conn, screen->root, None);
159    if (dri3_fd == -1)
160       return true;
161 
162    bool match = wsi_dev->can_present_on_device(wsi_dev->pdevice, dri3_fd);
163 
164    close(dri3_fd);
165 
166    return match;
167 }
168 #endif
169 
170 static bool
171 wsi_x11_detect_xwayland(xcb_connection_t *conn,
172                         xcb_query_extension_reply_t *randr_reply,
173                         xcb_query_extension_reply_t *xwl_reply)
174 {
175    /* Newer Xwayland exposes an X11 extension we can check for */
176    if (xwl_reply && xwl_reply->present)
177       return true;
178 
179    /* Older Xwayland uses the word "XWAYLAND" in the RandR output names */
180    if (!randr_reply || !randr_reply->present)
181       return false;
182 
183    xcb_randr_query_version_cookie_t ver_cookie =
184       xcb_randr_query_version_unchecked(conn, 1, 3);
185    xcb_randr_query_version_reply_t *ver_reply =
186       xcb_randr_query_version_reply(conn, ver_cookie, NULL);
187    bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
188                                        ver_reply->minor_version >= 3);
189    free(ver_reply);
190 
191    if (!has_randr_v1_3)
192       return false;
193 
194    const xcb_setup_t *setup = xcb_get_setup(conn);
195    xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);
196 
197    xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
198       xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
199    xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
200       xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);
201 
202    if (!gsr_reply || gsr_reply->num_outputs == 0) {
203       free(gsr_reply);
204       return false;
205    }
206 
207    xcb_randr_output_t *randr_outputs =
208       xcb_randr_get_screen_resources_current_outputs(gsr_reply);
209    xcb_randr_get_output_info_cookie_t goi_cookie =
210       xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
211    free(gsr_reply);
212 
213    xcb_randr_get_output_info_reply_t *goi_reply =
214       xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
215    if (!goi_reply) {
216       return false;
217    }
218 
219    char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
220    bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
221    free(goi_reply);
222 
223    return is_xwayland;
224 }
225 
226 static struct wsi_x11_connection *
227 wsi_x11_connection_create(struct wsi_device *wsi_dev,
228                           xcb_connection_t *conn)
229 {
230    xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
231                                 amd_cookie, nv_cookie, shm_cookie, sync_cookie,
232                                 xfixes_cookie, xwl_cookie;
233    xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
234                                *amd_reply, *nv_reply, *shm_reply = NULL,
235                                *xfixes_reply, *xwl_reply;
236    bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
237                     wsi_dev->has_import_memory_host;
238    bool has_dri3_v1_2 = false;
239    bool has_present_v1_2 = false;
240    bool has_dri3_v1_4 = false;
241    bool has_present_v1_4 = false;
242 
243    /* wsi_x11_get_connection may be called from a worker thread, but we will
244     * never end up here on one, since by then the connection will already be
245     * in the hash map, so we do not violate Vulkan's rules about when the
246     * allocation callbacks may be called. */
247    struct wsi_x11_connection *wsi_conn =
248       vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
249                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
250    if (!wsi_conn)
251       return NULL;
252 
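   /* The second argument to xcb_query_extension is the length of the
    * extension name string that follows. */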
253    sync_cookie = xcb_query_extension(conn, 4, "SYNC");
254    dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
255    pres_cookie = xcb_query_extension(conn, 7, "Present");
256    randr_cookie = xcb_query_extension(conn, 5, "RANDR");
257    xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");
258    xwl_cookie = xcb_query_extension(conn, 8, "XWAYLAND");
259 
260    if (wants_shm)
261       shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");
262 
263    /* We try to be nice to users and emit a warning if they try to use a
264     * Vulkan application on a system without DRI3 enabled.  However, this ends
265     * up spewing the warning when a user has, for example, both Intel
266     * integrated graphics and a discrete card with proprietary drivers and is
267     * running on the discrete card with the proprietary DDX.  In this case, we
268     * really don't want to print the warning because it just confuses users.
269     * As a heuristic to detect this case, we check for a couple of proprietary
270     * X11 extensions.
271     */
272    amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
273    nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");
274 
275    xcb_discard_reply(conn, sync_cookie.sequence);
276    dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
277    pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
278    randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
279    amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
280    nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
281    xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
282    xwl_reply = xcb_query_extension_reply(conn, xwl_cookie, NULL);
283    if (wants_shm)
284       shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
285    if (!dri3_reply || !pres_reply || !xfixes_reply) {
286       free(dri3_reply);
287       free(pres_reply);
288       free(xfixes_reply);
289       free(xwl_reply);
290       free(randr_reply);
291       free(amd_reply);
292       free(nv_reply);
293       if (wants_shm)
294          free(shm_reply);
295       vk_free(&wsi_dev->instance_alloc, wsi_conn);
296       return NULL;
297    }
298 
299    wsi_conn->has_dri3 = dri3_reply->present != 0;
300 #ifdef HAVE_X11_DRM
301    if (wsi_conn->has_dri3) {
302       xcb_dri3_query_version_cookie_t ver_cookie;
303       xcb_dri3_query_version_reply_t *ver_reply;
304 
305       ver_cookie = xcb_dri3_query_version(conn, 1, 4);
306       ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
307       has_dri3_v1_2 = ver_reply != NULL &&
308          (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
309       has_dri3_v1_4 = ver_reply != NULL &&
310          (ver_reply->major_version > 1 || ver_reply->minor_version >= 4);
311       free(ver_reply);
312    }
313 #endif
314 
315    wsi_conn->has_present = pres_reply->present != 0;
316 #ifdef HAVE_X11_DRM
317    if (wsi_conn->has_present) {
318       xcb_present_query_version_cookie_t ver_cookie;
319       xcb_present_query_version_reply_t *ver_reply;
320 
321       ver_cookie = xcb_present_query_version(conn, 1, 4);
322       ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
323       has_present_v1_2 = ver_reply != NULL &&
324         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
325       has_present_v1_4 = ver_reply != NULL &&
326         (ver_reply->major_version > 1 || ver_reply->minor_version >= 4);
327       free(ver_reply);
328    }
329 #endif
330 
331    wsi_conn->has_xfixes = xfixes_reply->present != 0;
332    if (wsi_conn->has_xfixes) {
333       xcb_xfixes_query_version_cookie_t ver_cookie;
334       xcb_xfixes_query_version_reply_t *ver_reply;
335 
336       ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
337       ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
338       wsi_conn->has_xfixes = ver_reply != NULL && (ver_reply->major_version >= 2);
339       free(ver_reply);
340    }
341 
342    wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn, randr_reply,
343                                                    xwl_reply);
344 
345    wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
346    wsi_conn->has_dri3_explicit_sync = has_dri3_v1_4 && has_present_v1_4;
347    wsi_conn->is_proprietary_x11 = false;
348    if (amd_reply && amd_reply->present)
349       wsi_conn->is_proprietary_x11 = true;
350    if (nv_reply && nv_reply->present)
351       wsi_conn->is_proprietary_x11 = true;
352 
353    wsi_conn->has_mit_shm = false;
354 #ifdef HAVE_X11_DRM
355    if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
356       bool has_mit_shm = shm_reply->present != 0;
357 
358       xcb_shm_query_version_cookie_t ver_cookie;
359       xcb_shm_query_version_reply_t *ver_reply;
360 
361       ver_cookie = xcb_shm_query_version(conn);
362       ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);
363 
364       has_mit_shm = ver_reply != NULL && ver_reply->shared_pixmaps;
365       free(ver_reply);
366       xcb_void_cookie_t cookie;
367       xcb_generic_error_t *error;
368 
369       if (has_mit_shm) {
370          cookie = xcb_shm_detach_checked(conn, 0);
371          if ((error = xcb_request_check(conn, cookie))) {
372             if (error->error_code != BadRequest)
373                wsi_conn->has_mit_shm = true;
374             free(error);
375          }
376       }
377    }
378 #endif
379 
380    free(dri3_reply);
381    free(pres_reply);
382    free(randr_reply);
383    free(xwl_reply);
384    free(amd_reply);
385    free(nv_reply);
386    free(xfixes_reply);
387    if (wants_shm)
388       free(shm_reply);
389 
390    return wsi_conn;
391 }
392 
393 static void
394 wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
395                            struct wsi_x11_connection *conn)
396 {
397    vk_free(&wsi_dev->instance_alloc, conn);
398 }
399 
400 static bool
401 wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
402 {
403   if (wsi_conn->has_dri3)
404     return true;
405   if (!wsi_conn->is_proprietary_x11) {
406     fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
407                     "Note: you can probably enable DRI3 in your Xorg config\n");
408   }
409   return false;
410 }
411 
412 /**
413  * Get internal struct representing an xcb_connection_t.
414  *
415  * This can allocate the struct, but the caller does not own it. It is
416  * deleted in wsi_x11_finish_wsi by the hash table it is inserted into.
417  *
418  * If the allocation fails NULL is returned.
419  */
420 static struct wsi_x11_connection *
421 wsi_x11_get_connection(struct wsi_device *wsi_dev,
422                        xcb_connection_t *conn)
423 {
424    struct wsi_x11 *wsi =
425       (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];
426 
427    mtx_lock(&wsi->mutex);
428 
429    struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
430    if (!entry) {
431       /* We're about to make a bunch of blocking calls.  Let's drop the
432        * mutex for now so we don't block up too badly.
433        */
434       mtx_unlock(&wsi->mutex);
435 
436       struct wsi_x11_connection *wsi_conn =
437          wsi_x11_connection_create(wsi_dev, conn);
438       if (!wsi_conn)
439          return NULL;
440 
441       mtx_lock(&wsi->mutex);
442 
443       entry = _mesa_hash_table_search(wsi->connections, conn);
444       if (entry) {
445          /* Oops, someone raced us to it */
446          wsi_x11_connection_destroy(wsi_dev, wsi_conn);
447       } else {
448          entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
449       }
450    }
451 
452    mtx_unlock(&wsi->mutex);
453 
454    return entry->data;
455 }
456 
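/* Candidate surface formats. get_sorted_vk_formats() below filters and orders
 * these against the channel masks of the window's and root window's visuals. */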
457 static const VkFormat formats[] = {
458    VK_FORMAT_R5G6B5_UNORM_PACK16,
459    VK_FORMAT_B8G8R8A8_SRGB,
460    VK_FORMAT_B8G8R8A8_UNORM,
461    VK_FORMAT_A2R10G10B10_UNORM_PACK32,
462 };
463 
464 static const VkPresentModeKHR present_modes[] = {
465    VK_PRESENT_MODE_IMMEDIATE_KHR,
466    VK_PRESENT_MODE_MAILBOX_KHR,
467    VK_PRESENT_MODE_FIFO_KHR,
468    VK_PRESENT_MODE_FIFO_RELAXED_KHR,
469 };
470 
471 static xcb_screen_t *
472 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
473 {
474    xcb_screen_iterator_t screen_iter =
475       xcb_setup_roots_iterator(xcb_get_setup(conn));
476 
477    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
478       if (screen_iter.data->root == root)
479          return screen_iter.data;
480    }
481 
482    return NULL;
483 }
484 
485 static xcb_visualtype_t *
486 screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
487                       unsigned *depth)
488 {
489    xcb_depth_iterator_t depth_iter =
490       xcb_screen_allowed_depths_iterator(screen);
491 
492    for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
493       xcb_visualtype_iterator_t visual_iter =
494          xcb_depth_visuals_iterator (depth_iter.data);
495 
496       for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
497          if (visual_iter.data->visual_id == visual_id) {
498             if (depth)
499                *depth = depth_iter.data->depth;
500             return visual_iter.data;
501          }
502       }
503    }
504 
505    return NULL;
506 }
507 
508 static xcb_visualtype_t *
509 connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
510 {
511    xcb_screen_iterator_t screen_iter =
512       xcb_setup_roots_iterator(xcb_get_setup(conn));
513 
514    /* For this we have to iterate over all of the screens which is rather
515     * annoying.  Fortunately, there is probably only 1.
516     */
517    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
518       xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
519                                                        visual_id, NULL);
520       if (visual)
521          return visual;
522    }
523 
524    return NULL;
525 }
526 
527 static xcb_visualtype_t *
528 get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
529                           unsigned *depth, xcb_visualtype_t **rootvis)
530 {
531    xcb_query_tree_cookie_t tree_cookie;
532    xcb_get_window_attributes_cookie_t attrib_cookie;
533    xcb_query_tree_reply_t *tree;
534    xcb_get_window_attributes_reply_t *attrib;
535 
536    tree_cookie = xcb_query_tree(conn, window);
537    attrib_cookie = xcb_get_window_attributes(conn, window);
538 
539    tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
540    attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
541    if (attrib == NULL || tree == NULL) {
542       free(attrib);
543       free(tree);
544       return NULL;
545    }
546 
547    xcb_window_t root = tree->root;
548    xcb_visualid_t visual_id = attrib->visual;
549    free(attrib);
550    free(tree);
551 
552    xcb_screen_t *screen = get_screen_for_root(conn, root);
553    if (screen == NULL)
554       return NULL;
555 
556    if (rootvis)
557       *rootvis = screen_get_visualtype(screen, screen->root_visual, depth);
558    return screen_get_visualtype(screen, visual_id, depth);
559 }
560 
561 static bool
562 visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
563 {
564    uint32_t rgb_mask = visual->red_mask |
565                        visual->green_mask |
566                        visual->blue_mask;
567 
568    uint32_t all_mask = 0xffffffff >> (32 - depth);
569 
570    /* Do we have bits left over after RGB? */
571    return (all_mask & ~rgb_mask) != 0;
572 }
573 
574 static bool
575 visual_supported(xcb_visualtype_t *visual)
576 {
577    if (!visual)
578       return false;
579 
580    return visual->_class == XCB_VISUAL_CLASS_TRUE_COLOR ||
581           visual->_class == XCB_VISUAL_CLASS_DIRECT_COLOR;
582 }
583 
584 VKAPI_ATTR VkBool32 VKAPI_CALL
585 wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
586                                                uint32_t queueFamilyIndex,
587                                                xcb_connection_t *connection,
588                                                xcb_visualid_t visual_id)
589 {
590    VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
591    struct wsi_device *wsi_device = pdevice->wsi_device;
592    if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
593       return false;
594 
595    struct wsi_x11_connection *wsi_conn =
596       wsi_x11_get_connection(wsi_device, connection);
597 
598    if (!wsi_conn)
599       return false;
600 
601    if (!wsi_device->sw) {
602       if (!wsi_x11_check_for_dri3(wsi_conn))
603          return false;
604    }
605 
606    if (!visual_supported(connection_get_visualtype(connection, visual_id)))
607       return false;
608 
609    return true;
610 }
611 
612 VKAPI_ATTR VkBool32 VKAPI_CALL
613 wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
614                                                 uint32_t queueFamilyIndex,
615                                                 Display *dpy,
616                                                 VisualID visualID)
617 {
618    return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
619                                                          queueFamilyIndex,
620                                                          XGetXCBConnection(dpy),
621                                                          visualID);
622 }
623 
624 static xcb_connection_t*
625 x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
626 {
627    if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
628       return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
629    else
630       return ((VkIcdSurfaceXcb *)icd_surface)->connection;
631 }
632 
633 static xcb_window_t
634 x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
635 {
636    if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
637       return ((VkIcdSurfaceXlib *)icd_surface)->window;
638    else
639       return ((VkIcdSurfaceXcb *)icd_surface)->window;
640 }
641 
642 static VkResult
643 x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
644                         struct wsi_device *wsi_device,
645                         uint32_t queueFamilyIndex,
646                         VkBool32* pSupported)
647 {
648    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
649    xcb_window_t window = x11_surface_get_window(icd_surface);
650 
651    struct wsi_x11_connection *wsi_conn =
652       wsi_x11_get_connection(wsi_device, conn);
653    if (!wsi_conn)
654       return VK_ERROR_OUT_OF_HOST_MEMORY;
655 
656    if (!wsi_device->sw) {
657       if (!wsi_x11_check_for_dri3(wsi_conn)) {
658          *pSupported = false;
659          return VK_SUCCESS;
660       }
661    }
662 
663    if (!visual_supported(get_visualtype_for_window(conn, window, NULL, NULL))) {
664       *pSupported = false;
665       return VK_SUCCESS;
666    }
667 
668    *pSupported = true;
669    return VK_SUCCESS;
670 }
671 
672 static uint32_t
673 x11_get_min_image_count(const struct wsi_device *wsi_device, bool is_xwayland)
674 {
675    if (wsi_device->x11.override_minImageCount)
676       return wsi_device->x11.override_minImageCount;
677 
678    /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
679     * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
680     * the render latency is CPU duration + GPU duration.
681     *
682     * This means that with scanout from pageflipping we need 3 frames to run
683     * full speed:
684     * 1) CPU rendering work
685     * 2) GPU rendering work
686     * 3) scanout
687     *
688     * Once we have a nonblocking acquire that returns a semaphore we can merge
689     * 1 and 3. Hence the ideal implementation needs only 2 images, but games
690     * cannot tell that we currently do not have an ideal implementation and
691     * that they hence need to allocate 3 images. So let us do it for them.
692     *
693     * This is a tradeoff as it uses more memory than needed for non-fullscreen
694     * and non-performance intensive applications.
695     *
696     * For Xwayland, Venus reports four images as described in
697     *   wsi_wl_surface_get_capabilities
698     */
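   /* Illustrative arithmetic (hypothetical numbers, not from the original
    * comment): with 4 ms of CPU work and 6 ms of GPU work per frame,
    * throughput is 1/max(4 ms, 6 ms) ≈ 166 FPS while latency is
    * 4 + 6 = 10 ms. With only two images one of the three stages above would
    * starve, so three images let CPU, GPU and scanout each own a buffer. */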
699    return is_xwayland && wsi_device->x11.extra_xwayland_image ? 4 : 3;
700 }
701 
702 static unsigned
703 x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
704                                          struct wsi_x11_connection *wsi_conn,
705                                          VkPresentModeKHR present_mode);
706 
707 static VkResult
708 x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
709                              struct wsi_device *wsi_device,
710                              const VkSurfacePresentModeEXT *present_mode,
711                              VkSurfaceCapabilitiesKHR *caps)
712 {
713    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
714    xcb_window_t window = x11_surface_get_window(icd_surface);
715    struct wsi_x11_vk_surface *surface = (struct wsi_x11_vk_surface*)icd_surface;
716    struct wsi_x11_connection *wsi_conn =
717       wsi_x11_get_connection(wsi_device, conn);
718    xcb_get_geometry_cookie_t geom_cookie;
719    xcb_generic_error_t *err;
720    xcb_get_geometry_reply_t *geom;
721 
722    geom_cookie = xcb_get_geometry(conn, window);
723 
724    geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
725    if (!geom)
726       return VK_ERROR_SURFACE_LOST_KHR;
727    {
728       VkExtent2D extent = { geom->width, geom->height };
729       caps->currentExtent = extent;
730       caps->minImageExtent = extent;
731       caps->maxImageExtent = extent;
732    }
733    free(err);
734    free(geom);
735 
736    if (surface->has_alpha) {
737       caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
738                                       VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
739    } else {
740       caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
741                                       VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
742    }
743 
744    if (present_mode) {
745       caps->minImageCount = x11_get_min_image_count_for_present_mode(wsi_device, wsi_conn, present_mode->presentMode);
746    } else {
747       caps->minImageCount = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
748    }
749 
750    /* There is no real maximum */
751    caps->maxImageCount = 0;
752 
753    caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
754    caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
755    caps->maxImageArrayLayers = 1;
756    caps->supportedUsageFlags = wsi_caps_get_image_usage();
757 
758    VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
759    if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
760       caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;
761 
762    return VK_SUCCESS;
763 }
764 
765 static VkResult
766 x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
767                               struct wsi_device *wsi_device,
768                               const void *info_next,
769                               VkSurfaceCapabilities2KHR *caps)
770 {
771    assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
772 
773    const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);
774 
775    VkResult result =
776       x11_surface_get_capabilities(icd_surface, wsi_device, present_mode,
777                                    &caps->surfaceCapabilities);
778 
779    if (result != VK_SUCCESS)
780       return result;
781 
782    vk_foreach_struct(ext, caps->pNext) {
783       switch (ext->sType) {
784       case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
785          VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
786          protected->supportsProtected = VK_FALSE;
787          break;
788       }
789 
790       case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
791          /* Unsupported. */
792          VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
793          scaling->supportedPresentScaling = 0;
794          scaling->supportedPresentGravityX = 0;
795          scaling->supportedPresentGravityY = 0;
796          scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
797          scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
798          break;
799       }
800 
801       case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
802          /* All present modes are compatible with each other. */
803          VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
804          if (compat->pPresentModes) {
805             assert(present_mode);
806             VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
807             /* Must always return queried present mode even when truncating. */
808             vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
809                *mode = present_mode->presentMode;
810             }
811 
812             for (uint32_t i = 0; i < ARRAY_SIZE(present_modes); i++) {
813                if (present_modes[i] != present_mode->presentMode) {
814                   vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
815                      *mode = present_modes[i];
816                   }
817                }
818             }
819          } else {
820             if (!present_mode)
821                wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
822                                        "without a VkSurfacePresentModeEXT set. This is an "
823                                        "application bug.\n");
824 
825             compat->presentModeCount = ARRAY_SIZE(present_modes);
826          }
827          break;
828       }
829 
830       default:
831          /* Ignored */
832          break;
833       }
834    }
835 
836    return result;
837 }
838 
839 static int
840 format_get_component_bits(VkFormat format, int comp)
841 {
842    return vk_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, comp);
843 }
844 
845 static bool
846 rgb_component_bits_are_equal(VkFormat format, const xcb_visualtype_t* type)
847 {
848    return format_get_component_bits(format, 0) == util_bitcount(type->red_mask) &&
849           format_get_component_bits(format, 1) == util_bitcount(type->green_mask) &&
850           format_get_component_bits(format, 2) == util_bitcount(type->blue_mask);
851 }
852 
853 static bool
854 get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
855                       VkFormat *sorted_formats, unsigned *count)
856 {
857    xcb_connection_t *conn = x11_surface_get_connection(surface);
858    xcb_window_t window = x11_surface_get_window(surface);
859    xcb_visualtype_t *rootvis = NULL;
860    xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL, &rootvis);
861 
862    if (!visual)
863       return false;
864 
865    /* use the root window's visual to set the default */
866    *count = 0;
867    for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
868       if (rgb_component_bits_are_equal(formats[i], rootvis))
869          sorted_formats[(*count)++] = formats[i];
870    }
871 
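   /* Then append formats matching the window's own visual, skipping any
    * already added for the root visual. */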
872    for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
873       for (unsigned j = 0; j < *count; j++)
874          if (formats[i] == sorted_formats[j])
875             goto next_format;
876       if (rgb_component_bits_are_equal(formats[i], visual))
877          sorted_formats[(*count)++] = formats[i];
878 next_format:;
879    }
880 
881    if (wsi_device->force_bgra8_unorm_first) {
882       for (unsigned i = 0; i < *count; i++) {
883          if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
884             sorted_formats[i] = sorted_formats[0];
885             sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
886             break;
887          }
888       }
889    }
890 
891    return true;
892 }
893 
894 static VkResult
895 x11_surface_get_formats(VkIcdSurfaceBase *surface,
896                         struct wsi_device *wsi_device,
897                         uint32_t *pSurfaceFormatCount,
898                         VkSurfaceFormatKHR *pSurfaceFormats)
899 {
900    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
901                           pSurfaceFormats, pSurfaceFormatCount);
902 
903    unsigned count;
904    VkFormat sorted_formats[ARRAY_SIZE(formats)];
905    if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
906       return VK_ERROR_SURFACE_LOST_KHR;
907 
908    for (unsigned i = 0; i < count; i++) {
909       vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
910          f->format = sorted_formats[i];
911          f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
912       }
913    }
914 
915    return vk_outarray_status(&out);
916 }
917 
918 static VkResult
919 x11_surface_get_formats2(VkIcdSurfaceBase *surface,
920                         struct wsi_device *wsi_device,
921                         const void *info_next,
922                         uint32_t *pSurfaceFormatCount,
923                         VkSurfaceFormat2KHR *pSurfaceFormats)
924 {
925    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
926                           pSurfaceFormats, pSurfaceFormatCount);
927 
928    unsigned count;
929    VkFormat sorted_formats[ARRAY_SIZE(formats)];
930    if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
931       return VK_ERROR_SURFACE_LOST_KHR;
932 
933    for (unsigned i = 0; i < count; i++) {
934       vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
935          assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
936          f->surfaceFormat.format = sorted_formats[i];
937          f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
938       }
939    }
940 
941    return vk_outarray_status(&out);
942 }
943 
944 static VkResult
945 x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
946                               struct wsi_device *wsi_device,
947                               uint32_t *pPresentModeCount,
948                               VkPresentModeKHR *pPresentModes)
949 {
950    if (pPresentModes == NULL) {
951       *pPresentModeCount = ARRAY_SIZE(present_modes);
952       return VK_SUCCESS;
953    }
954 
955    *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
956    typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
957 
958    return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
959       VK_INCOMPLETE : VK_SUCCESS;
960 }
961 
962 static VkResult
963 x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
964                                    struct wsi_device *wsi_device,
965                                    uint32_t* pRectCount,
966                                    VkRect2D* pRects)
967 {
968    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
969    xcb_window_t window = x11_surface_get_window(icd_surface);
970    VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
971 
972    vk_outarray_append_typed(VkRect2D, &out, rect) {
973       xcb_generic_error_t *err = NULL;
974       xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
975       xcb_get_geometry_reply_t *geom =
976          xcb_get_geometry_reply(conn, geom_cookie, &err);
977       free(err);
978       if (geom) {
979          *rect = (VkRect2D) {
980             .offset = { 0, 0 },
981             .extent = { geom->width, geom->height },
982          };
983       }
984       free(geom);
985       if (!geom)
986           return VK_ERROR_SURFACE_LOST_KHR;
987    }
988 
989    return vk_outarray_status(&out);
990 }
991 
992 VKAPI_ATTR VkResult VKAPI_CALL
993 wsi_CreateXcbSurfaceKHR(VkInstance _instance,
994                         const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
995                         const VkAllocationCallbacks *pAllocator,
996                         VkSurfaceKHR *pSurface)
997 {
998    VK_FROM_HANDLE(vk_instance, instance, _instance);
999    struct wsi_x11_vk_surface *surface;
1000 
1001    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
1002 
1003    unsigned visual_depth;
1004    xcb_visualtype_t *visual =
1005       get_visualtype_for_window(pCreateInfo->connection, pCreateInfo->window, &visual_depth, NULL);
1006    if (!visual)
1007       return VK_ERROR_OUT_OF_HOST_MEMORY;
1008 
1009    surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
1010                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1011    if (surface == NULL)
1012       return VK_ERROR_OUT_OF_HOST_MEMORY;
1013 
1014    surface->xcb.base.platform = VK_ICD_WSI_PLATFORM_XCB;
1015    surface->xcb.connection = pCreateInfo->connection;
1016    surface->xcb.window = pCreateInfo->window;
1017 
1018    surface->has_alpha = visual_has_alpha(visual, visual_depth);
1019 
1020    *pSurface = VkIcdSurfaceBase_to_handle(&surface->xcb.base);
1021    return VK_SUCCESS;
1022 }
1023 
1024 VKAPI_ATTR VkResult VKAPI_CALL
1025 wsi_CreateXlibSurfaceKHR(VkInstance _instance,
1026                          const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
1027                          const VkAllocationCallbacks *pAllocator,
1028                          VkSurfaceKHR *pSurface)
1029 {
1030    VK_FROM_HANDLE(vk_instance, instance, _instance);
1031    struct wsi_x11_vk_surface *surface;
1032 
1033    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
1034 
1035    unsigned visual_depth;
1036    xcb_visualtype_t *visual =
1037       get_visualtype_for_window(XGetXCBConnection(pCreateInfo->dpy), pCreateInfo->window, &visual_depth, NULL);
1038    if (!visual)
1039       return VK_ERROR_OUT_OF_HOST_MEMORY;
1040 
1041    surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
1042                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1043    if (surface == NULL)
1044       return VK_ERROR_OUT_OF_HOST_MEMORY;
1045 
1046    surface->xlib.base.platform = VK_ICD_WSI_PLATFORM_XLIB;
1047    surface->xlib.dpy = pCreateInfo->dpy;
1048    surface->xlib.window = pCreateInfo->window;
1049 
1050    surface->has_alpha = visual_has_alpha(visual, visual_depth);
1051 
1052    *pSurface = VkIcdSurfaceBase_to_handle(&surface->xlib.base);
1053    return VK_SUCCESS;
1054 }
1055 
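/* One entry per xcb_present_pixmap request that has been submitted but whose
 * COMPLETE_NOTIFY event has not yet arrived; 'serial' matches the request's
 * serial and 'signal_present_id' is the present ID to signal on completion. */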
1056 struct x11_image_pending_completion {
1057    uint32_t serial;
1058    uint64_t signal_present_id;
1059 };
1060 
1061 struct x11_image {
1062    struct wsi_image                          base;
1063    xcb_pixmap_t                              pixmap;
1064    xcb_xfixes_region_t                       update_region; /* long lived XID */
1065    xcb_xfixes_region_t                       update_area;   /* the above or None */
1066    struct xshmfence *                        shm_fence;
1067    uint32_t                                  sync_fence;
1068    xcb_shm_seg_t                             shmseg;
1069    int                                       shmid;
1070    uint8_t *                                 shmaddr;
1071    uint64_t                                  present_id;
1072    VkPresentModeKHR                          present_mode;
1073    xcb_rectangle_t                           rects[MAX_DAMAGE_RECTS];
1074    int                                       rectangle_count;
1075 
1076    /* In IMMEDIATE and MAILBOX modes, we can have multiple pending presentations per image.
1077     * We need to keep track of them when considering present ID. */
1078 
1079    /* This is arbitrarily chosen. With IMMEDIATE on a 3 deep swapchain,
1080     * we allow over 300 outstanding presentations per vblank, which is more than enough
1081     * for any reasonable application.
1082     * This used to be 16, but it regressed benchmarks that did 15k+ FPS.
1083     * This should allow over 25k FPS on a 60 Hz monitor. Any more than this is comical. */
1084 #define X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS 128
1085    uint32_t                                  present_queued_count;
1086    struct x11_image_pending_completion       pending_completions[X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS];
1087 #ifdef HAVE_DRI3_EXPLICIT_SYNC
1088    uint32_t                                  dri3_syncobj[WSI_ES_COUNT];
1089 #endif
1090 };
1091 
1092 struct x11_swapchain {
1093    struct wsi_swapchain                        base;
1094 
1095    bool                                         has_dri3_modifiers;
1096    bool                                         has_mit_shm;
1097    bool                                         has_async_may_tear;
1098 
1099    xcb_connection_t *                           conn;
1100    xcb_window_t                                 window;
1101    xcb_gc_t                                     gc;
1102    uint32_t                                     depth;
1103    VkExtent2D                                   extent;
1104 
1105    blake3_hash                                  dri3_modifier_hash;
1106 
1107    xcb_present_event_t                          event_id;
1108    xcb_special_event_t *                        special_event;
1109    uint64_t                                     send_sbc;
1110    uint64_t                                     last_present_msc;
1111    uint32_t                                     stamp;
1112    uint32_t                                     sent_image_count;
1113 
1114    atomic_int                                   status;
1115    bool                                         copy_is_suboptimal;
1116    struct wsi_queue                             present_queue;
1117    struct wsi_queue                             acquire_queue;
1118    thrd_t                                       queue_manager;
1119    thrd_t                                       event_manager;
1120 
1121    /* Used for communicating between event_manager and queue_manager.
1122     * Lock is also taken when reading and writing status.
1123     * When reading status in application threads,
1124     * x11_swapchain_read_status_atomic can be used as a wrapper function. */
1125    mtx_t                                        thread_state_lock;
1126    struct u_cnd_monotonic                       thread_state_cond;
1127 
1128    /* Lock and condition variable for present wait.
1129     * Signalled by event thread and waited on by callers to PresentWaitKHR. */
1130    mtx_t                                        present_progress_mutex;
1131    struct u_cnd_monotonic                       present_progress_cond;
1132    uint64_t                                     present_id;
1133    VkResult                                     present_progress_error;
1134 
1135    struct x11_image                             images[0];
1136 };
1137 VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
1138                                VK_OBJECT_TYPE_SWAPCHAIN_KHR)
1139 
1140 static void x11_present_complete(struct x11_swapchain *swapchain,
1141                                  struct x11_image *image, uint32_t index)
1142 {
1143    uint64_t signal_present_id = image->pending_completions[index].signal_present_id;
1144    if (signal_present_id) {
1145       mtx_lock(&swapchain->present_progress_mutex);
1146       if (signal_present_id > swapchain->present_id) {
1147          swapchain->present_id = signal_present_id;
1148          u_cnd_monotonic_broadcast(&swapchain->present_progress_cond);
1149       }
1150       mtx_unlock(&swapchain->present_progress_mutex);
1151    }
1152 
1153    image->present_queued_count--;
1154    if (image->present_queued_count) {
1155       memmove(image->pending_completions + index,
1156               image->pending_completions + index + 1,
1157               (image->present_queued_count - index) *
1158               sizeof(image->pending_completions[0]));
1159    }
1160 
1161    u_cnd_monotonic_signal(&swapchain->thread_state_cond);
1162 }
1163 
1164 static void x11_notify_pending_present(struct x11_swapchain *swapchain,
1165                                        struct x11_image *image)
1166 {
1167    u_cnd_monotonic_signal(&swapchain->thread_state_cond);
1168 }
1169 
1170 /* It is assumed that thread_state_lock is taken when calling this function. */
1171 static void x11_swapchain_notify_error(struct x11_swapchain *swapchain, VkResult result)
1172 {
1173    mtx_lock(&swapchain->present_progress_mutex);
1174    swapchain->present_id = UINT64_MAX;
1175    swapchain->present_progress_error = result;
1176    u_cnd_monotonic_broadcast(&swapchain->present_progress_cond);
1177    mtx_unlock(&swapchain->present_progress_mutex);
1178    u_cnd_monotonic_broadcast(&swapchain->thread_state_cond);
1179 }
1180 
1181 /**
1182  * Update the swapchain status with the result of an operation, and return
1183  * the combined status. The chain status will eventually be returned from
1184  * AcquireNextImage and QueuePresent.
1185  *
1186  * We make sure to 'stick' more pessimistic statuses: an out-of-date error
1187  * is permanent once seen, and every subsequent call will return this. If
1188  * this has not been seen, success will be returned.
1189  *
1190  * It is assumed that thread_state_lock is taken when calling this function.
1191  */
1192 static VkResult
1193 _x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
1194                       const char *file, int line)
1195 {
1196    if (result < 0)
1197       x11_swapchain_notify_error(chain, result);
1198 
1199    /* Prioritise returning existing errors for consistency. */
1200    if (chain->status < 0)
1201       return chain->status;
1202 
1203    /* If we have a new error, mark it as permanent on the chain and return. */
1204    if (result < 0) {
1205 #ifndef NDEBUG
1206       fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
1207               file, line, vk_Result_to_str(result));
1208 #endif
1209       chain->status = result;
1210       return result;
1211    }
1212 
1213    /* Return temporary errors, but don't persist them. */
1214    if (result == VK_TIMEOUT || result == VK_NOT_READY)
1215       return result;
1216 
1217    /* Suboptimal isn't an error, but is a status which sticks to the swapchain
1218     * and is always returned rather than success.
1219     */
1220    if (result == VK_SUBOPTIMAL_KHR) {
1221 #ifndef NDEBUG
1222       if (chain->status != VK_SUBOPTIMAL_KHR) {
1223          fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
1224                  file, line, vk_Result_to_str(result));
1225       }
1226 #endif
1227       chain->status = result;
1228       return result;
1229    }
1230 
1231    /* No changes, so return the last status. */
1232    return chain->status;
1233 }
1234 #define x11_swapchain_result(chain, result) \
1235    _x11_swapchain_result(chain, result, __FILE__, __LINE__)
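/* For example (illustrative): once VK_ERROR_OUT_OF_DATE_KHR has been recorded
 * here, every later call returns it even if the new result is VK_SUCCESS;
 * VK_TIMEOUT and VK_NOT_READY are passed through without being stored, and
 * VK_SUBOPTIMAL_KHR sticks but can still be replaced by a real error. */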
1236 
1237 static struct wsi_image *
1238 x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
1239 {
1240    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1241    return &chain->images[image_index].base;
1242 }
1243 #ifdef HAVE_X11_DRM
1244 static bool
1245 wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain);
1246 #endif
1247 static VkResult
1248 x11_wait_for_explicit_sync_release_submission(struct x11_swapchain *chain,
1249                                               uint64_t rel_timeout_ns,
1250                                               uint32_t *image_index)
1251 {
1252    STACK_ARRAY(struct wsi_image*, images, chain->base.image_count);
1253    for (uint32_t i = 0; i < chain->base.image_count; i++)
1254       images[i] = &chain->images[i].base;
1255 
1256    VkResult result;
1257 #ifdef HAVE_LIBDRM
1258    result = wsi_drm_wait_for_explicit_sync_release(&chain->base,
1259                                                    chain->base.image_count,
1260                                                    images,
1261                                                    rel_timeout_ns,
1262                                                    image_index);
1263 #else
1264    result = VK_ERROR_FEATURE_NOT_PRESENT;
1265 #endif
1266    STACK_ARRAY_FINISH(images);
1267    return result;
1268 }
1269 
1270 /* XXX this belongs in presentproto */
1271 #ifndef PresentWindowDestroyed
1272 #define PresentWindowDestroyed (1 << 0)
1273 #endif
1274 /**
1275  * Process an X11 Present event. Does not update chain->status.
1276  */
1277 static VkResult
1278 x11_handle_dri3_present_event(struct x11_swapchain *chain,
1279                               xcb_present_generic_event_t *event)
1280 {
1281    switch (event->evtype) {
1282    case XCB_PRESENT_CONFIGURE_NOTIFY: {
1283       xcb_present_configure_notify_event_t *config = (void *) event;
1284       if (config->pixmap_flags & PresentWindowDestroyed)
1285          return VK_ERROR_SURFACE_LOST_KHR;
1286 
1287       struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
1288       if (!wsi_device->x11.ignore_suboptimal) {
1289          if (config->width != chain->extent.width ||
1290              config->height != chain->extent.height)
1291             return VK_SUBOPTIMAL_KHR;
1292       }
1293 
1294       break;
1295    }
1296 
1297    case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
1298       xcb_present_idle_notify_event_t *idle = (void *) event;
1299 
1300       assert(!chain->base.image_info.explicit_sync);
1301       for (unsigned i = 0; i < chain->base.image_count; i++) {
1302          if (chain->images[i].pixmap == idle->pixmap) {
1303             chain->sent_image_count--;
1304             assert(chain->sent_image_count >= 0);
1305             wsi_queue_push(&chain->acquire_queue, i);
1306             break;
1307          }
1308       }
1309 
1310       break;
1311    }
1312 
1313    case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
1314       xcb_present_complete_notify_event_t *complete = (void *) event;
1315       if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
1316          unsigned i, j;
1317          for (i = 0; i < chain->base.image_count; i++) {
1318             struct x11_image *image = &chain->images[i];
1319             for (j = 0; j < image->present_queued_count; j++) {
1320                if (image->pending_completions[j].serial == complete->serial) {
1321                   x11_present_complete(chain, image, j);
1322                }
1323             }
1324          }
1325          chain->last_present_msc = complete->msc;
1326       }
1327 
1328       VkResult result = VK_SUCCESS;
1329 
1330       struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
1331       if (wsi_device->x11.ignore_suboptimal)
1332          return result;
1333 
1334       switch (complete->mode) {
1335       case XCB_PRESENT_COMPLETE_MODE_COPY:
1336          if (chain->copy_is_suboptimal)
1337             result = VK_SUBOPTIMAL_KHR;
1338          break;
1339       case XCB_PRESENT_COMPLETE_MODE_FLIP:
1340          /* If we ever go from flipping to copying, the odds are very likely
1341           * that we could reallocate in a more optimal way if we didn't have
1342           * to care about scanout, so we always do this.
1343           */
1344          chain->copy_is_suboptimal = true;
1345          break;
1346 #ifdef HAVE_X11_DRM
1347       case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
1348          /* The winsys is now trying to flip directly and cannot due to our
1349           * configuration. Request the user reallocate.
1350           */
1351 
1352          /* Sometimes, this complete mode is spurious, and a false positive.
1353           * Xwayland may report SUBOPTIMAL_COPY even if there are no changes in the modifiers.
1354           * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26616 for more details. */
1355          if (chain->status == VK_SUCCESS &&
1356              wsi_x11_swapchain_query_dri3_modifiers_changed(chain)) {
1357             result = VK_SUBOPTIMAL_KHR;
1358          }
1359          break;
1360 #endif
1361       default:
1362          break;
1363       }
1364 
1365       return result;
1366    }
1367 
1368    default:
1369       break;
1370    }
1371 
1372    return VK_SUCCESS;
1373 }
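/* For context, a minimal sketch (not part of this file; names and setup are
 * assumptions) of how a swapchain typically subscribes to the Present events
 * handled above before waiting on the special event queue:
 *
 *    xcb_present_event_t eid = xcb_generate_id(conn);
 *    xcb_present_select_input(conn, eid, window,
 *                             XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
 *                             XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
 *                             XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
 *    xcb_special_event_t *queue =
 *       xcb_register_for_special_xge(conn, &xcb_present_id, eid, NULL);
 *    xcb_generic_event_t *ev = xcb_wait_for_special_event(conn, queue);
 */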
1374 #ifdef HAVE_X11_DRM
1375 /**
1376  * Send image to X server via Present extension.
1377  */
1378 static VkResult
1379 x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
1380                         uint64_t target_msc, VkPresentModeKHR present_mode)
1381 {
1382    struct x11_image *image = &chain->images[image_index];
1383 
1384    assert(image_index < chain->base.image_count);
1385 
1386    uint32_t options = XCB_PRESENT_OPTION_NONE;
1387 
1388    int64_t divisor = 0;
1389    int64_t remainder = 0;
1390 
1391    struct wsi_x11_connection *wsi_conn =
1392       wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1393    if (!wsi_conn)
1394       return VK_ERROR_OUT_OF_HOST_MEMORY;
1395 
1396    if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
1397        (present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
1398         wsi_conn->is_xwayland) ||
1399        present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
1400       options |= XCB_PRESENT_OPTION_ASYNC;
1401 
1402    if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR
1403       && chain->has_async_may_tear)
1404       options |= XCB_PRESENT_OPTION_ASYNC_MAY_TEAR;
1405 
1406    if (chain->has_dri3_modifiers)
1407       options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1408 
1409    xshmfence_reset(image->shm_fence);
1410 
1411    if (!chain->base.image_info.explicit_sync) {
1412       ++chain->sent_image_count;
1413       assert(chain->sent_image_count <= chain->base.image_count);
1414    }
1415 
1416    ++chain->send_sbc;
1417    uint32_t serial = (uint32_t)chain->send_sbc;
1418 
1419    assert(image->present_queued_count < ARRAY_SIZE(image->pending_completions));
1420    image->pending_completions[image->present_queued_count++] =
1421       (struct x11_image_pending_completion) {
1422          .signal_present_id = image->present_id,
1423          .serial = serial,
1424       };
1425 
1426    xcb_void_cookie_t cookie;
1427 #ifdef HAVE_DRI3_EXPLICIT_SYNC
1428    if (chain->base.image_info.explicit_sync) {
1429       uint64_t acquire_point = image->base.explicit_sync[WSI_ES_ACQUIRE].timeline;
1430       uint64_t release_point = image->base.explicit_sync[WSI_ES_RELEASE].timeline;
1431       cookie = xcb_present_pixmap_synced(
1432          chain->conn,
1433          chain->window,
1434          image->pixmap,
1435          serial,
1436          0,                                   /* valid */
1437          image->update_area,                  /* update */
1438          0,                                   /* x_off */
1439          0,                                   /* y_off */
1440          XCB_NONE,                            /* target_crtc */
1441          image->dri3_syncobj[WSI_ES_ACQUIRE], /* acquire_syncobj */
1442          image->dri3_syncobj[WSI_ES_RELEASE], /* release_syncobj */
1443          acquire_point,
1444          release_point,
1445          options,
1446          target_msc,
1447          divisor,
1448          remainder, 0, NULL);
1449    } else
1450 #endif
1451    {
1452       cookie = xcb_present_pixmap(chain->conn,
1453                                   chain->window,
1454                                   image->pixmap,
1455                                   serial,
1456                                   0,                  /* valid */
1457                                   image->update_area, /* update */
1458                                   0,                  /* x_off */
1459                                   0,                  /* y_off */
1460                                   XCB_NONE,           /* target_crtc */
1461                                   XCB_NONE,
1462                                   image->sync_fence,
1463                                   options,
1464                                   target_msc,
1465                                   divisor,
1466                                   remainder, 0, NULL);
1467    }
1468    xcb_discard_reply(chain->conn, cookie.sequence);
1469    xcb_flush(chain->conn);
1470    return x11_swapchain_result(chain, VK_SUCCESS);
1471 }
1472 #endif
1473 /**
1474  * Send image to X server unaccelerated (software drivers).
1475  */
1476 static VkResult
1477 x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index)
1478 {
1479    assert(!chain->base.image_info.explicit_sync);
1480    struct x11_image *image = &chain->images[image_index];
1481 
1482    /* Begin querying this before submitting the frame for improved async performance.
1483     * In this _sw() mode we're expecting network round-trip delay, not just UNIX socket delay. */
1484    xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);
1485 
1486    xcb_void_cookie_t cookie;
1487    void *myptr = image->base.cpu_map;
1488    size_t hdr_len = sizeof(xcb_put_image_request_t);
1489    int stride_b = image->base.row_pitches[0];
1490    size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
1491    uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);
1492 
1493    if (image->rectangle_count > 0) {
1494       for (int i = 0; i < image->rectangle_count; i++) {
1495          xcb_rectangle_t rect = chain->images[image_index].rects[i];
1496          const uint8_t *data = (const uint8_t*)myptr + (rect.y * stride_b) + (rect.x * 4);
1497          for (int j = 0; j < rect.height; j++) {
1498             cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1499                                    chain->window, chain->gc,
1500                                    rect.width,
1501                                    1,
1502                                    rect.x, rect.y + j,
1503                                    0, chain->depth,
1504                                    rect.width * 4,
1505                                    data);
1506             xcb_discard_reply(chain->conn, cookie.sequence);
1507             data += stride_b;
1508          }
1509       }
1510    } else if (size < max_req_len) {
1511       cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1512                              chain->window,
1513                              chain->gc,
1514                              image->base.row_pitches[0] / 4,
1515                              chain->extent.height,
1516                              0,0,0,chain->depth,
1517                              image->base.row_pitches[0] * chain->extent.height,
1518                              image->base.cpu_map);
1519       xcb_discard_reply(chain->conn, cookie.sequence);
1520    } else {
1521       int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
1522       int y_start = 0;
1523       int y_todo = chain->extent.height;
1524       while (y_todo) {
1525          int this_lines = MIN2(num_lines, y_todo);
1526          cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1527                                 chain->window,
1528                                 chain->gc,
1529                                 image->base.row_pitches[0] / 4,
1530                                 this_lines,
1531                                 0,y_start,0,chain->depth,
1532                                 this_lines * stride_b,
1533                                 (const uint8_t *)myptr + (y_start * stride_b));
1534          xcb_discard_reply(chain->conn, cookie.sequence);
1535          y_start += this_lines;
1536          y_todo -= this_lines;
1537       }
1538    }
1539 
1540    xcb_flush(chain->conn);
1541 
1542    /* We don't have a queued present here.
1543     * Immediately let the application acquire again, but query geometry first so
1544     * we can report OUT_OF_DATE on resize. */
1545    xcb_generic_error_t *err;
1546 
1547    xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
1548    VkResult result = VK_SUCCESS;
1549    if (geom) {
1550       if (chain->extent.width != geom->width ||
1551           chain->extent.height != geom->height)
1552          result = VK_ERROR_OUT_OF_DATE_KHR;
1553    } else {
1554       result = VK_ERROR_SURFACE_LOST_KHR;
1555    }
1556    free(err);
1557    free(geom);
1558 
1559    wsi_queue_push(&chain->acquire_queue, image_index);
1560    return result;
1561 }
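/* Worked example of the chunking arithmetic above (illustrative only; the
 * concrete numbers are assumptions): for a 1920x1080 swapchain with 4 bytes
 * per pixel, stride_b = 1920 * 4 = 7680.  xcb_get_maximum_request_length()
 * reports the limit in 4-byte units, so with an assumed big-request limit of
 * max_req_len = 1048576 (4 MiB):
 *
 *    size      = (hdr_len + 7680 * 1080) >> 2        ~= 2073606 units
 *    num_lines = ((1048576 << 2) - hdr_len) / 7680    = 546 rows per request
 *
 * size exceeds max_req_len, so the image goes out in ceil(1080 / 546) = 2
 * xcb_put_image() requests of at most 546 rows each.
 */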
1562 
1563 static void
1564 x11_capture_trace(struct x11_swapchain *chain)
1565 {
1566 #ifdef XCB_KEYSYMS_AVAILABLE
1567    VK_FROM_HANDLE(vk_device, device, chain->base.device);
1568    if (!device->physical->instance->trace_mode)
1569       return;
1570 
1571    xcb_query_keymap_cookie_t keys_cookie = xcb_query_keymap(chain->conn);
1572 
1573    xcb_generic_error_t *error = NULL;
1574    xcb_query_keymap_reply_t *keys = xcb_query_keymap_reply(chain->conn, keys_cookie, &error);
1575    if (error) {
1576       free(error);
1577       return;
1578    }
1579 
1580    xcb_key_symbols_t *key_symbols = xcb_key_symbols_alloc(chain->conn);
1581    xcb_keycode_t *keycodes = xcb_key_symbols_get_keycode(key_symbols, XK_F1);
1582    if (keycodes) {
1583       xcb_keycode_t keycode = keycodes[0];
1584       free(keycodes);
1585 
1586       simple_mtx_lock(&device->trace_mtx);
1587       bool capture_key_pressed = keys->keys[keycode / 8] & (1u << (keycode % 8));
1588       device->trace_hotkey_trigger = capture_key_pressed && (capture_key_pressed != chain->base.capture_key_pressed);
1589       chain->base.capture_key_pressed = capture_key_pressed;
1590       simple_mtx_unlock(&device->trace_mtx);
1591    }
1592 
1593    xcb_key_symbols_free(key_symbols);
1594    free(keys);
1595 #endif
1596 }
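/* Illustrative note on the keymap check above (the keycode value is an
 * assumption; it depends on the keyboard mapping): xcb_query_keymap() returns
 * a 32-byte bitmask with one bit per keycode.  If XK_F1 resolves to keycode
 * 67, the test reads keys->keys[67 / 8] == keys->keys[8] and checks bit
 * 67 % 8 == 3, i.e. F1 is held when (keys->keys[8] & (1u << 3)) != 0.
 */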
1597 
1598 /* Use a trivial helper here to make it obvious in the code
1599  * where we intend to access chain->status outside the thread lock. */
1600 static VkResult x11_swapchain_read_status_atomic(struct x11_swapchain *chain)
1601 {
1602    return chain->status;
1603 }
1604 
1605 /**
1606  * Decides whether an early wait on buffer fences is required before buffer submission.
1607  * This is needed for mailbox mode, as otherwise the latest image in the queue might not be fully
1608  * rendered at present time, which could lead to missing a frame. This is an Xorg issue.
1609  *
1610  * On Wayland compositors this used to be a problem as well, but it no longer is,
1611  * and this check assumes that Mesa is running on a reasonable compositor.
1612  * The wait behavior can be forced by setting the 'vk_xwayland_wait_ready' DRIConf option to true.
1613  * Some drivers, e.g. Venus, may still want to require wait_ready by default,
1614  * so the option is kept around for now.
1615  *
1616  * On Wayland we don't know at this point whether the tearing protocol is or can be used by Xwayland,
1617  * so we have to make the MAILBOX assumption.
1618  */
1619 static bool
1620 x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
1621                           struct wsi_x11_connection *wsi_conn,
1622                           VkPresentModeKHR present_mode)
1623 {
1624    if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
1625       return false;
1626    }
1627 
1628    switch (present_mode) {
1629       case VK_PRESENT_MODE_MAILBOX_KHR:
1630          return true;
1631       case VK_PRESENT_MODE_IMMEDIATE_KHR:
1632          return wsi_conn->is_xwayland;
1633       default:
1634          return false;
1635    }
1636 }
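/* Summary of the decision above (illustrative):
 *
 *    present mode     | Xorg | Xwayland (xwaylandWaitReady enabled)
 *    MAILBOX          | wait | wait
 *    IMMEDIATE        | no   | wait
 *    FIFO / RELAXED   | no   | no
 *
 * With the 'vk_xwayland_wait_ready' option disabled, the Xwayland column
 * becomes "no" everywhere.
 */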
1637 
1638 /* This matches Wayland. */
1639 #define X11_SWAPCHAIN_MAILBOX_IMAGES 4
1640 
1641 static bool
1642 x11_requires_mailbox_image_count(const struct wsi_device *device,
1643                                  struct wsi_x11_connection *wsi_conn,
1644                                  VkPresentModeKHR present_mode)
1645 {
1646    /* If we're resorting to wait for fences, we're assuming a MAILBOX-like model,
1647     * and we should allocate accordingly.
1648     *
1649     * One potential concern here is IMMEDIATE mode on Wayland.
1650     * This situation could arise:
1651     * - Fullscreen FLIP mode
1652     * - Compositor does not support tearing protocol (we cannot know this here)
1653     *
1654     * With 3 images, during the window between latch and flip, there is only one image left to the app,
1655     * so peak FPS may not be reached if the window between latch and flip is large,
1656     * but tests on contemporary compositors suggest this effect is minor.
1657     * Frame rate in the thousands can easily be reached.
1658     *
1659     * There are pragmatic reasons to expose 3 images for IMMEDIATE on Xwl.
1660     * - minImageCount is not intended as a tool to tune performance; its intent is to signal forward progress.
1661     *   Our X11 and WL implementations do so for pragmatic reasons, since sync acquire interacts poorly with 2 images.
1662     *   A jump from 3 to 4 is at best a minor improvement which only affects applications
1663     *   running at extremely high frame rates, way beyond the monitor refresh rate.
1664     *   On the other hand, lowering minImageCount to 2 would break the fundamental idea of MAILBOX
1665     *   (and IMMEDIATE without tear), since FPS > refresh rate would not be possible.
1666     *
1667     * - Several games developed for other platforms and other Linux WSI implementations
1668     *   do not expect that image counts arbitrarily change when changing present mode,
1669     *   and will crash when Mesa does so.
1670     *   There are several games using the strict_image_count drirc to work around this,
1671     *   and it would be good to be friendlier in the first place, so we don't have to work around more games.
1672     *   IMMEDIATE is a common presentation mode on those platforms, but MAILBOX is more Wayland-centric in nature,
1673     *   so increasing image count for that mode is more reasonable.
1674     *
1675     * - IMMEDIATE expects tearing, and when tearing, 3 images are more than enough.
1676     *
1677     * - With EXT_swapchain_maintenance1, toggling between FIFO / IMMEDIATE (used extensively by D3D layering)
1678     *   would require application to allocate >3 images which is unfortunate for memory usage,
1679     *   and potentially disastrous for latency unless KHR_present_wait is used.
1680     */
1681    return x11_needs_wait_for_fences(device, wsi_conn, present_mode) ||
1682           present_mode == VK_PRESENT_MODE_MAILBOX_KHR;
1683 }
1684 
1685 /**
1686  * Send image to the X server for presentation at target_msc.
1687  */
1688 static VkResult
1689 x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
1690                    uint64_t target_msc, VkPresentModeKHR present_mode)
1691 {
1692    x11_capture_trace(chain);
1693 
1694    VkResult result;
1695    if (chain->base.wsi->sw && !chain->has_mit_shm)
1696       result = x11_present_to_x11_sw(chain, image_index);
1697    else
1698 #ifdef HAVE_X11_DRM
1699       result = x11_present_to_x11_dri3(chain, image_index, target_msc, present_mode);
1700 #else
1701       unreachable("X11 missing DRI3 support!");
1702 #endif
1703 
1704    if (result < 0)
1705       x11_swapchain_notify_error(chain, result);
1706    else
1707       x11_notify_pending_present(chain, &chain->images[image_index]);
1708 
1709    return result;
1710 }
1711 
1712 static VkResult
1713 x11_release_images(struct wsi_swapchain *wsi_chain,
1714                    uint32_t count, const uint32_t *indices)
1715 {
1716    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1717    if (chain->status == VK_ERROR_SURFACE_LOST_KHR)
1718       return chain->status;
1719 
1720    /* If we're using implicit sync, push images to the acquire queue */
1721    if (!chain->base.image_info.explicit_sync) {
1722       for (uint32_t i = 0; i < count; i++) {
1723          uint32_t index = indices[i];
1724          assert(index < chain->base.image_count);
1725          wsi_queue_push(&chain->acquire_queue, index);
1726       }
1727    }
1728 
1729    return VK_SUCCESS;
1730 }
1731 
1732 static void
1733 x11_set_present_mode(struct wsi_swapchain *wsi_chain,
1734                      VkPresentModeKHR mode)
1735 {
1736    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1737    chain->base.present_mode = mode;
1738 }
1739 
1740 /**
1741  * Acquire a ready-to-use image from the swapchain.
1742  *
1743  * This usually means that the image is not waiting on presentation and that it
1744  * has been released by the X server to be used again by the consumer.
1745  */
1746 static VkResult
1747 x11_acquire_next_image(struct wsi_swapchain *anv_chain,
1748                        const VkAcquireNextImageInfoKHR *info,
1749                        uint32_t *image_index)
1750 {
1751    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1752    uint64_t timeout = info->timeout;
1753 
1754    /* If the swapchain is in an error state, don't go any further. */
1755    VkResult result = x11_swapchain_read_status_atomic(chain);
1756    if (result < 0)
1757       return result;
1758 
1759    if (chain->base.image_info.explicit_sync) {
1760       result = x11_wait_for_explicit_sync_release_submission(chain, timeout,
1761                                                              image_index);
1762    } else {
1763       result = wsi_queue_pull(&chain->acquire_queue,
1764                               image_index, timeout);
1765    }
1766 
1767    if (result == VK_TIMEOUT)
1768       return info->timeout ? VK_TIMEOUT : VK_NOT_READY;
1769 
1770    if (result < 0) {
1771       mtx_lock(&chain->thread_state_lock);
1772       result = x11_swapchain_result(chain, result);
1773       mtx_unlock(&chain->thread_state_lock);
1774    } else {
1775       result = x11_swapchain_read_status_atomic(chain);
1776    }
1777 
1778    if (result < 0)
1779       return result;
1780 
1781    assert(*image_index < chain->base.image_count);
1782 #ifdef HAVE_X11_DRM
1783    if (chain->images[*image_index].shm_fence &&
1784        !chain->base.image_info.explicit_sync)
1785       xshmfence_await(chain->images[*image_index].shm_fence);
1786 #endif
1787 
1788    return result;
1789 }
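/* Application-side usage sketch (the handles are assumptions, not part of
 * this file): with a zero timeout the translation above yields VK_NOT_READY
 * rather than VK_TIMEOUT.
 *
 *    uint32_t idx;
 *    VkResult r = vkAcquireNextImageKHR(device, swapchain, 0,
 *                                       acquire_semaphore, VK_NULL_HANDLE,
 *                                       &idx);
 *    if (r == VK_NOT_READY) {
 *       // no image is available yet; try again later
 *    }
 */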
1790 
1791 /**
1792  * Queue a new presentation of an image that was previously acquired by the
1793  * consumer.
1794  *
1795  * Note that in immediate presentation mode this does not really queue the
1796  * presentation but directly asks the X server to show it.
1797  */
1798 static VkResult
1799 x11_queue_present(struct wsi_swapchain *anv_chain,
1800                   uint32_t image_index,
1801                   uint64_t present_id,
1802                   const VkPresentRegionKHR *damage)
1803 {
1804    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1805    xcb_xfixes_region_t update_area = 0;
1806 
1807    /* If the swapchain is in an error state, don't go any further. */
1808    VkResult status = x11_swapchain_read_status_atomic(chain);
1809    if (status < 0)
1810       return status;
1811 
1812    if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
1813       damage->rectangleCount <= MAX_DAMAGE_RECTS) {
1814       xcb_rectangle_t *rects = chain->images[image_index].rects;
1815 
1816       update_area = chain->images[image_index].update_region;
1817       for (unsigned i = 0; i < damage->rectangleCount; i++) {
1818          const VkRectLayerKHR *rect = &damage->pRectangles[i];
1819          assert(rect->layer == 0);
1820          rects[i].x = rect->offset.x;
1821          rects[i].y = rect->offset.y;
1822          rects[i].width = rect->extent.width;
1823          rects[i].height = rect->extent.height;
1824       }
1825       xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
1826       chain->images[image_index].rectangle_count = damage->rectangleCount;
1827    } else {
1828       chain->images[image_index].rectangle_count = 0;
1829    }
1830    chain->images[image_index].update_area = update_area;
1831    chain->images[image_index].present_id = present_id;
1832    /* With EXT_swapchain_maintenance1, the present mode can change per present. */
1833    chain->images[image_index].present_mode = chain->base.present_mode;
1834 
1835    wsi_queue_push(&chain->present_queue, image_index);
1836    return x11_swapchain_read_status_atomic(chain);
1837 }
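/* Application-side sketch of the damage path above (handles and values are
 * assumptions): with VK_KHR_incremental_present, VkPresentRegionsKHR is
 * chained into VkPresentInfoKHR::pNext, and each rectangle ends up in the
 * XFixes region used as the Present 'update' area.
 *
 *    VkRectLayerKHR rect = {
 *       .offset = { 16, 16 }, .extent = { 256, 128 }, .layer = 0,
 *    };
 *    VkPresentRegionKHR region = { .rectangleCount = 1, .pRectangles = &rect };
 *    VkPresentRegionsKHR regions = {
 *       .sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR,
 *       .swapchainCount = 1,
 *       .pRegions = &region,
 *    };
 */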
1838 
1839 /**
1840  * The number of images that are not owned by X11:
1841  *  (1) owned by the app, or
1842  *  (2) available for the app to take ownership of through an acquire, or
1843  *  (3) in the present queue waiting for the FIFO thread to present to X11.
1844  */
1845 static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
1846 {
1847    return chain->base.image_count - chain->sent_image_count;
1848 }
1849 
1850 /* This thread is responsible for pumping PRESENT replies.
1851  * This is done in a separate thread from the X11 presentation thread
1852  * to be able to support non-blocking modes like IMMEDIATE and MAILBOX.
1853  * Frame completion events can happen at any time, and we need to handle
1854  * the events as soon as they come in to have a quality implementation.
1855  * The presentation thread may go to sleep waiting for new presentation events to come in,
1856  * and it cannot wait for both X events and application events at the same time.
1857  * If we only cared about FIFO, this thread wouldn't be very useful.
1858  * An earlier implementation of the X11 WSI had a single FIFO thread that blocked on X events after presenting.
1859  * For IMMEDIATE and MAILBOX, the application thread pumped the event queue, which caused a lot of pain
1860  * when trying to deal with present wait.
1861  */
1862 static int
1863 x11_manage_event_queue(void *state)
1864 {
1865    struct x11_swapchain *chain = state;
1866    u_thread_setname("WSI swapchain event");
1867 
1868    /* While there is an outstanding IDLE we should wait for it.
1869     * In FLIP modes, eventually at most one image will not be driver owned.
1870     * In BLIT modes, we expect that all images will eventually be driver owned,
1871     * but we don't know which mode is being used. */
1872    unsigned forward_progress_guaranteed_acquired_images = chain->base.image_count - 1;
1873 
1874    mtx_lock(&chain->thread_state_lock);
1875 
1876    while (chain->status >= 0) {
1877       /* This thread should only go to sleep waiting for X events when we know there are pending events.
1878        * We expect COMPLETION events when there is at least one image marked as present_queued.
1879        * We also expect IDLE events, but we only consider waiting for them when all images are busy
1880        * and the application has fewer than N images acquired. */
1881 
1882       bool assume_forward_progress = false;
1883 
1884       for (uint32_t i = 0; i < chain->base.image_count; i++) {
1885          if (chain->images[i].present_queued_count != 0) {
1886             /* We must pump through a present wait and unblock FIFO thread if using FIFO mode. */
1887             assume_forward_progress = true;
1888             break;
1889          }
1890       }
1891 
1892       if (!assume_forward_progress && !chain->base.image_info.explicit_sync) {
1893          /* If true, application expects acquire (IDLE) to happen in finite time. */
1894          assume_forward_progress = x11_driver_owned_images(chain) <
1895                                    forward_progress_guaranteed_acquired_images;
1896       }
1897 
1898       if (assume_forward_progress) {
1899          /* Only yield lock when blocking on X11 event. */
1900          mtx_unlock(&chain->thread_state_lock);
1901          xcb_generic_event_t *event =
1902                xcb_wait_for_special_event(chain->conn, chain->special_event);
1903          mtx_lock(&chain->thread_state_lock);
1904 
1905          /* Re-check status since we dropped the lock while waiting for X. */
1906          VkResult result = chain->status;
1907 
1908          if (result >= 0) {
1909             if (event) {
1910                /* Queue thread will be woken up if anything interesting happened in handler.
1911                 * Queue thread blocks on:
1912                 * - Presentation events completing
1913                 * - Presentation requests from application
1914                 * - WaitForFence workaround if applicable */
1915                result = x11_handle_dri3_present_event(chain, (void *) event);
1916             } else {
1917                result = VK_ERROR_SURFACE_LOST_KHR;
1918             }
1919          }
1920 
1921          /* Updates chain->status and wakes up threads as necessary on error. */
1922          x11_swapchain_result(chain, result);
1923          free(event);
1924       } else {
1925          /* Nothing important to do, go to sleep until queue thread wakes us up. */
1926          u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
1927       }
1928    }
1929 
1930    mtx_unlock(&chain->thread_state_lock);
1931    return 0;
1932 }
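/* Worked example of the forward-progress heuristic above (the numbers are
 * illustrative): with image_count == 4,
 * forward_progress_guaranteed_acquired_images == 3.  If X currently holds two
 * images (sent_image_count == 2), the driver owns 4 - 2 = 2 < 3 images, so an
 * IDLE event must eventually arrive and the thread blocks in
 * xcb_wait_for_special_event() instead of on the condition variable.
 */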
1933 
1934 /**
1935  * Presentation thread.
1936  *
1937  * Runs in a separate thread, blocks and reacts to queued images on the
1938  * present-queue
1939  *
1940  * This must be a thread since we have to block in two cases:
1941  * - FIFO:
1942  *     We must wait for previous presentation to complete
1943  *     in some way so we can compute the target MSC.
1944  * - WaitForFence workaround:
1945  *     In some cases, we need to wait for image to complete rendering before submitting it to X.
1946  */
1947 static int
1948 x11_manage_present_queue(void *state)
1949 {
1950    struct x11_swapchain *chain = state;
1951    struct wsi_x11_connection *wsi_conn =
1952          wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1953    VkResult result = VK_SUCCESS;
1954 
1955    u_thread_setname("WSI swapchain queue");
1956 
1957    uint64_t target_msc = 0;
1958 
1959    while (x11_swapchain_read_status_atomic(chain) >= 0) {
1960       uint32_t image_index = 0;
1961       {
1962          MESA_TRACE_SCOPE("pull present queue");
1963          result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
1964          assert(result != VK_TIMEOUT);
1965       }
1966 
1967       /* The status can change underneath us if the swapchain is destroyed
1968        * from another thread. */
1969       if (result >= 0)
1970          result = x11_swapchain_read_status_atomic(chain);
1971       if (result < 0)
1972          break;
1973 
1974       VkPresentModeKHR present_mode = chain->images[image_index].present_mode;
1975 
1976       if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
1977                                     present_mode) &&
1978           /* not necessary with explicit sync */
1979           !chain->base.image_info.explicit_sync) {
1980          MESA_TRACE_SCOPE("wait fence");
1981          result = chain->base.wsi->WaitForFences(chain->base.device, 1,
1982                                                  &chain->base.fences[image_index],
1983                                                  true, UINT64_MAX);
1984          if (result != VK_SUCCESS) {
1985             result = VK_ERROR_OUT_OF_DATE_KHR;
1986             break;
1987          }
1988       }
1989 
1990       mtx_lock(&chain->thread_state_lock);
1991 
1992       /* In IMMEDIATE and MAILBOX modes, there is a risk that we have exhausted the presentation queue,
1993        * since IDLE could return multiple times before observing a COMPLETE. */
1994       while (chain->status >= 0 &&
1995              chain->images[image_index].present_queued_count ==
1996              ARRAY_SIZE(chain->images[image_index].pending_completions)) {
1997          u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
1998       }
1999 
2000       if (chain->status < 0) {
2001          mtx_unlock(&chain->thread_state_lock);
2002          break;
2003       }
2004 
2005       result = x11_present_to_x11(chain, image_index, target_msc, present_mode);
2006 
2007       if (result < 0) {
2008          mtx_unlock(&chain->thread_state_lock);
2009          break;
2010       }
2011 
2012       if (present_mode == VK_PRESENT_MODE_FIFO_KHR ||
2013           present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
2014          MESA_TRACE_SCOPE("wait present");
2015 
2016          while (chain->status >= 0 && chain->images[image_index].present_queued_count != 0) {
2017             /* In FIFO mode, we need to make sure we observe a COMPLETE before queueing up
2018              * another present. */
2019             u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
2020          }
2021 
2022          /* If the next present is not FIFO, we still need to ensure we don't override that
2023           * present. If it is FIFO, we need to ensure the MSC is larger than the COMPLETED frame. */
2024          target_msc = chain->last_present_msc + 1;
2025       }
2026 
2027       mtx_unlock(&chain->thread_state_lock);
2028    }
2029 
2030    mtx_lock(&chain->thread_state_lock);
2031    x11_swapchain_result(chain, result);
2032    if (!chain->base.image_info.explicit_sync)
2033       wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
2034    mtx_unlock(&chain->thread_state_lock);
2035 
2036    return 0;
2037 }
2038 
2039 static uint8_t *
2040 alloc_shm(struct wsi_image *imagew, unsigned size)
2041 {
2042 #ifdef HAVE_SYS_SHM_H
2043    struct x11_image *image = (struct x11_image *)imagew;
2044    image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
2045    if (image->shmid < 0)
2046       return NULL;
2047 
2048    uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
2049    /* mark the segment immediately for deletion to avoid leaks */
2050    shmctl(image->shmid, IPC_RMID, 0);
2051 
2052    if (addr == (uint8_t *) -1)
2053       return NULL;
2054 
2055    image->shmaddr = addr;
2056    return addr;
2057 #else
2058    return NULL;
2059 #endif
2060 }
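/* Usage sketch of the SysV shm pattern above (illustrative): marking the
 * segment IPC_RMID immediately after attaching means the kernel destroys it
 * as soon as the last attachment goes away, so an early error return or a
 * crash cannot leak the segment.
 *
 *    int id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
 *    void *ptr = shmat(id, NULL, 0);
 *    shmctl(id, IPC_RMID, NULL);   // destroyed once the last user detaches
 *    ...
 *    shmdt(ptr);                   // final detach frees the memory
 */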
2061 
2062 static VkResult
2063 x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
2064                const VkSwapchainCreateInfoKHR *pCreateInfo,
2065                const VkAllocationCallbacks* pAllocator,
2066                struct x11_image *image)
2067 {
2068    VkResult result;
2069 
2070    result = wsi_create_image(&chain->base, &chain->base.image_info,
2071                              &image->base);
2072    if (result != VK_SUCCESS)
2073       return result;
2074 
2075    if (chain->base.wsi->sw && !chain->has_mit_shm)
2076       return VK_SUCCESS;
2077 
2078 #ifdef HAVE_X11_DRM
2079    xcb_void_cookie_t cookie;
2080    xcb_generic_error_t *error = NULL;
2081    uint32_t bpp = 32;
2082    int fence_fd;
2083    image->update_region = xcb_generate_id(chain->conn);
2084    xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);
2085 
2086    if (chain->base.wsi->sw) {
2087       image->shmseg = xcb_generate_id(chain->conn);
2088 
2089       xcb_shm_attach(chain->conn,
2090                      image->shmseg,
2091                      image->shmid,
2092                      0);
2093       image->pixmap = xcb_generate_id(chain->conn);
2094       cookie = xcb_shm_create_pixmap_checked(chain->conn,
2095                                              image->pixmap,
2096                                              chain->window,
2097                                              image->base.row_pitches[0] / 4,
2098                                              pCreateInfo->imageExtent.height,
2099                                              chain->depth,
2100                                              image->shmseg, 0);
2101       xcb_discard_reply(chain->conn, cookie.sequence);
2102       goto out_fence;
2103    }
2104    image->pixmap = xcb_generate_id(chain->conn);
2105 
2106    if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
2107       /* If the image has a modifier, we must have DRI3 v1.2. */
2108       assert(chain->has_dri3_modifiers);
2109 
2110       /* XCB requires an array of file descriptors but we only have one */
2111       int fds[4] = { -1, -1, -1, -1 };
2112       for (int i = 0; i < image->base.num_planes; i++) {
2113          fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
2114          if (fds[i] == -1) {
2115             for (int j = 0; j < i; j++)
2116                close(fds[j]);
2117 
2118             return VK_ERROR_OUT_OF_HOST_MEMORY;
2119          }
2120       }
2121 
2122       cookie =
2123          xcb_dri3_pixmap_from_buffers_checked(chain->conn,
2124                                               image->pixmap,
2125                                               chain->window,
2126                                               image->base.num_planes,
2127                                               pCreateInfo->imageExtent.width,
2128                                               pCreateInfo->imageExtent.height,
2129                                               image->base.row_pitches[0],
2130                                               image->base.offsets[0],
2131                                               image->base.row_pitches[1],
2132                                               image->base.offsets[1],
2133                                               image->base.row_pitches[2],
2134                                               image->base.offsets[2],
2135                                               image->base.row_pitches[3],
2136                                               image->base.offsets[3],
2137                                               chain->depth, bpp,
2138                                               image->base.drm_modifier,
2139                                               fds);
2140    } else {
2141       /* Without passing modifiers, we can't have multi-plane RGB images. */
2142       assert(image->base.num_planes == 1);
2143 
2144       /* XCB will take ownership of the FD we pass it. */
2145       int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
2146       if (fd == -1)
2147          return VK_ERROR_OUT_OF_HOST_MEMORY;
2148 
2149       cookie =
2150          xcb_dri3_pixmap_from_buffer_checked(chain->conn,
2151                                              image->pixmap,
2152                                              chain->window,
2153                                              image->base.sizes[0],
2154                                              pCreateInfo->imageExtent.width,
2155                                              pCreateInfo->imageExtent.height,
2156                                              image->base.row_pitches[0],
2157                                              chain->depth, bpp, fd);
2158    }
2159 
2160    error = xcb_request_check(chain->conn, cookie);
2161    if (error != NULL) {
2162       free(error);
2163       goto fail_image;
2164    }
2165 
2166 #ifdef HAVE_DRI3_EXPLICIT_SYNC
2167    if (chain->base.image_info.explicit_sync) {
2168       for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
2169          image->dri3_syncobj[i] = xcb_generate_id(chain->conn);
2170          int fd = dup(image->base.explicit_sync[i].fd);
2171          if (fd < 0)
2172             goto fail_image;
2173 
2174          cookie = xcb_dri3_import_syncobj_checked(chain->conn,
2175                                                   image->dri3_syncobj[i],
2176                                                   chain->window,
2177                                                   fd /* libxcb closes the fd */);
2178          error = xcb_request_check(chain->conn, cookie);
2179          if (error != NULL) {
2180             free(error);
2181             goto fail_image;
2182          }
2183       }
2184    }
2185 #endif
2186 
2187 out_fence:
2188    fence_fd = xshmfence_alloc_shm();
2189    if (fence_fd < 0)
2190       goto fail_pixmap;
2191 
2192    image->shm_fence = xshmfence_map_shm(fence_fd);
2193    if (image->shm_fence == NULL)
2194       goto fail_shmfence_alloc;
2195 
2196    image->sync_fence = xcb_generate_id(chain->conn);
2197    xcb_dri3_fence_from_fd(chain->conn,
2198                           image->pixmap,
2199                           image->sync_fence,
2200                           false,
2201                           fence_fd);
2202 
2203    xshmfence_trigger(image->shm_fence);
2204    return VK_SUCCESS;
2205 
2206 fail_shmfence_alloc:
2207    close(fence_fd);
2208 
2209 fail_pixmap:
2210    cookie = xcb_free_pixmap(chain->conn, image->pixmap);
2211    xcb_discard_reply(chain->conn, cookie.sequence);
2212 
2213 fail_image:
2214    wsi_destroy_image(&chain->base, &image->base);
2215 
2216 #else
2217    unreachable("SHM support not compiled in");
2218 #endif
2219    return VK_ERROR_INITIALIZATION_FAILED;
2220 }
2221 
2222 static void
2223 x11_image_finish(struct x11_swapchain *chain,
2224                  const VkAllocationCallbacks* pAllocator,
2225                  struct x11_image *image)
2226 {
2227    xcb_void_cookie_t cookie;
2228    if (!chain->base.wsi->sw || chain->has_mit_shm) {
2229 #ifdef HAVE_X11_DRM
2230       cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
2231       xcb_discard_reply(chain->conn, cookie.sequence);
2232       xshmfence_unmap_shm(image->shm_fence);
2233 #endif
2234 
2235       cookie = xcb_free_pixmap(chain->conn, image->pixmap);
2236       xcb_discard_reply(chain->conn, cookie.sequence);
2237 #ifdef HAVE_X11_DRM
2238       cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
2239       xcb_discard_reply(chain->conn, cookie.sequence);
2240 #endif
2241 #ifdef HAVE_DRI3_EXPLICIT_SYNC
2242       if (chain->base.image_info.explicit_sync) {
2243          for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
2244             cookie = xcb_dri3_free_syncobj(chain->conn, image->dri3_syncobj[i]);
2245             xcb_discard_reply(chain->conn, cookie.sequence);
2246          }
2247       }
2248 #endif
2249    }
2250 
2251    wsi_destroy_image(&chain->base, &image->base);
2252 #ifdef HAVE_SYS_SHM_H
2253    if (image->shmaddr)
2254       shmdt(image->shmaddr);
2255 #endif
2256 }
2257 
2258 static void
2259 wsi_x11_recompute_dri3_modifier_hash(blake3_hash *hash, const struct wsi_drm_image_params *params)
2260 {
2261    mesa_blake3 ctx;
2262    _mesa_blake3_init(&ctx);
2263    _mesa_blake3_update(&ctx, &params->num_modifier_lists, sizeof(params->num_modifier_lists));
2264    for (uint32_t i = 0; i < params->num_modifier_lists; i++) {
2265       _mesa_blake3_update(&ctx, &i, sizeof(i));
2266       _mesa_blake3_update(&ctx, params->modifiers[i],
2267                           params->num_modifiers[i] * sizeof(*params->modifiers[i]));
2268    }
2269    _mesa_blake3_update(&ctx, &params->same_gpu, sizeof(params->same_gpu));
2270    _mesa_blake3_final(&ctx, *hash);
2271 }
2272 
2273 static void
2274 wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
2275                            xcb_connection_t *conn, xcb_window_t window,
2276                            uint8_t depth, uint8_t bpp,
2277                            uint64_t **modifiers_in, uint32_t *num_modifiers_in,
2278                            uint32_t *num_tranches_in,
2279                            const VkAllocationCallbacks *pAllocator)
2280 {
2281    if (!wsi_conn->has_dri3_modifiers)
2282       goto out;
2283 
2284 #ifdef HAVE_X11_DRM
2285    xcb_generic_error_t *error = NULL;
2286    xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
2287       xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
2288    xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
2289       xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
2290    free(error);
2291 
2292    if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
2293                       mod_reply->num_screen_modifiers == 0)) {
2294       free(mod_reply);
2295       goto out;
2296    }
2297 
2298    uint32_t n = 0;
2299    uint32_t counts[2];
2300    uint64_t *modifiers[2];
2301 
2302    if (mod_reply->num_window_modifiers) {
2303       counts[n] = mod_reply->num_window_modifiers;
2304       modifiers[n] = vk_alloc(pAllocator,
2305                               counts[n] * sizeof(uint64_t),
2306                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2307       if (!modifiers[n]) {
2308          free(mod_reply);
2309          goto out;
2310       }
2311 
2312       memcpy(modifiers[n],
2313              xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
2314              counts[n] * sizeof(uint64_t));
2315       n++;
2316    }
2317 
2318    if (mod_reply->num_screen_modifiers) {
2319       counts[n] = mod_reply->num_screen_modifiers;
2320       modifiers[n] = vk_alloc(pAllocator,
2321                               counts[n] * sizeof(uint64_t),
2322                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2323       if (!modifiers[n]) {
2324          if (n > 0)
2325             vk_free(pAllocator, modifiers[0]);
2326          free(mod_reply);
2327          goto out;
2328       }
2329 
2330       memcpy(modifiers[n],
2331              xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
2332              counts[n] * sizeof(uint64_t));
2333       n++;
2334    }
2335 
2336    for (int i = 0; i < n; i++) {
2337       modifiers_in[i] = modifiers[i];
2338       num_modifiers_in[i] = counts[i];
2339    }
2340    *num_tranches_in = n;
2341 
2342    free(mod_reply);
2343    return;
2344 #endif
2345 out:
2346    *num_tranches_in = 0;
2347 }
2348 #ifdef HAVE_X11_DRM
2349 static bool
2350 wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain)
2351 {
2352    const struct wsi_device *wsi_device = chain->base.wsi;
2353 
2354    if (wsi_device->sw || !wsi_device->supports_modifiers)
2355       return false;
2356 
2357    struct wsi_drm_image_params drm_image_params;
2358    uint64_t *modifiers[2] = {NULL, NULL};
2359    uint32_t num_modifiers[2] = {0, 0};
2360 
2361    struct wsi_x11_connection *wsi_conn =
2362          wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
2363 
2364    xcb_get_geometry_reply_t *geometry =
2365          xcb_get_geometry_reply(chain->conn, xcb_get_geometry(chain->conn, chain->window), NULL);
2366    if (geometry == NULL)
2367       return false;
2368    uint32_t bit_depth = geometry->depth;
2369    free(geometry);
2370 
2371    drm_image_params = (struct wsi_drm_image_params){
2372       .base.image_type = WSI_IMAGE_TYPE_DRM,
2373       .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, chain->conn),
2374       .explicit_sync = chain->base.image_info.explicit_sync,
2375    };
2376 
2377    /* This is called from a thread, so we must not use an allocation callback from the user.
2378     * From the spec:
2379     * An implementation must only make calls into an application-provided allocator
2380     * during the execution of an API command.
2381     * An implementation must only make calls into an application-provided allocator
2382     * from the same thread that called the provoking API command. */
2383 
2384    wsi_x11_get_dri3_modifiers(wsi_conn, chain->conn, chain->window, bit_depth, 32,
2385                               modifiers, num_modifiers,
2386                               &drm_image_params.num_modifier_lists,
2387                               vk_default_allocator());
2388 
2389    drm_image_params.num_modifiers = num_modifiers;
2390    drm_image_params.modifiers = (const uint64_t **)modifiers;
2391 
2392    blake3_hash hash;
2393    wsi_x11_recompute_dri3_modifier_hash(&hash, &drm_image_params);
2394 
2395    for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2396       vk_free(vk_default_allocator(), modifiers[i]);
2397 
2398    return memcmp(hash, chain->dri3_modifier_hash, sizeof(hash)) != 0;
2399 }
2400 #endif
2401 static VkResult
2402 x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
2403                       const VkAllocationCallbacks *pAllocator)
2404 {
2405    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
2406 
2407    mtx_lock(&chain->thread_state_lock);
2408    chain->status = VK_ERROR_OUT_OF_DATE_KHR;
2409    u_cnd_monotonic_broadcast(&chain->thread_state_cond);
2410    mtx_unlock(&chain->thread_state_lock);
2411 
2412    /* Push a UINT32_MAX to wake up the manager */
2413    wsi_queue_push(&chain->present_queue, UINT32_MAX);
2414    thrd_join(chain->queue_manager, NULL);
2415    thrd_join(chain->event_manager, NULL);
2416 
2417    if (!chain->base.image_info.explicit_sync)
2418       wsi_queue_destroy(&chain->acquire_queue);
2419    wsi_queue_destroy(&chain->present_queue);
2420 
2421    for (uint32_t i = 0; i < chain->base.image_count; i++)
2422       x11_image_finish(chain, pAllocator, &chain->images[i]);
2423 #ifdef HAVE_X11_DRM
2424    xcb_void_cookie_t cookie;
2425    xcb_unregister_for_special_event(chain->conn, chain->special_event);
2426    cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
2427                                              chain->window,
2428                                              XCB_PRESENT_EVENT_MASK_NO_EVENT);
2429    xcb_discard_reply(chain->conn, cookie.sequence);
2430 #endif
2431    mtx_destroy(&chain->present_progress_mutex);
2432    u_cnd_monotonic_destroy(&chain->present_progress_cond);
2433    mtx_destroy(&chain->thread_state_lock);
2434    u_cnd_monotonic_destroy(&chain->thread_state_cond);
2435 
2436    wsi_swapchain_finish(&chain->base);
2437 
2438    vk_free(pAllocator, chain);
2439 
2440    return VK_SUCCESS;
2441 }
2442 
2443 static void
2444 wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
2445                                    xcb_drawable_t drawable,
2446                                    uint32_t state)
2447 {
2448    static char const name[] = "_VARIABLE_REFRESH";
2449    xcb_intern_atom_cookie_t cookie;
2450    xcb_intern_atom_reply_t* reply;
2451    xcb_void_cookie_t check;
2452 
2453    cookie = xcb_intern_atom(conn, 0, strlen(name), name);
2454    reply = xcb_intern_atom_reply(conn, cookie, NULL);
2455    if (reply == NULL)
2456       return;
2457 
2458    if (state)
2459       check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
2460                                           drawable, reply->atom,
2461                                           XCB_ATOM_CARDINAL, 32, 1, &state);
2462    else
2463       check = xcb_delete_property_checked(conn, drawable, reply->atom);
2464 
2465    xcb_discard_reply(conn, check.sequence);
2466    free(reply);
2467 }
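/* The property set above can be inspected from a terminal (illustrative; the
 * window id is an assumption):
 *
 *    $ xprop -id 0x3a00007 _VARIABLE_REFRESH
 *    _VARIABLE_REFRESH(CARDINAL) = 1
 */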
2468 
2469 static VkResult x11_wait_for_present(struct wsi_swapchain *wsi_chain,
2470                                      uint64_t waitValue,
2471                                      uint64_t timeout)
2472 {
2473    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
2474    struct timespec abs_timespec;
2475    uint64_t abs_timeout = 0;
2476    if (timeout != 0)
2477       abs_timeout = os_time_get_absolute_timeout(timeout);
2478 
2479    /* Need to observe that the swapchain semaphore has been unsignalled,
2480     * as this is guaranteed when a present is complete. */
2481    VkResult result = wsi_swapchain_wait_for_present_semaphore(
2482          &chain->base, waitValue, timeout);
2483    if (result != VK_SUCCESS)
2484       return result;
2485 
2486    timespec_from_nsec(&abs_timespec, abs_timeout);
2487 
2488    mtx_lock(&chain->present_progress_mutex);
2489    while (chain->present_id < waitValue) {
2490       int ret = u_cnd_monotonic_timedwait(&chain->present_progress_cond,
2491                                           &chain->present_progress_mutex,
2492                                           &abs_timespec);
2493       if (ret == ETIMEDOUT) {
2494          result = VK_TIMEOUT;
2495          break;
2496       }
2497       if (ret) {
2498          result = VK_ERROR_DEVICE_LOST;
2499          break;
2500       }
2501    }
2502    if (result == VK_SUCCESS && chain->present_progress_error)
2503       result = chain->present_progress_error;
2504    mtx_unlock(&chain->present_progress_mutex);
2505    return result;
2506 }
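/* Application-side sketch of how this is reached (handles and the id value
 * are assumptions): with VK_KHR_present_wait, the presentId passed via
 * VkPresentIdKHR at present time is the waitValue handled above.
 *
 *    uint64_t id = 42;
 *    VkPresentIdKHR present_id = {
 *       .sType = VK_STRUCTURE_TYPE_PRESENT_ID_KHR,
 *       .swapchainCount = 1,
 *       .pPresentIds = &id,
 *    };
 *    // chain present_id into VkPresentInfoKHR::pNext for vkQueuePresentKHR,
 *    // then block until that present completes:
 *    vkWaitForPresentKHR(device, swapchain, 42, UINT64_MAX);
 */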
2507 
2508 static unsigned
2509 x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
2510                                          struct wsi_x11_connection *wsi_conn,
2511                                          VkPresentModeKHR present_mode)
2512 {
2513    uint32_t min_image_count = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
2514    if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode))
2515       return MAX2(min_image_count, X11_SWAPCHAIN_MAILBOX_IMAGES);
2516    else
2517       return min_image_count;
2518 }
2519 
2520 /**
2521  * Create the swapchain.
2522  *
2523  * Supports the immediate, FIFO and mailbox presentation modes.
2524  *
2525  */
2526 static VkResult
2527 x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
2528                              VkDevice device,
2529                              struct wsi_device *wsi_device,
2530                              const VkSwapchainCreateInfoKHR *pCreateInfo,
2531                              const VkAllocationCallbacks* pAllocator,
2532                              struct wsi_swapchain **swapchain_out)
2533 {
2534    struct x11_swapchain *chain;
2535    xcb_void_cookie_t cookie;
2536    VkResult result;
2537    VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
2538 
2539    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
2540 
2541    /* Get xcb connection from the icd_surface and from that our internal struct
2542     * representing it.
2543     */
2544    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
2545    struct wsi_x11_connection *wsi_conn =
2546       wsi_x11_get_connection(wsi_device, conn);
2547    if (!wsi_conn)
2548       return VK_ERROR_OUT_OF_HOST_MEMORY;
2549 
2550    /* Get number of images in our swapchain. This count depends on:
2551     * - requested minimal image count
2552     * - device characteristics
2553     * - presentation mode.
2554     */
2555    unsigned num_images = pCreateInfo->minImageCount;
2556    if (!wsi_device->x11.strict_imageCount) {
2557       if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode) ||
2558           wsi_device->x11.ensure_minImageCount) {
2559          unsigned present_mode_images = x11_get_min_image_count_for_present_mode(
2560                wsi_device, wsi_conn, pCreateInfo->presentMode);
2561          num_images = MAX2(num_images, present_mode_images);
2562       }
2563    }
2564 
2565    /* Check that we have a window up-front. It is an error to not have one. */
2566    xcb_window_t window = x11_surface_get_window(icd_surface);
2567 
2568    /* Get the geometry of that window. The swapchain's bit depth will be taken from it, and the
2569     * chain's image extents should match it to allow performance-optimizing flips.
2570     */
2571    xcb_get_geometry_reply_t *geometry =
2572       xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
2573    if (geometry == NULL)
2574       return VK_ERROR_SURFACE_LOST_KHR;
2575    const uint32_t bit_depth = geometry->depth;
2576    const uint16_t cur_width = geometry->width;
2577    const uint16_t cur_height = geometry->height;
2578    free(geometry);
2579 
2580    /* Allocate the actual swapchain. The size depends on image count. */
2581    size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
2582    chain = vk_zalloc(pAllocator, size, 8,
2583                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2584    if (chain == NULL)
2585       return VK_ERROR_OUT_OF_HOST_MEMORY;
2586 
2587    int ret = mtx_init(&chain->present_progress_mutex, mtx_plain);
2588    if (ret != thrd_success) {
2589       vk_free(pAllocator, chain);
2590       return VK_ERROR_OUT_OF_HOST_MEMORY;
2591    }
2592 
2593    ret = mtx_init(&chain->thread_state_lock, mtx_plain);
2594    if (ret != thrd_success) {
2595       mtx_destroy(&chain->present_progress_mutex);
2596       vk_free(pAllocator, chain);
2597       return VK_ERROR_OUT_OF_HOST_MEMORY;
2598    }
2599 
2600    ret = u_cnd_monotonic_init(&chain->thread_state_cond);
2601    if (ret != thrd_success) {
2602       mtx_destroy(&chain->present_progress_mutex);
2603       mtx_destroy(&chain->thread_state_lock);
2604       vk_free(pAllocator, chain);
2605       return VK_ERROR_OUT_OF_HOST_MEMORY;
2606    }
2607 
2608    ret = u_cnd_monotonic_init(&chain->present_progress_cond);
2609    if (ret != thrd_success) {
2610       mtx_destroy(&chain->present_progress_mutex);
2611       mtx_destroy(&chain->thread_state_lock);
2612       u_cnd_monotonic_destroy(&chain->thread_state_cond);
2613       vk_free(pAllocator, chain);
2614       return VK_ERROR_OUT_OF_HOST_MEMORY;
2615    }
2616 
2617    uint32_t present_caps = 0;
2618 #ifdef HAVE_X11_DRM
2619    xcb_present_query_capabilities_cookie_t present_query_cookie;
2620    xcb_present_query_capabilities_reply_t *present_query_reply;
2621    present_query_cookie = xcb_present_query_capabilities(conn, window);
2622    present_query_reply = xcb_present_query_capabilities_reply(conn, present_query_cookie, NULL);
2623    if (present_query_reply) {
2624       present_caps = present_query_reply->capabilities;
2625       free(present_query_reply);
2626    }
2627 #endif
2628 
2629 #ifdef HAVE_X11_DRM
2630    struct wsi_drm_image_params drm_image_params;
2631    uint32_t num_modifiers[2] = {0, 0};
2632 #endif
2633    struct wsi_base_image_params *image_params = NULL;
2634    struct wsi_cpu_image_params cpu_image_params;
2635    uint64_t *modifiers[2] = {NULL, NULL};
2636    if (wsi_device->sw) {
2637       cpu_image_params = (struct wsi_cpu_image_params) {
2638          .base.image_type = WSI_IMAGE_TYPE_CPU,
2639          .alloc_shm = wsi_conn->has_mit_shm ? &alloc_shm : NULL,
2640       };
2641       image_params = &cpu_image_params.base;
2642    } else {
2643 #ifdef HAVE_X11_DRM
2644       drm_image_params = (struct wsi_drm_image_params) {
2645          .base.image_type = WSI_IMAGE_TYPE_DRM,
2646          .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, conn),
2647          .explicit_sync =
2648 #ifdef HAVE_DRI3_EXPLICIT_SYNC
2649             wsi_conn->has_dri3_explicit_sync &&
2650             (present_caps & XCB_PRESENT_CAPABILITY_SYNCOBJ) &&
2651             wsi_device_supports_explicit_sync(wsi_device),
2652 #else
2653             false,
2654 #endif
2655       };
2656       if (wsi_device->supports_modifiers) {
2657          wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, bit_depth, 32,
2658                                     modifiers, num_modifiers,
2659                                     &drm_image_params.num_modifier_lists,
2660                                     pAllocator);
2661          drm_image_params.num_modifiers = num_modifiers;
2662          drm_image_params.modifiers = (const uint64_t **)modifiers;
2663 
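              /* Remember a hash of the advertised modifier lists (presumably so a later change
               * in the usable modifiers can be detected).
               */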
2664          wsi_x11_recompute_dri3_modifier_hash(&chain->dri3_modifier_hash, &drm_image_params);
2665       }
2666       image_params = &drm_image_params.base;
2667 #else
2668       unreachable("X11 DRM support missing!");
2669 #endif
2670    }
2671 
2672    result = wsi_swapchain_init(wsi_device, &chain->base, device, pCreateInfo,
2673                                image_params, pAllocator);
2674 
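        /* The temporary modifier lists were only needed to build the image parameters above,
         * so they are freed regardless of the result.
         */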
2675    for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2676       vk_free(pAllocator, modifiers[i]);
2677 
2678    if (result != VK_SUCCESS)
2679       goto fail_alloc;
2680 
2681    chain->base.destroy = x11_swapchain_destroy;
2682    chain->base.get_wsi_image = x11_get_wsi_image;
2683    chain->base.acquire_next_image = x11_acquire_next_image;
2684    chain->base.queue_present = x11_queue_present;
2685    chain->base.wait_for_present = x11_wait_for_present;
2686    chain->base.release_images = x11_release_images;
2687    chain->base.set_present_mode = x11_set_present_mode;
2688    chain->base.present_mode = present_mode;
2689    chain->base.image_count = num_images;
2690    chain->conn = conn;
2691    chain->window = window;
2692    chain->depth = bit_depth;
2693    chain->extent = pCreateInfo->imageExtent;
2694    chain->send_sbc = 0;
2695    chain->sent_image_count = 0;
2696    chain->last_present_msc = 0;
2697    chain->status = VK_SUCCESS;
2698    chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
2699    chain->has_mit_shm = wsi_conn->has_mit_shm;
2700    chain->has_async_may_tear = present_caps & XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR;
2701 
2702    /* When images in the swapchain don't fit the window, X can still present them, but only by
2703     * copy, not by flip. So the copy is suboptimal: if the client changed the chain extents,
2704     * X might be able to flip again.
2705     */
2706    if (!wsi_device->x11.ignore_suboptimal) {
2707       if (chain->extent.width != cur_width || chain->extent.height != cur_height)
2708          chain->status = VK_SUBOPTIMAL_KHR;
2709    }
2710 
2711    /* On a new swapchain this helper variable is set to false. It only has an effect once we have
2712     * done at least one flip and then go back to copying afterwards. It is presumed that in this
2713     * case there is a high likelihood X could do flips again if the client reallocates a new
2714     * swapchain.
2715     *
2716     * Note that we used to inherit this property from 'pCreateInfo->oldSwapchain'. But when it
2717     * was true, and when the next present was completed with copying, we would return
2718     * VK_SUBOPTIMAL_KHR and hint the app to reallocate again for no good reason. If all following
2719     * presents on the surface were completed with copying because of some surface state change, we
2720     * would always return VK_SUBOPTIMAL_KHR no matter how many times the app had reallocated.
2721     *
2722     * Note also that it is questionable in general whether that mechanism is really useful. It is
2723     * not clear why, on a change from flipping to copying, we can assume a reallocation has a high
2724     * chance of making flips work again per se. In other words, it is not clear why there is a need
2725     * for another way to inform clients about suboptimal copies besides forwarding the
2726     * 'PresentOptionSuboptimal' complete mode.
2727     */
2728    chain->copy_is_suboptimal = false;
2729 #ifdef HAVE_X11_DRM
2730    /* For our swapchain we need to listen to the following Present extension events:
2731     * - Configure: Window dimensions changed. Images in the swapchain might need
2732     *              to be reallocated.
2733     * - Complete: An image from our swapchain was presented on the output.
2734     * - Idle: An image from our swapchain is no longer accessed by the X
2735     *         server and can be reused.
2736     */
2737    chain->event_id = xcb_generate_id(chain->conn);
2738    uint32_t event_mask = XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
2739                          XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY;
2740    if (!chain->base.image_info.explicit_sync)
2741       event_mask |= XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY;
2742    xcb_present_select_input(chain->conn, chain->event_id, chain->window, event_mask);
2743 
2744    /* Create an XCB event queue to hold present events outside of the usual
2745     * application event queue
2746     */
2747    chain->special_event =
2748       xcb_register_for_special_xge(chain->conn, &xcb_present_id,
2749                                    chain->event_id, NULL);
2750 #endif
2751    /* Create the graphics context. */
2752    chain->gc = xcb_generate_id(chain->conn);
2753    if (!chain->gc) {
2754       /* FINISHME: Choose a better error. */
2755       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2756       goto fail_register;
2757    }
2758 
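        /* The GC disables graphics exposures so that copy presents do not generate (No)Expose
         * events; any error reply for this request is simply discarded.
         */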
2759    cookie = xcb_create_gc(chain->conn,
2760                           chain->gc,
2761                           chain->window,
2762                           XCB_GC_GRAPHICS_EXPOSURES,
2763                           (uint32_t []) { 0 });
2764    xcb_discard_reply(chain->conn, cookie.sequence);
2765 
2766    uint32_t image = 0;
2767    for (; image < chain->base.image_count; image++) {
2768       result = x11_image_init(device, chain, pCreateInfo, pAllocator,
2769                               &chain->images[image]);
2770       if (result != VK_SUCCESS)
2771          goto fail_init_images;
2772    }
2773 
2774    /* The queues have a length of base.image_count + 1 because we will
2775     * occasionally use UINT32_MAX to signal the other thread that an error
2776     * has occurred and we don't want an overflow.
2777     */
2778    ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
2779    if (ret) {
2780       goto fail_init_images;
2781    }
2782 
2783    /* Acquire queue is only needed when using implicit sync */
2784    if (!chain->base.image_info.explicit_sync) {
2785       ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
2786       if (ret) {
2787          wsi_queue_destroy(&chain->present_queue);
2788          goto fail_init_images;
2789       }
2790 
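           /* Every image starts out idle, so seed the acquire queue with all image indices. */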
2791       for (unsigned i = 0; i < chain->base.image_count; i++)
2792          wsi_queue_push(&chain->acquire_queue, i);
2793    }
2794 
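        /* Spawn the worker threads: the queue manager consumes the present queue (a pushed
         * UINT32_MAX wakes it up for shutdown, see the error path below), and the event manager
         * drains the Present special-event queue registered above.
         */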
2795    ret = thrd_create(&chain->queue_manager,
2796                      x11_manage_present_queue, chain);
2797    if (ret != thrd_success)
2798       goto fail_init_fifo_queue;
2799 
2800    ret = thrd_create(&chain->event_manager,
2801                      x11_manage_event_queue, chain);
2802    if (ret != thrd_success)
2803       goto fail_init_event_queue;
2804 
2805    /* It is safe to set it here as only one swapchain can be associated with
2806     * the window, and swapchain creation does the association. At this point
2807     * we know the creation is going to succeed. */
2808    wsi_x11_set_adaptive_sync_property(conn, window,
2809                                       wsi_device->enable_adaptive_sync);
2810 
2811    *swapchain_out = &chain->base;
2812 
2813    return VK_SUCCESS;
2814 
2815 fail_init_event_queue:
2816    /* Push a UINT32_MAX to wake up the manager */
2817    wsi_queue_push(&chain->present_queue, UINT32_MAX);
2818    thrd_join(chain->queue_manager, NULL);
2819 
2820 fail_init_fifo_queue:
2821    wsi_queue_destroy(&chain->present_queue);
2822    if (!chain->base.image_info.explicit_sync)
2823       wsi_queue_destroy(&chain->acquire_queue);
2824 
2825 fail_init_images:
2826    for (uint32_t j = 0; j < image; j++)
2827       x11_image_finish(chain, pAllocator, &chain->images[j]);
2828 
2829 fail_register:
2830 #ifdef HAVE_X11_DRM
2831    xcb_unregister_for_special_event(chain->conn, chain->special_event);
2832 #endif
2833    wsi_swapchain_finish(&chain->base);
2834 
2835 fail_alloc:
2836    vk_free(pAllocator, chain);
2837 
2838    return result;
2839 }
2840 
2841 VkResult
2842 wsi_x11_init_wsi(struct wsi_device *wsi_device,
2843                  const VkAllocationCallbacks *alloc,
2844                  const struct driOptionCache *dri_options)
2845 {
2846    struct wsi_x11 *wsi;
2847    VkResult result;
2848 
2849    wsi = vk_alloc(alloc, sizeof(*wsi), 8,
2850                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2851    if (!wsi) {
2852       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2853       goto fail;
2854    }
2855 
2856    int ret = mtx_init(&wsi->mutex, mtx_plain);
2857    if (ret != thrd_success) {
2858       if (ret == ENOMEM) {
2859          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2860       } else {
2861          /* FINISHME: Choose a better error. */
2862          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2863       }
2864 
2865       goto fail_alloc;
2866    }
2867 
2868    wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2869                                               _mesa_key_pointer_equal);
2870    if (!wsi->connections) {
2871       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2872       goto fail_mutex;
2873    }
2874 
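        /* driconf options allow per-application overrides of the minimum image count behaviour,
         * whether to wait for Xwayland readiness, and whether suboptimal reporting is ignored.
         */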
2875    if (dri_options) {
2876       if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
2877          wsi_device->x11.override_minImageCount =
2878             driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
2879       }
2880       if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
2881          wsi_device->x11.strict_imageCount =
2882             driQueryOptionb(dri_options, "vk_x11_strict_image_count");
2883       }
2884       if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
2885          wsi_device->x11.ensure_minImageCount =
2886             driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
2887       }
2888       wsi_device->x11.xwaylandWaitReady = true;
2889       if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
2890          wsi_device->x11.xwaylandWaitReady =
2891             driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
2892       }
2893 
2894       if (driCheckOption(dri_options, "vk_x11_ignore_suboptimal", DRI_BOOL)) {
2895          wsi_device->x11.ignore_suboptimal =
2896             driQueryOptionb(dri_options, "vk_x11_ignore_suboptimal");
2897       }
2898    }
2899 
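        /* XCB and Xlib surfaces share the same swapchain implementation, so both platform slots
         * point at this interface.
         */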
2900    wsi->base.get_support = x11_surface_get_support;
2901    wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
2902    wsi->base.get_formats = x11_surface_get_formats;
2903    wsi->base.get_formats2 = x11_surface_get_formats2;
2904    wsi->base.get_present_modes = x11_surface_get_present_modes;
2905    wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
2906    wsi->base.create_swapchain = x11_surface_create_swapchain;
2907 
2908    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
2909    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;
2910 
2911    return VK_SUCCESS;
2912 
2913 fail_mutex:
2914    mtx_destroy(&wsi->mutex);
2915 fail_alloc:
2916    vk_free(alloc, wsi);
2917 fail:
2918    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
2919    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;
2920 
2921    return result;
2922 }
2923 
2924 void
2925 wsi_x11_finish_wsi(struct wsi_device *wsi_device,
2926                    const VkAllocationCallbacks *alloc)
2927 {
2928    struct wsi_x11 *wsi =
2929       (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];
2930 
2931    if (wsi) {
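           /* Tear down every cached per-connection structure before destroying the table and the
            * interface itself.
            */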
2932       hash_table_foreach(wsi->connections, entry)
2933          wsi_x11_connection_destroy(wsi_device, entry->data);
2934 
2935       _mesa_hash_table_destroy(wsi->connections, NULL);
2936 
2937       mtx_destroy(&wsi->mutex);
2938 
2939       vk_free(alloc, wsi);
2940    }
2941 }
2942