/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#define XK_MISCELLANY
#define XK_LATIN1
#include <X11/keysymdef.h>
#include <xcb/xcb.h>
#ifdef XCB_KEYSYMS_AVAILABLE
#include <xcb/xcb_keysyms.h>
#endif
#include <xcb/dri3.h>
#include <xcb/present.h>
#include <xcb/shm.h>

#include "util/macros.h"
#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/libdrm.h"
#include "util/cnd_monotonic.h"
#include "util/hash_table.h"
#include "util/mesa-blake3.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/u_debug.h"
#include "util/u_thread.h"
#include "util/xmlconfig.h"
#include "util/timespec.h"

#include "vk_format.h"
#include "vk_instance.h"
#include "vk_physical_device.h"
#include "vk_device.h"
#include "vk_util.h"
#include "vk_enum_to_str.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "wsi_common_queue.h"

#ifdef HAVE_SYS_SHM_H
#include <sys/ipc.h>
#include <sys/shm.h>
#endif

#ifndef XCB_PRESENT_OPTION_ASYNC_MAY_TEAR
#define XCB_PRESENT_OPTION_ASYNC_MAY_TEAR 16
#endif
#ifndef XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR
#define XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR 8
#endif

#define MAX_DAMAGE_RECTS 64

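/* Server-side capabilities of one xcb_connection_t, probed once in
 * wsi_x11_connection_create() and cached in the wsi_x11 connections table.
 */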
struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_dri3_explicit_sync;
   bool has_present;
   bool is_proprietary_x11;
   bool is_xwayland;
   bool has_mit_shm;
   bool has_xfixes;
};

struct wsi_x11 {
   struct wsi_interface base;

   mtx_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};

struct wsi_x11_vk_surface {
   union {
      VkIcdSurfaceXlib xlib;
      VkIcdSurfaceXcb xcb;
   };
   bool has_alpha;
};
#ifdef HAVE_X11_DRM
/**
 * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t       cookie;
   xcb_dri3_open_reply_t        *reply;
   int                          fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   /* According to the DRI3 extension, nfd must equal one. */
   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
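   /* FDs received over the wire are not CLOEXEC by default; set it so the
    * device fd does not leak into child processes.
    */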
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}
/**
 * Checks compatibility of the device wsi_dev with the device the X server
 * provides via DRI3.
 *
 * This returns true when no device could be retrieved from the X server, or
 * when the information for the X server device indicates that it is the same
 * device.
 */
static bool
wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
                              xcb_connection_t *conn)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   /* Open the DRI3 device from the X server. If we do not retrieve one we
    * assume our local device is compatible.
    */
   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd == -1)
      return true;

   bool match = wsi_dev->can_present_on_device(wsi_dev->pdevice, dri3_fd);

   close(dri3_fd);

   return match;
}
#endif

static bool
wsi_x11_detect_xwayland(xcb_connection_t *conn,
                        xcb_query_extension_reply_t *randr_reply,
                        xcb_query_extension_reply_t *xwl_reply)
{
   /* Newer Xwayland exposes an X11 extension we can check for */
   if (xwl_reply && xwl_reply->present)
      return true;

   /* Older Xwayland uses the word "XWAYLAND" in the RandR output names */
   if (!randr_reply || !randr_reply->present)
      return false;

   xcb_randr_query_version_cookie_t ver_cookie =
      xcb_randr_query_version_unchecked(conn, 1, 3);
   xcb_randr_query_version_reply_t *ver_reply =
      xcb_randr_query_version_reply(conn, ver_cookie, NULL);
   bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
                                       ver_reply->minor_version >= 3);
   free(ver_reply);

   if (!has_randr_v1_3)
      return false;

   const xcb_setup_t *setup = xcb_get_setup(conn);
   xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);

   xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
      xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
   xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
      xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);

   if (!gsr_reply || gsr_reply->num_outputs == 0) {
      free(gsr_reply);
      return false;
   }

   xcb_randr_output_t *randr_outputs =
      xcb_randr_get_screen_resources_current_outputs(gsr_reply);
   xcb_randr_get_output_info_cookie_t goi_cookie =
      xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
   free(gsr_reply);

   xcb_randr_get_output_info_reply_t *goi_reply =
      xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
   if (!goi_reply) {
      return false;
   }

   char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
   bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
   free(goi_reply);

   return is_xwayland;
}

static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
                                amd_cookie, nv_cookie, shm_cookie, sync_cookie,
                                xfixes_cookie, xwl_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
                               *amd_reply, *nv_reply, *shm_reply = NULL,
                               *xfixes_reply, *xwl_reply;
   bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
                    wsi_dev->has_import_memory_host;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;
   bool has_dri3_v1_4 = false;
   bool has_present_v1_4 = false;

   /* wsi_x11_get_connection may be called from a thread, but we never end up
    * here on a worker thread, since by then the connection is already in the
    * hash map. We therefore do not violate Vulkan's rules on when allocation
    * callbacks may be called. */
   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

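   /* Fire off all the extension queries up front and collect the replies
    * below, so the whole probe costs a single round-trip to the X server.
    */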
   sync_cookie = xcb_query_extension(conn, 4, "SYNC");
   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");
   randr_cookie = xcb_query_extension(conn, 5, "RANDR");
   xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");
   xwl_cookie = xcb_query_extension(conn, 8, "XWAYLAND");

   if (wants_shm)
      shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this ends
    * up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and is
    * running on the discrete card with the proprietary DDX.  In this case, we
    * really don't want to print the warning because it just confuses users.
    * As a heuristic to detect this case, we check for a couple of proprietary
    * X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   xcb_discard_reply(conn, sync_cookie.sequence);
   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
   xwl_reply = xcb_query_extension_reply(conn, xwl_cookie, NULL);
   if (wants_shm)
      shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
   if (!dri3_reply || !pres_reply || !xfixes_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(xfixes_reply);
      free(xwl_reply);
      free(randr_reply);
      free(amd_reply);
      free(nv_reply);
      if (wants_shm)
         free(shm_reply);
      vk_free(&wsi_dev->instance_alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_X11_DRM
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 4);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      has_dri3_v1_2 = ver_reply != NULL &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      has_dri3_v1_4 = ver_reply != NULL &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 4);
      free(ver_reply);
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_X11_DRM
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 4);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      has_present_v1_2 =
        (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      has_present_v1_4 =
        (ver_reply->major_version > 1 || ver_reply->minor_version >= 4);
      free(ver_reply);
   }
#endif

   wsi_conn->has_xfixes = xfixes_reply->present != 0;
   if (wsi_conn->has_xfixes) {
      xcb_xfixes_query_version_cookie_t ver_cookie;
      xcb_xfixes_query_version_reply_t *ver_reply;

      ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
      ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
      wsi_conn->has_xfixes = (ver_reply->major_version >= 2);
      free(ver_reply);
   }

   wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn, randr_reply,
                                                   xwl_reply);

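   /* DRI3 1.2 + Present 1.2 introduced format-modifier support; DRI3 1.4 +
    * Present 1.4 added explicit synchronization via DRM syncobjs.
    */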
   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->has_dri3_explicit_sync = has_dri3_v1_4 && has_present_v1_4;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   wsi_conn->has_mit_shm = false;
#ifdef HAVE_X11_DRM
   if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
      bool has_mit_shm = shm_reply->present != 0;

      xcb_shm_query_version_cookie_t ver_cookie;
      xcb_shm_query_version_reply_t *ver_reply;

      ver_cookie = xcb_shm_query_version(conn);
      ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);

      has_mit_shm = ver_reply->shared_pixmaps;
      free(ver_reply);
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;

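      /* Probe whether the server really dispatches SHM requests by detaching
       * a bogus segment: any error other than BadRequest proves the request
       * reached the extension.
       */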
      if (has_mit_shm) {
         cookie = xcb_shm_detach_checked(conn, 0);
         if ((error = xcb_request_check(conn, cookie))) {
            if (error->error_code != BadRequest)
               wsi_conn->has_mit_shm = true;
            free(error);
         }
      }
   }
#endif

   free(dri3_reply);
   free(pres_reply);
   free(randr_reply);
   free(xwl_reply);
   free(amd_reply);
   free(nv_reply);
   free(xfixes_reply);
   if (wants_shm)
      free(shm_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
                           struct wsi_x11_connection *conn)
{
   vk_free(&wsi_dev->instance_alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
  if (wsi_conn->has_dri3)
    return true;
  if (!wsi_conn->is_proprietary_x11) {
    fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                    "Note: you can probably enable DRI3 in your Xorg config\n");
  }
  return false;
}

/**
 * Get the internal struct representing an xcb_connection_t.
 *
 * This can allocate the struct, but the caller does not own it. It is
 * destroyed in wsi_x11_finish_wsi via the hash table it is inserted into.
 *
 * If the allocation fails, NULL is returned.
 */
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   mtx_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      mtx_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(wsi_dev, conn);
      if (!wsi_conn)
         return NULL;

      mtx_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(wsi_dev, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   mtx_unlock(&wsi->mutex);

   return entry->data;
}

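/* Candidate swapchain formats, in preference order; get_sorted_vk_formats()
 * filters them against the bit layout of the window's X visual.
 */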
static const VkFormat formats[] = {
   VK_FORMAT_R5G6B5_UNORM_PACK16,
   VK_FORMAT_B8G8R8A8_SRGB,
   VK_FORMAT_B8G8R8A8_UNORM,
   VK_FORMAT_A2R10G10B10_UNORM_PACK32,
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
   VK_PRESENT_MODE_FIFO_RELAXED_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator (depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, NULL);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth, xcb_visualtype_t **rootvis)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   if (rootvis)
      *rootvis = screen_get_visualtype(screen, screen->root_visual, depth);
   return screen_get_visualtype(screen, visual_id, depth);
}

static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

static bool
visual_supported(xcb_visualtype_t *visual)
{
   if (!visual)
      return false;

   return visual->_class == XCB_VISUAL_CLASS_TRUE_COLOR ||
          visual->_class == XCB_VISUAL_CLASS_DIRECT_COLOR;
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                               uint32_t queueFamilyIndex,
                                               xcb_connection_t *connection,
                                               xcb_visualid_t visual_id)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
   struct wsi_device *wsi_device = pdevice->wsi_device;
   if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
      return false;

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_device->sw) {
      if (!wsi_x11_check_for_dri3(wsi_conn))
         return false;
   }

   if (!visual_supported(connection_get_visualtype(connection, visual_id)))
      return false;

   return true;
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                uint32_t queueFamilyIndex,
                                                Display *dpy,
                                                VisualID visualID)
{
   return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
                                                         queueFamilyIndex,
                                                         XGetXCBConnection(dpy),
                                                         visualID);
}

static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_device->sw) {
      if (!wsi_x11_check_for_dri3(wsi_conn)) {
         *pSupported = false;
         return VK_SUCCESS;
      }
   }

   if (!visual_supported(get_visualtype_for_window(conn, window, NULL, NULL))) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static uint32_t
x11_get_min_image_count(const struct wsi_device *wsi_device, bool is_xwayland)
{
   if (wsi_device->x11.override_minImageCount)
      return wsi_device->x11.override_minImageCount;

   /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
    * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
    * the render latency is CPU duration + GPU duration.
    *
    * This means that with scanout from pageflipping we need 3 frames to run
    * full speed:
    * 1) CPU rendering work
    * 2) GPU rendering work
    * 3) scanout
    *
    * Once we have a nonblocking acquire that returns a semaphore we can merge
    * 1 and 3. Hence the ideal implementation needs only 2 images, but games
    * cannot tell that we currently do not have an ideal implementation and
    * that they hence need to allocate 3 images. So let us do it for them.
    *
    * This is a tradeoff as it uses more memory than needed for non-fullscreen
    * and non-performance intensive applications.
    *
    * For Xwayland Venus reports four images as described in
    *   wsi_wl_surface_get_capabilities
    */
   return is_xwayland && wsi_device->x11.extra_xwayland_image ? 4 : 3;
}

static unsigned
x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
                                         struct wsi_x11_connection *wsi_conn,
                                         VkPresentModeKHR present_mode);

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct wsi_device *wsi_device,
                             const VkSurfacePresentModeEXT *present_mode,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   struct wsi_x11_vk_surface *surface = (struct wsi_x11_vk_surface*)icd_surface;
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;

   geom_cookie = xcb_get_geometry(conn, window);

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (!geom)
      return VK_ERROR_SURFACE_LOST_KHR;
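   /* The X server does not scale presented pixmaps, so the swapchain extent
    * must match the current window geometry exactly: current, min and max
    * extent are all the same.
    */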
   {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   }
   free(err);
   free(geom);

   if (surface->has_alpha) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   if (present_mode) {
      caps->minImageCount = x11_get_min_image_count_for_present_mode(wsi_device, wsi_conn, present_mode->presentMode);
   } else {
      caps->minImageCount = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
   }

   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags = wsi_caps_get_image_usage();

   VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
   if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
      caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              struct wsi_device *wsi_device,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);

   VkResult result =
      x11_surface_get_capabilities(icd_surface, wsi_device, present_mode,
                                   &caps->surfaceCapabilities);

   if (result != VK_SUCCESS)
      return result;

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
         /* Unsupported. */
         VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
         scaling->supportedPresentScaling = 0;
         scaling->supportedPresentGravityX = 0;
         scaling->supportedPresentGravityY = 0;
         scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
         scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
         /* All present modes are compatible with each other. */
         VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
         if (compat->pPresentModes) {
            assert(present_mode);
            VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
            /* Must always return queried present mode even when truncating. */
            vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
               *mode = present_mode->presentMode;
            }

            for (uint32_t i = 0; i < ARRAY_SIZE(present_modes); i++) {
               if (present_modes[i] != present_mode->presentMode) {
                  vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                     *mode = present_modes[i];
                  }
               }
            }
         } else {
            if (!present_mode)
               wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
                                       "without a VkSurfacePresentModeEXT set. This is an "
                                       "application bug.\n");

            compat->presentModeCount = ARRAY_SIZE(present_modes);
         }
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}

static int
format_get_component_bits(VkFormat format, int comp)
{
   return vk_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, comp);
}

static bool
rgb_component_bits_are_equal(VkFormat format, const xcb_visualtype_t* type)
{
   return format_get_component_bits(format, 0) == util_bitcount(type->red_mask) &&
          format_get_component_bits(format, 1) == util_bitcount(type->green_mask) &&
          format_get_component_bits(format, 2) == util_bitcount(type->blue_mask);
}

static bool
get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
                      VkFormat *sorted_formats, unsigned *count)
{
   xcb_connection_t *conn = x11_surface_get_connection(surface);
   xcb_window_t window = x11_surface_get_window(surface);
   xcb_visualtype_t *rootvis = NULL;
   xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL, &rootvis);

   if (!visual)
      return false;

   /* use the root window's visual to set the default */
   *count = 0;
   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      if (rgb_component_bits_are_equal(formats[i], rootvis))
         sorted_formats[(*count)++] = formats[i];
   }

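   /* Then append the formats matching the window's own visual, skipping any
    * already added for the root visual.
    */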
   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      for (unsigned j = 0; j < *count; j++)
         if (formats[i] == sorted_formats[j])
            goto next_format;
      if (rgb_component_bits_are_equal(formats[i], visual))
         sorted_formats[(*count)++] = formats[i];
next_format:;
   }

   if (wsi_device->force_bgra8_unorm_first) {
      for (unsigned i = 0; i < *count; i++) {
         if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
            sorted_formats[i] = sorted_formats[0];
            sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
            break;
         }
      }
   }

   return true;
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   unsigned count;
   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
      return VK_ERROR_SURFACE_LOST_KHR;

   for (unsigned i = 0; i < count; i++) {
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
         f->format = sorted_formats[i];
         f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        const void *info_next,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   unsigned count;
   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
      return VK_ERROR_SURFACE_LOST_KHR;

   for (unsigned i = 0; i < count; i++) {
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = sorted_formats[i];
         f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              struct wsi_device *wsi_device,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
                                   struct wsi_device *wsi_device,
                                   uint32_t* pRectCount,
                                   VkRect2D* pRects)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);

   vk_outarray_append_typed(VkRect2D, &out, rect) {
      xcb_generic_error_t *err = NULL;
      xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
      xcb_get_geometry_reply_t *geom =
         xcb_get_geometry_reply(conn, geom_cookie, &err);
      free(err);
      if (geom) {
         *rect = (VkRect2D) {
            .offset = { 0, 0 },
            .extent = { geom->width, geom->height },
         };
      }
      free(geom);
      if (!geom)
          return VK_ERROR_SURFACE_LOST_KHR;
   }

   return vk_outarray_status(&out);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateXcbSurfaceKHR(VkInstance _instance,
                        const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   struct wsi_x11_vk_surface *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   unsigned visual_depth;
   xcb_visualtype_t *visual =
      get_visualtype_for_window(pCreateInfo->connection, pCreateInfo->window, &visual_depth, NULL);
   if (!visual)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->xcb.base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->xcb.connection = pCreateInfo->connection;
   surface->xcb.window = pCreateInfo->window;

   surface->has_alpha = visual_has_alpha(visual, visual_depth);

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->xcb.base);
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateXlibSurfaceKHR(VkInstance _instance,
                         const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator,
                         VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   struct wsi_x11_vk_surface *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);

   unsigned visual_depth;
   xcb_visualtype_t *visual =
      get_visualtype_for_window(XGetXCBConnection(pCreateInfo->dpy), pCreateInfo->window, &visual_depth, NULL);
   if (!visual)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof(struct wsi_x11_vk_surface), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->xlib.base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->xlib.dpy = pCreateInfo->dpy;
   surface->xlib.window = pCreateInfo->window;

   surface->has_alpha = visual_has_alpha(visual, visual_depth);

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->xlib.base);
   return VK_SUCCESS;
}

struct x11_image_pending_completion {
   uint32_t serial;
   uint64_t signal_present_id;
};

struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   xcb_xfixes_region_t                       update_region; /* long lived XID */
   xcb_xfixes_region_t                       update_area;   /* the above or None */
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
   xcb_shm_seg_t                             shmseg;
   int                                       shmid;
   uint8_t *                                 shmaddr;
   uint64_t                                  present_id;
   VkPresentModeKHR                          present_mode;
   xcb_rectangle_t                           rects[MAX_DAMAGE_RECTS];
   int                                       rectangle_count;

   /* In IMMEDIATE and MAILBOX modes, we can have multiple pending presentations per image.
    * We need to keep track of them when considering present ID. */

   /* This is arbitrarily chosen. With IMMEDIATE on a 3 deep swapchain,
    * we allow over 300 outstanding presentations per vblank, which is more than enough
    * for any reasonable application.
    * This used to be 16, but it regressed benchmarks that did 15k+ FPS.
    * This should allow over 25k FPS on a 60 Hz monitor. Any more than this is comical. */
#define X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS 128
   uint32_t                                  present_queued_count;
   struct x11_image_pending_completion       pending_completions[X11_SWAPCHAIN_MAX_PENDING_COMPLETIONS];
#ifdef HAVE_DRI3_EXPLICIT_SYNC
   uint32_t                                  dri3_syncobj[WSI_ES_COUNT];
#endif
};

struct x11_swapchain {
   struct wsi_swapchain                        base;

   bool                                         has_dri3_modifiers;
   bool                                         has_mit_shm;
   bool                                         has_async_may_tear;

   xcb_connection_t *                           conn;
   xcb_window_t                                 window;
   xcb_gc_t                                     gc;
   uint32_t                                     depth;
   VkExtent2D                                   extent;

   blake3_hash                                  dri3_modifier_hash;

   xcb_present_event_t                          event_id;
   xcb_special_event_t *                        special_event;
   uint64_t                                     send_sbc;
   uint64_t                                     last_present_msc;
   uint32_t                                     stamp;
   uint32_t                                     sent_image_count;

   atomic_int                                   status;
   bool                                         copy_is_suboptimal;
   struct wsi_queue                             present_queue;
   struct wsi_queue                             acquire_queue;
   thrd_t                                       queue_manager;
   thrd_t                                       event_manager;

   /* Used for communicating between event_manager and queue_manager.
    * Lock is also taken when reading and writing status.
    * When reading status in application threads,
    * x11_swapchain_read_status_atomic can be used as a wrapper function. */
   mtx_t                                        thread_state_lock;
   struct u_cnd_monotonic                       thread_state_cond;

   /* Lock and condition variable for present wait.
    * Signalled by event thread and waited on by callers to PresentWaitKHR. */
   mtx_t                                        present_progress_mutex;
   struct u_cnd_monotonic                       present_progress_cond;
   uint64_t                                     present_id;
   VkResult                                     present_progress_error;

   struct x11_image                             images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)

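/* Called on the event thread when a queued present completes: publish the
 * present ID for vkWaitForPresentKHR waiters, retire the pending-completion
 * slot, and wake the queue manager.
 */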
static void x11_present_complete(struct x11_swapchain *swapchain,
                                 struct x11_image *image, uint32_t index)
{
   uint64_t signal_present_id = image->pending_completions[index].signal_present_id;
   if (signal_present_id) {
      mtx_lock(&swapchain->present_progress_mutex);
      if (signal_present_id > swapchain->present_id) {
         swapchain->present_id = signal_present_id;
         u_cnd_monotonic_broadcast(&swapchain->present_progress_cond);
      }
      mtx_unlock(&swapchain->present_progress_mutex);
   }

   image->present_queued_count--;
   if (image->present_queued_count) {
      memmove(image->pending_completions + index,
              image->pending_completions + index + 1,
              (image->present_queued_count - index) *
              sizeof(image->pending_completions[0]));
   }

   u_cnd_monotonic_signal(&swapchain->thread_state_cond);
}

static void x11_notify_pending_present(struct x11_swapchain *swapchain,
                                       struct x11_image *image)
{
   u_cnd_monotonic_signal(&swapchain->thread_state_cond);
}

/* It is assumed that thread_state_lock is taken when calling this function. */
static void x11_swapchain_notify_error(struct x11_swapchain *swapchain, VkResult result)
{
   mtx_lock(&swapchain->present_progress_mutex);
   swapchain->present_id = UINT64_MAX;
   swapchain->present_progress_error = result;
   u_cnd_monotonic_broadcast(&swapchain->present_progress_cond);
   mtx_unlock(&swapchain->present_progress_mutex);
   u_cnd_monotonic_broadcast(&swapchain->thread_state_cond);
}

/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 *
 * It is assumed that thread_state_lock is taken when calling this function.
 */
static VkResult
_x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
                      const char *file, int line)
{
   if (result < 0)
      x11_swapchain_notify_error(chain, result);

   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
#ifndef NDEBUG
      fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
              file, line, vk_Result_to_str(result));
#endif
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
    * and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
#ifndef NDEBUG
      if (chain->status != VK_SUBOPTIMAL_KHR) {
         fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
                 file, line, vk_Result_to_str(result));
      }
#endif
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}
#define x11_swapchain_result(chain, result) \
   _x11_swapchain_result(chain, result, __FILE__, __LINE__)

static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}
#ifdef HAVE_X11_DRM
static bool
wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain);
#endif
static VkResult
x11_wait_for_explicit_sync_release_submission(struct x11_swapchain *chain,
                                              uint64_t rel_timeout_ns,
                                              uint32_t *image_index)
{
   STACK_ARRAY(struct wsi_image*, images, chain->base.image_count);
   for (uint32_t i = 0; i < chain->base.image_count; i++)
      images[i] = &chain->images[i].base;

   VkResult result;
#ifdef HAVE_LIBDRM
   result = wsi_drm_wait_for_explicit_sync_release(&chain->base,
                                                   chain->base.image_count,
                                                   images,
                                                   rel_timeout_ns,
                                                   image_index);
#else
   result = VK_ERROR_FEATURE_NOT_PRESENT;
#endif
   STACK_ARRAY_FINISH(images);
   return result;
}

/* XXX this belongs in presentproto */
#ifndef PresentWindowDestroyed
#define PresentWindowDestroyed (1 << 0)
#endif
/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;
      if (config->pixmap_flags & PresentWindowDestroyed)
         return VK_ERROR_SURFACE_LOST_KHR;

      struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
      if (!wsi_device->x11.ignore_suboptimal) {
         if (config->width != chain->extent.width ||
             config->height != chain->extent.height)
            return VK_SUBOPTIMAL_KHR;
      }

      break;
   }

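   /* IDLE means the server no longer needs the pixmap; with implicit sync
    * this is what hands the image back to the acquire queue.
    */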
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      assert(!chain->base.image_info.explicit_sync);
      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->sent_image_count--;
            assert(chain->sent_image_count >= 0);
            wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         unsigned i, j;
         for (i = 0; i < chain->base.image_count; i++) {
            struct x11_image *image = &chain->images[i];
            for (j = 0; j < image->present_queued_count; j++) {
               if (image->pending_completions[j].serial == complete->serial) {
                  x11_present_complete(chain, image, j);
               }
            }
         }
         chain->last_present_msc = complete->msc;
      }

      VkResult result = VK_SUCCESS;

      struct wsi_device *wsi_device = (struct wsi_device *)chain->base.wsi;
      if (wsi_device->x11.ignore_suboptimal)
         return result;

      switch (complete->mode) {
      case XCB_PRESENT_COMPLETE_MODE_COPY:
         if (chain->copy_is_suboptimal)
            result = VK_SUBOPTIMAL_KHR;
         break;
      case XCB_PRESENT_COMPLETE_MODE_FLIP:
         /* If we ever go from flipping to copying, the odds are very likely
          * that we could reallocate in a more optimal way if we didn't have
          * to care about scanout, so we always do this.
          */
         chain->copy_is_suboptimal = true;
         break;
#ifdef HAVE_X11_DRM
      case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
         /* The winsys is now trying to flip directly and cannot due to our
          * configuration. Request the user reallocate.
          */

         /* Sometimes this complete mode is a spurious false positive:
          * Xwayland may report SUBOPTIMAL_COPY even if there are no changes in the modifiers.
          * See https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26616 for more details. */
         if (chain->status == VK_SUCCESS &&
             wsi_x11_swapchain_query_dri3_modifiers_changed(chain)) {
            result = VK_SUBOPTIMAL_KHR;
         }
         break;
#endif
      default:
         break;
      }

      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}
#ifdef HAVE_X11_DRM
/**
 * Send image to X server via Present extension.
 */
static VkResult
x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
                        uint64_t target_msc, VkPresentModeKHR present_mode)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

1396    if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
1397        (present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
1398         wsi_conn->is_xwayland) ||
1399        present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
1400       options |= XCB_PRESENT_OPTION_ASYNC;
1401 
1402    if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR
1403       && chain->has_async_may_tear)
1404       options |= XCB_PRESENT_OPTION_ASYNC_MAY_TEAR;
1405 
1406    if (chain->has_dri3_modifiers)
1407       options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1408 
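   /* Reset (arm) the idle fence before presenting; the server triggers it once
    * it is done reading from the pixmap, which unblocks xshmfence_await() in
    * the acquire path. */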
   xshmfence_reset(image->shm_fence);

   if (!chain->base.image_info.explicit_sync) {
      ++chain->sent_image_count;
      assert(chain->sent_image_count <= chain->base.image_count);
   }

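   /* The Present protocol's serial is only 32 bits wide; we keep the full
    * 64-bit SBC ourselves and match COMPLETE events against the truncated value. */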
   ++chain->send_sbc;
   uint32_t serial = (uint32_t)chain->send_sbc;

   assert(image->present_queued_count < ARRAY_SIZE(image->pending_completions));
   image->pending_completions[image->present_queued_count++] =
      (struct x11_image_pending_completion) {
         .signal_present_id = image->present_id,
         .serial = serial,
      };

   xcb_void_cookie_t cookie;
#ifdef HAVE_DRI3_EXPLICIT_SYNC
   if (chain->base.image_info.explicit_sync) {
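      /* With explicit sync, pass the per-image acquire/release timeline
       * syncobjs and points; the server waits on the acquire point before
       * reading the pixmap and signals the release point when it is done. */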
      uint64_t acquire_point = image->base.explicit_sync[WSI_ES_ACQUIRE].timeline;
      uint64_t release_point = image->base.explicit_sync[WSI_ES_RELEASE].timeline;
      cookie = xcb_present_pixmap_synced(
         chain->conn,
         chain->window,
         image->pixmap,
         serial,
         0,                                   /* valid */
         image->update_area,                  /* update */
         0,                                   /* x_off */
         0,                                   /* y_off */
         XCB_NONE,                            /* target_crtc */
         image->dri3_syncobj[WSI_ES_ACQUIRE], /* acquire_syncobj */
         image->dri3_syncobj[WSI_ES_RELEASE], /* release_syncobj */
         acquire_point,
         release_point,
         options,
         target_msc,
         divisor,
         remainder, 0, NULL);
   } else
#endif
   {
      cookie = xcb_present_pixmap(chain->conn,
                                  chain->window,
                                  image->pixmap,
                                  serial,
                                  0,                  /* valid */
                                  image->update_area, /* update */
                                  0,                  /* x_off */
                                  0,                  /* y_off */
                                  XCB_NONE,           /* target_crtc */
                                  XCB_NONE,
                                  image->sync_fence,
                                  options,
                                  target_msc,
                                  divisor,
                                  remainder, 0, NULL);
   }
   xcb_discard_reply(chain->conn, cookie.sequence);
   xcb_flush(chain->conn);
   return x11_swapchain_result(chain, VK_SUCCESS);
}
#endif
/**
 * Send image to X server unaccelerated (software drivers).
 */
static VkResult
x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index)
{
   assert(!chain->base.image_info.explicit_sync);
   struct x11_image *image = &chain->images[image_index];

   /* Kick off this query before submitting the frame so it overlaps with the PutImage work.
    * In this _sw() mode we may be paying a network round-trip delay, not just UNIX socket delay. */
   xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);

   xcb_void_cookie_t cookie;
   void *myptr = image->base.cpu_map;
   size_t hdr_len = sizeof(xcb_put_image_request_t);
   int stride_b = image->base.row_pitches[0];
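   /* XCB measures request lengths in 4-byte units, hence the >> 2 here and the
    * comparison against xcb_get_maximum_request_length() below. */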
   size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
   uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);

   if (image->rectangle_count > 0) {
      for (int i = 0; i < image->rectangle_count; i++) {
         xcb_rectangle_t rect = chain->images[image_index].rects[i];
         const uint8_t *data = (const uint8_t*)myptr + (rect.y * stride_b) + (rect.x * 4);
         for (int j = 0; j < rect.height; j++) {
            cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                                   chain->window, chain->gc,
                                   rect.width,
                                   1,
                                   rect.x, rect.y + j,
                                   0, chain->depth,
                                   rect.width * 4,
                                   data);
            xcb_discard_reply(chain->conn, cookie.sequence);
            data += stride_b;
         }
      }
   } else if (size < max_req_len) {
      cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                             chain->window,
                             chain->gc,
                             image->base.row_pitches[0] / 4,
                             chain->extent.height,
                             0,0,0,chain->depth,
                             image->base.row_pitches[0] * chain->extent.height,
                             image->base.cpu_map);
      xcb_discard_reply(chain->conn, cookie.sequence);
   } else {
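      /* The image is too large for a single PutImage request: upload it in
       * bands of whole rows, each sized to fit under the server's maximum
       * request length. */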
      int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
      int y_start = 0;
      int y_todo = chain->extent.height;
      while (y_todo) {
         int this_lines = MIN2(num_lines, y_todo);
         cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                                chain->window,
                                chain->gc,
                                image->base.row_pitches[0] / 4,
                                this_lines,
                                0,y_start,0,chain->depth,
                                this_lines * stride_b,
                                (const uint8_t *)myptr + (y_start * stride_b));
         xcb_discard_reply(chain->conn, cookie.sequence);
         y_start += this_lines;
         y_todo -= this_lines;
      }
   }

   xcb_flush(chain->conn);

   /* We don't have queued present here.
    * Immediately let application acquire again, but query geometry first so
    * we can report OUT_OF_DATE on resize. */
   xcb_generic_error_t *err;

   xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
   VkResult result = VK_SUCCESS;
   if (geom) {
      if (chain->extent.width != geom->width ||
          chain->extent.height != geom->height)
         result = VK_ERROR_OUT_OF_DATE_KHR;
   } else {
      result = VK_ERROR_SURFACE_LOST_KHR;
   }
   free(err);
   free(geom);

   wsi_queue_push(&chain->acquire_queue, image_index);
   return result;
}

static void
x11_capture_trace(struct x11_swapchain *chain)
{
#ifdef XCB_KEYSYMS_AVAILABLE
   VK_FROM_HANDLE(vk_device, device, chain->base.device);
   if (!device->physical->instance->trace_mode)
      return;

   xcb_query_keymap_cookie_t keys_cookie = xcb_query_keymap(chain->conn);

   xcb_generic_error_t *error = NULL;
   xcb_query_keymap_reply_t *keys = xcb_query_keymap_reply(chain->conn, keys_cookie, &error);
   if (error) {
      free(error);
      return;
   }

   xcb_key_symbols_t *key_symbols = xcb_key_symbols_alloc(chain->conn);
   xcb_keycode_t *keycodes = xcb_key_symbols_get_keycode(key_symbols, XK_F1);
   if (keycodes) {
      xcb_keycode_t keycode = keycodes[0];
      free(keycodes);

      simple_mtx_lock(&device->trace_mtx);
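      /* The keymap reply is a 256-bit vector with one bit per keycode; trigger
       * a capture only on the F1 key's rising edge. */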
      bool capture_key_pressed = keys->keys[keycode / 8] & (1u << (keycode % 8));
      device->trace_hotkey_trigger = capture_key_pressed && (capture_key_pressed != chain->base.capture_key_pressed);
      chain->base.capture_key_pressed = capture_key_pressed;
      simple_mtx_unlock(&device->trace_mtx);
   }

   xcb_key_symbols_free(key_symbols);
   free(keys);
#endif
}

/* A trivial helper to make it obvious at call sites that we intend to read
 * chain->status outside the thread lock. */
static VkResult x11_swapchain_read_status_atomic(struct x11_swapchain *chain)
{
   return chain->status;
}

/**
 * Decides if an early wait on buffer fences before buffer submission is required.
 * That is the case for mailbox mode, as otherwise the latest image in the queue might not be
 * fully rendered at present time, which could lead to missing a frame. This is an Xorg issue.
 *
 * On Wayland compositors, this used to be a problem as well, but not anymore,
 * and this check assumes that Mesa is running on a reasonable compositor.
 * The wait behavior can be forced by setting the 'vk_xwayland_wait_ready' DRIConf option to true.
 * Some drivers, e.g. Venus, may still want to require wait_ready by default,
 * so the option is kept around for now.
 *
 * On Wayland, we don't know at this point whether the tearing protocol is or can be used by
 * Xwayland, so we have to make the MAILBOX assumption.
 */
static bool
x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
                          struct wsi_x11_connection *wsi_conn,
                          VkPresentModeKHR present_mode)
{
   if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
      return false;
   }

   switch (present_mode) {
      case VK_PRESENT_MODE_MAILBOX_KHR:
         return true;
      case VK_PRESENT_MODE_IMMEDIATE_KHR:
         return wsi_conn->is_xwayland;
      default:
         return false;
   }
}

/* This matches Wayland. */
#define X11_SWAPCHAIN_MAILBOX_IMAGES 4

static bool
x11_requires_mailbox_image_count(const struct wsi_device *device,
                                 struct wsi_x11_connection *wsi_conn,
                                 VkPresentModeKHR present_mode)
{
   /* If we're resorting to waiting for fences, we're assuming a MAILBOX-like model,
    * and we should allocate accordingly.
    *
    * One potential concern here is IMMEDIATE mode on Wayland.
    * This situation could arise:
    * - Fullscreen FLIP mode
    * - Compositor does not support tearing protocol (we cannot know this here)
    *
    * With 3 images, during the window between latch and flip, there is only one image left to the app,
    * so peak FPS may not be reached if the window between latch and flip is large,
    * but tests on contemporary compositors suggest this effect is minor.
    * Frame rates in the thousands can easily be reached.
    *
    * There are pragmatic reasons to expose 3 images for IMMEDIATE on Xwl.
    * - minImageCount is not intended as a tool to tune performance; its intent is to signal forward progress.
    *   Our X11 and WL implementations do so for pragmatic reasons due to sync acquire interacting poorly with 2 images.
    *   A jump from 3 to 4 is at best a minor improvement which only affects applications
    *   running at extremely high frame rates, way beyond the monitor refresh rate.
    *   On the other hand, lowering minImageCount to 2 would break the fundamental idea of MAILBOX
    *   (and IMMEDIATE without tear), since FPS > refresh rate would not be possible.
    *
    * - Several games developed for other platforms and other Linux WSI implementations
    *   do not expect that image counts arbitrarily change when changing present mode,
    *   and will crash when Mesa does so.
    *   There are several games using the strict_image_count drirc to work around this,
    *   and it would be good to be friendlier in the first place, so we don't have to work around more games.
    *   IMMEDIATE is a common presentation mode on those platforms, but MAILBOX is more Wayland-centric in nature,
    *   so increasing the image count for that mode is more reasonable.
    *
    * - IMMEDIATE expects tearing, and when tearing, 3 images are more than enough.
    *
    * - With EXT_swapchain_maintenance1, toggling between FIFO / IMMEDIATE (used extensively by D3D layering)
    *   would require the application to allocate >3 images, which is unfortunate for memory usage,
    *   and potentially disastrous for latency unless KHR_present_wait is used.
    */
   return x11_needs_wait_for_fences(device, wsi_conn, present_mode) ||
          present_mode == VK_PRESENT_MODE_MAILBOX_KHR;
}

/**
 * Send image to the X server for presentation at target_msc.
 */
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint64_t target_msc, VkPresentModeKHR present_mode)
{
   x11_capture_trace(chain);

   VkResult result;
   if (chain->base.wsi->sw && !chain->has_mit_shm)
      result = x11_present_to_x11_sw(chain, image_index);
   else
#ifdef HAVE_X11_DRM
      result = x11_present_to_x11_dri3(chain, image_index, target_msc, present_mode);
#else
      unreachable("X11 missing DRI3 support!");
#endif

   if (result < 0)
      x11_swapchain_notify_error(chain, result);
   else
      x11_notify_pending_present(chain, &chain->images[image_index]);

   return result;
}

static VkResult
x11_release_images(struct wsi_swapchain *wsi_chain,
                   uint32_t count, const uint32_t *indices)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   if (chain->status == VK_ERROR_SURFACE_LOST_KHR)
      return chain->status;

   /* If we're using implicit sync, push images to the acquire queue */
   if (!chain->base.image_info.explicit_sync) {
      for (uint32_t i = 0; i < count; i++) {
         uint32_t index = indices[i];
         assert(index < chain->base.image_count);
         wsi_queue_push(&chain->acquire_queue, index);
      }
   }

   return VK_SUCCESS;
}

static void
x11_set_present_mode(struct wsi_swapchain *wsi_chain,
                     VkPresentModeKHR mode)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   chain->base.present_mode = mode;
}

/**
 * Acquire a ready-to-use image from the swapchain.
 *
 * Usually this means that the image is not waiting on presentation and that it
 * has been released by the X server to be used again by the consumer.
 */
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       const VkAcquireNextImageInfoKHR *info,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint64_t timeout = info->timeout;

   /* If the swapchain is in an error state, don't go any further. */
   VkResult result = x11_swapchain_read_status_atomic(chain);
   if (result < 0)
      return result;

   if (chain->base.image_info.explicit_sync) {
      result = x11_wait_for_explicit_sync_release_submission(chain, timeout,
                                                             image_index);
   } else {
      result = wsi_queue_pull(&chain->acquire_queue,
                              image_index, timeout);
   }

   if (result == VK_TIMEOUT)
      return info->timeout ? VK_TIMEOUT : VK_NOT_READY;

   if (result < 0) {
      mtx_lock(&chain->thread_state_lock);
      result = x11_swapchain_result(chain, result);
      mtx_unlock(&chain->thread_state_lock);
   } else {
      result = x11_swapchain_read_status_atomic(chain);
   }

   if (result < 0)
      return result;

   assert(*image_index < chain->base.image_count);
#ifdef HAVE_X11_DRM
   if (chain->images[*image_index].shm_fence &&
       !chain->base.image_info.explicit_sync)
      xshmfence_await(chain->images[*image_index].shm_fence);
#endif

   return result;
}

/**
 * Queue a new presentation of an image that was previously acquired by the
 * consumer.
 *
 * Note that in immediate presentation mode this does not really queue the
 * presentation but directly asks the X server to show it.
 */
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  uint64_t present_id,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_xfixes_region_t update_area = 0;

   /* If the swapchain is in an error state, don't go any further. */
   VkResult status = x11_swapchain_read_status_atomic(chain);
   if (status < 0)
      return status;

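   /* If the app provided usable damage rectangles, turn them into an XFixes
    * region so the server can restrict the present to the damaged area. */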
   if (chain->images[image_index].update_region != None &&
       damage && damage->pRectangles && damage->rectangleCount > 0 &&
       damage->rectangleCount <= MAX_DAMAGE_RECTS) {
      xcb_rectangle_t *rects = chain->images[image_index].rects;

      update_area = chain->images[image_index].update_region;
      for (unsigned i = 0; i < damage->rectangleCount; i++) {
         const VkRectLayerKHR *rect = &damage->pRectangles[i];
         assert(rect->layer == 0);
         rects[i].x = rect->offset.x;
         rects[i].y = rect->offset.y;
         rects[i].width = rect->extent.width;
         rects[i].height = rect->extent.height;
      }
      xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
      chain->images[image_index].rectangle_count = damage->rectangleCount;
   } else {
      chain->images[image_index].rectangle_count = 0;
   }
   chain->images[image_index].update_area = update_area;
   chain->images[image_index].present_id = present_id;
   /* With EXT_swapchain_maintenance1, the present mode can change per present. */
   chain->images[image_index].present_mode = chain->base.present_mode;

   wsi_queue_push(&chain->present_queue, image_index);
   return x11_swapchain_read_status_atomic(chain);
}

/**
 * The number of images that are not owned by X11:
 *  (1) images owned by the app,
 *  (2) images waiting for the app to take ownership through an acquire, or
 *  (3) images in the present queue waiting for the FIFO thread to present to X11.
 */
static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
{
   return chain->base.image_count - chain->sent_image_count;
}

/* This thread is responsible for pumping PRESENT replies.
 * This is done in a separate thread from the X11 presentation thread
 * to be able to support non-blocking modes like IMMEDIATE and MAILBOX.
 * Frame completion events can happen at any time, and we need to handle
 * the events as soon as they come in to have a quality implementation.
 * The presentation thread may go to sleep waiting for new presentation events to come in,
 * and it cannot wait for both X events and application events at the same time.
 * If we only cared about FIFO, this thread wouldn't be very useful.
 * An earlier implementation of the X11 WSI had a single FIFO thread that blocked on X events after presenting.
 * For IMMEDIATE and MAILBOX, the application thread pumped the event queue, which caused a lot of pain
 * when trying to deal with present wait.
 */
static int
x11_manage_event_queue(void *state)
{
   struct x11_swapchain *chain = state;
   u_thread_setname("WSI swapchain event");

   /* While there is an outstanding IDLE we should wait for it.
    * In FLIP modes at most one image will not be driver owned eventually.
    * In BLIT modes, we expect that all images will eventually be driver owned,
    * but we don't know which mode is being used. */
   unsigned forward_progress_guaranteed_acquired_images = chain->base.image_count - 1;

   mtx_lock(&chain->thread_state_lock);

   while (chain->status >= 0) {
      /* This thread should only go to sleep waiting for X events when we know there are pending events.
       * We expect COMPLETION events when there is at least one image marked as present_queued.
       * We also expect IDLE events, but we only consider waiting for them when all images are busy,
       * and the application has fewer than N images acquired. */

      bool assume_forward_progress = false;

      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].present_queued_count != 0) {
            /* We must pump through a present wait and unblock FIFO thread if using FIFO mode. */
            assume_forward_progress = true;
            break;
         }
      }

      if (!assume_forward_progress && !chain->base.image_info.explicit_sync) {
         /* If true, application expects acquire (IDLE) to happen in finite time. */
         assume_forward_progress = x11_driver_owned_images(chain) <
                                   forward_progress_guaranteed_acquired_images;
      }

      if (assume_forward_progress) {
         /* Only yield lock when blocking on X11 event. */
         mtx_unlock(&chain->thread_state_lock);
         xcb_generic_event_t *event =
               xcb_wait_for_special_event(chain->conn, chain->special_event);
         mtx_lock(&chain->thread_state_lock);

         /* Re-check status since we dropped the lock while waiting for X. */
         VkResult result = chain->status;

         if (result >= 0) {
            if (event) {
               /* Queue thread will be woken up if anything interesting happened in handler.
                * Queue thread blocks on:
                * - Presentation events completing
                * - Presentation requests from application
                * - WaitForFence workaround if applicable */
               result = x11_handle_dri3_present_event(chain, (void *) event);
            } else {
               result = VK_ERROR_SURFACE_LOST_KHR;
            }
         }

         /* Updates chain->status and wakes up threads as necessary on error. */
         x11_swapchain_result(chain, result);
         free(event);
      } else {
         /* Nothing important to do, go to sleep until queue thread wakes us up. */
         u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
      }
   }

   mtx_unlock(&chain->thread_state_lock);
   return 0;
}

/**
 * Presentation thread.
 *
 * Runs in a separate thread, blocks and reacts to queued images on the
 * present-queue
 *
 * This must be a thread since we have to block in two cases:
 * - FIFO:
 *     We must wait for previous presentation to complete
 *     in some way so we can compute the target MSC.
 * - WaitForFence workaround:
 *     In some cases, we need to wait for image to complete rendering before submitting it to X.
 */
static int
x11_manage_present_queue(void *state)
{
   struct x11_swapchain *chain = state;
   struct wsi_x11_connection *wsi_conn =
         wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
   VkResult result = VK_SUCCESS;

   u_thread_setname("WSI swapchain queue");

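   /* MSC that the next FIFO present should target; bumped past the last
    * completed present so FIFO frames are spaced at least one vblank apart. */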
   uint64_t target_msc = 0;

   while (x11_swapchain_read_status_atomic(chain) >= 0) {
      uint32_t image_index = 0;
      {
         MESA_TRACE_SCOPE("pull present queue");
         result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
         assert(result != VK_TIMEOUT);
      }

      /* The status can change underneath us if the swapchain is destroyed
       * from another thread. */
      if (result >= 0)
         result = x11_swapchain_read_status_atomic(chain);
      if (result < 0)
         break;

      VkPresentModeKHR present_mode = chain->images[image_index].present_mode;

      if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
                                    present_mode) &&
          /* not necessary with explicit sync */
          !chain->base.image_info.explicit_sync) {
         MESA_TRACE_SCOPE("wait fence");
         result = chain->base.wsi->WaitForFences(chain->base.device, 1,
                                                 &chain->base.fences[image_index],
                                                 true, UINT64_MAX);
         if (result != VK_SUCCESS) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            break;
         }
      }

      mtx_lock(&chain->thread_state_lock);

      /* In IMMEDIATE and MAILBOX modes, there is a risk that we have exhausted the presentation queue,
       * since IDLE could return multiple times before observing a COMPLETE. */
      while (chain->status >= 0 &&
             chain->images[image_index].present_queued_count ==
             ARRAY_SIZE(chain->images[image_index].pending_completions)) {
         u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
      }

      if (chain->status < 0) {
         mtx_unlock(&chain->thread_state_lock);
         break;
      }

      result = x11_present_to_x11(chain, image_index, target_msc, present_mode);

      if (result < 0) {
         mtx_unlock(&chain->thread_state_lock);
         break;
      }

      if (present_mode == VK_PRESENT_MODE_FIFO_KHR ||
          present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
         MESA_TRACE_SCOPE("wait present");

         while (chain->status >= 0 && chain->images[image_index].present_queued_count != 0) {
            /* In FIFO mode, we need to make sure we observe a COMPLETE before queueing up
             * another present. */
            u_cnd_monotonic_wait(&chain->thread_state_cond, &chain->thread_state_lock);
         }

         /* If next present is not FIFO, we still need to ensure we don't override that
          * present. If FIFO, we need to ensure MSC is larger than the COMPLETED frame. */
         target_msc = chain->last_present_msc + 1;
      }

      mtx_unlock(&chain->thread_state_lock);
   }

   mtx_lock(&chain->thread_state_lock);
   x11_swapchain_result(chain, result);
   if (!chain->base.image_info.explicit_sync)
      wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
   mtx_unlock(&chain->thread_state_lock);

   return 0;
}

static uint8_t *
alloc_shm(struct wsi_image *imagew, unsigned size)
{
#ifdef HAVE_SYS_SHM_H
   struct x11_image *image = (struct x11_image *)imagew;
   image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
   if (image->shmid < 0)
      return NULL;

   uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
   /* mark the segment immediately for deletion to avoid leaks */
   shmctl(image->shmid, IPC_RMID, 0);

   if (addr == (uint8_t *) -1)
      return NULL;

   image->shmaddr = addr;
   return addr;
#else
   return NULL;
#endif
}

static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   VkResult result;

   result = wsi_create_image(&chain->base, &chain->base.image_info,
                             &image->base);
   if (result != VK_SUCCESS)
      return result;

   image->update_region = None;
   if (chain->base.wsi->sw && !chain->has_mit_shm)
      return VK_SUCCESS;

#ifdef HAVE_X11_DRM
   xcb_void_cookie_t cookie;
   xcb_generic_error_t *error = NULL;
   uint32_t bpp = 32;
   int fence_fd;
   image->update_region = xcb_generate_id(chain->conn);
   xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);

   if (chain->base.wsi->sw) {
      image->shmseg = xcb_generate_id(chain->conn);

      xcb_shm_attach(chain->conn,
                     image->shmseg,
                     image->shmid,
                     0);
      image->pixmap = xcb_generate_id(chain->conn);
      cookie = xcb_shm_create_pixmap_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.row_pitches[0] / 4,
                                             pCreateInfo->imageExtent.height,
                                             chain->depth,
                                             image->shmseg, 0);
      xcb_discard_reply(chain->conn, cookie.sequence);
      goto out_fence;
   }
   image->pixmap = xcb_generate_id(chain->conn);

   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      /* XCB requires an array of file descriptors but we only have one */
      int fds[4] = { -1, -1, -1, -1 };
      for (int i = 0; i < image->base.num_planes; i++) {
         fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
         if (fds[i] == -1) {
            for (int j = 0; j < i; j++)
               close(fds[j]);

            return VK_ERROR_OUT_OF_HOST_MEMORY;
         }
      }

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              fds);
   } else {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      /* XCB will take ownership of the FD we pass it. */
      int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
      if (fd == -1)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp, fd);
   }

   error = xcb_request_check(chain->conn, cookie);
   if (error != NULL) {
      free(error);
      goto fail_image;
   }

#ifdef HAVE_DRI3_EXPLICIT_SYNC
   if (chain->base.image_info.explicit_sync) {
      for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
         image->dri3_syncobj[i] = xcb_generate_id(chain->conn);
         int fd = dup(image->base.explicit_sync[i].fd);
         if (fd < 0)
            goto fail_image;

         cookie = xcb_dri3_import_syncobj_checked(chain->conn,
                                                  image->dri3_syncobj[i],
                                                  chain->window,
                                                  fd /* libxcb closes the fd */);
         error = xcb_request_check(chain->conn, cookie);
         if (error != NULL) {
            free(error);
            goto fail_image;
         }
      }
   }
#endif

out_fence:
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto fail_pixmap;

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL)
      goto fail_shmfence_alloc;

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

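   /* Start the fence in the triggered state so the very first acquire of this
    * image does not block. */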
   xshmfence_trigger(image->shm_fence);
   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

fail_image:
   wsi_destroy_image(&chain->base, &image->base);

#else
   unreachable("SHM support not compiled in");
#endif
   return VK_ERROR_INITIALIZATION_FAILED;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   if (!chain->base.wsi->sw || chain->has_mit_shm) {
#ifdef HAVE_X11_DRM
      cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
      xcb_discard_reply(chain->conn, cookie.sequence);
      xshmfence_unmap_shm(image->shm_fence);
#endif

      cookie = xcb_free_pixmap(chain->conn, image->pixmap);
      xcb_discard_reply(chain->conn, cookie.sequence);
#ifdef HAVE_X11_DRM
      cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
      xcb_discard_reply(chain->conn, cookie.sequence);
#endif
#ifdef HAVE_DRI3_EXPLICIT_SYNC
      if (chain->base.image_info.explicit_sync) {
         for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
            cookie = xcb_dri3_free_syncobj(chain->conn, image->dri3_syncobj[i]);
            xcb_discard_reply(chain->conn, cookie.sequence);
         }
      }
#endif
   }

   wsi_destroy_image(&chain->base, &image->base);
#ifdef HAVE_SYS_SHM_H
   if (image->shmaddr)
      shmdt(image->shmaddr);
#endif
}

static void
wsi_x11_recompute_dri3_modifier_hash(blake3_hash *hash, const struct wsi_drm_image_params *params)
{
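   /* Hash the modifier tranches (and same_gpu) so the SUBOPTIMAL_COPY handling
    * can cheaply detect whether the server's supported modifiers actually changed. */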
   mesa_blake3 ctx;
   _mesa_blake3_init(&ctx);
   _mesa_blake3_update(&ctx, &params->num_modifier_lists, sizeof(params->num_modifier_lists));
   for (uint32_t i = 0; i < params->num_modifier_lists; i++) {
      _mesa_blake3_update(&ctx, &i, sizeof(i));
      _mesa_blake3_update(&ctx, params->modifiers[i],
                          params->num_modifiers[i] * sizeof(*params->modifiers[i]));
   }
   _mesa_blake3_update(&ctx, &params->same_gpu, sizeof(params->same_gpu));
   _mesa_blake3_final(&ctx, *hash);
}

static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_X11_DRM
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2];

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         if (n > 0)
            vk_free(pAllocator, modifiers[0]);
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (int i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}
#ifdef HAVE_X11_DRM
static bool
wsi_x11_swapchain_query_dri3_modifiers_changed(struct x11_swapchain *chain)
{
   const struct wsi_device *wsi_device = chain->base.wsi;

   if (wsi_device->sw || !wsi_device->supports_modifiers)
      return false;

   struct wsi_drm_image_params drm_image_params;
   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};

   struct wsi_x11_connection *wsi_conn =
         wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);

   xcb_get_geometry_reply_t *geometry =
         xcb_get_geometry_reply(chain->conn, xcb_get_geometry(chain->conn, chain->window), NULL);
   if (geometry == NULL)
      return false;
   uint32_t bit_depth = geometry->depth;
   free(geometry);

   drm_image_params = (struct wsi_drm_image_params){
      .base.image_type = WSI_IMAGE_TYPE_DRM,
      .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, chain->conn),
      .explicit_sync = chain->base.image_info.explicit_sync,
   };

   /* This is called from a thread, so we must not use the application's allocation callbacks.
    * From the spec:
    * An implementation must only make calls into an application-provided allocator
    * during the execution of an API command.
    * An implementation must only make calls into an application-provided allocator
    * from the same thread that called the provoking API command. */

   wsi_x11_get_dri3_modifiers(wsi_conn, chain->conn, chain->window, bit_depth, 32,
                              modifiers, num_modifiers,
                              &drm_image_params.num_modifier_lists,
                              vk_default_allocator());

   drm_image_params.num_modifiers = num_modifiers;
   drm_image_params.modifiers = (const uint64_t **)modifiers;

   blake3_hash hash;
   wsi_x11_recompute_dri3_modifier_hash(&hash, &drm_image_params);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(vk_default_allocator(), modifiers[i]);

   return memcmp(hash, chain->dri3_modifier_hash, sizeof(hash)) != 0;
}
#endif
static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   mtx_lock(&chain->thread_state_lock);
   chain->status = VK_ERROR_OUT_OF_DATE_KHR;
   u_cnd_monotonic_broadcast(&chain->thread_state_cond);
   mtx_unlock(&chain->thread_state_lock);

   /* Push a UINT32_MAX to wake up the manager */
   wsi_queue_push(&chain->present_queue, UINT32_MAX);
   thrd_join(chain->queue_manager, NULL);
   thrd_join(chain->event_manager, NULL);

   if (!chain->base.image_info.explicit_sync)
      wsi_queue_destroy(&chain->acquire_queue);
   wsi_queue_destroy(&chain->present_queue);

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);
#ifdef HAVE_X11_DRM
   xcb_void_cookie_t cookie;
   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);
#endif
   mtx_destroy(&chain->present_progress_mutex);
   u_cnd_monotonic_destroy(&chain->present_progress_cond);
   mtx_destroy(&chain->thread_state_lock);
   u_cnd_monotonic_destroy(&chain->thread_state_cond);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static void
wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
                                   xcb_drawable_t drawable,
                                   uint32_t state)
{
   static char const name[] = "_VARIABLE_REFRESH";
   xcb_intern_atom_cookie_t cookie;
   xcb_intern_atom_reply_t* reply;
   xcb_void_cookie_t check;

   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
   reply = xcb_intern_atom_reply(conn, cookie, NULL);
   if (reply == NULL)
      return;

   if (state)
      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
                                          drawable, reply->atom,
                                          XCB_ATOM_CARDINAL, 32, 1, &state);
   else
      check = xcb_delete_property_checked(conn, drawable, reply->atom);

   xcb_discard_reply(conn, check.sequence);
   free(reply);
}

static VkResult x11_wait_for_present(struct wsi_swapchain *wsi_chain,
                                     uint64_t waitValue,
                                     uint64_t timeout)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   struct timespec abs_timespec;
   uint64_t abs_timeout = 0;
   if (timeout != 0)
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* Need to observe that the swapchain semaphore has been unsignalled,
    * as this is guaranteed when a present is complete. */
   VkResult result = wsi_swapchain_wait_for_present_semaphore(
         &chain->base, waitValue, timeout);
   if (result != VK_SUCCESS)
      return result;

   timespec_from_nsec(&abs_timespec, abs_timeout);

   mtx_lock(&chain->present_progress_mutex);
   while (chain->present_id < waitValue) {
      int ret = u_cnd_monotonic_timedwait(&chain->present_progress_cond,
                                          &chain->present_progress_mutex,
                                          &abs_timespec);
      if (ret == ETIMEDOUT) {
         result = VK_TIMEOUT;
         break;
      }
      if (ret) {
         result = VK_ERROR_DEVICE_LOST;
         break;
      }
   }
   if (result == VK_SUCCESS && chain->present_progress_error)
      result = chain->present_progress_error;
   mtx_unlock(&chain->present_progress_mutex);
   return result;
}

static unsigned
x11_get_min_image_count_for_present_mode(struct wsi_device *wsi_device,
                                         struct wsi_x11_connection *wsi_conn,
                                         VkPresentModeKHR present_mode)
{
   uint32_t min_image_count = x11_get_min_image_count(wsi_device, wsi_conn->is_xwayland);
   if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode))
      return MAX2(min_image_count, X11_SWAPCHAIN_MAILBOX_IMAGES);
   else
      return min_image_count;
}

/**
 * Create the swapchain.
 *
 * Supports the immediate, fifo and mailbox presentation modes.
 */
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;
   VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   /* Get xcb connection from the icd_surface and from that our internal struct
    * representing it.
    */
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Get number of images in our swapchain. This count depends on:
    * - requested minimal image count
    * - device characteristics
    * - presentation mode.
    */
   unsigned num_images = pCreateInfo->minImageCount;
   if (!wsi_device->x11.strict_imageCount) {
      if (x11_requires_mailbox_image_count(wsi_device, wsi_conn, present_mode) ||
          wsi_device->x11.ensure_minImageCount) {
         unsigned present_mode_images = x11_get_min_image_count_for_present_mode(
               wsi_device, wsi_conn, pCreateInfo->presentMode);
         num_images = MAX2(num_images, present_mode_images);
      }
   }

   /* Check that we have a window up-front. It is an error to not have one. */
   xcb_window_t window = x11_surface_get_window(icd_surface);

   /* Get the geometry of that window. The swapchain's bit depth is taken from it, and the
    * chain's image extents should match it so that X can use performance-optimizing flips.
    */
2573    xcb_get_geometry_reply_t *geometry =
2574       xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
2575    if (geometry == NULL)
2576       return VK_ERROR_SURFACE_LOST_KHR;
2577    const uint32_t bit_depth = geometry->depth;
2578    const uint16_t cur_width = geometry->width;
2579    const uint16_t cur_height = geometry->height;
2580    free(geometry);
2581 
2582    /* Allocate the actual swapchain. The size depends on image count. */
2583    size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
2584    chain = vk_zalloc(pAllocator, size, 8,
2585                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2586    if (chain == NULL)
2587       return VK_ERROR_OUT_OF_HOST_MEMORY;
2588 
2589    int ret = mtx_init(&chain->present_progress_mutex, mtx_plain);
2590    if (ret != thrd_success) {
2591       vk_free(pAllocator, chain);
2592       return VK_ERROR_OUT_OF_HOST_MEMORY;
2593    }
2594 
2595    ret = mtx_init(&chain->thread_state_lock, mtx_plain);
2596    if (ret != thrd_success) {
2597       mtx_destroy(&chain->present_progress_mutex);
2598       vk_free(pAllocator, chain);
2599       return VK_ERROR_OUT_OF_HOST_MEMORY;
2600    }
2601 
2602    ret = u_cnd_monotonic_init(&chain->thread_state_cond);
2603    if (ret != thrd_success) {
2604       mtx_destroy(&chain->present_progress_mutex);
2605       mtx_destroy(&chain->thread_state_lock);
2606       vk_free(pAllocator, chain);
2607       return VK_ERROR_OUT_OF_HOST_MEMORY;
2608    }
2609 
2610    ret = u_cnd_monotonic_init(&chain->present_progress_cond);
2611    if (ret != thrd_success) {
2612       mtx_destroy(&chain->present_progress_mutex);
2613       mtx_destroy(&chain->thread_state_lock);
2614       u_cnd_monotonic_destroy(&chain->thread_state_cond);
2615       vk_free(pAllocator, chain);
2616       return VK_ERROR_OUT_OF_HOST_MEMORY;
2617    }
2618 
2619    uint32_t present_caps = 0;
2620 #ifdef HAVE_X11_DRM
2621    xcb_present_query_capabilities_cookie_t present_query_cookie;
2622    xcb_present_query_capabilities_reply_t *present_query_reply;
2623    present_query_cookie = xcb_present_query_capabilities(conn, window);
2624    present_query_reply = xcb_present_query_capabilities_reply(conn, present_query_cookie, NULL);
2625    if (present_query_reply) {
2626       present_caps = present_query_reply->capabilities;
2627       free(present_query_reply);
2628    }
2629 #endif

#ifdef HAVE_X11_DRM
   struct wsi_drm_image_params drm_image_params;
   uint32_t num_modifiers[2] = {0, 0};
#endif
   struct wsi_base_image_params *image_params = NULL;
   struct wsi_cpu_image_params cpu_image_params;
   uint64_t *modifiers[2] = {NULL, NULL};
   if (wsi_device->sw) {
      cpu_image_params = (struct wsi_cpu_image_params) {
         .base.image_type = WSI_IMAGE_TYPE_CPU,
         .alloc_shm = wsi_conn->has_mit_shm ? &alloc_shm : NULL,
      };
      image_params = &cpu_image_params.base;
   } else {
#ifdef HAVE_X11_DRM
      drm_image_params = (struct wsi_drm_image_params) {
         .base.image_type = WSI_IMAGE_TYPE_DRM,
         .same_gpu = wsi_x11_check_dri3_compatible(wsi_device, conn),
         .explicit_sync =
#ifdef HAVE_DRI3_EXPLICIT_SYNC
            wsi_conn->has_dri3_explicit_sync &&
            (present_caps & XCB_PRESENT_CAPABILITY_SYNCOBJ) &&
            wsi_device_supports_explicit_sync(wsi_device),
#else
            false,
#endif
      };
      if (wsi_device->supports_modifiers) {
         wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, bit_depth, 32,
                                    modifiers, num_modifiers,
                                    &drm_image_params.num_modifier_lists,
                                    pAllocator);
         drm_image_params.num_modifiers = num_modifiers;
         drm_image_params.modifiers = (const uint64_t **)modifiers;

         wsi_x11_recompute_dri3_modifier_hash(&chain->dri3_modifier_hash, &drm_image_params);
      }
      image_params = &drm_image_params.base;
#else
      unreachable("X11 DRM support missing!");
#endif
   }
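   /* At this point image_params selects one of two backends: CPU images
    * (optionally backed by MIT-SHM when the server supports it) for the
    * software path, or DRM images for the dri3 path. Everything below is
    * common to both. */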

   result = wsi_swapchain_init(wsi_device, &chain->base, device, pCreateInfo,
                               image_params, pAllocator);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);
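   /* The modifier lists were only inputs to wsi_swapchain_init(), so the
    * temporary allocations from wsi_x11_get_dri3_modifiers() are freed here
    * unconditionally; on the sw path both pointers are still NULL, which
    * vk_free() is assumed to tolerate. */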

   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.wait_for_present = x11_wait_for_present;
   chain->base.release_images = x11_release_images;
   chain->base.set_present_mode = x11_set_present_mode;
   chain->base.present_mode = present_mode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->sent_image_count = 0;
   chain->last_present_msc = 0;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
   chain->has_mit_shm = wsi_conn->has_mit_shm;
   chain->has_async_may_tear = present_caps & XCB_PRESENT_CAPABILITY_ASYNC_MAY_TEAR;

   /* When the images in the swapchain don't fit the window, X can still
    * present them, but only by copy, not by flip. Report that as suboptimal:
    * if the client recreated the chain with extents matching the window,
    * X might be able to flip again.
    */
   if (!wsi_device->x11.ignore_suboptimal) {
      if (chain->extent.width != cur_width || chain->extent.height != cur_height)
         chain->status = VK_SUBOPTIMAL_KHR;
   }

   /* On a new swapchain this helper variable starts out false. It only has
    * an effect once we have done at least one flip and then fall back to
    * copying: in that case there is a high likelihood that X could flip
    * again if the client reallocated a new swapchain.
    *
    * Note that we used to inherit this property from
    * 'pCreateInfo->oldSwapchain'. But when it was true, and the next present
    * completed with a copy, we would return VK_SUBOPTIMAL_KHR and hint the
    * app to reallocate again for no good reason. If all following presents
    * on the surface completed with copies because of some surface state
    * change, we would keep returning VK_SUBOPTIMAL_KHR no matter how many
    * times the app reallocated.
    *
    * Note also that it is questionable whether this mechanism is useful at
    * all. It is not clear why, on a change from flipping to copying, we can
    * assume a reallocation has a high chance of making flips work again per
    * se. In other words, it is not clear why another way of informing
    * clients about suboptimal copies is needed besides forwarding the
    * 'PresentOptionSuboptimal' complete mode.
    */
   chain->copy_is_suboptimal = false;
#ifdef HAVE_X11_DRM
   /* For our swapchain we need to listen to the following Present extension
    * events:
    * - Configure: Window dimensions changed. Images in the swapchain might
    *              need to be reallocated.
    * - Complete: An image from our swapchain was presented on the output.
    * - Idle: An image from our swapchain is no longer accessed by the X
    *         server and can be reused.
    */
   chain->event_id = xcb_generate_id(chain->conn);
   uint32_t event_mask = XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                         XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY;
   if (!chain->base.image_info.explicit_sync)
      event_mask |= XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY;
   xcb_present_select_input(chain->conn, chain->event_id, chain->window, event_mask);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);
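   /* Present events for event_id now bypass the application's event queue
    * and are drained from this special queue, presumably by the
    * x11_manage_event_queue thread started further down. */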
#endif
   /* Create the graphics context. */
   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);
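   /* The GC is created with graphics exposures disabled; it is presumably
    * used when presenting by copy (e.g. xcb_put_image on the sw path), where
    * expose events for the copied regions would only be noise. */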

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   /* The queues have a length of base.image_count + 1 because we
    * occasionally use UINT32_MAX to signal the other thread that an error
    * has occurred, and the extra slot guarantees that this push can never
    * overflow the queue.
    */
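   /* Illustrative example: with image_count = 3 the queue holds 4 entries,
    * so even when all three image indices are already queued the error path
    * can still push the sentinel without blocking:
    *
    *    wsi_queue_push(&chain->present_queue, UINT32_MAX);
    */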
   ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
   if (ret) {
      goto fail_init_images;
   }

   /* Acquire queue is only needed when using implicit sync */
   if (!chain->base.image_info.explicit_sync) {
      ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         goto fail_init_images;
      }

      for (unsigned i = 0; i < chain->base.image_count; i++)
         wsi_queue_push(&chain->acquire_queue, i);
   }
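   /* The loop above seeds the acquire queue with every image index: all
    * images start out owned by us, so the first image_count acquires can
    * succeed without waiting for an IDLE notification from the server. */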

   ret = thrd_create(&chain->queue_manager,
                     x11_manage_present_queue, chain);
   if (ret != thrd_success)
      goto fail_init_fifo_queue;

   ret = thrd_create(&chain->event_manager,
                     x11_manage_event_queue, chain);
   if (ret != thrd_success)
      goto fail_init_event_queue;
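   /* Two helper threads now service the swapchain: as the names suggest,
    * x11_manage_present_queue submits queued presents and
    * x11_manage_event_queue processes the Present events selected above. */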

   /* It is safe to set the property here, as only one swapchain can be
    * associated with the window, and swapchain creation does the
    * association. At this point we know the creation is going to succeed. */
   wsi_x11_set_adaptive_sync_property(conn, window,
                                      wsi_device->enable_adaptive_sync);

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_event_queue:
   /* Push a UINT32_MAX to wake up the manager */
   wsi_queue_push(&chain->present_queue, UINT32_MAX);
   thrd_join(chain->queue_manager, NULL);

fail_init_fifo_queue:
   wsi_queue_destroy(&chain->present_queue);
   if (!chain->base.image_info.explicit_sync)
      wsi_queue_destroy(&chain->acquire_queue);

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
#ifdef HAVE_X11_DRM
   xcb_unregister_for_special_event(chain->conn, chain->special_event);
#endif
   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}

VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc,
                 const struct driOptionCache *dri_options)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = mtx_init(&wsi->mutex, mtx_plain);
   if (ret != thrd_success) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   if (dri_options) {
      if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
         wsi_device->x11.override_minImageCount =
            driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
      }
      if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
         wsi_device->x11.strict_imageCount =
            driQueryOptionb(dri_options, "vk_x11_strict_image_count");
      }
      if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
         wsi_device->x11.ensure_minImageCount =
            driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
      }
      wsi_device->x11.xwaylandWaitReady = true;
      if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
         wsi_device->x11.xwaylandWaitReady =
            driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
      }

      if (driCheckOption(dri_options, "vk_x11_ignore_suboptimal", DRI_BOOL)) {
         wsi_device->x11.ignore_suboptimal =
            driQueryOptionb(dri_options, "vk_x11_ignore_suboptimal");
      }
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   mtx_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}
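
/* Typical usage (an illustrative sketch, not taken from this file -- the
 * exact call sites are driver-specific): a driver initializes X11 WSI once
 * per wsi_device and pairs it with the finish call on teardown.
 *
 *    VkResult result = wsi_x11_init_wsi(wsi_device, alloc, dri_options);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    ...
 *    wsi_x11_finish_wsi(wsi_device, alloc);
 */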

void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(wsi_device, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      mtx_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}