/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <wayland-client.h>

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "drm-uapi/drm_fourcc.h"

#include "vk_instance.h"
#include "vk_device.h"
#include "vk_physical_device.h"
#include "vk_util.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "fifo-v1-client-protocol.h"
#include "commit-timing-v1-client-protocol.h"
#include "linux-dmabuf-unstable-v1-client-protocol.h"
#include "presentation-time-client-protocol.h"
#include "linux-drm-syncobj-v1-client-protocol.h"
#include "tearing-control-v1-client-protocol.h"

#include <util/cnd_monotonic.h>
#include <util/compiler.h>
#include <util/hash_table.h>
#include <util/timespec.h>
#include <util/u_endian.h>
#include <util/u_vector.h>
#include <util/u_dynarray.h>
#include <util/anon_file.h>
#include <util/os_time.h>

#include <loader/loader_wayland_helper.h>

#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif

struct wsi_wayland;

struct wsi_wl_format {
   VkFormat vk_format;
   uint32_t flags;
   struct u_vector modifiers;
};

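/* Shared-memory format table advertised via
 * zwp_linux_dmabuf_feedback_v1.format_table: per the linux-dmabuf-v1
 * protocol, this is a flat array of 16-byte entries (u32 format,
 * u32 padding, u64 modifier), and the 16-bit indices delivered by
 * tranche_formats events index into it.
 */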
struct dmabuf_feedback_format_table {
   unsigned int size;
   struct {
      uint32_t format;
      uint32_t padding; /* unused */
      uint64_t modifier;
   } *data;
};

struct dmabuf_feedback_tranche {
   dev_t target_device;
   uint32_t flags;
   struct u_vector formats;
};

struct dmabuf_feedback {
   dev_t main_device;
   struct dmabuf_feedback_format_table format_table;
   struct util_dynarray tranches;
   struct dmabuf_feedback_tranche pending_tranche;
};

struct wsi_wl_display {
   /* The real wl_display */
   struct wl_display *wl_display;
   /* Actually a proxy wrapper around the event queue */
   struct wl_display *wl_display_wrapper;
   struct wl_event_queue *queue;

   struct wl_shm *wl_shm;
   struct zwp_linux_dmabuf_v1 *wl_dmabuf;
   struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
   struct wp_tearing_control_manager_v1 *tearing_control_manager;
   struct wp_linux_drm_syncobj_manager_v1 *wl_syncobj;

   struct dmabuf_feedback_format_table format_table;

   /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
   struct wp_presentation *wp_presentation_notwrapped;
   uint32_t wp_presentation_version;

   struct wp_fifo_manager_v1 *fifo_manager;
   struct wp_commit_timing_manager_v1 *commit_timing_manager;
   bool no_timestamps;

   struct wsi_wayland *wsi_wl;

   /* Formats populated by zwp_linux_dmabuf_v1 or wl_shm interfaces */
   struct u_vector formats;

   bool sw;

   dev_t main_device;
   bool same_gpu;

   clockid_t presentation_clock_id;
};

struct wsi_wayland {
   struct wsi_interface base;

   struct wsi_device *wsi;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;
};

struct wsi_wl_image {
   struct wsi_image base;
   struct wl_buffer *buffer;
   bool busy;
   int shm_fd;
   void *shm_ptr;
   unsigned shm_size;
   uint64_t flow_id;

   struct wp_linux_drm_syncobj_timeline_v1 *wl_syncobj_timeline[WSI_ES_COUNT];
};

enum wsi_wl_buffer_type {
   WSI_WL_BUFFER_NATIVE,
   WSI_WL_BUFFER_GPU_SHM,
   WSI_WL_BUFFER_SHM_MEMCPY,
};

struct wsi_wl_surface {
   VkIcdSurfaceWayland base;

   unsigned int chain_count;

   struct wsi_wl_swapchain *chain;
   struct wl_surface *surface;
   struct wsi_wl_display *display;

   /* This has no functional use, and is here only for perfetto */
   struct {
      char *latency_str;
      uint64_t presenting;
      uint64_t presentation_track_id;
   } analytics;

   struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
   struct dmabuf_feedback dmabuf_feedback, pending_dmabuf_feedback;

   struct wp_linux_drm_syncobj_surface_v1 *wl_syncobj_surface;
};

struct wsi_wl_swapchain {
   struct wsi_swapchain base;

   struct wsi_wl_surface *wsi_wl_surface;
   struct wp_tearing_control_v1 *tearing_control;
   struct wp_fifo_v1 *fifo;
   struct wp_commit_timer_v1 *commit_timer;

   struct wl_callback *frame;

   VkExtent2D extent;
   VkFormat vk_format;
   enum wsi_wl_buffer_type buffer_type;
   uint32_t drm_format;
   enum wl_shm_format shm_format;

   bool suboptimal;
   bool retired;

   uint32_t num_drm_modifiers;
   const uint64_t *drm_modifiers;

   bool legacy_fifo_ready;
   bool next_present_force_wait_barrier;

   struct {
      mtx_t lock; /* protects all members */
      uint64_t max_completed;
      uint64_t max_forward_progress_present_id;
      uint64_t max_present_id;
      uint64_t prev_max_present_id;

      struct wl_list outstanding_list;
      struct u_cnd_monotonic list_advanced;
      struct wl_event_queue *queue;
      struct wp_presentation *wp_presentation;
      /* Fallback when wp_presentation is not supported */
      struct wl_surface *surface;
      bool dispatch_in_progress;

      uint64_t display_time_error;
      uint64_t display_time_correction;
      uint64_t last_target_time;
      uint64_t displayed_time;
      bool valid_refresh_nsec;
      unsigned int refresh_nsec;
   } present_ids;

   struct wsi_wl_image images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)

static bool
wsi_wl_use_explicit_sync(struct wsi_wl_display *display, struct wsi_device *device)
{
   return wsi_device_supports_explicit_sync(device) &&
          display->wl_syncobj != NULL;
}

enum wsi_wl_fmt_flag {
   WSI_WL_FMT_ALPHA = 1 << 0,
   WSI_WL_FMT_OPAQUE = 1 << 1,
};

static struct wsi_wl_format *
find_format(struct u_vector *formats, VkFormat format)
{
   struct wsi_wl_format *f;

   u_vector_foreach(f, formats)
      if (f->vk_format == format)
         return f;

   return NULL;
}

static char *
stringify_wayland_id(uint32_t id)
{
   char *out;

   if (asprintf(&out, "wl%d", id) < 0)
      return NULL;

   return out;
}

/* Given a time base and a refresh period, find the next
 * time past 'from' that is an even multiple of the period
 * past the base.
 */
static uint64_t
next_phase_locked_time(uint64_t base, uint64_t period, uint64_t from)
{
   uint64_t target, cycles;

   assert(from != 0);

   if (base == 0)
      return from;

   /* If our time base is in the future (which can happen when using
    * presentation feedback events), target the next possible
    * presentation time.
    */
   if (base >= from)
      return base + period;

   /* The presentation time extension recommends that the compositor
    * use a clock with "precision of one millisecond or better",
    * so we shouldn't rely on these times being perfectly precise.
    *
    * Additionally, some compositors round off feedback times
    * internally (e.g., to microsecond precision), so our times can
    * have some jitter in either direction.
    *
    * We need to be especially careful not to miss an opportunity
    * to display by calculating a cycle too far into the future,
    * as that would delay frame presentation.
    *
    * If we choose a cycle too soon, the FIFO barrier will still keep
    * the pace properly, except in the case of occluded surfaces -
    * but occluded surfaces don't move their base time in response
    * to presentation events, so there is no jitter and the math
    * is more forgiving. That case just needs to monotonically
    * increase.
    *
    * We fairly arbitrarily use period / 4 here to try to stay
    * well away from rounding up too far, but to also avoid
    * scheduling too soon if the time values are imprecise.
    */
   cycles = (from - base + period / 4) / period;
   target = base + (cycles + 1) * period;
   return target;
}
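
/* Worked example for the math above (illustrative values): with
 * base = 1,000,000,000 ns, period = 16,666,667 ns (~60 Hz) and
 * from = 1,050,000,000 ns:
 *
 *    cycles = (50,000,000 + 4,166,666) / 16,666,667 = 3
 *    target = base + (3 + 1) * period = 1,066,666,668 ns
 *
 * The period / 4 slack guarantees the returned target always lies more
 * than a quarter period past 'from', so feedback times that jitter
 * slightly early never make us aim for a deadline we have effectively
 * already missed.
 */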

static struct wsi_wl_format *
wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
                             struct u_vector *formats,
                             VkFormat format, uint32_t flags)
{
   assert(flags & (WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE));

   /* Don't add a format that's already in the list */
   struct wsi_wl_format *f = find_format(formats, format);
   if (f) {
      f->flags |= flags;
      return f;
   }

   /* Don't add formats that aren't renderable. */
   VkFormatProperties props;

   display->wsi_wl->wsi->GetPhysicalDeviceFormatProperties(display->wsi_wl->physical_device,
                                                           format, &props);
   if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
      return NULL;

   struct u_vector modifiers;
   if (!u_vector_init_pow2(&modifiers, 4, sizeof(uint64_t)))
      return NULL;

   f = u_vector_add(formats);
   if (!f) {
      u_vector_finish(&modifiers);
      return NULL;
   }

   f->vk_format = format;
   f->flags = flags;
   f->modifiers = modifiers;

   return f;
}

static void
wsi_wl_format_add_modifier(struct wsi_wl_format *format, uint64_t modifier)
{
   uint64_t *mod;

   if (modifier == DRM_FORMAT_MOD_INVALID)
      return;

   u_vector_foreach(mod, &format->modifiers)
      if (*mod == modifier)
         return;

   mod = u_vector_add(&format->modifiers);
   if (mod)
      *mod = modifier;
}

static void
wsi_wl_display_add_vk_format_modifier(struct wsi_wl_display *display,
                                      struct u_vector *formats,
                                      VkFormat vk_format, uint32_t flags,
                                      uint64_t modifier)
{
   struct wsi_wl_format *format;

   format = wsi_wl_display_add_vk_format(display, formats, vk_format, flags);
   if (format)
      wsi_wl_format_add_modifier(format, modifier);
}

static void
wsi_wl_display_add_drm_format_modifier(struct wsi_wl_display *display,
                                       struct u_vector *formats,
                                       uint32_t drm_format, uint64_t modifier)
{
   switch (drm_format) {
#if 0
   /* TODO: These are only available when VK_EXT_4444_formats is enabled, so
    * we probably need to make their use conditional on this extension. */
   case DRM_FORMAT_ARGB4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4R4G4B4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4R4G4B4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4B4G4R4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4B4G4R4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
#endif

   /* Vulkan _PACKN formats have the same component order as DRM formats
    * on little-endian systems; on big-endian systems there is no analog. */
#if UTIL_ARCH_LITTLE_ENDIAN
   case DRM_FORMAT_RGBA4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R4G4B4A4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_RGBX4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R4G4B4A4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_BGRA4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B4G4R4A4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_BGRX4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B4G4R4A4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_RGB565:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G6B5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      break;
   case DRM_FORMAT_BGR565:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G6R5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      break;
   case DRM_FORMAT_ARGB1555:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A1R5G5B5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB1555:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A1R5G5B5_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_RGBA5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G5B5A1_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_RGBX5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G5B5A1_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_BGRA5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G5R5A1_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_BGRX5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G5R5A1_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ARGB2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2R10G10B10_UNORM_PACK32,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2R10G10B10_UNORM_PACK32,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2B10G10R10_UNORM_PACK32,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2B10G10R10_UNORM_PACK32,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;

   /* Vulkan 16-bits-per-channel formats have an inverted channel order
    * compared to DRM formats, just like the 8-bits-per-channel ones.
    * On little-endian systems the memory representation of each channel
    * matches the DRM formats'. */
   case DRM_FORMAT_ABGR16161616:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR16161616:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR16161616F:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_SFLOAT,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR16161616F:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_SFLOAT,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
#endif

   /* Non-packed 8-bit formats have an inverted channel order compared to the
    * little-endian DRM formats, because the DRM channel ordering is high->low
    * but the Vulkan channel ordering is in memory byte order.
    *
    * For all UNORM formats which have an SRGB variant, we must support both
    * if we can. SRGB in this context means that rendering to it will result
    * in a linear -> nonlinear SRGB colorspace conversion before the data is
    * stored. The inverse function is applied when sampling from SRGB images.
    * From Wayland's perspective nothing changes; the difference is just how
    * Vulkan interprets the pixel data. */
   case DRM_FORMAT_XBGR8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8_SRGB,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8_UNORM,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_SRGB,
                                            WSI_WL_FMT_OPAQUE, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_SRGB,
                                            WSI_WL_FMT_ALPHA, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8_SRGB,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8_UNORM,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_SRGB,
                                            WSI_WL_FMT_OPAQUE, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ARGB8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_SRGB,
                                            WSI_WL_FMT_ALPHA, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   }
}

static uint32_t
drm_format_for_wl_shm_format(enum wl_shm_format shm_format)
{
   /* wl_shm formats are identical to DRM formats, except ARGB8888 and
    * XRGB8888 */
   switch (shm_format) {
   case WL_SHM_FORMAT_ARGB8888:
      return DRM_FORMAT_ARGB8888;
   case WL_SHM_FORMAT_XRGB8888:
      return DRM_FORMAT_XRGB8888;
   default:
      return shm_format;
   }
}
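
/* In the wl_shm enum, only WL_SHM_FORMAT_ARGB8888 (0) and
 * WL_SHM_FORMAT_XRGB8888 (1) are assigned fresh values; every other
 * wl_shm format code is defined to equal the corresponding DRM fourcc,
 * which is why the default case above passes the value through
 * unchanged.
 */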

static void
wsi_wl_display_add_wl_shm_format(struct wsi_wl_display *display,
                                 struct u_vector *formats,
                                 enum wl_shm_format shm_format)
{
   uint32_t drm_format = drm_format_for_wl_shm_format(shm_format);

   wsi_wl_display_add_drm_format_modifier(display, formats, drm_format,
                                          DRM_FORMAT_MOD_INVALID);
}

static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   switch (vk_format) {
#if 0
   case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ARGB4444 : DRM_FORMAT_XRGB4444;
   case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ABGR4444 : DRM_FORMAT_XBGR4444;
#endif
#if UTIL_ARCH_LITTLE_ENDIAN
   case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_RGBA4444 : DRM_FORMAT_RGBX4444;
   case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_BGRA4444 : DRM_FORMAT_BGRX4444;
   case VK_FORMAT_R5G6B5_UNORM_PACK16:
      return DRM_FORMAT_RGB565;
   case VK_FORMAT_B5G6R5_UNORM_PACK16:
      return DRM_FORMAT_BGR565;
   case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ARGB1555 : DRM_FORMAT_XRGB1555;
   case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
      return alpha ? DRM_FORMAT_RGBA5551 : DRM_FORMAT_RGBX5551;
   case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
      return alpha ? DRM_FORMAT_BGRA5551 : DRM_FORMAT_BGRX5551;
   case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
      return alpha ? DRM_FORMAT_ARGB2101010 : DRM_FORMAT_XRGB2101010;
   case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
      return alpha ? DRM_FORMAT_ABGR2101010 : DRM_FORMAT_XBGR2101010;
   case VK_FORMAT_R16G16B16A16_UNORM:
      return alpha ? DRM_FORMAT_ABGR16161616 : DRM_FORMAT_XBGR16161616;
   case VK_FORMAT_R16G16B16A16_SFLOAT:
      return alpha ? DRM_FORMAT_ABGR16161616F : DRM_FORMAT_XBGR16161616F;
#endif
   case VK_FORMAT_R8G8B8_UNORM:
   case VK_FORMAT_R8G8B8_SRGB:
      return DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R8G8B8A8_UNORM:
   case VK_FORMAT_R8G8B8A8_SRGB:
      return alpha ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_XBGR8888;
   case VK_FORMAT_B8G8R8_UNORM:
   case VK_FORMAT_B8G8R8_SRGB:
      return DRM_FORMAT_BGRX8888;
   case VK_FORMAT_B8G8R8A8_UNORM:
   case VK_FORMAT_B8G8R8A8_SRGB:
      return alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;

   default:
      assert(!"Unsupported Vulkan format");
      return DRM_FORMAT_INVALID;
   }
}

static enum wl_shm_format
wl_shm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   uint32_t drm_format = wl_drm_format_for_vk_format(vk_format, alpha);
   if (drm_format == DRM_FORMAT_INVALID) {
      return 0;
   }

   /* wl_shm formats are identical to DRM formats, except ARGB8888 and
    * XRGB8888 */
   switch (drm_format) {
   case DRM_FORMAT_ARGB8888:
      return WL_SHM_FORMAT_ARGB8888;
   case DRM_FORMAT_XRGB8888:
      return WL_SHM_FORMAT_XRGB8888;
   default:
      return drm_format;
   }
}

static void
dmabuf_handle_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                     uint32_t format)
{
   /* Formats are implicitly advertised by the modifier event, so we ignore
    * them here. */
}

static void
dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                       uint32_t format, uint32_t modifier_hi,
                       uint32_t modifier_lo)
{
   struct wsi_wl_display *display = data;
   uint64_t modifier;

   /* Ignore this if the compositor advertised dma-buf feedback. From version 4
    * onwards (when dma-buf feedback was introduced), the compositor should not
    * advertise this event anymore, but let's keep this for safety. */
   if (display->wl_dmabuf_feedback)
      return;

   modifier = ((uint64_t) modifier_hi << 32) | modifier_lo;
   wsi_wl_display_add_drm_format_modifier(display, &display->formats,
                                          format, modifier);
}
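
/* Example of the reassembly above: I915_FORMAT_MOD_X_TILED
 * (0x0100000000000001) would arrive as modifier_hi = 0x01000000 and
 * modifier_lo = 0x00000001. Any vendor modifier splits across the two
 * 32-bit event arguments the same way.
 */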

static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
   dmabuf_handle_format,
   dmabuf_handle_modifier,
};

static void
dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)
{
   if (format_table->data && format_table->data != MAP_FAILED)
      munmap(format_table->data, format_table->size);
}

static void
dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)
{
   memset(format_table, 0, sizeof(*format_table));
}

static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
{
   struct wsi_wl_format *format;

   u_vector_foreach(format, &tranche->formats)
      u_vector_finish(&format->modifiers);

   u_vector_finish(&tranche->formats);
}

static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
{
   memset(tranche, 0, sizeof(*tranche));

   if (!u_vector_init(&tranche->formats, 8, sizeof(struct wsi_wl_format)))
      return -1;

   return 0;
}

static void
dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
{
   dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);

   util_dynarray_foreach(&dmabuf_feedback->tranches,
                         struct dmabuf_feedback_tranche, tranche)
      dmabuf_feedback_tranche_fini(tranche);
   util_dynarray_fini(&dmabuf_feedback->tranches);

   dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
}

static int
dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
{
   memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));

   if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
      return -1;

   util_dynarray_init(&dmabuf_feedback->tranches, NULL);

   dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);

   return 0;
}

static void
default_dmabuf_feedback_format_table(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
                                     int32_t fd, uint32_t size)
{
   struct wsi_wl_display *display = data;

   display->format_table.size = size;
   display->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);

   close(fd);
}

static void
default_dmabuf_feedback_main_device(void *data,
                                    struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                    struct wl_array *device)
{
   struct wsi_wl_display *display = data;

   assert(device->size == sizeof(dev_t));
   memcpy(&display->main_device, device->data, device->size);
}

static void
default_dmabuf_feedback_tranche_target_device(void *data,
                                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                              struct wl_array *device)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_tranche_flags(void *data,
                                      struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                      uint32_t flags)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_tranche_formats(void *data,
                                        struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                        struct wl_array *indices)
{
   struct wsi_wl_display *display = data;
   uint32_t format;
   uint64_t modifier;
   uint16_t *index;

   /* We couldn't map the format table or the compositor didn't advertise it,
    * so we have to ignore the feedback. */
   if (display->format_table.data == MAP_FAILED ||
       display->format_table.data == NULL)
      return;

   wl_array_for_each(index, indices) {
      format = display->format_table.data[*index].format;
      modifier = display->format_table.data[*index].modifier;
      wsi_wl_display_add_drm_format_modifier(display, &display->formats,
                                             format, modifier);
   }
}

static void
default_dmabuf_feedback_tranche_done(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_done(void *data,
                             struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}

static const struct zwp_linux_dmabuf_feedback_v1_listener
dmabuf_feedback_listener = {
   .format_table = default_dmabuf_feedback_format_table,
   .main_device = default_dmabuf_feedback_main_device,
   .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
   .tranche_flags = default_dmabuf_feedback_tranche_flags,
   .tranche_formats = default_dmabuf_feedback_tranche_formats,
   .tranche_done = default_dmabuf_feedback_tranche_done,
   .done = default_dmabuf_feedback_done,
};

static void
shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
{
   struct wsi_wl_display *display = data;

   wsi_wl_display_add_wl_shm_format(display, &display->formats, format);
}

static const struct wl_shm_listener shm_listener = {
   .format = shm_handle_format
};

static void
presentation_handle_clock_id(void *data, struct wp_presentation *wp_presentation,
                             uint32_t clk_id)
{
   struct wsi_wl_display *display = data;

   display->presentation_clock_id = clk_id;
}

static const struct wp_presentation_listener presentation_listener = {
   presentation_handle_clock_id,
};

static void
registry_handle_global(void *data, struct wl_registry *registry,
                       uint32_t name, const char *interface, uint32_t version)
{
   struct wsi_wl_display *display = data;

   if (display->sw) {
      if (strcmp(interface, wl_shm_interface.name) == 0) {
         display->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
         wl_shm_add_listener(display->wl_shm, &shm_listener, display);
      }
   } else {
      if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 && version >= 3) {
         display->wl_dmabuf =
            wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface,
                             MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
         zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
                                          &dmabuf_listener, display);
      } else if (strcmp(interface, wp_linux_drm_syncobj_manager_v1_interface.name) == 0) {
         display->wl_syncobj =
            wl_registry_bind(registry, name, &wp_linux_drm_syncobj_manager_v1_interface, 1);
      }
   }

   if (strcmp(interface, wp_presentation_interface.name) == 0) {
      if (version > 1)
         display->wp_presentation_version = 2;
      else
         display->wp_presentation_version = 1;

      display->wp_presentation_notwrapped =
         wl_registry_bind(registry, name, &wp_presentation_interface,
                          display->wp_presentation_version);
      wp_presentation_add_listener(display->wp_presentation_notwrapped,
                                   &presentation_listener, display);
   } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
      display->tearing_control_manager =
         wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
   } else if (strcmp(interface, wp_fifo_manager_v1_interface.name) == 0) {
      display->fifo_manager =
         wl_registry_bind(registry, name, &wp_fifo_manager_v1_interface, 1);
   } else if (!display->no_timestamps &&
              strcmp(interface, wp_commit_timing_manager_v1_interface.name) == 0) {
      display->commit_timing_manager =
         wl_registry_bind(registry, name, &wp_commit_timing_manager_v1_interface, 1);
   }
}

static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{ /* No-op */ }

static const struct wl_registry_listener registry_listener = {
   registry_handle_global,
   registry_handle_global_remove
};

static void
wsi_wl_display_finish(struct wsi_wl_display *display)
{
   struct wsi_wl_format *f;
   u_vector_foreach(f, &display->formats)
      u_vector_finish(&f->modifiers);
   u_vector_finish(&display->formats);
   if (display->wl_shm)
      wl_shm_destroy(display->wl_shm);
   if (display->wl_syncobj)
      wp_linux_drm_syncobj_manager_v1_destroy(display->wl_syncobj);
   if (display->wl_dmabuf)
      zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
   if (display->wp_presentation_notwrapped)
      wp_presentation_destroy(display->wp_presentation_notwrapped);
   if (display->fifo_manager)
      wp_fifo_manager_v1_destroy(display->fifo_manager);
   if (display->commit_timing_manager)
      wp_commit_timing_manager_v1_destroy(display->commit_timing_manager);
   if (display->tearing_control_manager)
      wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
   if (display->wl_display_wrapper)
      wl_proxy_wrapper_destroy(display->wl_display_wrapper);
   if (display->queue)
      wl_event_queue_destroy(display->queue);
}

static VkResult
wsi_wl_display_init(struct wsi_wayland *wsi_wl,
                    struct wsi_wl_display *display,
                    struct wl_display *wl_display,
                    bool get_format_list, bool sw,
                    const char *queue_name)
{
   VkResult result = VK_SUCCESS;
   memset(display, 0, sizeof(*display));

   if (!u_vector_init(&display->formats, 8, sizeof(struct wsi_wl_format)))
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   display->presentation_clock_id = -1; /* 0 is a valid clock ID */
   display->wsi_wl = wsi_wl;
   display->wl_display = wl_display;
   display->sw = sw;

   display->queue = wl_display_create_queue_with_name(wl_display, queue_name);
   if (!display->queue) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   display->wl_display_wrapper = wl_proxy_create_wrapper(wl_display);
   if (!display->wl_display_wrapper) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   display->no_timestamps = wsi_wl->wsi->wayland.disable_timestamps;

   wl_proxy_set_queue((struct wl_proxy *) display->wl_display_wrapper,
                      display->queue);

   struct wl_registry *registry =
      wl_display_get_registry(display->wl_display_wrapper);
   if (!registry) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_registry_add_listener(registry, &registry_listener, display);

   /* Round-trip to get the wl_shm and zwp_linux_dmabuf_v1 globals */
   wl_display_roundtrip_queue(display->wl_display, display->queue);
   if (!display->wl_dmabuf && !display->wl_shm) {
      result = VK_ERROR_SURFACE_LOST_KHR;
      goto fail_registry;
   }

   /* The caller doesn't expect us to query formats/modifiers, so return */
   if (!get_format_list)
      goto out;

   /* Default assumption */
   display->same_gpu = true;

   /* Get the default dma-buf feedback */
   if (display->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(display->wl_dmabuf) >=
                             ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
      dmabuf_feedback_format_table_init(&display->format_table);
      display->wl_dmabuf_feedback =
         zwp_linux_dmabuf_v1_get_default_feedback(display->wl_dmabuf);
      zwp_linux_dmabuf_feedback_v1_add_listener(display->wl_dmabuf_feedback,
                                                &dmabuf_feedback_listener, display);

      /* Round-trip again to fetch the dma-buf feedback */
      wl_display_roundtrip_queue(display->wl_display, display->queue);

      if (wsi_wl->wsi->drm_info.hasRender ||
          wsi_wl->wsi->drm_info.hasPrimary) {
         /* Apparently some Wayland compositors do not send the render
          * device node but the primary one, so test against both.
          */
         display->same_gpu =
            (wsi_wl->wsi->drm_info.hasRender &&
             major(display->main_device) == wsi_wl->wsi->drm_info.renderMajor &&
             minor(display->main_device) == wsi_wl->wsi->drm_info.renderMinor) ||
            (wsi_wl->wsi->drm_info.hasPrimary &&
             major(display->main_device) == wsi_wl->wsi->drm_info.primaryMajor &&
             minor(display->main_device) == wsi_wl->wsi->drm_info.primaryMinor);
      }
   }

   /* Round-trip again to get the formats and modifiers */
   wl_display_roundtrip_queue(display->wl_display, display->queue);

   if (wsi_wl->wsi->force_bgra8_unorm_first) {
      /* Find BGRA8_UNORM in the list and swap it to the first position if we
       * can find it.  Some apps get confused if SRGB is first in the list.
       */
      struct wsi_wl_format *first_fmt = u_vector_tail(&display->formats);
      struct wsi_wl_format *f, tmp_fmt;
      f = find_format(&display->formats, VK_FORMAT_B8G8R8A8_UNORM);
      if (f) {
         tmp_fmt = *f;
         *f = *first_fmt;
         *first_fmt = tmp_fmt;
      }
   }

out:
   /* We don't need this anymore */
   wl_registry_destroy(registry);

   /* Destroy the default dma-buf feedback object and format table */
   if (display->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(display->wl_dmabuf_feedback);
      display->wl_dmabuf_feedback = NULL;
      dmabuf_feedback_format_table_fini(&display->format_table);
   }

   return VK_SUCCESS;

fail_registry:
   if (registry)
      wl_registry_destroy(registry);

fail:
   wsi_wl_display_finish(display);
   return result;
}

static VkResult
wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display,
                      bool sw,
                      struct wsi_wl_display **display_out)
{
   struct wsi_wl_display *display =
      vk_alloc(wsi->alloc, sizeof(*display), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!display)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   VkResult result = wsi_wl_display_init(wsi, display, wl_display, true,
                                         sw, "mesa vk display queue");
   if (result != VK_SUCCESS) {
      vk_free(wsi->alloc, display);
      return result;
   }

   *display_out = display;

   return result;
}

static void
wsi_wl_display_destroy(struct wsi_wl_display *display)
{
   struct wsi_wayland *wsi = display->wsi_wl;
   wsi_wl_display_finish(display);
   vk_free(wsi->alloc, display);
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                   uint32_t queueFamilyIndex,
                                                   struct wl_display *wl_display)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
   struct wsi_device *wsi_device = pdevice->wsi_device;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
      return false;

   struct wsi_wl_display display;
   VkResult ret = wsi_wl_display_init(wsi, &display, wl_display, false,
                                      wsi_device->sw, "mesa presentation support query");
   if (ret == VK_SUCCESS)
      wsi_wl_display_finish(&display);

   return ret == VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
                           struct wsi_device *wsi_device,
                           uint32_t queueFamilyIndex,
                           VkBool32 *pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

/* For true mailbox mode, we need at least 4 images:
 *  1) One to scan out from
 *  2) One to have queued for scan-out
 *  3) One to be currently held by the Wayland compositor
 *  4) One to render to
 */
#define WSI_WL_BUMPED_NUM_IMAGES 4

/* Catch-all. 3 images is a sound default for everything except MAILBOX. */
#define WSI_WL_DEFAULT_NUM_IMAGES 3

static uint32_t
wsi_wl_surface_get_min_image_count(struct wsi_wl_display *display,
                                   const VkSurfacePresentModeEXT *present_mode)
{
   if (present_mode) {
      return present_mode->presentMode == VK_PRESENT_MODE_MAILBOX_KHR ?
             WSI_WL_BUMPED_NUM_IMAGES : WSI_WL_DEFAULT_NUM_IMAGES;
   }

   /* If an explicit present_mode is not being queried, we need to provide a
    * safe "catch-all" which can work for any presentation mode.
    * Implementations are allowed to bump minImageCount at swapchain creation,
    * so this limit should be the lowest value which can guarantee forward
    * progress. */

   /* When the FIFO protocol is not supported, we have always returned 4 here,
    * despite it going against the spirit of minImageCount in the
    * specification. To avoid any unforeseen breakage, just keep using the
    * same values we always have. In this path, we also never consider bumping
    * the image count at swapchain creation time. */

   /* When the FIFO protocol is supported, applications no longer block in
    * QueuePresentKHR due to the frame callback, so returning 4 images for a
    * FIFO swapchain is deeply problematic due to excessive latency. That
    * latency can only be limited through means of presentWait, which few
    * applications use, and we cannot mandate that shipping applications be
    * rewritten to avoid a regression. 2 images are enough for forward
    * progress in FIFO, but we use 3 as a pragmatic decision: with 2, we
    * could end up waiting for the compositor to remove an old image from
    * scanout when we'd like to be rendering, and we don't want naively
    * written applications to head into poor-performance territory by
    * default. The X11 backend has very similar logic and rationale. */
   return display->fifo_manager ? WSI_WL_DEFAULT_NUM_IMAGES : WSI_WL_BUMPED_NUM_IMAGES;
}
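
/* Resulting minImageCount matrix for the logic above:
 *    explicit MAILBOX query            -> 4
 *    explicit query, any other mode    -> 3
 *    no mode queried, FIFO protocol    -> 3
 *    no mode queried, legacy frame cb  -> 4
 */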

static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                                struct wsi_device *wsi_device,
                                const VkSurfacePresentModeEXT *present_mode,
                                VkSurfaceCapabilitiesKHR *caps)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wl_surface *wsi_wl_surface =
      wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
   struct wsi_wl_display temp_display, *display = wsi_wl_surface->display;

   if (!wsi_wl_surface->display) {
      if (wsi_wl_display_init(wsi, &temp_display, surface->display, true,
                              wsi_device->sw, "mesa image count query"))
         return VK_ERROR_SURFACE_LOST_KHR;
      display = &temp_display;
   }

   caps->minImageCount = wsi_wl_surface_get_min_image_count(display, present_mode);

   if (!wsi_wl_surface->display)
      wsi_wl_display_finish(&temp_display);

   /* There is no real maximum */
   caps->maxImageCount = 0;

   /* The special value (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface
    * size will be determined by the swapchain extent. */
   caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   caps->maxImageExtent = (VkExtent2D) {
      wsi_device->maxImageDimension2D,
      wsi_device->maxImageDimension2D,
   };

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags = wsi_caps_get_image_usage();

   VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
   if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
      caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
                                 struct wsi_device *wsi_device,
                                 const void *info_next,
                                 VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   const VkSurfacePresentModeEXT *present_mode =
      vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);

   VkResult result =
      wsi_wl_surface_get_capabilities(surface, wsi_device, present_mode,
                                      &caps->surfaceCapabilities);

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
         /* Unsupported. */
         VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
         scaling->supportedPresentScaling = 0;
         scaling->supportedPresentGravityX = 0;
         scaling->supportedPresentGravityY = 0;
         scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
         scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
         /* Can easily toggle between FIFO and MAILBOX on Wayland. */
         VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
         if (compat->pPresentModes) {
            assert(present_mode);
            VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes,
                                   compat->pPresentModes, &compat->presentModeCount);
            /* Must always return the queried present mode, even when
             * truncating. */
            vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
               *mode = present_mode->presentMode;
            }
            switch (present_mode->presentMode) {
            case VK_PRESENT_MODE_MAILBOX_KHR:
               vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                  *mode = VK_PRESENT_MODE_FIFO_KHR;
               }
               break;
            case VK_PRESENT_MODE_FIFO_KHR:
               vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                  *mode = VK_PRESENT_MODE_MAILBOX_KHR;
               }
               break;
            default:
               break;
            }
         } else {
            if (!present_mode) {
               wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
                                       "without a VkSurfacePresentModeEXT set. This is an "
                                       "application bug.\n");
               compat->presentModeCount = 1;
            } else {
               switch (present_mode->presentMode) {
               case VK_PRESENT_MODE_MAILBOX_KHR:
               case VK_PRESENT_MODE_FIFO_KHR:
                  compat->presentModeCount = 2;
                  break;
               default:
                  compat->presentModeCount = 1;
                  break;
               }
            }
         }
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}
1353 
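/* Illustrative sketch (editor's addition, not part of this file): how an
 * application might query the FIFO/MAILBOX compatibility reported above via
 * VK_EXT_surface_maintenance1. physical_device and surface are assumed to be
 * valid handles; error handling is omitted.
 */
#if 0
   VkSurfacePresentModeEXT query_mode = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT,
      .presentMode = VK_PRESENT_MODE_FIFO_KHR,
   };
   VkPhysicalDeviceSurfaceInfo2KHR surface_info = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR,
      .pNext = &query_mode,
      .surface = surface,
   };
   VkSurfacePresentModeCompatibilityEXT compat = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT,
   };
   VkSurfaceCapabilities2KHR caps2 = {
      .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
      .pNext = &compat,
   };
   /* First call: pPresentModes == NULL, so only presentModeCount is written. */
   vkGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device, &surface_info, &caps2);
   /* Second call: fill the array; for FIFO or MAILBOX the count here is 2. */
   VkPresentModeKHR modes[2];
   compat.presentModeCount = 2;
   compat.pPresentModes = modes;
   vkGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device, &surface_info, &caps2);
#endif
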
1354 static VkResult
1355 wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
1356                            struct wsi_device *wsi_device,
1357                            uint32_t* pSurfaceFormatCount,
1358                            VkSurfaceFormatKHR* pSurfaceFormats)
1359 {
1360    VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
1361    struct wsi_wayland *wsi =
1362       (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
1363 
1364    struct wsi_wl_display display;
1365    if (wsi_wl_display_init(wsi, &display, surface->display, true,
1366                            wsi_device->sw, "mesa formats query"))
1367       return VK_ERROR_SURFACE_LOST_KHR;
1368 
1369    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
1370                           pSurfaceFormats, pSurfaceFormatCount);
1371 
1372    struct wsi_wl_format *disp_fmt;
1373    u_vector_foreach(disp_fmt, &display.formats) {
1374       /* Skip formats for which we can't support both the alpha and
1375        * opaque variants.
1376        */
1377       if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
1378           !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
1379          continue;
1380 
1381       vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
1382          out_fmt->format = disp_fmt->vk_format;
1383          out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
1384       }
1385    }
1386 
1387    wsi_wl_display_finish(&display);
1388 
1389    return vk_outarray_status(&out);
1390 }
1391 
1392 static VkResult
1393 wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
1394                             struct wsi_device *wsi_device,
1395                             const void *info_next,
1396                             uint32_t* pSurfaceFormatCount,
1397                             VkSurfaceFormat2KHR* pSurfaceFormats)
1398 {
1399    VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
1400    struct wsi_wayland *wsi =
1401       (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
1402 
1403    struct wsi_wl_display display;
1404    if (wsi_wl_display_init(wsi, &display, surface->display, true,
1405                            wsi_device->sw, "mesa formats2 query"))
1406       return VK_ERROR_SURFACE_LOST_KHR;
1407 
1408    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
1409                           pSurfaceFormats, pSurfaceFormatCount);
1410 
1411    struct wsi_wl_format *disp_fmt;
1412    u_vector_foreach(disp_fmt, &display.formats) {
1413       /* Skip formats for which we can't support both the alpha and
1414        * opaque variants.
1415        */
1416       if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
1417           !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
1418          continue;
1419 
1420       vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
1421          out_fmt->surfaceFormat.format = disp_fmt->vk_format;
1422          out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
1423       }
1424    }
1425 
1426    wsi_wl_display_finish(&display);
1427 
1428    return vk_outarray_status(&out);
1429 }
1430 
1431 static VkResult
1432 wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *icd_surface,
1433                                  struct wsi_device *wsi_device,
1434                                  uint32_t* pPresentModeCount,
1435                                  VkPresentModeKHR* pPresentModes)
1436 {
1437    VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
1438    struct wsi_wayland *wsi =
1439       (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
1440 
1441    struct wsi_wl_display display;
1442    if (wsi_wl_display_init(wsi, &display, surface->display, true,
1443                            wsi_device->sw, "mesa present modes query"))
1444       return VK_ERROR_SURFACE_LOST_KHR;
1445 
1446    VkPresentModeKHR present_modes[3];
1447    uint32_t present_modes_count = 0;
1448 
1449    /* The following two modes are always supported */
1450    present_modes[present_modes_count++] = VK_PRESENT_MODE_MAILBOX_KHR;
1451    present_modes[present_modes_count++] = VK_PRESENT_MODE_FIFO_KHR;
1452 
1453    if (display.tearing_control_manager)
1454       present_modes[present_modes_count++] = VK_PRESENT_MODE_IMMEDIATE_KHR;
1455 
1456    assert(present_modes_count <= ARRAY_SIZE(present_modes));
1457    wsi_wl_display_finish(&display);
1458 
1459    if (pPresentModes == NULL) {
1460       *pPresentModeCount = present_modes_count;
1461       return VK_SUCCESS;
1462    }
1463 
1464    *pPresentModeCount = MIN2(*pPresentModeCount, present_modes_count);
1465    typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
1466 
1467    if (*pPresentModeCount < present_modes_count)
1468       return VK_INCOMPLETE;
1469    else
1470       return VK_SUCCESS;
1471 }
1472 
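/* Illustrative sketch (editor's addition, not part of this file): the
 * standard two-call enumeration pattern that the entry point above services,
 * seen from the application side. physical_device and surface are assumed
 * valid.
 */
#if 0
   uint32_t count = 0;
   vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &count, NULL);
   VkPresentModeKHR *modes = malloc(count * sizeof(*modes));
   /* Returns VK_INCOMPLETE if the array turned out to be too small. */
   VkResult res = vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface,
                                                            &count, modes);
   free(modes);
#endif
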
1473 static VkResult
1474 wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
1475                                       struct wsi_device *wsi_device,
1476                                       uint32_t* pRectCount,
1477                                       VkRect2D* pRects)
1478 {
1479    VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
1480 
1481    vk_outarray_append_typed(VkRect2D, &out, rect) {
1482       /* We don't know a size so just return the usual "I don't know." */
1483       *rect = (VkRect2D) {
1484          .offset = { 0, 0 },
1485          .extent = { UINT32_MAX, UINT32_MAX },
1486       };
1487    }
1488 
1489    return vk_outarray_status(&out);
1490 }
1491 
1492 static void
1493 wsi_wl_surface_analytics_fini(struct wsi_wl_surface *wsi_wl_surface,
1494                               const VkAllocationCallbacks *parent_pAllocator,
1495                               const VkAllocationCallbacks *pAllocator)
1496 {
1497    vk_free2(parent_pAllocator, pAllocator,
1498             wsi_wl_surface->analytics.latency_str);
1499 }
1500 
1501 void
1502 wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
1503                        const VkAllocationCallbacks *pAllocator)
1504 {
1505    VK_FROM_HANDLE(vk_instance, instance, _instance);
1506    struct wsi_wl_surface *wsi_wl_surface =
1507       wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
1508 
1509    if (wsi_wl_surface->wl_syncobj_surface)
1510       wp_linux_drm_syncobj_surface_v1_destroy(wsi_wl_surface->wl_syncobj_surface);
1511 
1512    if (wsi_wl_surface->wl_dmabuf_feedback) {
1513       zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
1514       dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
1515       dmabuf_feedback_fini(&wsi_wl_surface->pending_dmabuf_feedback);
1516    }
1517 
1518    if (wsi_wl_surface->surface)
1519       wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
1520 
1521    if (wsi_wl_surface->display)
1522       wsi_wl_display_destroy(wsi_wl_surface->display);
1523 
1524    wsi_wl_surface_analytics_fini(wsi_wl_surface, &instance->alloc, pAllocator);
1525 
1526    vk_free2(&instance->alloc, pAllocator, wsi_wl_surface);
1527 }
1528 
1529 static struct wsi_wl_format *
1530 pick_format_from_surface_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface,
1531                                          VkFormat vk_format)
1532 {
1533    struct wsi_wl_format *f = NULL;
1534 
1535    /* If the main_device was not advertised, we don't have valid feedback */
1536    if (wsi_wl_surface->dmabuf_feedback.main_device == 0)
1537       return NULL;
1538 
1539    util_dynarray_foreach(&wsi_wl_surface->dmabuf_feedback.tranches,
1540                          struct dmabuf_feedback_tranche, tranche) {
1541       f = find_format(&tranche->formats, vk_format);
1542       if (f)
1543          break;
1544    }
1545 
1546    return f;
1547 }
1548 
1549 static void
1550 surface_dmabuf_feedback_format_table(void *data,
1551                                      struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
1552                                      int32_t fd, uint32_t size)
1553 {
1554    struct wsi_wl_surface *wsi_wl_surface = data;
1555    struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
1556 
1557    feedback->format_table.size = size;
1558    feedback->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
1559 
1560    close(fd);
1561 }
1562 
1563 static void
1564 surface_dmabuf_feedback_main_device(void *data,
1565                                     struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1566                                     struct wl_array *device)
1567 {
1568    struct wsi_wl_surface *wsi_wl_surface = data;
1569    struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
1570 
1571    memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
1572 }
1573 
1574 static void
1575 surface_dmabuf_feedback_tranche_target_device(void *data,
1576                                               struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1577                                               struct wl_array *device)
1578 {
1579    struct wsi_wl_surface *wsi_wl_surface = data;
1580    struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
1581 
1582    memcpy(&feedback->pending_tranche.target_device, device->data,
1583           sizeof(feedback->pending_tranche.target_device));
1584 }
1585 
1586 static void
1587 surface_dmabuf_feedback_tranche_flags(void *data,
1588                                       struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1589                                       uint32_t flags)
1590 {
1591    struct wsi_wl_surface *wsi_wl_surface = data;
1592    struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
1593 
1594    feedback->pending_tranche.flags = flags;
1595 }
1596 
1597 static void
1598 surface_dmabuf_feedback_tranche_formats(void *data,
1599                                         struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1600                                         struct wl_array *indices)
1601 {
1602    struct wsi_wl_surface *wsi_wl_surface = data;
1603    struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
1604    uint32_t format;
1605    uint64_t modifier;
1606    uint16_t *index;
1607 
1608    /* The compositor may or may not advertise a format table. If it does, we use it.
1609     * Otherwise, we steal the most recently advertised format table. If there is no
1610     * previous format table to steal either, the compositor did something wrong. */
1611    if (feedback->format_table.data == NULL) {
1612       feedback->format_table = wsi_wl_surface->dmabuf_feedback.format_table;
1613       dmabuf_feedback_format_table_init(&wsi_wl_surface->dmabuf_feedback.format_table);
1614    }
1615    if (feedback->format_table.data == MAP_FAILED ||
1616        feedback->format_table.data == NULL)
1617       return;
1618 
1619    wl_array_for_each(index, indices) {
1620       format = feedback->format_table.data[*index].format;
1621       modifier = feedback->format_table.data[*index].modifier;
1622 
1623       wsi_wl_display_add_drm_format_modifier(wsi_wl_surface->display,
1624                         &wsi_wl_surface->pending_dmabuf_feedback.pending_tranche.formats,
1625                         format, modifier);
1626    }
1627 }
1628 
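/* Illustrative sketch (editor's addition, not part of this file): each
 * format-table entry is 16 bytes ({u32 format, u32 padding, u64 modifier})
 * and the event carries 16-bit indices into that table, so a defensive
 * variant of the loop above could bound-check the indices first:
 */
#if 0
   unsigned n_entries =
      feedback->format_table.size / sizeof(*feedback->format_table.data);
   wl_array_for_each(index, indices) {
      if (*index >= n_entries)
         continue; /* out-of-range index from the compositor; skip it */
      format = feedback->format_table.data[*index].format;
      modifier = feedback->format_table.data[*index].modifier;
   }
#endif
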
1629 static void
1630 surface_dmabuf_feedback_tranche_done(void *data,
1631                                      struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
1632 {
1633    struct wsi_wl_surface *wsi_wl_surface = data;
1634    struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
1635 
1636    /* Add tranche to array of tranches. */
1637    util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
1638                         feedback->pending_tranche);
1639 
1640    dmabuf_feedback_tranche_init(&feedback->pending_tranche);
1641 }
1642 
1643 static bool
1644 sets_of_modifiers_are_the_same(uint32_t num_drm_modifiers_A, const uint64_t *modifiers_A,
1645                                uint32_t num_drm_modifiers_B, const uint64_t *modifiers_B)
1646 {
1647    uint32_t i, j;
1648    bool mod_found;
1649 
1650    if (num_drm_modifiers_A != num_drm_modifiers_B)
1651       return false;
1652 
1653    for (i = 0; i < num_drm_modifiers_A; i++) {
1654       mod_found = false;
1655       for (j = 0; j < num_drm_modifiers_B; j++) {
1656          if (modifiers_A[i] == modifiers_B[j]) {
1657             mod_found = true;
1658             break;
1659          }
1660       }
1661       if (!mod_found)
1662          return false;
1663    }
1664 
1665    return true;
1666 }
1667 
1668 static void
1669 surface_dmabuf_feedback_done(void *data,
1670                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
1671 {
1672    struct wsi_wl_surface *wsi_wl_surface = data;
1673    struct wsi_wl_swapchain *chain = wsi_wl_surface->chain;
1674    struct wsi_wl_format *f;
1675 
1676    dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
1677    wsi_wl_surface->dmabuf_feedback = wsi_wl_surface->pending_dmabuf_feedback;
1678    dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback);
1679 
1680    /* Receiving dma-buf feedback does not by itself mean that re-allocation
1681     * is a good idea. To know whether we should re-allocate, we must compare
1682     * the most recent parameters that we used to allocate with the ones
1683     * from the feedback we just received.
1684     *
1685     * The allocation parameters are: the format, its set of modifiers and the
1686     * tranche flags. WSI does not use the tranche flags for anything, so we
1687     * disregard them. As we can't switch to another format (it is selected
1688     * by the client), we just need to compare the sets of modifiers.
1689     *
1690     * So we look for the vk_format in the tranches (respecting their
1691     * preferences) and compare its set of modifiers with the set of modifiers
1692     * we used to allocate previously. If they differ, we are using suboptimal
1693     * parameters and should re-allocate.
1694     */
1695    f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface, chain->vk_format);
1696    if (f && !sets_of_modifiers_are_the_same(u_vector_length(&f->modifiers),
1697                                             u_vector_tail(&f->modifiers),
1698                                             chain->num_drm_modifiers,
1699                                             chain->drm_modifiers))
1700       wsi_wl_surface->chain->suboptimal = true;
1701 }
1702 
1703 static const struct zwp_linux_dmabuf_feedback_v1_listener
1704 surface_dmabuf_feedback_listener = {
1705    .format_table = surface_dmabuf_feedback_format_table,
1706    .main_device = surface_dmabuf_feedback_main_device,
1707    .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
1708    .tranche_flags = surface_dmabuf_feedback_tranche_flags,
1709    .tranche_formats = surface_dmabuf_feedback_tranche_formats,
1710    .tranche_done = surface_dmabuf_feedback_tranche_done,
1711    .done = surface_dmabuf_feedback_done,
1712 };
1713 
1714 static VkResult wsi_wl_surface_bind_to_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface)
1715 {
1716    wsi_wl_surface->wl_dmabuf_feedback =
1717       zwp_linux_dmabuf_v1_get_surface_feedback(wsi_wl_surface->display->wl_dmabuf,
1718                                                wsi_wl_surface->surface);
1719 
1720    zwp_linux_dmabuf_feedback_v1_add_listener(wsi_wl_surface->wl_dmabuf_feedback,
1721                                              &surface_dmabuf_feedback_listener,
1722                                              wsi_wl_surface);
1723 
1724    if (dmabuf_feedback_init(&wsi_wl_surface->dmabuf_feedback) < 0)
1725       goto fail;
1726    if (dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback) < 0)
1727       goto fail_pending;
1728 
1729    return VK_SUCCESS;
1730 
1731 fail_pending:
1732    dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
1733 fail:
1734    zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
1735    wsi_wl_surface->wl_dmabuf_feedback = NULL;
1736    return VK_ERROR_OUT_OF_HOST_MEMORY;
1737 }
1738 
1739 static void
1740 wsi_wl_surface_analytics_init(struct wsi_wl_surface *wsi_wl_surface,
1741                               const VkAllocationCallbacks *pAllocator)
1742 {
1743    uint64_t wl_id;
1744    char *track_name;
1745 
1746    wl_id = wl_proxy_get_id((struct wl_proxy *) wsi_wl_surface->surface);
1747    track_name = vk_asprintf(pAllocator, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
1748                             "wl%" PRIu64 " presentation", wl_id);
1749    wsi_wl_surface->analytics.presentation_track_id = util_perfetto_new_track(track_name);
1750    vk_free(pAllocator, track_name);
1751 
1752    wsi_wl_surface->analytics.latency_str =
1753       vk_asprintf(pAllocator,
1754                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
1755                   "wl%" PRIu64 " latency", wl_id);
1756 }
1757 
1758 static VkResult wsi_wl_surface_init(struct wsi_wl_surface *wsi_wl_surface,
1759                                     struct wsi_device *wsi_device,
1760                                     const VkAllocationCallbacks *pAllocator)
1761 {
1762    struct wsi_wayland *wsi =
1763       (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
1764    VkResult result;
1765 
1766    /* wsi_wl_surface has already been initialized. */
1767    if (wsi_wl_surface->display)
1768       return VK_SUCCESS;
1769 
1770    result = wsi_wl_display_create(wsi, wsi_wl_surface->base.display,
1771                                   wsi_device->sw, &wsi_wl_surface->display);
1772    if (result != VK_SUCCESS)
1773       goto fail;
1774 
1775    wsi_wl_surface->surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
1776    if (!wsi_wl_surface->surface) {
1777       result = VK_ERROR_OUT_OF_HOST_MEMORY;
1778       goto fail;
1779    }
1780    wl_proxy_set_queue((struct wl_proxy *) wsi_wl_surface->surface,
1781                       wsi_wl_surface->display->queue);
1782 
1783    /* Bind wsi_wl_surface to dma-buf feedback. */
1784    if (wsi_wl_surface->display->wl_dmabuf &&
1785        zwp_linux_dmabuf_v1_get_version(wsi_wl_surface->display->wl_dmabuf) >=
1786        ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
1787       result = wsi_wl_surface_bind_to_dmabuf_feedback(wsi_wl_surface);
1788       if (result != VK_SUCCESS)
1789          goto fail;
1790 
1791       wl_display_roundtrip_queue(wsi_wl_surface->display->wl_display,
1792                                  wsi_wl_surface->display->queue);
1793    }
1794 
1795    if (wsi_wl_use_explicit_sync(wsi_wl_surface->display, wsi_device)) {
1796       wsi_wl_surface->wl_syncobj_surface =
1797          wp_linux_drm_syncobj_manager_v1_get_surface(wsi_wl_surface->display->wl_syncobj,
1798                                                      wsi_wl_surface->surface);
1799 
1800       if (!wsi_wl_surface->wl_syncobj_surface) {
              /* Proxy creation failed; don't leak VK_SUCCESS into the failure path. */
              result = VK_ERROR_OUT_OF_HOST_MEMORY;
1801          goto fail;
           }
1802    }
1803 
1804    wsi_wl_surface_analytics_init(wsi_wl_surface, pAllocator);
1805 
1806    return VK_SUCCESS;
1807 
1808 fail:
1809    if (wsi_wl_surface->surface)
1810       wl_proxy_wrapper_destroy(wsi_wl_surface->surface);
1811 
1812    if (wsi_wl_surface->display)
1813       wsi_wl_display_destroy(wsi_wl_surface->display);
1814    return result;
1815 }
1816 
1817 VKAPI_ATTR VkResult VKAPI_CALL
1818 wsi_CreateWaylandSurfaceKHR(VkInstance _instance,
1819                             const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
1820                             const VkAllocationCallbacks *pAllocator,
1821                             VkSurfaceKHR *pSurface)
1822 {
1823    VK_FROM_HANDLE(vk_instance, instance, _instance);
1824    struct wsi_wl_surface *wsi_wl_surface;
1825    VkIcdSurfaceWayland *surface;
1826 
1827    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
1828 
1829    wsi_wl_surface = vk_zalloc2(&instance->alloc, pAllocator, sizeof *wsi_wl_surface,
1830                                8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1831    if (wsi_wl_surface == NULL)
1832       return VK_ERROR_OUT_OF_HOST_MEMORY;
1833 
1834    surface = &wsi_wl_surface->base;
1835 
1836    surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
1837    surface->display = pCreateInfo->display;
1838    surface->surface = pCreateInfo->surface;
1839 
1840    *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
1841 
1842    return VK_SUCCESS;
1843 }
1844 
1845 struct wsi_wl_present_id {
1846    struct wp_presentation_feedback *feedback;
1847    /* Fallback when wp_presentation is not supported.
1848     * Using a frame callback is not the intended way to achieve
1849     * this, but it is the best-effort alternative when the proper interface is
1850     * not available. This approach also matches Xwayland,
1851     * which uses a frame callback to signal DRI3 COMPLETE. */
1852    struct wl_callback *frame;
1853    uint64_t present_id;
1854    uint64_t flow_id;
1855    uint64_t submission_time;
1856    const VkAllocationCallbacks *alloc;
1857    struct wsi_wl_swapchain *chain;
1858    int buffer_id;
1859    uint64_t target_time;
1860    uint64_t correction;
1861    struct wl_list link;
1862 };
1863 
1864 static struct wsi_image *
1865 wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
1866                                uint32_t image_index)
1867 {
1868    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1869    return &chain->images[image_index].base;
1870 }
1871 
1872 static VkResult
1873 wsi_wl_swapchain_release_images(struct wsi_swapchain *wsi_chain,
1874                                 uint32_t count, const uint32_t *indices)
1875 {
1876    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1877    for (uint32_t i = 0; i < count; i++) {
1878       uint32_t index = indices[i];
1879       chain->images[index].busy = false;
1880    }
1881    return VK_SUCCESS;
1882 }
1883 
1884 static void
1885 wsi_wl_swapchain_set_present_mode(struct wsi_swapchain *wsi_chain,
1886                                   VkPresentModeKHR mode)
1887 {
1888    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1889    chain->base.present_mode = mode;
1890 }
1891 
1892 static VkResult
1893 dispatch_present_id_queue(struct wsi_swapchain *wsi_chain, struct timespec *end_time)
1894 {
1895    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1896 
1897    /* We might not own this surface if we're retired, but it is only used here to
1898     * read events from the present ID queue. This queue is private to a given VkSwapchainKHR,
1899     * so calling present wait on a retired swapchain cannot interfere with a non-retired swapchain. */
1900    struct wl_display *wl_display = chain->wsi_wl_surface->display->wl_display;
1901 
1902    VkResult ret;
1903    int err;
1904 
1905    /* PresentWait can be called concurrently.
1906     * If there is contention on this mutex, it means there is currently a dispatcher in flight holding the lock.
1907     * The lock is only held while there is forward progress processing events from Wayland,
1908     * so there should be no problem locking without timeout.
1909     * We would like to be able to support timeout = 0 to query the current max_completed count.
1910     * A timedlock with no timeout can be problematic in that scenario. */
1911    err = mtx_lock(&chain->present_ids.lock);
1912    if (err != thrd_success)
1913       return VK_ERROR_OUT_OF_DATE_KHR;
1914 
1915    /* Someone else is dispatching events; wait for them to update the chain
1916     * status and wake us up. */
1917    if (chain->present_ids.dispatch_in_progress) {
1918       err = u_cnd_monotonic_timedwait(&chain->present_ids.list_advanced,
1919                                       &chain->present_ids.lock, end_time);
1920       mtx_unlock(&chain->present_ids.lock);
1921 
1922       if (err == thrd_timedout)
1923          return VK_TIMEOUT;
1924       else if (err != thrd_success)
1925          return VK_ERROR_OUT_OF_DATE_KHR;
1926 
1927       return VK_SUCCESS;
1928    }
1929 
1930    /* Whether or not we were dispatching the events before, we are now. */
1931    assert(!chain->present_ids.dispatch_in_progress);
1932    chain->present_ids.dispatch_in_progress = true;
1933 
1934    /* We drop the lock now - we're still protected by dispatch_in_progress,
1935     * and holding the lock while dispatch_queue_timeout waits in poll()
1936     * might delay other threads unnecessarily.
1937     *
1938     * We'll pick up the lock again in the dispatched functions.
1939     */
1940    mtx_unlock(&chain->present_ids.lock);
1941 
1942    ret = loader_wayland_dispatch(wl_display,
1943                                  chain->present_ids.queue,
1944                                  end_time);
1945 
1946    mtx_lock(&chain->present_ids.lock);
1947 
1948    /* Wake up other waiters who may have been unblocked by the events
1949     * we just read. */
1950    u_cnd_monotonic_broadcast(&chain->present_ids.list_advanced);
1951 
1952    assert(chain->present_ids.dispatch_in_progress);
1953    chain->present_ids.dispatch_in_progress = false;
1954 
1955    u_cnd_monotonic_broadcast(&chain->present_ids.list_advanced);
1956    mtx_unlock(&chain->present_ids.lock);
1957 
1958    if (ret == -1)
1959       return VK_ERROR_OUT_OF_DATE_KHR;
1960    if (ret == 0)
1961       return VK_TIMEOUT;
1962    return VK_SUCCESS;
1963 }
1964 
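/* Editor's note (illustrative, not part of this file): the function above is
 * an instance of the single-dispatcher pattern, reduced here to its
 * essentials. Exactly one thread reads and dispatches events; the others
 * sleep on the condition variable until the dispatcher publishes progress,
 * so the lock is never held across poll(). Names below are hypothetical.
 */
#if 0
   mtx_lock(&lock);
   if (dispatch_in_progress) {
      u_cnd_monotonic_timedwait(&cond, &lock, end_time);
      mtx_unlock(&lock);
      return; /* someone else made progress (or we timed out) */
   }
   dispatch_in_progress = true;
   mtx_unlock(&lock);

   dispatch_events(end_time); /* may block in poll(); lock not held */

   mtx_lock(&lock);
   dispatch_in_progress = false;
   u_cnd_monotonic_broadcast(&cond);
   mtx_unlock(&lock);
#endif
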
1965 static bool
1966 wsi_wl_swapchain_present_id_completes_in_finite_time_locked(struct wsi_wl_swapchain *chain,
1967                                                             uint64_t present_id)
1968 {
1969    return present_id <= chain->present_ids.max_forward_progress_present_id;
1970 }
1971 
1972 static VkResult
1973 wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
1974                                   uint64_t present_id,
1975                                   uint64_t timeout)
1976 {
1977    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
1978    struct timespec end_time;
1979    VkResult ret;
1980    int err;
1981 
1982    MESA_TRACE_FUNC();
1983 
1984    uint64_t atimeout;
1985    if (timeout == 0 || timeout == UINT64_MAX)
1986       atimeout = timeout;
1987    else
1988       atimeout = os_time_get_absolute_timeout(timeout);
1989 
1990    /* Need to observe that the swapchain semaphore has been unsignalled,
1991     * as this is guaranteed when a present is complete. */
1992    VkResult result = wsi_swapchain_wait_for_present_semaphore(
1993          &chain->base, present_id, timeout);
1994    if (result != VK_SUCCESS)
1995       return result;
1996 
1997    /* If using frame callback, guard against lack of forward progress
1998     * of the frame callback in some situations,
1999     * e.g. the surface might not be visible.
2000     * If rendering has completed on GPU,
2001     * and we still haven't received a callback after 100ms, unblock the application.
2002     * 100ms is chosen arbitrarily.
2003     * The queue depth in WL WSI is just one frame due to frame callback in FIFO mode,
2004     * so from the time a frame has completed render to when it should be considered presented
2005     * will not exceed 100ms except in contrived edge cases. */
2006 
2007    /* For FIFO without commit-timing we have a similar concern, but only when waiting on the most recent present that is still pending.
2008     * It is possible the last presentation is held back due to being occluded, but this scenario is very rare
2009     * in practice. An application blocking on the last presentation implies zero CPU and GPU overlap,
2010     * and is likely only going to happen at swapchain destruction or similar. */
2011 
2012    uint64_t assumed_success_at = UINT64_MAX;
2013    if (!chain->present_ids.wp_presentation) {
2014       assumed_success_at = os_time_get_absolute_timeout(100 * 1000 * 1000);
2015    } else {
2016       err = mtx_lock(&chain->present_ids.lock);
2017       if (err != thrd_success)
2018          return VK_ERROR_OUT_OF_DATE_KHR;
2019 
2020       /* If we're waiting for the very last commit made for whatever reason,
2021        * we're not necessarily guaranteed forward progress until a subsequent commit is made.
2022        * Add a timeout post GPU rendering completion to unblock any waiter in reasonable time. */
2023       if (!wsi_wl_swapchain_present_id_completes_in_finite_time_locked(chain, present_id)) {
2024          /* The queue depth could be larger, so just make a heuristic decision here to bump the timeout. */
2025          uint32_t num_pending_cycles = wl_list_length(&chain->present_ids.outstanding_list) + 1;
2026          assumed_success_at = os_time_get_absolute_timeout(100ull * 1000 * 1000 * num_pending_cycles);
2027       }
2028       mtx_unlock(&chain->present_ids.lock);
2029    }
2030 
2031    /* If app timeout is beyond the deadline we set for reply,
2032     * always treat the timeout as successful. */
2033    VkResult timeout_result = assumed_success_at < atimeout ? VK_SUCCESS : VK_TIMEOUT;
2034    timespec_from_nsec(&end_time, MIN2(atimeout, assumed_success_at));
2035 
2036    while (1) {
2037       err = mtx_lock(&chain->present_ids.lock);
2038       if (err != thrd_success)
2039          return VK_ERROR_OUT_OF_DATE_KHR;
2040 
2041       bool completed = chain->present_ids.max_completed >= present_id;
2042       mtx_unlock(&chain->present_ids.lock);
2043 
2044       if (completed)
2045          return VK_SUCCESS;
2046 
2047 retry:
2048       ret = dispatch_present_id_queue(wsi_chain, &end_time);
2049       if (ret == VK_TIMEOUT) {
2050          if (timeout_result == VK_SUCCESS && chain->fifo && chain->present_ids.wp_presentation) {
2051             /* If there have been subsequent commits since we made the decision to add a timeout,
2052              * we can drop that timeout condition and rely on forward progress instead. */
2053             err = mtx_lock(&chain->present_ids.lock);
2054             if (err != thrd_success)
2055                return VK_ERROR_OUT_OF_DATE_KHR;
2056 
2057             if (wsi_wl_swapchain_present_id_completes_in_finite_time_locked(chain, present_id)) {
2058                timespec_from_nsec(&end_time, atimeout);
2059                timeout_result = VK_TIMEOUT;
2060             }
2061             mtx_unlock(&chain->present_ids.lock);
2062 
2063             /* Retry the wait, but now without any workaround. */
2064             if (timeout_result == VK_TIMEOUT)
2065                goto retry;
2066          }
2067          return timeout_result;
2068       }
2069 
2070       if (ret != VK_SUCCESS)
2071          return ret;
2072    }
2073 }
2074 
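/* Illustrative sketch (editor's addition, not part of this file): the
 * application-side pairing of VK_KHR_present_id and VK_KHR_present_wait that
 * the entry point above services. device, queue, swapchain and image_index
 * are assumed to be set up already.
 */
#if 0
   uint64_t my_present_id = 42;
   VkPresentIdKHR present_id_info = {
      .sType = VK_STRUCTURE_TYPE_PRESENT_ID_KHR,
      .swapchainCount = 1,
      .pPresentIds = &my_present_id,
   };
   VkPresentInfoKHR present_info = {
      .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
      .pNext = &present_id_info,
      .swapchainCount = 1,
      .pSwapchains = &swapchain,
      .pImageIndices = &image_index,
   };
   vkQueuePresentKHR(queue, &present_info);
   /* Block until that presentation completes, or one second elapses. */
   vkWaitForPresentKHR(device, swapchain, my_present_id, 1000ull * 1000 * 1000);
#endif
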
2075 static int
2076 wsi_wl_swapchain_ensure_dispatch(struct wsi_wl_swapchain *chain)
2077 {
2078    struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
2079    struct wl_display *display = wsi_wl_surface->display->wl_display;
2080    struct timespec timeout = {0, 0};
2081    int ret = 0;
2082 
2083    mtx_lock(&chain->present_ids.lock);
2084    if (chain->present_ids.dispatch_in_progress)
2085       goto already_dispatching;
2086 
2087    chain->present_ids.dispatch_in_progress = true;
2088    mtx_unlock(&chain->present_ids.lock);
2089 
2090    /* Use a dispatch with an instant timeout because dispatch_pending
2091     * won't read any events in the pipe.
2092     */
2093    ret = wl_display_dispatch_queue_timeout(display,
2094                                            chain->present_ids.queue,
2095                                            &timeout);
2096 
2097    mtx_lock(&chain->present_ids.lock);
2098    u_cnd_monotonic_broadcast(&chain->present_ids.list_advanced);
2099    chain->present_ids.dispatch_in_progress = false;
2100 
2101 already_dispatching:
2102    mtx_unlock(&chain->present_ids.lock);
2103    return ret;
2104 }
2105 
2106 static VkResult
2107 wsi_wl_swapchain_acquire_next_image_explicit(struct wsi_swapchain *wsi_chain,
2108                                              const VkAcquireNextImageInfoKHR *info,
2109                                              uint32_t *image_index)
2110 {
2111    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
2112    uint64_t id = 0;
2113 
2114    MESA_TRACE_FUNC_FLOW(&id);
2115 
2116    /* See comments in queue_present() */
2117    if (chain->retired)
2118       return VK_ERROR_OUT_OF_DATE_KHR;
2119 
2120    STACK_ARRAY(struct wsi_image*, images, wsi_chain->image_count);
2121    for (uint32_t i = 0; i < chain->base.image_count; i++)
2122       images[i] = &chain->images[i].base;
2123 
2124    VkResult result;
2125 #ifdef HAVE_LIBDRM
2126    result = wsi_drm_wait_for_explicit_sync_release(wsi_chain,
2127                                                    wsi_chain->image_count,
2128                                                    images,
2129                                                    info->timeout,
2130                                                    image_index);
2131 #else
2132    result = VK_ERROR_FEATURE_NOT_PRESENT;
2133 #endif
2134    STACK_ARRAY_FINISH(images);
2135 
2136    if (result == VK_SUCCESS) {
2137       chain->images[*image_index].flow_id = id;
2138       if (chain->suboptimal)
2139          result = VK_SUBOPTIMAL_KHR;
2140    }
2141 
2142    return result;
2143 }
2144 
2145 static VkResult
2146 wsi_wl_swapchain_acquire_next_image_implicit(struct wsi_swapchain *wsi_chain,
2147                                              const VkAcquireNextImageInfoKHR *info,
2148                                              uint32_t *image_index)
2149 {
2150    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
2151    struct timespec start_time, end_time;
2152    struct timespec rel_timeout;
2153    uint64_t id = 0;
2154 
2155    MESA_TRACE_FUNC_FLOW(&id);
2156 
2157    /* See comments in queue_present() */
2158    if (chain->retired)
2159       return VK_ERROR_OUT_OF_DATE_KHR;
2160 
2161    struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
2162    timespec_from_nsec(&rel_timeout, info->timeout);
2163 
2164    clock_gettime(CLOCK_MONOTONIC, &start_time);
2165    timespec_add(&end_time, &rel_timeout, &start_time);
2166 
2167    while (1) {
2168       /* If we can use timestamps, we want to make sure the queue that
2169        * feedback events arrive on is dispatched, so we eventually get a
2170        * refresh rate and a vsync time to phase-lock to. We don't need to
2171        * wait for it now.
2172        */
2173       if (chain->commit_timer) {
2174          if (wsi_wl_swapchain_ensure_dispatch(chain) == -1)
2175             return VK_ERROR_OUT_OF_DATE_KHR;
2176       }
2177       /* Try to find a free image. */
2178       for (uint32_t i = 0; i < chain->base.image_count; i++) {
2179          if (!chain->images[i].busy) {
2180             /* We found a non-busy image */
2181             *image_index = i;
2182             chain->images[i].busy = true;
2183             chain->images[i].flow_id = id;
2184             return (chain->suboptimal ? VK_SUBOPTIMAL_KHR : VK_SUCCESS);
2185          }
2186       }
2187 
2188       /* Try to dispatch potential events. */
2189       int ret = loader_wayland_dispatch(wsi_wl_surface->display->wl_display,
2190                                         wsi_wl_surface->display->queue,
2191                                         &end_time);
2192       if (ret == -1)
2193          return VK_ERROR_OUT_OF_DATE_KHR;
2194 
2195       /* Check for timeout. */
2196       if (ret == 0)
2197          return (info->timeout ? VK_TIMEOUT : VK_NOT_READY);
2198    }
2199 }
2200 
2201 static void
2202 presentation_handle_sync_output(void *data,
2203                                 struct wp_presentation_feedback *feedback,
2204                                 struct wl_output *output)
2205 {
2206 }
2207 
2208 static void
2209 wsi_wl_presentation_update_present_id(struct wsi_wl_present_id *id)
2210 {
2211    mtx_lock(&id->chain->present_ids.lock);
2212    if (id->present_id > id->chain->present_ids.max_completed)
2213       id->chain->present_ids.max_completed = id->present_id;
2214 
2215    id->chain->present_ids.display_time_correction -= id->correction;
2216    wl_list_remove(&id->link);
2217    mtx_unlock(&id->chain->present_ids.lock);
2218    vk_free(id->alloc, id);
2219 }
2220 
2221 static void
2222 trace_present(const struct wsi_wl_present_id *id,
2223               uint64_t presentation_time)
2224 {
2225    struct wsi_wl_swapchain *chain = id->chain;
2226    struct wsi_wl_surface *surface = chain->wsi_wl_surface;
2227    char *buffer_name;
2228 
2229    MESA_TRACE_SET_COUNTER(surface->analytics.latency_str,
2230                           (presentation_time - id->submission_time) / 1000000.0);
2231 
2232    /* Close the previous image display interval first, if there is one. */
2233    if (surface->analytics.presenting && util_perfetto_is_tracing_enabled()) {
2234       buffer_name = stringify_wayland_id(surface->analytics.presenting);
2235       MESA_TRACE_TIMESTAMP_END(buffer_name ? buffer_name : "Wayland buffer",
2236                                surface->analytics.presentation_track_id,
2237                                chain->wsi_wl_surface->display->presentation_clock_id, presentation_time);
2238       free(buffer_name);
2239    }
2240 
2241    surface->analytics.presenting = id->buffer_id;
2242 
2243    if (util_perfetto_is_tracing_enabled()) {
2244       buffer_name = stringify_wayland_id(id->buffer_id);
2245       MESA_TRACE_TIMESTAMP_BEGIN(buffer_name ? buffer_name : "Wayland buffer",
2246                                  surface->analytics.presentation_track_id,
2247                                  id->flow_id,
2248                                  chain->wsi_wl_surface->display->presentation_clock_id, presentation_time);
2249       free(buffer_name);
2250    }
2251 }
2252 
2253 static void
2254 presentation_handle_presented(void *data,
2255                               struct wp_presentation_feedback *feedback,
2256                               uint32_t tv_sec_hi, uint32_t tv_sec_lo,
2257                               uint32_t tv_nsec, uint32_t refresh,
2258                               uint32_t seq_hi, uint32_t seq_lo,
2259                               uint32_t flags)
2260 {
2261    struct wsi_wl_present_id *id = data;
2262    struct timespec presentation_ts;
2263    uint64_t presentation_time;
2264 
2265    MESA_TRACE_FUNC_FLOW(&id->flow_id);
2266 
2267    struct wsi_wl_swapchain *chain = id->chain;
2268    uint64_t target_time = id->target_time;
2269 
2270 
2271    presentation_ts.tv_sec = ((uint64_t)tv_sec_hi << 32) + tv_sec_lo;
2272    presentation_ts.tv_nsec = tv_nsec;
2273    presentation_time = timespec_to_nsec(&presentation_ts);
2274    trace_present(id, presentation_time);
2275 
2276    mtx_lock(&chain->present_ids.lock);
2277    chain->present_ids.refresh_nsec = refresh;
2278    if (!chain->present_ids.valid_refresh_nsec) {
2279       chain->present_ids.valid_refresh_nsec = true;
2280       chain->present_ids.last_target_time = presentation_time;
2281       target_time = presentation_time;
2282    }
2283 
2284    if (presentation_time > chain->present_ids.displayed_time)
2285       chain->present_ids.displayed_time = presentation_time;
2286 
2287    if (target_time && presentation_time > target_time)
2288       chain->present_ids.display_time_error = presentation_time - target_time;
2289    else
2290       chain->present_ids.display_time_error = 0;
2291    mtx_unlock(&chain->present_ids.lock);
2292 
2293    wsi_wl_presentation_update_present_id(id);
2294    wp_presentation_feedback_destroy(feedback);
2295 }
2296 
2297 static void
2298 presentation_handle_discarded(void *data,
2299                               struct wp_presentation_feedback *feedback)
2300 {
2301    struct wsi_wl_present_id *id = data;
2302 
2303    MESA_TRACE_FUNC_FLOW(&id->flow_id);
2304    struct wsi_wl_swapchain *chain = id->chain;
2305 
2306    mtx_lock(&chain->present_ids.lock);
2307    if (!chain->present_ids.valid_refresh_nsec) {
2308       /* We've started occluded, so make up some safe values to throttle us */
2309       chain->present_ids.displayed_time = os_time_get_nano();
2310       chain->present_ids.last_target_time = chain->present_ids.displayed_time;
2311       chain->present_ids.refresh_nsec = 16666666;
2312       chain->present_ids.valid_refresh_nsec = true;
2313    }
2314    mtx_unlock(&chain->present_ids.lock);
2315 
2316    wsi_wl_presentation_update_present_id(id);
2317    wp_presentation_feedback_destroy(feedback);
2318 }
2319 
2320 static const struct wp_presentation_feedback_listener
2321       pres_feedback_listener = {
2322    presentation_handle_sync_output,
2323    presentation_handle_presented,
2324    presentation_handle_discarded,
2325 };
2326 
2327 static void
2328 presentation_frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
2329 {
2330    struct wsi_wl_present_id *id = data;
2331    wsi_wl_presentation_update_present_id(id);
2332    wl_callback_destroy(callback);
2333 }
2334 
2335 static const struct wl_callback_listener pres_frame_listener = {
2336    presentation_frame_handle_done,
2337 };
2338 
2339 static void
2340 frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
2341 {
2342    struct wsi_wl_swapchain *chain = data;
2343 
2344    chain->frame = NULL;
2345    chain->legacy_fifo_ready = true;
2346 
2347    wl_callback_destroy(callback);
2348 }
2349 
2350 static const struct wl_callback_listener frame_listener = {
2351    frame_handle_done,
2352 };
2353 
2354 /* The present_ids lock must be held */
2355 static bool
2356 set_timestamp(struct wsi_wl_swapchain *chain,
2357               uint64_t *timestamp,
2358               uint64_t *correction)
2359 {
2360    uint64_t target;
2361    struct timespec target_ts;
2362    uint64_t refresh;
2363    uint64_t displayed_time;
2364    int32_t error = 0;
2365 
2366    if (!chain->present_ids.valid_refresh_nsec)
2367       return false;
2368 
2369    displayed_time = chain->present_ids.displayed_time;
2370    refresh = chain->present_ids.refresh_nsec;
2371 
2372    /* If refresh is 0, presentation feedback has informed us we have no
2373     * fixed refresh cycle. In that case we can't generate sensible
2374     * timestamps at all, so bail out.
2375     */
2376    if (!refresh)
2377       return false;
2378 
2379    /* We assume we're being fed at the display's refresh rate, but
2380     * if that doesn't happen our timestamps fall into the past.
2381     *
2382     * This would result in an offscreen surface being unthrottled until
2383     * it "catches up" on missed frames. Instead, correct for missed
2384     * frame opportunities by jumping forward if our display time
2385     * didn't match our target time.
2386     *
2387     * Since we might have a few frames in flight, we need to keep a
2388     * running tally of how much correction we're applying and remove
2389     * it as corrected frames are retired.
2390     */
2391    if (chain->present_ids.display_time_error > chain->present_ids.display_time_correction)
2392       error = chain->present_ids.display_time_error -
2393               chain->present_ids.display_time_correction;
2394 
2395    target = chain->present_ids.last_target_time;
2396    if (error > 0)  {
2397       target += (error / refresh) * refresh;
2398       *correction = (error / refresh) * refresh;
2399    } else {
2400       *correction = 0;
2401    }
2402 
2403    chain->present_ids.display_time_correction += *correction;
2404    target = next_phase_locked_time(displayed_time,
2405                                    refresh,
2406                                    target);
2407    /* Take back 500 us as a safety margin, to ensure we don't miss our
2408     * target due to round-off error.
2409     */
2410    timespec_from_nsec(&target_ts, target - 500000);
2411    wp_commit_timer_v1_set_timestamp(chain->commit_timer,
2412                                     (uint64_t)target_ts.tv_sec >> 32, target_ts.tv_sec,
2413                                     target_ts.tv_nsec);
2414 
2415    chain->present_ids.last_target_time = target;
2416    *timestamp = target;
2417    return true;
2418 }
2419 
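/* Editor's worked example of the correction above (illustrative numbers
 * only): with refresh = 16.67 ms, suppose presentation feedback reports that
 * the last frame was displayed 35 ms after its target, so error = 35 ms.
 * Then (error / refresh) * refresh = 2 * 16.67 ms = 33.3 ms, i.e. the next
 * target jumps forward by two whole refresh cycles instead of scheduling
 * into the past; the 33.3 ms of applied correction is removed again as the
 * corrected frames retire in wsi_wl_presentation_update_present_id().
 */
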
2420 static VkResult
2421 wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
2422                                uint32_t image_index,
2423                                uint64_t present_id,
2424                                const VkPresentRegionKHR *damage)
2425 {
2426    struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
2427    bool timestamped = false;
2428    bool queue_dispatched = false;
2429    uint64_t flow_id = chain->images[image_index].flow_id;
2430 
2431    MESA_TRACE_FUNC_FLOW(&flow_id);
2432 
2433    /* In case we're sending presentation feedback requests, make sure the
2434     * queue that their events arrive on is dispatched.
2435     */
2436    struct timespec instant = {0};
2437    if (dispatch_present_id_queue(wsi_chain, &instant) == VK_ERROR_OUT_OF_DATE_KHR)
2438       return VK_ERROR_OUT_OF_DATE_KHR;
2439 
2440    /* While the specification suggests we can keep presenting already acquired
2441     * images on a retired swapchain, there is no requirement to support that.
2442     * From spec 1.3.278:
2443     *
2444     * After oldSwapchain is retired, the application can pass to vkQueuePresentKHR
2445     * any images it had already acquired from oldSwapchain.
2446     * E.g., an application may present an image from the old swapchain
2447     * before an image from the new swapchain is ready to be presented.
2448     * As usual, vkQueuePresentKHR may fail if oldSwapchain has entered a state
2449     * that causes VK_ERROR_OUT_OF_DATE_KHR to be returned. */
2450    if (chain->retired)
2451       return VK_ERROR_OUT_OF_DATE_KHR;
2452 
2453    struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
2454    bool mode_fifo = chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR;
2455 
2456    if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
2457       struct wsi_wl_image *image = &chain->images[image_index];
2458       memcpy(image->shm_ptr, image->base.cpu_map,
2459              image->base.row_pitches[0] * chain->extent.height);
2460    }
2461 
2462    /* For EXT_swapchain_maintenance1: we might have transitioned from FIFO to MAILBOX.
2463     * In this case we need to let the FIFO request complete before presenting MAILBOX. */
2464    while (!chain->legacy_fifo_ready) {
2465       int ret = wl_display_dispatch_queue(wsi_wl_surface->display->wl_display,
2466                                           wsi_wl_surface->display->queue);
2467       if (ret < 0)
2468          return VK_ERROR_OUT_OF_DATE_KHR;
2469 
2470       queue_dispatched = true;
2471    }
2472 
2473    if (chain->base.image_info.explicit_sync) {
2474       struct wsi_wl_image *image = &chain->images[image_index];
2475       /* Incremented by signal in base queue_present. */
2476       uint64_t acquire_point = image->base.explicit_sync[WSI_ES_ACQUIRE].timeline;
2477       uint64_t release_point = image->base.explicit_sync[WSI_ES_RELEASE].timeline;
2478       wp_linux_drm_syncobj_surface_v1_set_acquire_point(wsi_wl_surface->wl_syncobj_surface,
2479                                                         image->wl_syncobj_timeline[WSI_ES_ACQUIRE],
2480                                                         (uint32_t)(acquire_point >> 32),
2481                                                         (uint32_t)(acquire_point & 0xffffffff));
2482       wp_linux_drm_syncobj_surface_v1_set_release_point(wsi_wl_surface->wl_syncobj_surface,
2483                                                         image->wl_syncobj_timeline[WSI_ES_RELEASE],
2484                                                         (uint32_t)(release_point >> 32),
2485                                                         (uint32_t)(release_point & 0xffffffff));
2486    }
2487 
2488    assert(image_index < chain->base.image_count);
2489    wl_surface_attach(wsi_wl_surface->surface, chain->images[image_index].buffer, 0, 0);
2490 
2491    if (wl_surface_get_version(wsi_wl_surface->surface) >= 4 && damage &&
2492        damage->pRectangles && damage->rectangleCount > 0) {
2493       for (unsigned i = 0; i < damage->rectangleCount; i++) {
2494          const VkRectLayerKHR *rect = &damage->pRectangles[i];
2495          assert(rect->layer == 0);
2496          wl_surface_damage_buffer(wsi_wl_surface->surface,
2497                                   rect->offset.x, rect->offset.y,
2498                                   rect->extent.width, rect->extent.height);
2499       }
2500    } else {
2501       wl_surface_damage(wsi_wl_surface->surface, 0, 0, INT32_MAX, INT32_MAX);
2502    }
2503 
2504    if (present_id > 0 || (mode_fifo && chain->commit_timer) ||
2505        util_perfetto_is_tracing_enabled()) {
2506       struct wsi_wl_present_id *id =
2507          vk_zalloc(chain->wsi_wl_surface->display->wsi_wl->alloc, sizeof(*id), sizeof(uintptr_t),
2508                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
           /* Bail on allocation failure rather than dereferencing NULL below. */
           if (!id)
              return VK_ERROR_OUT_OF_HOST_MEMORY;
2509       id->chain = chain;
2510       id->present_id = present_id;
2511       id->alloc = chain->wsi_wl_surface->display->wsi_wl->alloc;
2512       id->flow_id = flow_id;
2513       id->buffer_id =
2514          wl_proxy_get_id((struct wl_proxy *)chain->images[image_index].buffer);
2515 
2516       id->submission_time = os_time_get_nano();
2517 
2518       mtx_lock(&chain->present_ids.lock);
2519 
2520       if (mode_fifo && chain->fifo && chain->commit_timer)
2521          timestamped = set_timestamp(chain, &id->target_time, &id->correction);
2522 
2523       if (chain->present_ids.wp_presentation) {
2524          id->feedback = wp_presentation_feedback(chain->present_ids.wp_presentation,
2525                                                  chain->wsi_wl_surface->surface);
2526          wp_presentation_feedback_add_listener(id->feedback,
2527                                                &pres_feedback_listener,
2528                                                id);
2529       } else {
2530          id->frame = wl_surface_frame(chain->present_ids.surface);
2531          wl_callback_add_listener(id->frame, &pres_frame_listener, id);
2532       }
2533 
2534       chain->present_ids.prev_max_present_id = chain->present_ids.max_present_id;
2535       if (present_id > chain->present_ids.max_present_id)
2536          chain->present_ids.max_present_id = present_id;
2537 
      if (timestamped || !present_id) {
         /* In this case there is at least one commit that will replace the previous present in finite time. */
         chain->present_ids.max_forward_progress_present_id = chain->present_ids.max_present_id;
      } else if (chain->present_ids.prev_max_present_id > chain->present_ids.max_forward_progress_present_id) {
         /* The previous commit will complete in finite time now.
          * We need to keep track of this since it is possible for the application to signal e.g. 2, 4, 6, 8, but wait for 7.
          * A naive presentID - 1 is not correct. */
         chain->present_ids.max_forward_progress_present_id = chain->present_ids.prev_max_present_id;
      }
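      /* Worked example of the bookkeeping above (a sketch): the app signals
       * present IDs 2, 4, 6, 8 but waits for 7. When the commit for ID 8 is
       * queued, it is the commit for ID 6 that is now guaranteed to finish
       * in finite time, so max_forward_progress_present_id becomes 6 (the
       * previous max), not 8 - 1 = 7; a wait for 7 must keep pumping events
       * until ID 8 itself completes. */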

      wl_list_insert(&chain->present_ids.outstanding_list, &id->link);
      mtx_unlock(&chain->present_ids.lock);
   }

   chain->images[image_index].busy = true;

   if (mode_fifo && !chain->fifo) {
      /* Without the FIFO protocol we must fall back to the legacy
       * frame-callback mechanism for throttling. */
      chain->frame = wl_surface_frame(wsi_wl_surface->surface);
      wl_callback_add_listener(chain->frame, &frame_listener, chain);
      chain->legacy_fifo_ready = false;
   } else {
      /* If we present MAILBOX, any subsequent FIFO presentation can replace this image. */
      chain->legacy_fifo_ready = true;
   }

   if (mode_fifo && chain->fifo) {
      wp_fifo_v1_set_barrier(chain->fifo);
      wp_fifo_v1_wait_barrier(chain->fifo);

      /* If our surface is occluded and we're using vkWaitForPresentKHR,
       * we can end up waiting forever. The FIFO condition and the time
       * constraint are met, but the image hasn't been presented because
       * we're occluded - but the image isn't discarded because there
       * are no further content updates for the compositor to process.
       *
       * This extra commit gives us the second content update to move
       * things along. If we're occluded, the FIFO constraint is
       * satisfied immediately after the time constraint is, pushing
       * out a discard. If we're visible, the timed content update
       * receives presented feedback and the FIFO one blocks further
       * updates until the next refresh.
       */

      /* If the compositor supports FIFO, but not commit-timing, skip this.
       * In this scenario, we have to consider a best-effort implementation instead.
       *
       * We have to assume that presentation events come through eventually.
       * The FIFO protocol allows clearing the FIFO barrier earlier for forward-progress guarantee purposes,
       * and there is nothing stopping a compositor from signalling presentation complete for an occluded surface.
       * There are potential hazards with this approach,
       * but none of them are worse than the code paths before FIFO was introduced:
       * - Calling vkWaitForPresentKHR on the last presented ID on a surface that starts occluded may hang until it is no longer occluded.
       *   A compositor that exposes FIFO but not commit-timing would likely not exhibit indefinite blocking behavior,
       *   i.e. it is unlikely to have special considerations to hold back frame callbacks for occluded surfaces.
       * - Occluded surfaces may run un-throttled. This is objectively better than blocking indefinitely (frame callback),
       *   which breaks forward-progress guarantees, but it is worse for power consumption.
       *   We add a pragmatic workaround for this scenario, similar to frame-callback based present wait.
       *   A compositor that exposes FIFO but not commit-timing would likely do throttling on its own,
       *   either to the refresh rate or to some fixed value. */
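      /* Putting it together, the request stream per present looks roughly
       * like this (a sketch; attach/damage happen before this block):
       *
       *    FIFO without commit-timing:
       *       wp_fifo_v1_set_barrier(chain->fifo);
       *       wp_fifo_v1_wait_barrier(chain->fifo);
       *       wl_surface_commit(surface);      // content update + barrier
       *
       *    FIFO with commit-timing (timestamped):
       *       wp_fifo_v1_set_barrier(chain->fifo);
       *       wp_fifo_v1_wait_barrier(chain->fifo);
       *       wl_surface_commit(surface);      // timed content update
       *       wp_fifo_v1_wait_barrier(chain->fifo);
       *       wl_surface_commit(surface);      // empty update, see below
       */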

      if (timestamped) {
         wl_surface_commit(wsi_wl_surface->surface);
         /* Once we're in a steady state, we'd only need one of these
          * barrier waits. However, the first time we use a timestamp
          * we need both of our content updates to wait. The first
          * needs to wait to avoid potentially provoking a feedback
          * discarded event for the previous untimed content update,
          * the second to prevent provoking a discard event for the
          * timed update we've just made.
          *
          * Before the transition, we would only have a single content
          * update per call, which would contain a barrier wait. After
          * that, we would only need a barrier wait in the empty content
          * update.
          *
          * Instead of statefully tracking the transition across calls to
          * this function, just put a barrier wait in every content update.
          */
         wp_fifo_v1_wait_barrier(chain->fifo);
      }

      /* If the next frame transitions into MAILBOX mode, make sure it observes the wait barrier.
       * When using timestamps, we already emit a dummy commit with the wait barrier anyway. */
      chain->next_present_force_wait_barrier = !timestamped;
   } else if (chain->fifo && chain->next_present_force_wait_barrier) {
      /* If we're using EXT_swapchain_maintenance1 to transition from FIFO to something non-FIFO,
       * the previous frame's FIFO must persist for a refresh cycle, i.e. it cannot be replaced by a MAILBOX presentation.
       * From the 1.4.303 spec:
       * "Transition from VK_PRESENT_MODE_FIFO_KHR or VK_PRESENT_MODE_FIFO_RELAXED_KHR or VK_PRESENT_MODE_FIFO_LATEST_READY_EXT to
       * VK_PRESENT_MODE_IMMEDIATE_KHR or VK_PRESENT_MODE_MAILBOX_KHR:
       * If the FIFO queue is empty, presentation is done according to the behavior of the new mode.
       * If there are present operations in the FIFO queue,
       * once the last present operation is performed based on the respective vertical blanking period,
       * the current and subsequent updates are applied according to the new mode"
       * Ensure we have used a wait barrier if the previous commit did not do that already. */
      wp_fifo_v1_wait_barrier(chain->fifo);
      chain->next_present_force_wait_barrier = false;
   }
   wl_surface_commit(wsi_wl_surface->surface);
   wl_display_flush(wsi_wl_surface->display->wl_display);

   if (!queue_dispatched && wsi_chain->image_info.explicit_sync) {
      wl_display_dispatch_queue_pending(wsi_wl_surface->display->wl_display,
                                        wsi_wl_surface->display->queue);
   }

   return VK_SUCCESS;
}

static void
buffer_handle_release(void *data, struct wl_buffer *buffer)
{
   struct wsi_wl_image *image = data;

   assert(image->buffer == buffer);

   image->busy = false;
}

static const struct wl_buffer_listener buffer_listener = {
   buffer_handle_release,
};
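/* Usage note: this listener is only registered on the implicit-sync path
 * (see wsi_wl_image_init below). Once the compositor is done reading the
 * buffer it sends wl_buffer.release, and dispatching that event clears
 * image->busy so acquire can hand the image back to the application. */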

static uint8_t *
wsi_wl_alloc_image_shm(struct wsi_image *imagew, unsigned size)
{
   struct wsi_wl_image *image = (struct wsi_wl_image *)imagew;

   /* Create a shareable buffer */
   int fd = os_create_anonymous_file(size, NULL);
   if (fd < 0)
      return NULL;

   void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
   if (ptr == MAP_FAILED) {
      close(fd);
      return NULL;
   }

   image->shm_fd = fd;
   image->shm_ptr = ptr;
   image->shm_size = size;

   return ptr;
}
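/* A sketch of what os_create_anonymous_file() amounts to on Linux
 * (assumption: the util helper's fallback paths for systems without
 * memfd_create are ignored; "wsi-shm" is an arbitrary debug name):
 *
 *    int fd = memfd_create("wsi-shm", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *    if (fd >= 0 && ftruncate(fd, size) < 0) {
 *       close(fd);
 *       fd = -1;
 *    }
 *
 * The resulting fd backs the mapping above and is later sent to the
 * compositor through wl_shm. */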

static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain *chain,
                  struct wsi_wl_image *image,
                  const VkSwapchainCreateInfoKHR *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator)
{
   struct wsi_wl_display *display = chain->wsi_wl_surface->display;
   VkResult result;

   result = wsi_create_image(&chain->base, &chain->base.image_info,
                             &image->base);
   if (result != VK_SUCCESS)
      return result;

   switch (chain->buffer_type) {
   case WSI_WL_BUFFER_GPU_SHM:
   case WSI_WL_BUFFER_SHM_MEMCPY: {
      if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
         wsi_wl_alloc_image_shm(&image->base, image->base.row_pitches[0] *
                                              chain->extent.height);
      }
      assert(image->shm_ptr != NULL);

      /* Share it in a wl_buffer */
      struct wl_shm_pool *pool = wl_shm_create_pool(display->wl_shm,
                                                    image->shm_fd,
                                                    image->shm_size);
      wl_proxy_set_queue((struct wl_proxy *)pool, display->queue);
      image->buffer = wl_shm_pool_create_buffer(pool, 0, chain->extent.width,
                                                chain->extent.height,
                                                image->base.row_pitches[0],
                                                chain->shm_format);
      wl_shm_pool_destroy(pool);
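      /* Destroying the pool right away is safe: per the wl_shm protocol,
       * the pool's backing storage stays alive until every wl_buffer
       * created from it is destroyed, so only the pool handle is released
       * here. */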
      break;
   }

   case WSI_WL_BUFFER_NATIVE: {
      assert(display->wl_dmabuf);

      struct zwp_linux_buffer_params_v1 *params =
         zwp_linux_dmabuf_v1_create_params(display->wl_dmabuf);
      if (!params)
         goto fail_image;

      for (int i = 0; i < image->base.num_planes; i++) {
         zwp_linux_buffer_params_v1_add(params,
                                        image->base.dma_buf_fd,
                                        i,
                                        image->base.offsets[i],
                                        image->base.row_pitches[i],
                                        image->base.drm_modifier >> 32,
                                        image->base.drm_modifier & 0xffffffff);
      }

      image->buffer =
         zwp_linux_buffer_params_v1_create_immed(params,
                                                 chain->extent.width,
                                                 chain->extent.height,
                                                 chain->drm_format,
                                                 0);
      zwp_linux_buffer_params_v1_destroy(params);

      if (chain->base.image_info.explicit_sync) {
         for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
            image->wl_syncobj_timeline[i] =
               wp_linux_drm_syncobj_manager_v1_import_timeline(display->wl_syncobj,
                                                               image->base.explicit_sync[i].fd);
            if (!image->wl_syncobj_timeline[i])
               goto fail_image;
         }
      }

      break;
   }

   default:
      unreachable("Invalid buffer type");
   }

   if (!image->buffer)
      goto fail_image;

   /* No need to listen for release if we are explicit sync. */
   if (!chain->base.image_info.explicit_sync)
      wl_buffer_add_listener(image->buffer, &buffer_listener, image);

   return VK_SUCCESS;

fail_image:
   for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
      if (image->wl_syncobj_timeline[i])
         wp_linux_drm_syncobj_timeline_v1_destroy(image->wl_syncobj_timeline[i]);
   }
   wsi_destroy_image(&chain->base, &image->base);

   return VK_ERROR_OUT_OF_HOST_MEMORY;
}

static void
wsi_wl_swapchain_images_free(struct wsi_wl_swapchain *chain)
{
   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      for (uint32_t j = 0; j < WSI_ES_COUNT; j++) {
         if (chain->images[i].wl_syncobj_timeline[j])
            wp_linux_drm_syncobj_timeline_v1_destroy(chain->images[i].wl_syncobj_timeline[j]);
      }
      if (chain->images[i].buffer) {
         wl_buffer_destroy(chain->images[i].buffer);
         wsi_destroy_image(&chain->base, &chain->images[i].base);
         if (chain->images[i].shm_size) {
            close(chain->images[i].shm_fd);
            munmap(chain->images[i].shm_ptr, chain->images[i].shm_size);
         }
      }
   }
}

static void
wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
                            const VkAllocationCallbacks *pAllocator)
{
   /* Force wayland-client to release the fds sent during swapchain
    * creation (see MAX_FDS_OUT) to avoid filling up VRAM with
    * released buffers.
    */
   struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
   if (!chain->retired)
      wl_display_flush(wsi_wl_surface->display->wl_display);

   if (chain->frame)
      wl_callback_destroy(chain->frame);
   if (chain->tearing_control)
      wp_tearing_control_v1_destroy(chain->tearing_control);

   /* Only unregister if we are the non-retired swapchain, or
    * we are a retired swapchain and memory allocation failed,
    * in which case there are only retired swapchains. */
   if (wsi_wl_surface->chain == chain)
      wsi_wl_surface->chain = NULL;

   assert(!chain->present_ids.dispatch_in_progress);

   /* In VK_EXT_swapchain_maintenance1 there is no requirement to wait for all present IDs to be complete.
    * Waiting for the swapchain fence is enough.
    * Just clean up anything the user did not wait for. */
   struct wsi_wl_present_id *id, *tmp;
   wl_list_for_each_safe(id, tmp, &chain->present_ids.outstanding_list, link) {
      if (id->feedback)
         wp_presentation_feedback_destroy(id->feedback);
      if (id->frame)
         wl_callback_destroy(id->frame);
      wl_list_remove(&id->link);
      vk_free(id->alloc, id);
   }

   if (chain->present_ids.wp_presentation)
      wl_proxy_wrapper_destroy(chain->present_ids.wp_presentation);
   if (chain->present_ids.surface)
      wl_proxy_wrapper_destroy(chain->present_ids.surface);
   u_cnd_monotonic_destroy(&chain->present_ids.list_advanced);
   mtx_destroy(&chain->present_ids.lock);

   if (chain->present_ids.queue)
      wl_event_queue_destroy(chain->present_ids.queue);

   vk_free(pAllocator, (void *)chain->drm_modifiers);

   if (chain->fifo)
      wp_fifo_v1_destroy(chain->fifo);

   if (chain->commit_timer)
      wp_commit_timer_v1_destroy(chain->commit_timer);

   wsi_swapchain_finish(&chain->base);
}

static VkResult
wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
                         const VkAllocationCallbacks *pAllocator)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   wsi_wl_swapchain_images_free(chain);
   wsi_wl_swapchain_chain_free(chain, pAllocator);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                                VkDevice device,
                                struct wsi_device *wsi_device,
                                const VkSwapchainCreateInfoKHR *pCreateInfo,
                                const VkAllocationCallbacks *pAllocator,
                                struct wsi_swapchain **swapchain_out)
{
   struct wsi_wl_surface *wsi_wl_surface =
      wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
   struct wsi_wl_swapchain *chain;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   /* From spec 1.3.278:
    * Upon calling vkCreateSwapchainKHR with an oldSwapchain that is not VK_NULL_HANDLE,
    * oldSwapchain is retired - even if creation of the new swapchain fails. */
   if (pCreateInfo->oldSwapchain) {
      VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
      /* oldSwapchain is externally synchronized, so it is not possible to call AcquireNextImage or QueuePresent
       * concurrently with this function. The next call to acquire or present will immediately
       * return OUT_OF_DATE. */
      old_chain->retired = true;
   }

   /* We need to allocate the chain handle early, since display initialization code relies on it.
    * We do not know the actual image count until we have initialized the display handle,
    * so allocate conservatively in case we need to bump the image count. */
   size_t size = sizeof(*chain) + MAX2(WSI_WL_BUMPED_NUM_IMAGES, pCreateInfo->minImageCount) * sizeof(chain->images[0]);
   chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   wl_list_init(&chain->present_ids.outstanding_list);

   /* We are taking ownership of the wsi_wl_surface, so remove ownership from
    * oldSwapchain. If the surface is currently owned by a swapchain that is
    * not oldSwapchain we return an error.
    */
   if (wsi_wl_surface->chain &&
       wsi_swapchain_to_handle(&wsi_wl_surface->chain->base) != pCreateInfo->oldSwapchain) {
      result = VK_ERROR_NATIVE_WINDOW_IN_USE_KHR;
      goto fail;
   }
   if (pCreateInfo->oldSwapchain) {
      VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
      if (old_chain->tearing_control) {
         wp_tearing_control_v1_destroy(old_chain->tearing_control);
         old_chain->tearing_control = NULL;
      }
      if (old_chain->fifo) {
         wp_fifo_v1_destroy(old_chain->fifo);
         old_chain->fifo = NULL;
      }
      if (old_chain->commit_timer) {
         wp_commit_timer_v1_destroy(old_chain->commit_timer);
         old_chain->commit_timer = NULL;
      }
   }

   /* Take ownership of the wsi_wl_surface */
   chain->wsi_wl_surface = wsi_wl_surface;
   wsi_wl_surface->chain = chain;

   result = wsi_wl_surface_init(wsi_wl_surface, wsi_device, pAllocator);
   if (result != VK_SUCCESS)
      goto fail;

   uint32_t num_images = pCreateInfo->minImageCount;

   /* If the app provides a present mode list from EXT_swapchain_maintenance1,
    * we don't know which present mode will be used.
    * The application is assumed to be well-behaved and spec-compliant.
    * It needs to query the per-present-mode minImageCounts individually and use the max() of those modes,
    * so there should never be any need to bump image counts. */
   bool uses_present_mode_group = vk_find_struct_const(
         pCreateInfo->pNext, SWAPCHAIN_PRESENT_MODES_CREATE_INFO_EXT) != NULL;

   /* If the FIFO manager is not used, minImageCount is already the bumped value for reasons outlined in
    * wsi_wl_surface_get_min_image_count(), so skip any attempt to bump the counts. */
   if (wsi_wl_surface->display->fifo_manager && !uses_present_mode_group) {
      /* With proper FIFO, we return a lower minImageCount to make FIFO viable without requiring the use of KHR_present_wait.
       * The image count for MAILBOX should be bumped for performance reasons in this case.
       * This matches the strategy for X11. */
      const VkSurfacePresentModeEXT mode =
            { VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT, NULL, pCreateInfo->presentMode };

      uint32_t min_images = wsi_wl_surface_get_min_image_count(wsi_wl_surface->display, &mode);
      bool requires_image_count_bump = min_images == WSI_WL_BUMPED_NUM_IMAGES;
      if (requires_image_count_bump)
         num_images = MAX2(min_images, num_images);
   }
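   /* Worked example (a sketch): if wsi_wl_surface_get_min_image_count()
    * quotes WSI_WL_BUMPED_NUM_IMAGES for the requested mode (e.g. MAILBOX),
    * an app asking for minImageCount = 2 ends up with
    * num_images = MAX2(WSI_WL_BUMPED_NUM_IMAGES, 2), while a mode that is
    * quoted a lower count (e.g. FIFO) keeps the app's request unchanged. */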

   VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
   if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
      chain->tearing_control =
         wp_tearing_control_manager_v1_get_tearing_control(wsi_wl_surface->display->tearing_control_manager,
                                                           wsi_wl_surface->surface);
      if (!chain->tearing_control) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }
      wp_tearing_control_v1_set_presentation_hint(chain->tearing_control,
                                                  WP_TEARING_CONTROL_V1_PRESENTATION_HINT_ASYNC);
   }

   enum wsi_wl_buffer_type buffer_type;
   struct wsi_base_image_params *image_params = NULL;
   struct wsi_cpu_image_params cpu_image_params;
   struct wsi_drm_image_params drm_image_params;
   uint32_t num_drm_modifiers = 0;
   const uint64_t *drm_modifiers = NULL;
   if (wsi_device->sw) {
      cpu_image_params = (struct wsi_cpu_image_params) {
         .base.image_type = WSI_IMAGE_TYPE_CPU,
      };
      if (wsi_device->has_import_memory_host &&
          !(WSI_DEBUG & WSI_DEBUG_NOSHM)) {
         buffer_type = WSI_WL_BUFFER_GPU_SHM;
         cpu_image_params.alloc_shm = wsi_wl_alloc_image_shm;
      } else {
         buffer_type = WSI_WL_BUFFER_SHM_MEMCPY;
      }
      image_params = &cpu_image_params.base;
   } else {
      drm_image_params = (struct wsi_drm_image_params) {
         .base.image_type = WSI_IMAGE_TYPE_DRM,
         .same_gpu = wsi_wl_surface->display->same_gpu,
         .explicit_sync = wsi_wl_use_explicit_sync(wsi_wl_surface->display, wsi_device),
      };
      /* Use explicit DRM format modifiers when both the server and the driver
       * support them.
       */
      if (wsi_wl_surface->display->wl_dmabuf && wsi_device->supports_modifiers) {
         struct wsi_wl_format *f = NULL;
         /* Try to select modifiers for our vk_format from surface dma-buf
          * feedback. If that doesn't work, fall back to the list of
          * formats/modifiers supported by the display. */
         if (wsi_wl_surface->wl_dmabuf_feedback)
            f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface,
                                                         pCreateInfo->imageFormat);
         if (f == NULL)
            f = find_format(&chain->wsi_wl_surface->display->formats,
                            pCreateInfo->imageFormat);
         if (f != NULL) {
            num_drm_modifiers = u_vector_length(&f->modifiers);
            drm_modifiers = u_vector_tail(&f->modifiers);
            drm_image_params.num_modifier_lists = num_drm_modifiers > 0 ? 1 : 0;
            drm_image_params.num_modifiers = &num_drm_modifiers;
            drm_image_params.modifiers = &drm_modifiers;
         }
      }
      buffer_type = WSI_WL_BUFFER_NATIVE;
      image_params = &drm_image_params.base;
   }

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, image_params, pAllocator);
   if (result != VK_SUCCESS)
      goto fail;

   bool alpha = pCreateInfo->compositeAlpha ==
                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   chain->base.destroy = wsi_wl_swapchain_destroy;
   chain->base.get_wsi_image = wsi_wl_swapchain_get_wsi_image;
   chain->base.acquire_next_image = chain->base.image_info.explicit_sync
                                  ? wsi_wl_swapchain_acquire_next_image_explicit
                                  : wsi_wl_swapchain_acquire_next_image_implicit;
   chain->base.queue_present = wsi_wl_swapchain_queue_present;
   chain->base.release_images = wsi_wl_swapchain_release_images;
   chain->base.set_present_mode = wsi_wl_swapchain_set_present_mode;
   chain->base.wait_for_present = wsi_wl_swapchain_wait_for_present;
   chain->base.present_mode = present_mode;
   chain->base.image_count = num_images;
   chain->extent = pCreateInfo->imageExtent;
   chain->vk_format = pCreateInfo->imageFormat;
   chain->buffer_type = buffer_type;
   if (buffer_type == WSI_WL_BUFFER_NATIVE) {
      chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);
   } else {
      chain->shm_format = wl_shm_format_for_vk_format(chain->vk_format, alpha);
   }
   chain->num_drm_modifiers = num_drm_modifiers;
   if (num_drm_modifiers) {
      uint64_t *drm_modifiers_copy =
         vk_alloc(pAllocator, sizeof(*drm_modifiers) * num_drm_modifiers, 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!drm_modifiers_copy) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_free_wl_chain;
      }

      typed_memcpy(drm_modifiers_copy, drm_modifiers, num_drm_modifiers);
      chain->drm_modifiers = drm_modifiers_copy;
   }

   if (u_cnd_monotonic_init(&chain->present_ids.list_advanced) != thrd_success) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_free_wl_chain;
   }
   mtx_init(&chain->present_ids.lock, mtx_plain);

   char *queue_name = vk_asprintf(pAllocator,
                                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
                                  "mesa vk surface %d swapchain %d queue",
                                  wl_proxy_get_id((struct wl_proxy *) wsi_wl_surface->surface),
                                  wsi_wl_surface->chain_count++);
   chain->present_ids.queue =
      wl_display_create_queue_with_name(chain->wsi_wl_surface->display->wl_display,
                                        queue_name);
   vk_free(pAllocator, queue_name);

   if (chain->wsi_wl_surface->display->wp_presentation_notwrapped) {
      chain->present_ids.wp_presentation =
            wl_proxy_create_wrapper(chain->wsi_wl_surface->display->wp_presentation_notwrapped);
      wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.wp_presentation,
                         chain->present_ids.queue);
   } else {
      /* Fall back to frame callbacks when the presentation protocol is not available.
       * We already have a proxy for the surface, but we need another one, since
       * presentID is pumped through a different queue so that it does not disrupt
       * the queue used for QueuePresentKHR frame callbacks. */
      chain->present_ids.surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
      wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.surface,
                         chain->present_ids.queue);
   }
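   /* Proxy wrappers are the standard wayland-client pattern for routing an
    * object's events to a private queue without racing the queue the
    * original proxy uses, e.g. (a sketch):
    *
    *    struct wl_surface *wrapped = wl_proxy_create_wrapper(surface);
    *    wl_proxy_set_queue((struct wl_proxy *)wrapped, private_queue);
    *    struct wl_callback *cb = wl_surface_frame(wrapped);
    *    // cb's events now arrive on private_queue
    */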

   chain->legacy_fifo_ready = true;
   struct wsi_wl_display *dpy = chain->wsi_wl_surface->display;
   if (dpy->fifo_manager) {
      chain->fifo = wp_fifo_manager_v1_get_fifo(dpy->fifo_manager,
                                                chain->wsi_wl_surface->surface);
   }
   if (dpy->commit_timing_manager && chain->present_ids.wp_presentation) {
      chain->commit_timer = wp_commit_timing_manager_v1_get_timer(dpy->commit_timing_manager,
                                                                  chain->wsi_wl_surface->surface);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      result = wsi_wl_image_init(chain, &chain->images[i],
                                 pCreateInfo, pAllocator);
      if (result != VK_SUCCESS)
         goto fail_free_wl_images;
      chain->images[i].busy = false;
   }

   chain->present_ids.valid_refresh_nsec = false;
   chain->present_ids.refresh_nsec = 0;

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_free_wl_images:
   wsi_wl_swapchain_images_free(chain);
fail_free_wl_chain:
   wsi_wl_swapchain_chain_free(chain, pAllocator);
fail:
   vk_free(pAllocator, chain);
   wsi_wl_surface->chain = NULL;

   assert(result != VK_SUCCESS);
   return result;
}

VkResult
wsi_wl_init_wsi(struct wsi_device *wsi_device,
                const VkAllocationCallbacks *alloc,
                VkPhysicalDevice physical_device)
{
   struct wsi_wayland *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wsi->physical_device = physical_device;
   wsi->alloc = alloc;
   wsi->wsi = wsi_device;

   wsi->base.get_support = wsi_wl_surface_get_support;
   wsi->base.get_capabilities2 = wsi_wl_surface_get_capabilities2;
   wsi->base.get_formats = wsi_wl_surface_get_formats;
   wsi->base.get_formats2 = wsi_wl_surface_get_formats2;
   wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
   wsi->base.get_present_rectangles = wsi_wl_surface_get_present_rectangles;
   wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;

   return VK_SUCCESS;

fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;

   return result;
}

void
wsi_wl_finish_wsi(struct wsi_device *wsi_device,
                  const VkAllocationCallbacks *alloc)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
   if (!wsi)
      return;

   vk_free(alloc, wsi);
}