• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright © 2011-2012 Intel Corporation
3  * Copyright © 2012 Collabora, Ltd.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19  * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
20  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23  * DEALINGS IN THE SOFTWARE.
24  *
25  * Authors:
26  *    Kristian Høgsberg <krh@bitplanet.net>
27  *    Benjamin Franzke <benjaminfranzke@googlemail.com>
28  */
29 
30 #include <dlfcn.h>
31 #include <errno.h>
32 #include <fcntl.h>
33 #include <limits.h>
34 #include <stdint.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38 #include <xf86drm.h>
39 #include "drm-uapi/drm_fourcc.h"
40 #include <sys/mman.h>
41 #include <vulkan/vulkan_core.h>
42 #include <vulkan/vulkan_wayland.h>
43 
44 #include "util/anon_file.h"
45 #include "util/u_vector.h"
46 #include "util/format/u_formats.h"
47 #include "main/glconfig.h"
48 #include "pipe/p_screen.h"
49 #include "egl_dri2.h"
50 #include "eglglobals.h"
51 #include "kopper_interface.h"
52 #include "loader.h"
53 #include "loader_dri_helper.h"
54 #include "dri_screen.h"
55 #include "dri_util.h"
56 #include <loader_wayland_helper.h>
57 
58 #include "linux-dmabuf-unstable-v1-client-protocol.h"
59 #include "wayland-drm-client-protocol.h"
60 #include <wayland-client.h>
61 #include <wayland-egl-backend.h>
62 
63 /*
64  * The index of entries in this table is used as a bitmask in
65  * dri2_dpy->formats.formats_bitmap, which tracks the formats supported
66  * by our server.
67  */
68 static const struct dri2_wl_visual {
69    uint32_t wl_drm_format;
70    int pipe_format;
71    /* alt_pipe_format is a substitute wl_buffer format to use for a
72     * wl-server unsupported pipe_format, ie. some other pipe_format in
73     * the table, of the same precision but with different channel ordering, or
74     * PIPE_FORMAT_NONE if an alternate format is not needed or supported.
75     * The code checks if alt_pipe_format can be used as a fallback for a
76     * pipe_format for a given wl-server implementation.
77     */
78    int alt_pipe_format;
79    int opaque_wl_drm_format;
80 } dri2_wl_visuals[] = {
81    {
82       WL_DRM_FORMAT_ABGR16F,
83       PIPE_FORMAT_R16G16B16A16_FLOAT,
84       PIPE_FORMAT_NONE,
85       WL_DRM_FORMAT_XBGR16F,
86    },
87    {
88       WL_DRM_FORMAT_XBGR16F,
89       PIPE_FORMAT_R16G16B16X16_FLOAT,
90       PIPE_FORMAT_NONE,
91       WL_DRM_FORMAT_XBGR16F,
92    },
93    {
94       WL_DRM_FORMAT_XRGB2101010,
95       PIPE_FORMAT_B10G10R10X2_UNORM,
96       PIPE_FORMAT_R10G10B10X2_UNORM,
97       WL_DRM_FORMAT_XRGB2101010,
98    },
99    {
100       WL_DRM_FORMAT_ARGB2101010,
101       PIPE_FORMAT_B10G10R10A2_UNORM,
102       PIPE_FORMAT_R10G10B10A2_UNORM,
103       WL_DRM_FORMAT_XRGB2101010,
104    },
105    {
106       WL_DRM_FORMAT_XBGR2101010,
107       PIPE_FORMAT_R10G10B10X2_UNORM,
108       PIPE_FORMAT_B10G10R10X2_UNORM,
109       WL_DRM_FORMAT_XBGR2101010,
110    },
111    {
112       WL_DRM_FORMAT_ABGR2101010,
113       PIPE_FORMAT_R10G10B10A2_UNORM,
114       PIPE_FORMAT_B10G10R10A2_UNORM,
115       WL_DRM_FORMAT_XBGR2101010,
116    },
117    {
118       WL_DRM_FORMAT_XRGB8888,
119       PIPE_FORMAT_BGRX8888_UNORM,
120       PIPE_FORMAT_NONE,
121       WL_DRM_FORMAT_XRGB8888,
122    },
123    {
124       WL_DRM_FORMAT_ARGB8888,
125       PIPE_FORMAT_BGRA8888_UNORM,
126       PIPE_FORMAT_NONE,
127       WL_DRM_FORMAT_XRGB8888,
128    },
129    {
130       WL_DRM_FORMAT_ABGR8888,
131       PIPE_FORMAT_RGBA8888_UNORM,
132       PIPE_FORMAT_NONE,
133       WL_DRM_FORMAT_XBGR8888,
134    },
135    {
136       WL_DRM_FORMAT_XBGR8888,
137       PIPE_FORMAT_RGBX8888_UNORM,
138       PIPE_FORMAT_NONE,
139       WL_DRM_FORMAT_XBGR8888,
140    },
141    {
142       WL_DRM_FORMAT_RGB565,
143       PIPE_FORMAT_B5G6R5_UNORM,
144       PIPE_FORMAT_NONE,
145       WL_DRM_FORMAT_RGB565,
146    },
147    {
148       WL_DRM_FORMAT_ARGB1555,
149       PIPE_FORMAT_B5G5R5A1_UNORM,
150       PIPE_FORMAT_R5G5B5A1_UNORM,
151       WL_DRM_FORMAT_XRGB1555,
152    },
153    {
154       WL_DRM_FORMAT_XRGB1555,
155       PIPE_FORMAT_B5G5R5X1_UNORM,
156       PIPE_FORMAT_R5G5B5X1_UNORM,
157       WL_DRM_FORMAT_XRGB1555,
158    },
159    {
160       WL_DRM_FORMAT_ARGB4444,
161       PIPE_FORMAT_B4G4R4A4_UNORM,
162       PIPE_FORMAT_R4G4B4A4_UNORM,
163       WL_DRM_FORMAT_XRGB4444,
164    },
165    {
166       WL_DRM_FORMAT_XRGB4444,
167       PIPE_FORMAT_B4G4R4X4_UNORM,
168       PIPE_FORMAT_R4G4B4X4_UNORM,
169       WL_DRM_FORMAT_XRGB4444,
170    },
171 };
172 
173 static int
dri2_wl_visual_idx_from_pipe_format(enum pipe_format pipe_format)174 dri2_wl_visual_idx_from_pipe_format(enum pipe_format pipe_format)
175 {
176    if (util_format_is_srgb(pipe_format))
177       pipe_format = util_format_linear(pipe_format);
178 
179    for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
180       if (dri2_wl_visuals[i].pipe_format == pipe_format)
181          return i;
182    }
183 
184    return -1;
185 }
186 
187 static int
dri2_wl_visual_idx_from_config(const struct dri_config * config)188 dri2_wl_visual_idx_from_config(const struct dri_config *config)
189 {
190    struct gl_config *gl_config = (struct gl_config *) config;
191 
192    return dri2_wl_visual_idx_from_pipe_format(gl_config->color_format);
193 }
194 
195 static int
dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)196 dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)
197 {
198    for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
199       /* wl_drm format codes overlap with DRIImage FourCC codes for all formats
200        * we support. */
201       if (dri2_wl_visuals[i].wl_drm_format == fourcc)
202          return i;
203    }
204 
205    return -1;
206 }
207 
208 static int
dri2_wl_shm_format_from_visual_idx(int idx)209 dri2_wl_shm_format_from_visual_idx(int idx)
210 {
211    uint32_t fourcc = dri2_wl_visuals[idx].wl_drm_format;
212 
213    if (fourcc == WL_DRM_FORMAT_ARGB8888)
214       return WL_SHM_FORMAT_ARGB8888;
215    else if (fourcc == WL_DRM_FORMAT_XRGB8888)
216       return WL_SHM_FORMAT_XRGB8888;
217    else
218       return fourcc;
219 }
220 
221 static int
dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)222 dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)
223 {
224    uint32_t fourcc;
225 
226    if (shm_format == WL_SHM_FORMAT_ARGB8888)
227       fourcc = WL_DRM_FORMAT_ARGB8888;
228    else if (shm_format == WL_SHM_FORMAT_XRGB8888)
229       fourcc = WL_DRM_FORMAT_XRGB8888;
230    else
231       fourcc = shm_format;
232 
233    return dri2_wl_visual_idx_from_fourcc(fourcc);
234 }
235 
236 bool
dri2_wl_is_format_supported(void * user_data,uint32_t format)237 dri2_wl_is_format_supported(void *user_data, uint32_t format)
238 {
239    _EGLDisplay *disp = (_EGLDisplay *)user_data;
240    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
241    int j = dri2_wl_visual_idx_from_fourcc(format);
242 
243    if (j == -1)
244       return false;
245 
246    for (int i = 0; dri2_dpy->driver_configs[i]; i++)
247       if (j == dri2_wl_visual_idx_from_config(dri2_dpy->driver_configs[i]))
248          return true;
249 
250    return false;
251 }
252 
253 static bool
server_supports_format(struct dri2_wl_formats * formats,int idx)254 server_supports_format(struct dri2_wl_formats *formats, int idx)
255 {
256    return idx >= 0 && BITSET_TEST(formats->formats_bitmap, idx);
257 }
258 
259 static bool
server_supports_pipe_format(struct dri2_wl_formats * formats,enum pipe_format format)260 server_supports_pipe_format(struct dri2_wl_formats *formats,
261                             enum pipe_format format)
262 {
263    return server_supports_format(formats,
264                                  dri2_wl_visual_idx_from_pipe_format(format));
265 }
266 
/* Check server support for a format given as a DRM fourcc. */
static bool
server_supports_fourcc(struct dri2_wl_formats *formats, uint32_t fourcc)
{
   const int idx = dri2_wl_visual_idx_from_fourcc(fourcc);

   return server_supports_format(formats, idx);
}
272 
/* Flush our requests and block until the compositor has processed them,
 * dispatching any resulting events on the display's private queue.
 * Returns the number of dispatched events, or -1 on error (per
 * wl_display_roundtrip_queue). */
static int
roundtrip(struct dri2_egl_display *dri2_dpy)
{
   return wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_dpy->wl_queue);
}
278 
279 static void
wl_buffer_release(void * data,struct wl_buffer * buffer)280 wl_buffer_release(void *data, struct wl_buffer *buffer)
281 {
282    struct dri2_egl_surface *dri2_surf = data;
283    int i;
284 
285    for (i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); ++i)
286       if (dri2_surf->color_buffers[i].wl_buffer == buffer)
287          break;
288 
289    assert(i < ARRAY_SIZE(dri2_surf->color_buffers));
290 
291    if (dri2_surf->color_buffers[i].wl_release) {
292       wl_buffer_destroy(buffer);
293       dri2_surf->color_buffers[i].wl_release = false;
294       dri2_surf->color_buffers[i].wl_buffer = NULL;
295       dri2_surf->color_buffers[i].age = 0;
296    }
297 
298    dri2_surf->color_buffers[i].locked = false;
299 }
300 
/* Listener attached to every wl_buffer we hand to the compositor. */
static const struct wl_buffer_listener wl_buffer_listener = {
   .release = wl_buffer_release,
};
304 
305 static void
dri2_wl_formats_fini(struct dri2_wl_formats * formats)306 dri2_wl_formats_fini(struct dri2_wl_formats *formats)
307 {
308    unsigned int i;
309 
310    for (i = 0; i < formats->num_formats; i++)
311       u_vector_finish(&formats->modifiers[i]);
312 
313    free(formats->modifiers);
314    free(formats->formats_bitmap);
315 }
316 
317 static int
dri2_wl_formats_init(struct dri2_wl_formats * formats)318 dri2_wl_formats_init(struct dri2_wl_formats *formats)
319 {
320    unsigned int i, j;
321 
322    /* formats->formats_bitmap tells us if a format in dri2_wl_visuals is present
323     * or not. So we must compute the amount of unsigned int's needed to
324     * represent all the formats of dri2_wl_visuals. We use BITSET_WORDS for
325     * this task. */
326    formats->num_formats = ARRAY_SIZE(dri2_wl_visuals);
327    formats->formats_bitmap = calloc(BITSET_WORDS(formats->num_formats),
328                                     sizeof(*formats->formats_bitmap));
329    if (!formats->formats_bitmap)
330       goto err;
331 
332    /* Here we have an array of u_vector's to store the modifiers supported by
333     * each format in the bitmask. */
334    formats->modifiers =
335       calloc(formats->num_formats, sizeof(*formats->modifiers));
336    if (!formats->modifiers)
337       goto err_modifier;
338 
339    for (i = 0; i < formats->num_formats; i++)
340       if (!u_vector_init_pow2(&formats->modifiers[i], 4, sizeof(uint64_t))) {
341          j = i;
342          goto err_vector_init;
343       }
344 
345    return 0;
346 
347 err_vector_init:
348    for (i = 0; i < j; i++)
349       u_vector_finish(&formats->modifiers[i]);
350    free(formats->modifiers);
351 err_modifier:
352    free(formats->formats_bitmap);
353 err:
354    _eglError(EGL_BAD_ALLOC, "dri2_wl_formats_init");
355    return -1;
356 }
357 
358 static void
dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table * format_table)359 dmabuf_feedback_format_table_fini(
360    struct dmabuf_feedback_format_table *format_table)
361 {
362    if (format_table->data && format_table->data != MAP_FAILED)
363       munmap(format_table->data, format_table->size);
364 }
365 
/* Reset a format table to the "no table received" state (data == NULL,
 * size == 0). */
static void
dmabuf_feedback_format_table_init(
   struct dmabuf_feedback_format_table *format_table)
{
   memset(format_table, 0, sizeof(*format_table));
}
372 
/* Release the format/modifier storage held by a feedback tranche. */
static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
{
   dri2_wl_formats_fini(&tranche->formats);
}
378 
/* Zero a tranche and allocate its formats storage.
 * Returns 0 on success, -1 on allocation failure (EGL error already set
 * by dri2_wl_formats_init). */
static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
{
   memset(tranche, 0, sizeof(*tranche));

   if (dri2_wl_formats_init(&tranche->formats) < 0)
      return -1;

   return 0;
}
389 
/* Tear down a dmabuf_feedback: the tranche still being assembled, every
 * completed tranche, the tranche array itself, and the mapped format table. */
static void
dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
{
   dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);

   util_dynarray_foreach (&dmabuf_feedback->tranches,
                          struct dmabuf_feedback_tranche, tranche)
      dmabuf_feedback_tranche_fini(tranche);
   util_dynarray_fini(&dmabuf_feedback->tranches);

   dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
}
402 
/* Initialize an empty dmabuf_feedback: a zeroed pending tranche with
 * allocated formats storage, an empty tranche array, and an unset format
 * table.  Returns 0 on success, -1 on allocation failure. */
static int
dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
{
   memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));

   if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
      return -1;

   util_dynarray_init(&dmabuf_feedback->tranches, NULL);

   dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);

   return 0;
}
417 
/* wl_egl_window resize hook: mark the surface resized and invalidate the
 * DRI drawable so the next draw picks up new buffers. */
static void
resize_callback(struct wl_egl_window *wl_win, void *data)
{
   struct dri2_egl_surface *dri2_surf = data;

   /* Nothing to do if the size did not actually change. */
   if (dri2_surf->base.Width == wl_win->width &&
       dri2_surf->base.Height == wl_win->height)
      return;

   dri2_surf->resized = true;

   /* Update the surface size as soon as native window is resized; from user
    * pov, this makes the effect that resize is done immediately after native
    * window resize, without requiring to wait until the first draw.
    *
    * A more detailed and lengthy explanation can be found at
    * https://lists.freedesktop.org/archives/mesa-dev/2018-June/196474.html
    */
   if (!dri2_surf->back) {
      dri2_surf->base.Width = wl_win->width;
      dri2_surf->base.Height = wl_win->height;
   }
   dri_invalidate_drawable(dri2_surf->dri_drawable);
}
442 
/* Called when the native wl_egl_window is destroyed before the EGLSurface:
 * drop our reference so we never touch the dead window again. */
static void
destroy_window_callback(void *data)
{
   struct dri2_egl_surface *dri2_surf = data;
   dri2_surf->wl_win = NULL;
}
449 
/* Retrieve the wl_surface backing a wl_egl_window, coping with the v2 -> v3
 * layout change of the struct. */
static struct wl_surface *
get_wl_surface(struct wl_egl_window *window)
{
   /* Version 3 of wl_egl_window introduced a version field at the same
    * location where a pointer to wl_surface was stored. Thus, if
    * window->version is dereferenceable, we've been given an older version of
    * wl_egl_window, and window->version points to wl_surface */
   if (_eglPointerIsDereferenceable((void *)(window->version))) {
      return (void *)(window->version);
   }
   return window->surface;
}
462 
463 static void
surface_dmabuf_feedback_format_table(void * data,struct zwp_linux_dmabuf_feedback_v1 * zwp_linux_dmabuf_feedback_v1,int32_t fd,uint32_t size)464 surface_dmabuf_feedback_format_table(
465    void *data,
466    struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
467    int32_t fd, uint32_t size)
468 {
469    struct dri2_egl_surface *dri2_surf = data;
470    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
471 
472    feedback->format_table.size = size;
473    feedback->format_table.data =
474       mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
475 
476    close(fd);
477 }
478 
/* zwp_linux_dmabuf_feedback_v1.main_device handler: record the compositor's
 * preferred device and note whether it changed from the one we had.
 * NOTE(review): `device` presumably carries a dev_t per the dma-buf feedback
 * protocol — confirm against the struct's main_device type. */
static void
surface_dmabuf_feedback_main_device(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *device)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));

   /* Compositors may support switching render devices and change the main
    * device of the dma-buf feedback. In this case, when we reallocate the
    * buffers of the surface we must ensure that it is not allocated in memory
    * that is only visible to the GPU that EGL is using, as the compositor will
    * have to import them to the render device it is using.
    *
    * TODO: we still don't know how to allocate such buffers.
    */
   if (dri2_surf->dmabuf_feedback.main_device != 0 &&
       (feedback->main_device != dri2_surf->dmabuf_feedback.main_device))
      dri2_surf->compositor_using_another_device = true;
   else
      dri2_surf->compositor_using_another_device = false;
}
503 
504 static void
surface_dmabuf_feedback_tranche_target_device(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * device)505 surface_dmabuf_feedback_tranche_target_device(
506    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
507    struct wl_array *device)
508 {
509    struct dri2_egl_surface *dri2_surf = data;
510    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
511 
512    memcpy(&feedback->pending_tranche.target_device, device->data,
513           sizeof(feedback->pending_tranche.target_device));
514 }
515 
516 static void
surface_dmabuf_feedback_tranche_flags(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,uint32_t flags)517 surface_dmabuf_feedback_tranche_flags(
518    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
519    uint32_t flags)
520 {
521    struct dri2_egl_surface *dri2_surf = data;
522    struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
523 
524    feedback->pending_tranche.flags = flags;
525 }
526 
/* zwp_linux_dmabuf_feedback_v1.tranche_formats handler: for every
 * (format, modifier) pair of this tranche that matches the surface's
 * present format, mark the format in the pending tranche's bitmap and
 * collect the modifier.  `indices` holds 16-bit indices into the mmap'ed
 * format table. */
static void
surface_dmabuf_feedback_tranche_formats(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *indices)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
   uint32_t present_format = dri2_surf->format;
   uint64_t *modifier_ptr, modifier;
   uint32_t format;
   uint16_t *index;
   int visual_idx;

   /* EGL_EXT_present_opaque surfaces are presented with the opaque sibling
    * of the surface format, so match tranche entries against that instead. */
   if (dri2_surf->base.PresentOpaque) {
      visual_idx = dri2_wl_visual_idx_from_fourcc(present_format);
      if (visual_idx != -1)
         present_format = dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
   }

   /* Compositor may advertise or not a format table. If it does, we use it.
    * Otherwise, we steal the most recent advertised format table. If we don't
    * have a most recent advertised format table, compositor did something
    * wrong. */
   if (feedback->format_table.data == NULL) {
      feedback->format_table = dri2_surf->dmabuf_feedback.format_table;
      dmabuf_feedback_format_table_init(
         &dri2_surf->dmabuf_feedback.format_table);
   }
   if (feedback->format_table.data == MAP_FAILED) {
      _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
                            "so we won't be able to use this batch of dma-buf "
                            "feedback events.");
      return;
   }
   /* Still NULL after the steal: the compositor never sent a table at all. */
   if (feedback->format_table.data == NULL) {
      _eglLog(_EGL_WARNING,
              "wayland-egl: compositor didn't advertise a format "
              "table, so we won't be able to use this batch of dma-buf "
              "feedback events.");
      return;
   }

   wl_array_for_each (index, indices) {
      format = feedback->format_table.data[*index].format;
      modifier = feedback->format_table.data[*index].modifier;

      /* Skip formats that are not the one the surface is already using. We
       * can't switch to another format. */
      if (format != present_format)
         continue;

      /* We are sure that the format is supported because of the check above. */
      visual_idx = dri2_wl_visual_idx_from_fourcc(format);
      assert(visual_idx != -1);

      BITSET_SET(feedback->pending_tranche.formats.formats_bitmap, visual_idx);
      modifier_ptr =
         u_vector_add(&feedback->pending_tranche.formats.modifiers[visual_idx]);
      if (modifier_ptr)
         *modifier_ptr = modifier;
   }
}
589 
/* zwp_linux_dmabuf_feedback_v1.tranche_done handler: the pending tranche is
 * complete, so archive it and start collecting a fresh one.
 * NOTE(review): the dmabuf_feedback_tranche_init() return value is ignored;
 * on allocation failure subsequent tranche events would operate on zeroed
 * formats storage — confirm whether that path is tolerated. */
static void
surface_dmabuf_feedback_tranche_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   /* Add tranche to array of tranches. */
   util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
                        feedback->pending_tranche);

   dmabuf_feedback_tranche_init(&feedback->pending_tranche);
}
603 
/* zwp_linux_dmabuf_feedback_v1.done handler: the whole batch of feedback
 * events has arrived, so promote the pending feedback to current and reset
 * the pending one for the next batch. */
static void
surface_dmabuf_feedback_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct dri2_egl_surface *dri2_surf = data;

   /* The dma-buf feedback protocol states that surface dma-buf feedback should
    * be sent by the compositor only if its buffers are using a suboptimal pair
    * of format and modifier. We can't change the buffer format, but we can
    * reallocate with another modifier. So we raise this flag in order to force
    * buffer reallocation based on the dma-buf feedback sent. */
   dri2_surf->received_dmabuf_feedback = true;

   dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
   dri2_surf->dmabuf_feedback = dri2_surf->pending_dmabuf_feedback;
   dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback);
}
621 
/* Per-surface dma-buf feedback listener; events are delivered on the
 * surface's private event queue. */
static const struct zwp_linux_dmabuf_feedback_v1_listener
   surface_dmabuf_feedback_listener = {
      .format_table = surface_dmabuf_feedback_format_table,
      .main_device = surface_dmabuf_feedback_main_device,
      .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
      .tranche_flags = surface_dmabuf_feedback_tranche_flags,
      .tranche_formats = surface_dmabuf_feedback_tranche_formats,
      .tranche_done = surface_dmabuf_feedback_tranche_done,
      .done = surface_dmabuf_feedback_done,
};
632 
633 static bool
dri2_wl_modifiers_have_common(struct u_vector * modifiers1,struct u_vector * modifiers2)634 dri2_wl_modifiers_have_common(struct u_vector *modifiers1,
635                               struct u_vector *modifiers2)
636 {
637    uint64_t *mod1, *mod2;
638 
639    /* If both modifier vectors are empty, assume there is a compatible
640     * implicit modifier. */
641    if (u_vector_length(modifiers1) == 0 && u_vector_length(modifiers2) == 0)
642        return true;
643 
644    u_vector_foreach(mod1, modifiers1)
645    {
646       u_vector_foreach(mod2, modifiers2)
647       {
648          if (*mod1 == *mod2)
649             return true;
650       }
651    }
652 
653    return false;
654 }
655 
/**
 * Called via eglCreateWindowSurface(), drv->CreateWindowSurface().
 *
 * Builds the per-surface state: a private event queue, wrapper proxies for
 * wl_drm / wl_display / wl_surface rerouted onto that queue, optional
 * per-surface dma-buf feedback, and finally the DRI drawable.  On any
 * failure, the goto chain unwinds exactly what has been created so far.
 */
static _EGLSurface *
dri2_wl_create_window_surface(_EGLDisplay *disp, _EGLConfig *conf,
                              void *native_window, const EGLint *attrib_list)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
   struct dri2_egl_config *dri2_conf = dri2_egl_config(conf);
   struct wl_egl_window *window = native_window;
   struct dri2_egl_surface *dri2_surf;
   struct zwp_linux_dmabuf_v1 *dmabuf_wrapper;
   int visual_idx;
   const struct dri_config *config;

   if (!window) {
      _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_create_surface");
      return NULL;
   }

   /* A wl_egl_window can back at most one EGLSurface at a time. */
   if (window->driver_private) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      return NULL;
   }

   dri2_surf = calloc(1, sizeof *dri2_surf);
   if (!dri2_surf) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      return NULL;
   }

   if (!dri2_init_surface(&dri2_surf->base, disp, EGL_WINDOW_BIT, conf,
                          attrib_list, false, native_window))
      goto cleanup_surf;

   config = dri2_get_dri_config(dri2_conf, EGL_WINDOW_BIT,
                                dri2_surf->base.GLColorspace);

   if (!config) {
      _eglError(EGL_BAD_MATCH,
                "Unsupported surfacetype/colorspace configuration");
      goto cleanup_surf;
   }

   dri2_surf->base.Width = window->width;
   dri2_surf->base.Height = window->height;

   visual_idx = dri2_wl_visual_idx_from_config(config);
   assert(visual_idx != -1);
   assert(dri2_wl_visuals[visual_idx].pipe_format != PIPE_FORMAT_NONE);

   /* dri2_surf->format is a wl_drm fourcc on the dmabuf/drm paths and a
    * wl_shm code on the software (wl_shm) path. */
   if (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm) {
      dri2_surf->format = dri2_wl_visuals[visual_idx].wl_drm_format;
   } else {
      assert(dri2_dpy->wl_shm);
      dri2_surf->format = dri2_wl_shm_format_from_visual_idx(visual_idx);
   }

   /* EGL_EXT_present_opaque: the opaque sibling format must be supported by
    * the server with at least one modifier in common with ours. */
   if (dri2_surf->base.PresentOpaque) {
      uint32_t opaque_fourcc =
         dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
      int opaque_visual_idx = dri2_wl_visual_idx_from_fourcc(opaque_fourcc);

      if (!server_supports_format(&dri2_dpy->formats, opaque_visual_idx) ||
          !dri2_wl_modifiers_have_common(
               &dri2_dpy->formats.modifiers[visual_idx],
               &dri2_dpy->formats.modifiers[opaque_visual_idx])) {
         _eglError(EGL_BAD_MATCH, "Unsupported opaque format");
         goto cleanup_surf;
      }
   }

   /* Private queue so surface events can be dispatched without racing other
    * threads that dispatch the display's queue. */
   dri2_surf->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
                                                           "mesa egl surface queue");
   if (!dri2_surf->wl_queue) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_surf;
   }

   if (dri2_dpy->wl_drm) {
      dri2_surf->wl_drm_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_drm);
      if (!dri2_surf->wl_drm_wrapper) {
         _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
         goto cleanup_queue;
      }
      wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_drm_wrapper,
                         dri2_surf->wl_queue);
   }

   dri2_surf->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
   if (!dri2_surf->wl_dpy_wrapper) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_drm;
   }
   wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_dpy_wrapper,
                      dri2_surf->wl_queue);

   dri2_surf->wl_surface_wrapper =
      wl_proxy_create_wrapper(get_wl_surface(window));
   if (!dri2_surf->wl_surface_wrapper) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_dpy_wrapper;
   }
   wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_surface_wrapper,
                      dri2_surf->wl_queue);

   /* Subscribe to per-surface dma-buf feedback when the compositor is new
    * enough; the short-lived dmabuf wrapper only reroutes the request so the
    * feedback object is bound to our queue. */
   if (dri2_dpy->wl_dmabuf &&
       zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
          ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
      dmabuf_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dmabuf);
      if (!dmabuf_wrapper) {
         _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
         goto cleanup_surf_wrapper;
      }
      wl_proxy_set_queue((struct wl_proxy *)dmabuf_wrapper,
                         dri2_surf->wl_queue);
      dri2_surf->wl_dmabuf_feedback = zwp_linux_dmabuf_v1_get_surface_feedback(
         dmabuf_wrapper, dri2_surf->wl_surface_wrapper);
      wl_proxy_wrapper_destroy(dmabuf_wrapper);

      zwp_linux_dmabuf_feedback_v1_add_listener(
         dri2_surf->wl_dmabuf_feedback, &surface_dmabuf_feedback_listener,
         dri2_surf);

      if (dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback) < 0) {
         zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
         goto cleanup_surf_wrapper;
      }
      if (dmabuf_feedback_init(&dri2_surf->dmabuf_feedback) < 0) {
         dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
         zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
         goto cleanup_surf_wrapper;
      }

      /* Pull in the initial batch of feedback events before first use. */
      if (roundtrip(dri2_dpy) < 0)
         goto cleanup_dmabuf_feedback;
   }

   dri2_surf->wl_win = window;
   dri2_surf->wl_win->driver_private = dri2_surf;
   dri2_surf->wl_win->destroy_window_callback = destroy_window_callback;
   if (!dri2_dpy->swrast_not_kms)
      dri2_surf->wl_win->resize_callback = resize_callback;

   if (!dri2_create_drawable(dri2_dpy, config, dri2_surf, dri2_surf))
      goto cleanup_dmabuf_feedback;

   dri2_surf->base.SwapInterval = dri2_dpy->default_swap_interval;

   return &dri2_surf->base;

cleanup_dmabuf_feedback:
   if (dri2_surf->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
   }
cleanup_surf_wrapper:
   wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
cleanup_dpy_wrapper:
   wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
cleanup_drm:
   if (dri2_surf->wl_drm_wrapper)
      wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
cleanup_queue:
   wl_event_queue_destroy(dri2_surf->wl_queue);
cleanup_surf:
   free(dri2_surf);

   return NULL;
}
827 
/* Pixmap surfaces are not a Wayland concept; this entry point always fails. */
static _EGLSurface *
dri2_wl_create_pixmap_surface(_EGLDisplay *disp, _EGLConfig *conf,
                              void *native_window, const EGLint *attrib_list)
{
   /* From the EGL_EXT_platform_wayland spec, version 3:
    *
    *   It is not valid to call eglCreatePlatformPixmapSurfaceEXT with a <dpy>
    *   that belongs to Wayland. Any such call fails and generates
    *   EGL_BAD_PARAMETER.
    */
   _eglError(EGL_BAD_PARAMETER, "cannot create EGL pixmap surfaces on "
                                "Wayland");
   return NULL;
}
842 
843 /**
844  * Called via eglDestroySurface(), drv->DestroySurface().
845  */
846 static EGLBoolean
dri2_wl_destroy_surface(_EGLDisplay * disp,_EGLSurface * surf)847 dri2_wl_destroy_surface(_EGLDisplay *disp, _EGLSurface *surf)
848 {
849    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);
850 
851    driDestroyDrawable(dri2_surf->dri_drawable);
852 
853    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
854       if (dri2_surf->color_buffers[i].wl_buffer)
855          wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
856       if (dri2_surf->color_buffers[i].dri_image)
857          dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
858       if (dri2_surf->color_buffers[i].linear_copy)
859          dri2_destroy_image(dri2_surf->color_buffers[i].linear_copy);
860       if (dri2_surf->color_buffers[i].data)
861          munmap(dri2_surf->color_buffers[i].data,
862                 dri2_surf->color_buffers[i].data_size);
863    }
864 
865    if (dri2_surf->throttle_callback)
866       wl_callback_destroy(dri2_surf->throttle_callback);
867 
868    if (dri2_surf->wl_win) {
869       dri2_surf->wl_win->driver_private = NULL;
870       dri2_surf->wl_win->resize_callback = NULL;
871       dri2_surf->wl_win->destroy_window_callback = NULL;
872    }
873 
874    wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
875    wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
876    if (dri2_surf->wl_drm_wrapper)
877       wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
878    if (dri2_surf->wl_dmabuf_feedback) {
879       zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
880       dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
881       dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
882    }
883    wl_event_queue_destroy(dri2_surf->wl_queue);
884 
885    dri2_fini_surface(surf);
886    free(surf);
887 
888    return EGL_TRUE;
889 }
890 
891 static EGLBoolean
dri2_wl_swap_interval(_EGLDisplay * disp,_EGLSurface * surf,EGLint interval)892 dri2_wl_swap_interval(_EGLDisplay *disp, _EGLSurface *surf, EGLint interval)
893 {
894    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
895    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);
896 
897    if (dri2_dpy->kopper)
898       kopperSetSwapInterval(dri2_surf->dri_drawable, interval);
899 
900    return EGL_TRUE;
901 }
902 
903 static void
dri2_wl_release_buffers(struct dri2_egl_surface * dri2_surf)904 dri2_wl_release_buffers(struct dri2_egl_surface *dri2_surf)
905 {
906    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
907       if (dri2_surf->color_buffers[i].wl_buffer) {
908          if (dri2_surf->color_buffers[i].locked) {
909             dri2_surf->color_buffers[i].wl_release = true;
910          } else {
911             wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
912             dri2_surf->color_buffers[i].wl_buffer = NULL;
913          }
914       }
915       if (dri2_surf->color_buffers[i].dri_image)
916          dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
917       if (dri2_surf->color_buffers[i].linear_copy)
918          dri2_destroy_image(dri2_surf->color_buffers[i].linear_copy);
919       if (dri2_surf->color_buffers[i].data)
920          munmap(dri2_surf->color_buffers[i].data,
921                 dri2_surf->color_buffers[i].data_size);
922 
923       dri2_surf->color_buffers[i].dri_image = NULL;
924       dri2_surf->color_buffers[i].linear_copy = NULL;
925       dri2_surf->color_buffers[i].data = NULL;
926       dri2_surf->color_buffers[i].age = 0;
927    }
928 }
929 
930 /* Return list of modifiers that should be used to restrict the list of
931  * modifiers actually supported by the surface. As of now, it is only used
932  * to get the set of modifiers used for fixed-rate compression. */
933 static uint64_t *
get_surface_specific_modifiers(struct dri2_egl_surface * dri2_surf,int * modifiers_count)934 get_surface_specific_modifiers(struct dri2_egl_surface *dri2_surf,
935                                int *modifiers_count)
936 {
937    struct dri2_egl_display *dri2_dpy =
938       dri2_egl_display(dri2_surf->base.Resource.Display);
939    int rate = dri2_surf->base.CompressionRate;
940    uint64_t *modifiers;
941 
942    if (rate == EGL_SURFACE_COMPRESSION_FIXED_RATE_NONE_EXT ||
943        !dri2_surf->wl_win)
944       return NULL;
945 
946    if (!dri2_query_compression_modifiers(
947           dri2_dpy->dri_screen_render_gpu, dri2_surf->format, rate,
948           0, NULL, modifiers_count))
949       return NULL;
950 
951    modifiers = malloc(*modifiers_count * sizeof(uint64_t));
952    if (!modifiers)
953       return NULL;
954 
955    if (!dri2_query_compression_modifiers(
956           dri2_dpy->dri_screen_render_gpu, dri2_surf->format, rate,
957           *modifiers_count, modifiers, modifiers_count)) {
958       free(modifiers);
959       return NULL;
960    }
961 
962    return modifiers;
963 }
964 
965 static void
update_surface(struct dri2_egl_surface * dri2_surf,struct dri_image * dri_img)966 update_surface(struct dri2_egl_surface *dri2_surf, struct dri_image *dri_img)
967 {
968    int compression_rate;
969 
970    if (!dri_img)
971       return;
972 
973    /* Update the surface with the actual compression rate */
974    dri2_query_image(dri_img, __DRI_IMAGE_ATTRIB_COMPRESSION_RATE,
975                                &compression_rate);
976    dri2_surf->base.CompressionRate = compression_rate;
977 }
978 
979 static bool
intersect_modifiers(struct u_vector * subset,struct u_vector * set,uint64_t * other_modifiers,int other_modifiers_count)980 intersect_modifiers(struct u_vector *subset, struct u_vector *set,
981                     uint64_t *other_modifiers, int other_modifiers_count)
982 {
983    if (!u_vector_init_pow2(subset, 4, sizeof(uint64_t)))
984       return false;
985 
986    uint64_t *modifier_ptr, *mod;
987    u_vector_foreach(mod, set) {
988       for (int i = 0; i < other_modifiers_count; ++i) {
989          if (other_modifiers[i] != *mod)
990             continue;
991          modifier_ptr = u_vector_add(subset);
992          if (modifier_ptr)
993             *modifier_ptr = *mod;
994       }
995    }
996 
997    return true;
998 }
999 
1000 static void
create_dri_image(struct dri2_egl_surface * dri2_surf,enum pipe_format pipe_format,uint32_t use_flags,uint64_t * surf_modifiers,int surf_modifiers_count,struct dri2_wl_formats * formats)1001 create_dri_image(struct dri2_egl_surface *dri2_surf,
1002                  enum pipe_format pipe_format, uint32_t use_flags,
1003                  uint64_t *surf_modifiers, int surf_modifiers_count,
1004                  struct dri2_wl_formats *formats)
1005 {
1006    struct dri2_egl_display *dri2_dpy =
1007       dri2_egl_display(dri2_surf->base.Resource.Display);
1008    int visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
1009    struct u_vector modifiers_subset;
1010    struct u_vector modifiers_subset_opaque;
1011    uint64_t *modifiers;
1012    unsigned int num_modifiers;
1013    struct u_vector *modifiers_present;
1014    bool implicit_mod_supported;
1015 
1016    assert(visual_idx != -1);
1017 
1018    if (dri2_surf->base.PresentOpaque) {
1019       uint32_t opaque_fourcc =
1020             dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
1021       int opaque_visual_idx = dri2_wl_visual_idx_from_fourcc(opaque_fourcc);
1022       struct u_vector *modifiers_dpy = &dri2_dpy->formats.modifiers[visual_idx];
1023       /* Surface creation would have failed if we didn't support the matching
1024        * opaque format. */
1025       assert(opaque_visual_idx != -1);
1026 
1027       if (!BITSET_TEST(formats->formats_bitmap, opaque_visual_idx))
1028          return;
1029 
1030       if (!intersect_modifiers(&modifiers_subset_opaque,
1031                                &formats->modifiers[opaque_visual_idx],
1032                                u_vector_tail(modifiers_dpy),
1033                                u_vector_length(modifiers_dpy)))
1034          return;
1035 
1036       modifiers_present = &modifiers_subset_opaque;
1037    } else {
1038       if (!BITSET_TEST(formats->formats_bitmap, visual_idx))
1039          return;
1040       modifiers_present = &formats->modifiers[visual_idx];
1041    }
1042 
1043    if (surf_modifiers_count > 0) {
1044       if (!intersect_modifiers(&modifiers_subset, modifiers_present,
1045                                surf_modifiers, surf_modifiers_count))
1046          goto cleanup_present;
1047       modifiers = u_vector_tail(&modifiers_subset);
1048       num_modifiers = u_vector_length(&modifiers_subset);
1049    } else {
1050       modifiers = u_vector_tail(modifiers_present);
1051       num_modifiers = u_vector_length(modifiers_present);
1052    }
1053 
1054    if (!dri2_dpy->dri_screen_render_gpu->base.screen->resource_create_with_modifiers &&
1055        dri2_dpy->wl_dmabuf) {
1056       /* We don't support explicit modifiers, check if the compositor supports
1057        * implicit modifiers. */
1058       implicit_mod_supported = false;
1059       for (unsigned int i = 0; i < num_modifiers; i++) {
1060          if (modifiers[i] == DRM_FORMAT_MOD_INVALID) {
1061             implicit_mod_supported = true;
1062             break;
1063          }
1064       }
1065 
1066       if (!implicit_mod_supported) {
1067          return;
1068       }
1069 
1070       num_modifiers = 0;
1071       modifiers = NULL;
1072    }
1073 
1074    /* For the purposes of this function, an INVALID modifier on
1075     * its own means the modifiers aren't supported. */
1076    if (num_modifiers == 0 ||
1077        (num_modifiers == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID)) {
1078       num_modifiers = 0;
1079       modifiers = NULL;
1080    }
1081 
1082    dri2_surf->back->dri_image = dri_create_image_with_modifiers(
1083       dri2_dpy->dri_screen_render_gpu, dri2_surf->base.Width,
1084       dri2_surf->base.Height, pipe_format,
1085       (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) ? 0 : use_flags,
1086       modifiers, num_modifiers, NULL);
1087 
1088    if (surf_modifiers_count > 0) {
1089       u_vector_finish(&modifiers_subset);
1090       update_surface(dri2_surf, dri2_surf->back->dri_image);
1091    }
1092 
1093 cleanup_present:
1094    if (modifiers_present == &modifiers_subset_opaque)
1095       u_vector_finish(&modifiers_subset_opaque);
1096 }
1097 
1098 static void
create_dri_image_from_dmabuf_feedback(struct dri2_egl_surface * dri2_surf,enum pipe_format pipe_format,uint32_t use_flags,uint64_t * surf_modifiers,int surf_modifiers_count)1099 create_dri_image_from_dmabuf_feedback(struct dri2_egl_surface *dri2_surf,
1100                                       enum pipe_format pipe_format,
1101                                       uint32_t use_flags,
1102                                       uint64_t *surf_modifiers,
1103                                       int surf_modifiers_count)
1104 {
1105    uint32_t flags;
1106 
1107    /* We don't have valid dma-buf feedback, so return */
1108    if (dri2_surf->dmabuf_feedback.main_device == 0)
1109       return;
1110 
1111    /* Iterates through the dma-buf feedback to pick a new set of modifiers. The
1112     * tranches are sent in descending order of preference by the compositor, so
1113     * the first set that we can pick is the best one. For now we still can't
1114     * specify the target device in order to make the render device try its best
1115     * to allocate memory that can be directly scanned out by the KMS device. But
1116     * in the future this may change (newer versions of
1117     * createImageWithModifiers). Also, we are safe to pick modifiers from
1118     * tranches whose target device differs from the main device, as compositors
1119     * do not expose (in dma-buf feedback tranches) formats/modifiers that are
1120     * incompatible with the main device. */
1121    util_dynarray_foreach (&dri2_surf->dmabuf_feedback.tranches,
1122                           struct dmabuf_feedback_tranche, tranche) {
1123       flags = use_flags;
1124       if (tranche->flags & ZWP_LINUX_DMABUF_FEEDBACK_V1_TRANCHE_FLAGS_SCANOUT)
1125          flags |= __DRI_IMAGE_USE_SCANOUT;
1126 
1127       create_dri_image(dri2_surf, pipe_format, flags, surf_modifiers,
1128                        surf_modifiers_count, &tranche->formats);
1129 
1130       if (dri2_surf->back->dri_image)
1131          return;
1132    }
1133 }
1134 
1135 static void
create_dri_image_from_formats(struct dri2_egl_surface * dri2_surf,enum pipe_format pipe_format,uint32_t use_flags,uint64_t * surf_modifiers,int surf_modifiers_count)1136 create_dri_image_from_formats(struct dri2_egl_surface *dri2_surf,
1137                               enum pipe_format pipe_format, uint32_t use_flags,
1138                               uint64_t *surf_modifiers,
1139                               int surf_modifiers_count)
1140 {
1141    struct dri2_egl_display *dri2_dpy =
1142       dri2_egl_display(dri2_surf->base.Resource.Display);
1143    create_dri_image(dri2_surf, pipe_format, use_flags, surf_modifiers,
1144                     surf_modifiers_count, &dri2_dpy->formats);
1145 }
1146 
1147 static int
get_back_bo(struct dri2_egl_surface * dri2_surf)1148 get_back_bo(struct dri2_egl_surface *dri2_surf)
1149 {
1150    struct dri2_egl_display *dri2_dpy =
1151       dri2_egl_display(dri2_surf->base.Resource.Display);
1152    int use_flags;
1153    int visual_idx;
1154    unsigned int pipe_format;
1155    unsigned int linear_pipe_format;
1156 
1157    visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
1158    assert(visual_idx != -1);
1159    pipe_format = dri2_wl_visuals[visual_idx].pipe_format;
1160    linear_pipe_format = pipe_format;
1161 
1162    /* Substitute dri image format if server does not support original format */
1163    if (!BITSET_TEST(dri2_dpy->formats.formats_bitmap, visual_idx))
1164       linear_pipe_format = dri2_wl_visuals[visual_idx].alt_pipe_format;
1165 
1166    /* These asserts hold, as long as dri2_wl_visuals[] is self-consistent and
1167     * the PRIME substitution logic in dri2_wl_add_configs_for_visuals() is free
1168     * of bugs.
1169     */
1170    assert(linear_pipe_format != PIPE_FORMAT_NONE);
1171    assert(BITSET_TEST(
1172       dri2_dpy->formats.formats_bitmap,
1173       dri2_wl_visual_idx_from_pipe_format(linear_pipe_format)));
1174 
1175    /* There might be a buffer release already queued that wasn't processed */
1176    wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);
1177 
1178    while (dri2_surf->back == NULL) {
1179       for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
1180          /* Get an unlocked buffer, preferably one with a dri_buffer
1181           * already allocated and with minimum age.
1182           */
1183          if (dri2_surf->color_buffers[i].locked)
1184             continue;
1185 
1186          if (!dri2_surf->back || !dri2_surf->back->dri_image ||
1187              (dri2_surf->color_buffers[i].age > 0 &&
1188               dri2_surf->color_buffers[i].age < dri2_surf->back->age))
1189             dri2_surf->back = &dri2_surf->color_buffers[i];
1190       }
1191 
1192       if (dri2_surf->back)
1193          break;
1194 
1195       /* If we don't have a buffer, then block on the server to release one for
1196        * us, and try again. wl_display_dispatch_queue will process any pending
1197        * events, however not all servers flush on issuing a buffer release
1198        * event. So, we spam the server with roundtrips as they always cause a
1199        * client flush.
1200        */
1201       if (wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_surf->wl_queue) < 0)
1202          return -1;
1203    }
1204 
1205    if (dri2_surf->back == NULL)
1206       return -1;
1207 
1208    use_flags = __DRI_IMAGE_USE_SHARE | __DRI_IMAGE_USE_BACKBUFFER;
1209 
1210    if (dri2_surf->base.ProtectedContent) {
1211       /* Protected buffers can't be read from another GPU */
1212       if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1213          return -1;
1214       use_flags |= __DRI_IMAGE_USE_PROTECTED;
1215    }
1216 
1217    if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu &&
1218        dri2_surf->back->linear_copy == NULL) {
1219       uint64_t linear_mod = DRM_FORMAT_MOD_LINEAR;
1220       const uint64_t *render_modifiers = NULL, *display_modifiers = NULL;
1221       unsigned int render_num_modifiers = 0, display_num_modifiers = 0;
1222       struct dri_image *linear_copy_display_gpu_image = NULL;
1223 
1224       if (dri2_dpy->dri_screen_render_gpu->base.screen->resource_create_with_modifiers) {
1225          render_modifiers = &linear_mod;
1226          render_num_modifiers = 1;
1227       }
1228 
1229       if (dri2_dpy->dri_screen_display_gpu) {
1230          if (dri2_dpy->dri_screen_display_gpu->base.screen->resource_create_with_modifiers) {
1231             display_modifiers = &linear_mod;
1232             display_num_modifiers = 1;
1233          }
1234 
1235          linear_copy_display_gpu_image = dri_create_image_with_modifiers(
1236             dri2_dpy->dri_screen_display_gpu,
1237             dri2_surf->base.Width, dri2_surf->base.Height,
1238             linear_pipe_format, use_flags | __DRI_IMAGE_USE_LINEAR,
1239             display_modifiers, display_num_modifiers, NULL);
1240 
1241          if (linear_copy_display_gpu_image) {
1242             int i, ret = 1;
1243             int fourcc;
1244             int num_planes = 0;
1245             int buffer_fds[4];
1246             int strides[4];
1247             int offsets[4];
1248             unsigned error;
1249 
1250             if (!dri2_query_image(linear_copy_display_gpu_image,
1251                                              __DRI_IMAGE_ATTRIB_NUM_PLANES,
1252                                              &num_planes))
1253                num_planes = 1;
1254 
1255             for (i = 0; i < num_planes; i++) {
1256                struct dri_image *image = dri2_from_planar(
1257                   linear_copy_display_gpu_image, i, NULL);
1258 
1259                if (!image) {
1260                   assert(i == 0);
1261                   image = linear_copy_display_gpu_image;
1262                }
1263 
1264                buffer_fds[i] = -1;
1265                ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD,
1266                                                   &buffer_fds[i]);
1267                ret &= dri2_query_image(
1268                   image, __DRI_IMAGE_ATTRIB_STRIDE, &strides[i]);
1269                ret &= dri2_query_image(
1270                   image, __DRI_IMAGE_ATTRIB_OFFSET, &offsets[i]);
1271 
1272                if (image != linear_copy_display_gpu_image)
1273                   dri2_destroy_image(image);
1274 
1275                if (!ret) {
1276                   do {
1277                      if (buffer_fds[i] != -1)
1278                         close(buffer_fds[i]);
1279                   } while (--i >= 0);
1280                   dri2_destroy_image(linear_copy_display_gpu_image);
1281                   return -1;
1282                }
1283             }
1284 
1285             ret &= dri2_query_image(linear_copy_display_gpu_image,
1286                                                __DRI_IMAGE_ATTRIB_FOURCC,
1287                                                &fourcc);
1288             if (!ret) {
1289                do {
1290                   if (buffer_fds[i] != -1)
1291                      close(buffer_fds[i]);
1292                } while (--i >= 0);
1293                dri2_destroy_image(linear_copy_display_gpu_image);
1294                return -1;
1295             }
1296 
1297             /* The linear buffer was created in the display GPU's vram, so we
1298              * need to make it visible to render GPU
1299              */
1300             dri2_surf->back->linear_copy =
1301                dri2_from_dma_bufs(
1302                   dri2_dpy->dri_screen_render_gpu,
1303                   dri2_surf->base.Width, dri2_surf->base.Height,
1304                   fourcc, linear_mod,
1305                   &buffer_fds[0], num_planes, &strides[0], &offsets[0],
1306                   __DRI_YUV_COLOR_SPACE_UNDEFINED,
1307                   __DRI_YUV_RANGE_UNDEFINED, __DRI_YUV_CHROMA_SITING_UNDEFINED,
1308                   __DRI_YUV_CHROMA_SITING_UNDEFINED, __DRI_IMAGE_PRIME_LINEAR_BUFFER,
1309                   &error, dri2_surf->back);
1310 
1311             for (i = 0; i < num_planes; ++i) {
1312                if (buffer_fds[i] != -1)
1313                   close(buffer_fds[i]);
1314             }
1315             dri2_destroy_image(linear_copy_display_gpu_image);
1316          }
1317       }
1318 
1319       if (!dri2_surf->back->linear_copy) {
1320          dri2_surf->back->linear_copy = dri_create_image_with_modifiers(
1321             dri2_dpy->dri_screen_render_gpu,
1322             dri2_surf->base.Width, dri2_surf->base.Height,
1323             linear_pipe_format, use_flags | __DRI_IMAGE_USE_LINEAR,
1324             render_modifiers, render_num_modifiers, NULL);
1325       }
1326 
1327       if (dri2_surf->back->linear_copy == NULL)
1328          return -1;
1329    }
1330 
1331    if (dri2_surf->back->dri_image == NULL) {
1332       int modifiers_count = 0;
1333       uint64_t *modifiers =
1334          get_surface_specific_modifiers(dri2_surf, &modifiers_count);
1335 
1336       if (dri2_surf->wl_dmabuf_feedback)
1337          create_dri_image_from_dmabuf_feedback(
1338             dri2_surf, pipe_format, use_flags, modifiers, modifiers_count);
1339       if (dri2_surf->back->dri_image == NULL)
1340          create_dri_image_from_formats(dri2_surf, pipe_format, use_flags,
1341                                        modifiers, modifiers_count);
1342 
1343       free(modifiers);
1344       dri2_surf->back->age = 0;
1345    }
1346 
1347    if (dri2_surf->back->dri_image == NULL)
1348       return -1;
1349 
1350    dri2_surf->back->locked = true;
1351 
1352    return 0;
1353 }
1354 
1355 static void
back_bo_to_dri_buffer(struct dri2_egl_surface * dri2_surf,__DRIbuffer * buffer)1356 back_bo_to_dri_buffer(struct dri2_egl_surface *dri2_surf, __DRIbuffer *buffer)
1357 {
1358    struct dri_image *image;
1359    int name, pitch;
1360 
1361    image = dri2_surf->back->dri_image;
1362 
1363    dri2_query_image(image, __DRI_IMAGE_ATTRIB_NAME, &name);
1364    dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE, &pitch);
1365 
1366    buffer->attachment = __DRI_BUFFER_BACK_LEFT;
1367    buffer->name = name;
1368    buffer->pitch = pitch;
1369    buffer->cpp = 4;
1370    buffer->flags = 0;
1371 }
1372 
/* Value chosen empirically as a compromise between avoiding frequent
 * reallocations and extended time of increased memory consumption due to
 * unused buffers being kept.
 */
#define BUFFER_TRIM_AGE_HYSTERESIS 20

/* Bring the surface's buffer state up to date: propagate a pending native
 * window resize, drop stale buffers after resize / new dma-buf feedback,
 * acquire a back buffer, and trim surplus unlocked buffers.  Returns 0 on
 * success, -1 (with EGL_BAD_ALLOC set) on allocation failure. */
static int
update_buffers(struct dri2_egl_surface *dri2_surf)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);

   /* Adopt the native window's current size and pending dx/dy offsets. */
   if (dri2_surf->wl_win &&
       (dri2_surf->base.Width != dri2_surf->wl_win->width ||
        dri2_surf->base.Height != dri2_surf->wl_win->height)) {

      dri2_surf->base.Width = dri2_surf->wl_win->width;
      dri2_surf->base.Height = dri2_surf->wl_win->height;
      dri2_surf->dx = dri2_surf->wl_win->dx;
      dri2_surf->dy = dri2_surf->wl_win->dy;
   }

   /* A resize or new dma-buf feedback invalidates existing buffers. */
   if (dri2_surf->resized || dri2_surf->received_dmabuf_feedback) {
      dri2_wl_release_buffers(dri2_surf);
      dri2_surf->resized = false;
      dri2_surf->received_dmabuf_feedback = false;
   }

   if (get_back_bo(dri2_surf) < 0) {
      _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
      return -1;
   }

   /* If we have an extra unlocked buffer at this point, we had to do triple
    * buffering for a while, but now can go back to just double buffering.
    * That means we can free any unlocked buffer now. To avoid toggling between
    * going back to double buffering and needing to allocate another buffer too
    * fast we let the unneeded buffer sit around for a short while. */
   for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
      if (!dri2_surf->color_buffers[i].locked &&
          dri2_surf->color_buffers[i].wl_buffer &&
          dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
         wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
         dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
         /* The linear copy only exists in cross-GPU (PRIME) setups. */
         if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
            dri2_destroy_image(
               dri2_surf->color_buffers[i].linear_copy);
         dri2_surf->color_buffers[i].wl_buffer = NULL;
         dri2_surf->color_buffers[i].dri_image = NULL;
         dri2_surf->color_buffers[i].linear_copy = NULL;
         dri2_surf->color_buffers[i].age = 0;
      }
   }

   return 0;
}
1429 
1430 static int
update_buffers_if_needed(struct dri2_egl_surface * dri2_surf)1431 update_buffers_if_needed(struct dri2_egl_surface *dri2_surf)
1432 {
1433    if (dri2_surf->back != NULL)
1434       return 0;
1435 
1436    return update_buffers(dri2_surf);
1437 }
1438 
1439 static int
image_get_buffers(struct dri_drawable * driDrawable,unsigned int format,uint32_t * stamp,void * loaderPrivate,uint32_t buffer_mask,struct __DRIimageList * buffers)1440 image_get_buffers(struct dri_drawable *driDrawable, unsigned int format,
1441                   uint32_t *stamp, void *loaderPrivate, uint32_t buffer_mask,
1442                   struct __DRIimageList *buffers)
1443 {
1444    struct dri2_egl_surface *dri2_surf = loaderPrivate;
1445 
1446    if (update_buffers_if_needed(dri2_surf) < 0)
1447       return 0;
1448 
1449    buffers->image_mask = __DRI_IMAGE_BUFFER_BACK;
1450    buffers->back = dri2_surf->back->dri_image;
1451 
1452    return 1;
1453 }
1454 
/* Wayland surfaces are back-buffer only, so a front-buffer flush is a
 * deliberate no-op; the casts silence unused-parameter warnings. */
static void
dri2_wl_flush_front_buffer(struct dri_drawable *driDrawable, void *loaderPrivate)
{
   (void)loaderPrivate;
   (void)driDrawable;
}
1461 
1462 static unsigned
dri2_wl_get_capability(void * loaderPrivate,enum dri_loader_cap cap)1463 dri2_wl_get_capability(void *loaderPrivate, enum dri_loader_cap cap)
1464 {
1465    switch (cap) {
1466    case DRI_LOADER_CAP_FP16:
1467       return 1;
1468    case DRI_LOADER_CAP_RGBA_ORDERING:
1469       return 1;
1470    default:
1471       return 0;
1472    }
1473 }
1474 
/* DRI image-loader vtable handed to the driver: it pulls back buffers via
 * getBuffers; front-buffer flushes are no-ops on Wayland. */
static const __DRIimageLoaderExtension image_loader_extension = {
   .base = {__DRI_IMAGE_LOADER, 2},

   .getBuffers = image_get_buffers,
   .flushFrontBuffer = dri2_wl_flush_front_buffer,
   .getCapability = dri2_wl_get_capability,
};
1482 
1483 static void
wayland_throttle_callback(void * data,struct wl_callback * callback,uint32_t time)1484 wayland_throttle_callback(void *data, struct wl_callback *callback,
1485                           uint32_t time)
1486 {
1487    struct dri2_egl_surface *dri2_surf = data;
1488 
1489    dri2_surf->throttle_callback = NULL;
1490    wl_callback_destroy(callback);
1491 }
1492 
/* Listener for the wl_surface frame callback used to throttle swaps. */
static const struct wl_callback_listener throttle_listener = {
   .done = wayland_throttle_callback,
};
1496 
/* Wrap a dri_image in a wl_buffer the compositor can consume, preferring
 * the zwp_linux_dmabuf_v1 protocol (multi-planar, explicit modifiers) and
 * falling back to legacy wl_drm prime buffers.  @dri2_surf may be NULL for
 * display-owned buffers; when set, protocol objects are bound to the
 * surface's private event queue.  Returns NULL on failure. */
static struct wl_buffer *
create_wl_buffer(struct dri2_egl_display *dri2_dpy,
                 struct dri2_egl_surface *dri2_surf, struct dri_image *image)
{
   struct wl_buffer *ret = NULL;
   EGLBoolean query;
   int width, height, fourcc, num_planes;
   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
   int mod_hi, mod_lo;

   query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_WIDTH, &width);
   query &=
      dri2_query_image(image, __DRI_IMAGE_ATTRIB_HEIGHT, &height);
   query &=
      dri2_query_image(image, __DRI_IMAGE_ATTRIB_FOURCC, &fourcc);
   if (!query)
      return NULL;

   /* Drivers that can't report a plane count are treated as single-plane. */
   query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                       &num_planes);
   if (!query)
      num_planes = 1;

   /* The 64-bit modifier is reported as two 32-bit halves; if either query
    * fails we keep DRM_FORMAT_MOD_INVALID (implicit modifier). */
   query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
                                       &mod_hi);
   query &= dri2_query_image(
      image, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod_lo);
   if (query) {
      modifier = combine_u32_into_u64(mod_hi, mod_lo);
   }

   /* Check the buffer's modifier against the set the compositor advertised
    * for this format. */
   bool supported_modifier = false;
   bool mod_invalid_supported = false;
   int visual_idx = dri2_wl_visual_idx_from_fourcc(fourcc);
   assert(visual_idx != -1);

   uint64_t *mod;
   u_vector_foreach(mod, &dri2_dpy->formats.modifiers[visual_idx])
   {
      if (*mod == DRM_FORMAT_MOD_INVALID) {
         mod_invalid_supported = true;
      }
      if (*mod == modifier) {
         supported_modifier = true;
         break;
      }
   }
   if (!supported_modifier && mod_invalid_supported) {
      /* If the server has advertised DRM_FORMAT_MOD_INVALID then we trust
       * that the client has allocated the buffer with the right implicit
       * modifier for the format, even though it's allocated a buffer the
       * server hasn't explicitly claimed to support. */
      modifier = DRM_FORMAT_MOD_INVALID;
      supported_modifier = true;
   }

   if (dri2_dpy->wl_dmabuf && supported_modifier) {
      struct zwp_linux_buffer_params_v1 *params;
      int i;

      /* We don't need a wrapper for wl_dmabuf objects, because we have to
       * create the intermediate params object; we can set the queue on this,
       * and the wl_buffer inherits it race-free. */
      params = zwp_linux_dmabuf_v1_create_params(dri2_dpy->wl_dmabuf);
      if (dri2_surf)
         wl_proxy_set_queue((struct wl_proxy *)params, dri2_surf->wl_queue);

      /* Export each plane as an fd and attach it to the params object. */
      for (i = 0; i < num_planes; i++) {
         struct dri_image *p_image;
         int stride, offset;
         int fd = -1;

         p_image = dri2_from_planar(image, i, NULL);
         if (!p_image) {
            assert(i == 0);
            p_image = image;
         }

         query =
            dri2_query_image(p_image, __DRI_IMAGE_ATTRIB_FD, &fd);
         query &= dri2_query_image(
            p_image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
         query &= dri2_query_image(
            p_image, __DRI_IMAGE_ATTRIB_OFFSET, &offset);
         if (image != p_image)
            dri2_destroy_image(p_image);

         if (!query) {
            if (fd >= 0)
               close(fd);
            zwp_linux_buffer_params_v1_destroy(params);
            return NULL;
         }

         /* The protocol takes the modifier as two 32-bit words. */
         zwp_linux_buffer_params_v1_add(params, fd, i, offset, stride,
                                        modifier >> 32, modifier & 0xffffffff);
         close(fd);
      }

      if (dri2_surf && dri2_surf->base.PresentOpaque)
         fourcc = dri2_wl_visuals[visual_idx].opaque_wl_drm_format;

      ret = zwp_linux_buffer_params_v1_create_immed(params, width, height,
                                                    fourcc, 0);
      zwp_linux_buffer_params_v1_destroy(params);
   } else if (dri2_dpy->wl_drm) {
      struct wl_drm *wl_drm =
         dri2_surf ? dri2_surf->wl_drm_wrapper : dri2_dpy->wl_drm;
      int fd = -1, stride;

      /* wl_drm doesn't support explicit modifiers, so ideally we should bail
       * out if modifier != DRM_FORMAT_MOD_INVALID. However many drivers will
       * return a valid modifier when querying the DRIImage even if a buffer
       * was allocated without explicit modifiers.
       * XXX: bail out if the buffer was allocated without explicit modifiers
       */
      if (num_planes > 1)
         return NULL;

      query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD, &fd);
      query &=
         dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
      if (!query) {
         if (fd >= 0)
            close(fd);
         return NULL;
      }

      ret = wl_drm_create_prime_buffer(wl_drm, fd, width, height, fourcc, 0,
                                       stride, 0, 0, 0, 0);
      close(fd);
   }

   return ret;
}
1632 
1633 static EGLBoolean
try_damage_buffer(struct dri2_egl_surface * dri2_surf,const EGLint * rects,EGLint n_rects)1634 try_damage_buffer(struct dri2_egl_surface *dri2_surf, const EGLint *rects,
1635                   EGLint n_rects)
1636 {
1637    if (wl_proxy_get_version((struct wl_proxy *)dri2_surf->wl_surface_wrapper) <
1638        WL_SURFACE_DAMAGE_BUFFER_SINCE_VERSION)
1639       return EGL_FALSE;
1640 
1641    for (int i = 0; i < n_rects; i++) {
1642       const int *rect = &rects[i * 4];
1643 
1644       wl_surface_damage_buffer(dri2_surf->wl_surface_wrapper, rect[0],
1645                                dri2_surf->base.Height - rect[1] - rect[3],
1646                                rect[2], rect[3]);
1647    }
1648    return EGL_TRUE;
1649 }
1650 
1651 /**
1652  * Called via eglSwapBuffers(), drv->SwapBuffers().
1653  */
1654 static EGLBoolean
dri2_wl_swap_buffers_with_damage(_EGLDisplay * disp,_EGLSurface * draw,const EGLint * rects,EGLint n_rects)1655 dri2_wl_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
1656                                  const EGLint *rects, EGLint n_rects)
1657 {
1658    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1659    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
1660 
1661    if (!dri2_surf->wl_win)
1662       return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
1663 
1664    /* Flush (and finish glthread) before:
1665     *   - update_buffers_if_needed because the unmarshalling thread
1666     *     may be running currently, and we would concurrently alloc/free
1667     *     the back bo.
1668     *   - swapping current/back because flushing may free the buffer and
1669     *     dri_image and reallocate them using get_back_bo (which causes a
1670     *     a crash because 'current' becomes NULL).
1671     *   - using any wl_* function because accessing them from this thread
1672     *     and glthread causes troubles (see #7624 and #8136)
1673     */
1674    dri2_flush_drawable_for_swapbuffers(disp, draw);
1675    dri_invalidate_drawable(dri2_surf->dri_drawable);
1676 
1677    while (dri2_surf->throttle_callback != NULL)
1678       if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
1679           -1)
1680          return -1;
1681 
1682    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++)
1683       if (dri2_surf->color_buffers[i].age > 0)
1684          dri2_surf->color_buffers[i].age++;
1685 
1686    /* Make sure we have a back buffer in case we're swapping without ever
1687     * rendering. */
1688    if (update_buffers_if_needed(dri2_surf) < 0)
1689       return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1690 
1691    if (draw->SwapInterval > 0) {
1692       dri2_surf->throttle_callback =
1693          wl_surface_frame(dri2_surf->wl_surface_wrapper);
1694       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
1695                                dri2_surf);
1696    }
1697 
1698    dri2_surf->back->age = 1;
1699    dri2_surf->current = dri2_surf->back;
1700    dri2_surf->back = NULL;
1701 
1702    if (!dri2_surf->current->wl_buffer) {
1703       struct dri_image *image;
1704 
1705       if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1706          image = dri2_surf->current->linear_copy;
1707       else
1708          image = dri2_surf->current->dri_image;
1709 
1710       dri2_surf->current->wl_buffer =
1711          create_wl_buffer(dri2_dpy, dri2_surf, image);
1712 
1713       if (dri2_surf->current->wl_buffer == NULL)
1714          return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1715 
1716       dri2_surf->current->wl_release = false;
1717 
1718       wl_buffer_add_listener(dri2_surf->current->wl_buffer, &wl_buffer_listener,
1719                              dri2_surf);
1720    }
1721 
1722    wl_surface_attach(dri2_surf->wl_surface_wrapper,
1723                      dri2_surf->current->wl_buffer, dri2_surf->dx,
1724                      dri2_surf->dy);
1725 
1726    dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
1727    dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
1728    /* reset resize growing parameters */
1729    dri2_surf->dx = 0;
1730    dri2_surf->dy = 0;
1731 
1732    /* If the compositor doesn't support damage_buffer, we deliberately
1733     * ignore the damage region and post maximum damage, due to
1734     * https://bugs.freedesktop.org/78190 */
1735    if (!n_rects || !try_damage_buffer(dri2_surf, rects, n_rects))
1736       wl_surface_damage(dri2_surf->wl_surface_wrapper, 0, 0, INT32_MAX,
1737                         INT32_MAX);
1738 
1739    if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
1740       _EGLContext *ctx = _eglGetCurrentContext();
1741       struct dri2_egl_context *dri2_ctx = dri2_egl_context(ctx);
1742       struct dri_drawable *dri_drawable = dri2_dpy->vtbl->get_dri_drawable(draw);
1743       dri2_blit_image(
1744          dri2_ctx->dri_context, dri2_surf->current->linear_copy,
1745          dri2_surf->current->dri_image, 0, 0, dri2_surf->base.Width,
1746          dri2_surf->base.Height, 0, 0, dri2_surf->base.Width,
1747          dri2_surf->base.Height, 0);
1748       dri_flush_drawable(dri_drawable);
1749    }
1750 
1751    wl_surface_commit(dri2_surf->wl_surface_wrapper);
1752 
1753    /* If we're not waiting for a frame callback then we'll at least throttle
1754     * to a sync callback so that we always give a chance for the compositor to
1755     * handle the commit and send a release event before checking for a free
1756     * buffer */
1757    if (dri2_surf->throttle_callback == NULL) {
1758       dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
1759       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
1760                                dri2_surf);
1761    }
1762 
1763    wl_display_flush(dri2_dpy->wl_dpy);
1764 
1765    return EGL_TRUE;
1766 }
1767 
1768 static EGLint
dri2_wl_query_buffer_age(_EGLDisplay * disp,_EGLSurface * surface)1769 dri2_wl_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
1770 {
1771    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
1772 
1773    if (update_buffers_if_needed(dri2_surf) < 0) {
1774       _eglError(EGL_BAD_ALLOC, "dri2_query_buffer_age");
1775       return -1;
1776    }
1777 
1778    return dri2_surf->back->age;
1779 }
1780 
static EGLBoolean
dri2_wl_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
{
   /* Plain eglSwapBuffers(): a swap with no damage rects, which makes the
    * damage path post full-surface damage. */
   return dri2_wl_swap_buffers_with_damage(disp, draw, NULL, 0);
}
1786 
1787 static struct wl_buffer *
dri2_wl_create_wayland_buffer_from_image(_EGLDisplay * disp,_EGLImage * img)1788 dri2_wl_create_wayland_buffer_from_image(_EGLDisplay *disp, _EGLImage *img)
1789 {
1790    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1791    struct dri2_egl_image *dri2_img = dri2_egl_image(img);
1792    struct dri_image *image = dri2_img->dri_image;
1793    struct wl_buffer *buffer;
1794    int fourcc;
1795 
1796    /* Check the upstream display supports this buffer's format. */
1797    dri2_query_image(image, __DRI_IMAGE_ATTRIB_FOURCC, &fourcc);
1798    if (!server_supports_fourcc(&dri2_dpy->formats, fourcc))
1799       goto bad_format;
1800 
1801    buffer = create_wl_buffer(dri2_dpy, NULL, image);
1802 
1803    /* The buffer object will have been created with our internal event queue
1804     * because it is using wl_dmabuf/wl_drm as a proxy factory. We want the
1805     * buffer to be used by the application so we'll reset it to the display's
1806     * default event queue. This isn't actually racy, as the only event the
1807     * buffer can get is a buffer release, which doesn't happen with an explicit
1808     * attach. */
1809    if (buffer)
1810       wl_proxy_set_queue((struct wl_proxy *)buffer, NULL);
1811 
1812    return buffer;
1813 
1814 bad_format:
1815    _eglError(EGL_BAD_MATCH, "unsupported image format");
1816    return NULL;
1817 }
1818 
1819 static int
dri2_wl_authenticate(_EGLDisplay * disp,uint32_t id)1820 dri2_wl_authenticate(_EGLDisplay *disp, uint32_t id)
1821 {
1822    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1823    int ret = 0;
1824 
1825    if (dri2_dpy->is_render_node) {
1826       _eglLog(_EGL_WARNING, "wayland-egl: client asks server to "
1827                             "authenticate for render-nodes");
1828       return 0;
1829    }
1830    dri2_dpy->authenticated = false;
1831 
1832    wl_drm_authenticate(dri2_dpy->wl_drm, id);
1833    if (roundtrip(dri2_dpy) < 0)
1834       ret = -1;
1835 
1836    if (!dri2_dpy->authenticated)
1837       ret = -1;
1838 
1839    /* reset authenticated */
1840    dri2_dpy->authenticated = true;
1841 
1842    return ret;
1843 }
1844 
1845 static void
drm_handle_device(void * data,struct wl_drm * drm,const char * device)1846 drm_handle_device(void *data, struct wl_drm *drm, const char *device)
1847 {
1848    struct dri2_egl_display *dri2_dpy = data;
1849    drm_magic_t magic;
1850 
1851    dri2_dpy->device_name = strdup(device);
1852    if (!dri2_dpy->device_name)
1853       return;
1854 
1855    dri2_dpy->fd_render_gpu = loader_open_device(dri2_dpy->device_name);
1856    if (dri2_dpy->fd_render_gpu == -1) {
1857       _eglLog(_EGL_WARNING, "wayland-egl: could not open %s (%s)",
1858               dri2_dpy->device_name, strerror(errno));
1859       free(dri2_dpy->device_name);
1860       dri2_dpy->device_name = NULL;
1861       return;
1862    }
1863 
1864    if (drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER) {
1865       dri2_dpy->authenticated = true;
1866    } else {
1867       if (drmGetMagic(dri2_dpy->fd_render_gpu, &magic)) {
1868          close(dri2_dpy->fd_render_gpu);
1869          dri2_dpy->fd_render_gpu = -1;
1870          free(dri2_dpy->device_name);
1871          dri2_dpy->device_name = NULL;
1872          _eglLog(_EGL_WARNING, "wayland-egl: drmGetMagic failed");
1873          return;
1874       }
1875       wl_drm_authenticate(dri2_dpy->wl_drm, magic);
1876    }
1877 }
1878 
1879 static void
drm_handle_format(void * data,struct wl_drm * drm,uint32_t format)1880 drm_handle_format(void *data, struct wl_drm *drm, uint32_t format)
1881 {
1882    struct dri2_egl_display *dri2_dpy = data;
1883    int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1884 
1885    if (visual_idx == -1)
1886       return;
1887 
1888    BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1889 }
1890 
static void
drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t value)
{
   /* wl_drm "capabilities" event: stash the capability bitmask; it is
    * checked for WL_DRM_CAPABILITY_PRIME during display initialization. */
   struct dri2_egl_display *dri2_dpy = data;

   dri2_dpy->capabilities = value;
}
1898 
static void
drm_handle_authenticated(void *data, struct wl_drm *drm)
{
   /* wl_drm "authenticated" event: the server accepted our DRM magic. */
   struct dri2_egl_display *dri2_dpy = data;

   dri2_dpy->authenticated = true;
}
1906 
/* Dispatch table for wl_drm events. */
static const struct wl_drm_listener drm_listener = {
   .device = drm_handle_device,
   .format = drm_handle_format,
   .authenticated = drm_handle_authenticated,
   .capabilities = drm_handle_capabilities,
};
1913 
static void
dmabuf_ignore_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                     uint32_t format)
{
   /* formats are implicitly advertised by the 'modifier' event (see
    * dmabuf_handle_modifier), so this one carries no extra information */
}
1920 
1921 static void
dmabuf_handle_modifier(void * data,struct zwp_linux_dmabuf_v1 * dmabuf,uint32_t format,uint32_t modifier_hi,uint32_t modifier_lo)1922 dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
1923                        uint32_t format, uint32_t modifier_hi,
1924                        uint32_t modifier_lo)
1925 {
1926    struct dri2_egl_display *dri2_dpy = data;
1927    int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1928    uint64_t *mod;
1929 
1930    /* Ignore this if the compositor advertised dma-buf feedback. From version 4
1931     * onwards (when dma-buf feedback was introduced), the compositor should not
1932     * advertise this event anymore, but let's keep this for safety. */
1933    if (dri2_dpy->wl_dmabuf_feedback)
1934       return;
1935 
1936    if (visual_idx == -1)
1937       return;
1938 
1939    BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1940 
1941    mod = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
1942    if (mod)
1943       *mod = combine_u32_into_u64(modifier_hi, modifier_lo);
1944 }
1945 
/* Dispatch table for zwp_linux_dmabuf_v1 events. */
static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
   .format = dmabuf_ignore_format,
   .modifier = dmabuf_handle_modifier,
};
1950 
1951 static void
wl_drm_bind(struct dri2_egl_display * dri2_dpy)1952 wl_drm_bind(struct dri2_egl_display *dri2_dpy)
1953 {
1954    dri2_dpy->wl_drm =
1955       wl_registry_bind(dri2_dpy->wl_registry, dri2_dpy->wl_drm_name,
1956                        &wl_drm_interface, dri2_dpy->wl_drm_version);
1957    wl_drm_add_listener(dri2_dpy->wl_drm, &drm_listener, dri2_dpy);
1958 }
1959 
1960 static void
default_dmabuf_feedback_format_table(void * data,struct zwp_linux_dmabuf_feedback_v1 * zwp_linux_dmabuf_feedback_v1,int32_t fd,uint32_t size)1961 default_dmabuf_feedback_format_table(
1962    void *data,
1963    struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
1964    int32_t fd, uint32_t size)
1965 {
1966    struct dri2_egl_display *dri2_dpy = data;
1967 
1968    dri2_dpy->format_table.size = size;
1969    dri2_dpy->format_table.data =
1970       mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
1971 
1972    close(fd);
1973 }
1974 
1975 static void
default_dmabuf_feedback_main_device(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * device)1976 default_dmabuf_feedback_main_device(
1977    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1978    struct wl_array *device)
1979 {
1980    struct dri2_egl_display *dri2_dpy = data;
1981    char *node;
1982    int fd;
1983    dev_t dev;
1984 
1985    /* Given the device, look for a render node and try to open it. */
1986    memcpy(&dev, device->data, sizeof(dev));
1987    node = loader_get_render_node(dev);
1988    if (!node)
1989       return;
1990    fd = loader_open_device(node);
1991    if (fd == -1) {
1992       free(node);
1993       return;
1994    }
1995 
1996    dri2_dpy->device_name = node;
1997    dri2_dpy->fd_render_gpu = fd;
1998    dri2_dpy->authenticated = true;
1999 }
2000 
static void
default_dmabuf_feedback_tranche_target_device(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *device)
{
   /* ignore this event; only the main device is used for device selection */
}
2008 
static void
default_dmabuf_feedback_tranche_flags(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   uint32_t flags)
{
   /* ignore this event; tranche flags are not consumed here */
}
2016 
2017 static void
default_dmabuf_feedback_tranche_formats(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * indices)2018 default_dmabuf_feedback_tranche_formats(
2019    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
2020    struct wl_array *indices)
2021 {
2022    struct dri2_egl_display *dri2_dpy = data;
2023    uint64_t *modifier_ptr, modifier;
2024    uint32_t format;
2025    uint16_t *index;
2026    int visual_idx;
2027 
2028    if (dri2_dpy->format_table.data == MAP_FAILED) {
2029       _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
2030                             "so we won't be able to use this batch of dma-buf "
2031                             "feedback events.");
2032       return;
2033    }
2034    if (dri2_dpy->format_table.data == NULL) {
2035       _eglLog(_EGL_WARNING,
2036               "wayland-egl: compositor didn't advertise a format "
2037               "table, so we won't be able to use this batch of dma-buf "
2038               "feedback events.");
2039       return;
2040    }
2041 
2042    wl_array_for_each (index, indices) {
2043       format = dri2_dpy->format_table.data[*index].format;
2044       modifier = dri2_dpy->format_table.data[*index].modifier;
2045 
2046       /* skip formats that we don't support */
2047       visual_idx = dri2_wl_visual_idx_from_fourcc(format);
2048       if (visual_idx == -1)
2049          continue;
2050 
2051       BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
2052       modifier_ptr = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
2053       if (modifier_ptr)
2054          *modifier_ptr = modifier;
2055    }
2056 }
2057 
static void
default_dmabuf_feedback_tranche_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event; there is no per-tranche state to finalize */
}
2064 
static void
default_dmabuf_feedback_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event; everything needed was recorded by earlier events */
}
2071 
/* Dispatch table for the default zwp_linux_dmabuf_feedback_v1 events. */
static const struct zwp_linux_dmabuf_feedback_v1_listener
   dmabuf_feedback_listener = {
      .format_table = default_dmabuf_feedback_format_table,
      .main_device = default_dmabuf_feedback_main_device,
      .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
      .tranche_flags = default_dmabuf_feedback_tranche_flags,
      .tranche_formats = default_dmabuf_feedback_tranche_formats,
      .tranche_done = default_dmabuf_feedback_tranche_done,
      .done = default_dmabuf_feedback_done,
};
2082 
2083 static void
registry_handle_global_drm(void * data,struct wl_registry * registry,uint32_t name,const char * interface,uint32_t version)2084 registry_handle_global_drm(void *data, struct wl_registry *registry,
2085                            uint32_t name, const char *interface,
2086                            uint32_t version)
2087 {
2088    struct dri2_egl_display *dri2_dpy = data;
2089 
2090    if (strcmp(interface, wl_drm_interface.name) == 0) {
2091       dri2_dpy->wl_drm_version = MIN2(version, 2);
2092       dri2_dpy->wl_drm_name = name;
2093    } else if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 &&
2094               version >= 3) {
2095       dri2_dpy->wl_dmabuf = wl_registry_bind(
2096          registry, name, &zwp_linux_dmabuf_v1_interface,
2097          MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
2098       zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
2099                                        dri2_dpy);
2100    }
2101 }
2102 
static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{
   /* Global removals are ignored; nothing bound here is unbound dynamically. */
}
2108 
/* Registry listener used while probing for wl_drm / zwp_linux_dmabuf_v1. */
static const struct wl_registry_listener registry_listener_drm = {
   .global = registry_handle_global_drm,
   .global_remove = registry_handle_global_remove,
};
2113 
/* Configure eglSwapInterval() support for this display (1 is passed as the
 * upper bound). */
static void
dri2_wl_setup_swap_interval(_EGLDisplay *disp)
{
   /* We can't use values greater than 1 on Wayland because we are using the
    * frame callback to synchronise the frame and the only way we be sure to
    * get a frame callback is to attach a new buffer. Therefore we can't just
    * sit drawing nothing to wait until the next ‘n’ frame callbacks */

   dri2_setup_swap_interval(disp, 1);
}
2124 
/* Display vtable for the hardware (wl_drm / dma-buf) Wayland backend;
 * installed last in dri2_initialize_wayland_drm(). */
static const struct dri2_egl_display_vtbl dri2_wl_display_vtbl = {
   .authenticate = dri2_wl_authenticate,
   .create_window_surface = dri2_wl_create_window_surface,
   .create_pixmap_surface = dri2_wl_create_pixmap_surface,
   .destroy_surface = dri2_wl_destroy_surface,
   .swap_interval = dri2_wl_swap_interval,
   .create_image = dri2_create_image_khr,
   .swap_buffers = dri2_wl_swap_buffers,
   .swap_buffers_with_damage = dri2_wl_swap_buffers_with_damage,
   .query_buffer_age = dri2_wl_query_buffer_age,
   .create_wayland_buffer_from_image = dri2_wl_create_wayland_buffer_from_image,
   .get_dri_drawable = dri2_surface_get_dri_drawable,
};
2138 
/* DRI loader extensions handed to the driver for this backend. */
static const __DRIextension *dri2_loader_extensions[] = {
   &image_loader_extension.base,
   &image_lookup_extension.base,
   &use_invalidate.base,
   NULL,
};
2145 
2146 static void
dri2_wl_add_configs_for_visuals(_EGLDisplay * disp)2147 dri2_wl_add_configs_for_visuals(_EGLDisplay *disp)
2148 {
2149    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2150    unsigned int format_count[ARRAY_SIZE(dri2_wl_visuals)] = {0};
2151 
2152    /* Try to create an EGLConfig for every config the driver declares */
2153    for (unsigned i = 0; dri2_dpy->driver_configs[i]; i++) {
2154       struct dri2_egl_config *dri2_conf;
2155       bool conversion = false;
2156       int idx = dri2_wl_visual_idx_from_config(dri2_dpy->driver_configs[i]);
2157 
2158       if (idx < 0)
2159          continue;
2160 
2161       /* Check if the server natively supports the colour buffer format */
2162       if (!server_supports_format(&dri2_dpy->formats, idx)) {
2163          /* In multi-GPU scenarios, we usually have a different buffer, so a
2164           * format conversion is easy compared to the overhead of the copy */
2165          if (dri2_dpy->fd_render_gpu == dri2_dpy->fd_display_gpu)
2166             continue;
2167 
2168          /* Check if the server supports the alternate format */
2169          if (!server_supports_pipe_format(&dri2_dpy->formats,
2170                                           dri2_wl_visuals[idx].alt_pipe_format)) {
2171             continue;
2172          }
2173 
2174          conversion = true;
2175       }
2176 
2177       /* The format is supported one way or another; add the EGLConfig */
2178       dri2_conf = dri2_add_config(disp, dri2_dpy->driver_configs[i],
2179                                   EGL_WINDOW_BIT, NULL);
2180       if (!dri2_conf)
2181          continue;
2182 
2183       format_count[idx]++;
2184 
2185       if (conversion && format_count[idx] == 1) {
2186          _eglLog(_EGL_DEBUG, "Client format %s converted via PRIME blitImage.",
2187                  util_format_name(dri2_wl_visuals[idx].pipe_format));
2188       }
2189    }
2190 
2191    for (unsigned i = 0; i < ARRAY_SIZE(format_count); i++) {
2192       if (!format_count[i]) {
2193          _eglLog(_EGL_DEBUG, "No DRI config supports native format %s",
2194                  util_format_name(dri2_wl_visuals[i].pipe_format));
2195       }
2196    }
2197 }
2198 
/* Discover a usable DRM device for the display, preferring dma-buf feedback
 * (zwp_linux_dmabuf_v1 >= 4) and falling back to the legacy wl_drm protocol.
 * Returns true once fd_render_gpu is open (and, on the wl_drm path,
 * authenticated). */
static bool
dri2_initialize_wayland_drm_extensions(struct dri2_egl_display *dri2_dpy)
{
   /* Get default dma-buf feedback */
   if (dri2_dpy->wl_dmabuf &&
       zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
          ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
      dmabuf_feedback_format_table_init(&dri2_dpy->format_table);
      dri2_dpy->wl_dmabuf_feedback =
         zwp_linux_dmabuf_v1_get_default_feedback(dri2_dpy->wl_dmabuf);
      zwp_linux_dmabuf_feedback_v1_add_listener(
         dri2_dpy->wl_dmabuf_feedback, &dmabuf_feedback_listener, dri2_dpy);
   }

   /* This round trip delivers the feedback events (main_device, format
    * table, tranches) registered above, if any. */
   if (roundtrip(dri2_dpy) < 0)
      return false;

   /* Destroy the default dma-buf feedback and the format table. */
   if (dri2_dpy->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(dri2_dpy->wl_dmabuf_feedback);
      dri2_dpy->wl_dmabuf_feedback = NULL;
      dmabuf_feedback_format_table_fini(&dri2_dpy->format_table);
   }

   /* We couldn't retrieve a render node from the dma-buf feedback (or the
    * feedback was not advertised at all), so we must fallback to wl_drm. */
   if (dri2_dpy->fd_render_gpu == -1) {
      /* wl_drm not advertised by compositor, so can't continue */
      if (dri2_dpy->wl_drm_name == 0)
         return false;
      wl_drm_bind(dri2_dpy);

      if (dri2_dpy->wl_drm == NULL)
         return false;
      /* First round trip delivers the "device" event (which opens the fd and
       * may send the DRM magic); the second, if needed, waits for the
       * compositor's "authenticated" reply. */
      if (roundtrip(dri2_dpy) < 0 || dri2_dpy->fd_render_gpu == -1)
         return false;

      if (!dri2_dpy->authenticated &&
          (roundtrip(dri2_dpy) < 0 || !dri2_dpy->authenticated))
         return false;
   }
   return true;
}
2242 
/* Entry point for initializing an EGL display on Wayland with hardware
 * (wl_drm / dma-buf) buffer sharing.  Connects to the compositor (or reuses
 * the application's connection), discovers a DRM device, loads the driver,
 * creates the screen and advertises the supported extensions.
 *
 * NOTE(review): the cleanup path only returns EGL_FALSE without releasing
 * the resources acquired so far (wl_dpy connection, queue, wrapper,
 * registry, fds, driver_name) — presumably the caller tears down the
 * partially initialized dri2_egl_display; confirm against the caller. */
static EGLBoolean
dri2_initialize_wayland_drm(_EGLDisplay *disp)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);

   if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
      goto cleanup;

   /* Connect ourselves, or reuse the wl_display the application passed in. */
   if (disp->PlatformDisplay == NULL) {
      dri2_dpy->wl_dpy = wl_display_connect(NULL);
      if (dri2_dpy->wl_dpy == NULL)
         goto cleanup;
      dri2_dpy->own_device = true;
   } else {
      dri2_dpy->wl_dpy = disp->PlatformDisplay;
   }

   /* Private event queue so our protocol traffic doesn't interfere with the
    * application's; the display wrapper routes our requests onto it. */
   dri2_dpy->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
                                                          "mesa egl display queue");

   dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
   if (dri2_dpy->wl_dpy_wrapper == NULL)
      goto cleanup;

   wl_proxy_set_queue((struct wl_proxy *)dri2_dpy->wl_dpy_wrapper,
                      dri2_dpy->wl_queue);

   if (dri2_dpy->own_device)
      wl_display_dispatch_pending(dri2_dpy->wl_dpy);

   dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
   wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_drm,
                            dri2_dpy);

   /* Deliver the registry globals (wl_drm name, zwp_linux_dmabuf bind). */
   if (roundtrip(dri2_dpy) < 0)
      goto cleanup;

   if (!dri2_initialize_wayland_drm_extensions(dri2_dpy))
      goto cleanup;

   /* May swap fd_render_gpu for a user-preferred GPU (DRI_PRIME etc.). */
   loader_get_user_preferred_fd(&dri2_dpy->fd_render_gpu,
                                &dri2_dpy->fd_display_gpu);

   if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
      free(dri2_dpy->device_name);
      dri2_dpy->device_name =
         loader_get_device_name_for_fd(dri2_dpy->fd_render_gpu);
      if (!dri2_dpy->device_name) {
         _eglError(EGL_BAD_ALLOC, "wayland-egl: failed to get device name "
                                  "for requested GPU");
         goto cleanup;
      }
   }

   /* we have to do the check now, because loader_get_user_preferred_fd
    * will return a render-node when the requested gpu is different
    * to the server, but also if the client asks for the same gpu than
    * the server by requesting its pci-id */
   dri2_dpy->is_render_node =
      drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER;

   dri2_dpy->driver_name = loader_get_driver_for_fd(dri2_dpy->fd_render_gpu);
   if (dri2_dpy->driver_name == NULL) {
      _eglError(EGL_BAD_ALLOC, "DRI2: failed to get driver name");
      goto cleanup;
   }

   dri2_dpy->loader_extensions = dri2_loader_extensions;
   if (!dri2_load_driver(disp)) {
      _eglError(EGL_BAD_ALLOC, "DRI2: failed to load driver");
      goto cleanup;
   }

   if (!dri2_create_screen(disp))
      goto cleanup;

   if (!dri2_setup_device(disp, false)) {
      _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to setup EGLDevice");
      goto cleanup;
   }

   dri2_setup_screen(disp);

   dri2_wl_setup_swap_interval(disp);

   if (dri2_dpy->wl_drm) {
      /* To use Prime, we must have _DRI_IMAGE v7 at least.
       * createImageFromDmaBufs support indicates that Prime export/import is
       * supported by the driver. We deprecated the support to GEM names API, so
       * we bail out if the driver does not support Prime. */
      if (!(dri2_dpy->capabilities & WL_DRM_CAPABILITY_PRIME) ||
          !dri2_dpy->has_dmabuf_import) {
         _eglLog(_EGL_WARNING, "wayland-egl: display does not support prime");
         goto cleanup;
      }
   }

   dri2_wl_add_configs_for_visuals(disp);

   dri2_set_WL_bind_wayland_display(disp);
   /* When cannot convert EGLImage to wl_buffer when on a different gpu,
    * because the buffer of the EGLImage has likely a tiling mode the server
    * gpu won't support. These is no way to check for now. Thus do not support
    * the extension */
   if (dri2_dpy->fd_render_gpu == dri2_dpy->fd_display_gpu)
      disp->Extensions.WL_create_wayland_buffer_from_image = EGL_TRUE;

   disp->Extensions.EXT_buffer_age = EGL_TRUE;

   disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;

   disp->Extensions.EXT_present_opaque = EGL_TRUE;

   /* Fill vtbl last to prevent accidentally calling virtual function during
    * initialization.
    */
   dri2_dpy->vtbl = &dri2_wl_display_vtbl;

   return EGL_TRUE;

cleanup:
   return EGL_FALSE;
}
2366 
2367 static int
dri2_wl_swrast_get_stride_for_format(int format,int w)2368 dri2_wl_swrast_get_stride_for_format(int format, int w)
2369 {
2370    int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2371 
2372    assume(visual_idx != -1);
2373 
2374    return w * util_format_get_blocksize(dri2_wl_visuals[visual_idx].pipe_format);
2375 }
2376 
2377 static EGLBoolean
dri2_wl_swrast_allocate_buffer(struct dri2_egl_surface * dri2_surf,int format,int w,int h,void ** data,int * size,struct wl_buffer ** buffer)2378 dri2_wl_swrast_allocate_buffer(struct dri2_egl_surface *dri2_surf, int format,
2379                                int w, int h, void **data, int *size,
2380                                struct wl_buffer **buffer)
2381 {
2382    struct dri2_egl_display *dri2_dpy =
2383       dri2_egl_display(dri2_surf->base.Resource.Display);
2384    struct wl_shm_pool *pool;
2385    int fd, stride, size_map;
2386    void *data_map;
2387 
2388    assert(!*buffer);
2389 
2390    stride = dri2_wl_swrast_get_stride_for_format(format, w);
2391    size_map = h * stride;
2392 
2393    /* Create a shareable buffer */
2394    fd = os_create_anonymous_file(size_map, NULL);
2395    if (fd < 0)
2396       return EGL_FALSE;
2397 
2398    data_map = mmap(NULL, size_map, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2399    if (data_map == MAP_FAILED) {
2400       close(fd);
2401       return EGL_FALSE;
2402    }
2403 
2404    /* Share it in a wl_buffer */
2405    pool = wl_shm_create_pool(dri2_dpy->wl_shm, fd, size_map);
2406    wl_proxy_set_queue((struct wl_proxy *)pool, dri2_surf->wl_queue);
2407    *buffer = wl_shm_pool_create_buffer(pool, 0, w, h, stride, format);
2408    wl_shm_pool_destroy(pool);
2409    close(fd);
2410 
2411    *data = data_map;
2412    *size = size_map;
2413    return EGL_TRUE;
2414 }
2415 
2416 static void
kopper_update_buffers(struct dri2_egl_surface * dri2_surf)2417 kopper_update_buffers(struct dri2_egl_surface *dri2_surf)
2418 {
2419    /* we need to do the following operations only once per frame */
2420    if (dri2_surf->back)
2421       return;
2422 
2423    if (dri2_surf->wl_win &&
2424        (dri2_surf->base.Width != dri2_surf->wl_win->width ||
2425         dri2_surf->base.Height != dri2_surf->wl_win->height)) {
2426 
2427       dri2_surf->base.Width = dri2_surf->wl_win->width;
2428       dri2_surf->base.Height = dri2_surf->wl_win->height;
2429       dri2_surf->dx = dri2_surf->wl_win->dx;
2430       dri2_surf->dy = dri2_surf->wl_win->dy;
2431       dri2_surf->current = NULL;
2432    }
2433 }
2434 
/* Ensure dri2_surf->back points at a locked, correctly-sized shm buffer.
 *
 * Handles window resizes, (re)allocates buffers on demand, blocks until the
 * compositor releases one if all are locked, and trims buffers that are no
 * longer needed.  Returns 0 on success, -1 on allocation/dispatch failure.
 */
static int
swrast_update_buffers(struct dri2_egl_surface *dri2_surf)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);

   /* we need to do the following operations only once per frame */
   if (dri2_surf->back)
      return 0;

   if (dri2_surf->wl_win &&
       (dri2_surf->base.Width != dri2_surf->wl_win->width ||
        dri2_surf->base.Height != dri2_surf->wl_win->height)) {

      /* Window was resized: old buffers have the wrong size, drop them. */
      dri2_wl_release_buffers(dri2_surf);

      dri2_surf->base.Width = dri2_surf->wl_win->width;
      dri2_surf->base.Height = dri2_surf->wl_win->height;
      dri2_surf->dx = dri2_surf->wl_win->dx;
      dri2_surf->dy = dri2_surf->wl_win->dy;
      dri2_surf->current = NULL;
   }

   /* find back buffer */
   /* There might be a buffer release already queued that wasn't processed */
   wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);

   /* else choose any another free location */
   while (!dri2_surf->back) {
      for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
         if (!dri2_surf->color_buffers[i].locked) {
            dri2_surf->back = &dri2_surf->color_buffers[i];
            /* Reuse an already-allocated wl_buffer if the slot has one. */
            if (dri2_surf->back->wl_buffer)
               break;

            if (!dri2_wl_swrast_allocate_buffer(
                   dri2_surf, dri2_surf->format, dri2_surf->base.Width,
                   dri2_surf->base.Height, &dri2_surf->back->data,
                   &dri2_surf->back->data_size, &dri2_surf->back->wl_buffer)) {
               _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
               return -1;
            }
            wl_buffer_add_listener(dri2_surf->back->wl_buffer,
                                   &wl_buffer_listener, dri2_surf);
            break;
         }
      }

      /* wait for the compositor to release a buffer */
      if (!dri2_surf->back) {
         if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
             -1) {
            _eglError(EGL_BAD_ALLOC, "waiting for a free buffer failed");
            return -1;
         }
      }
   }

   dri2_surf->back->locked = true;

   /* If we have an extra unlocked buffer at this point, we had to do triple
    * buffering for a while, but now can go back to just double buffering.
    * That means we can free any unlocked buffer now. To avoid toggling between
    * going back to double buffering and needing to allocate another buffer too
    * fast we let the unneeded buffer sit around for a short while. */
   for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
      if (!dri2_surf->color_buffers[i].locked &&
          dri2_surf->color_buffers[i].wl_buffer &&
          dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
         wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
         munmap(dri2_surf->color_buffers[i].data,
                dri2_surf->color_buffers[i].data_size);
         dri2_surf->color_buffers[i].wl_buffer = NULL;
         dri2_surf->color_buffers[i].data = NULL;
         dri2_surf->color_buffers[i].age = 0;
      }
   }

   return 0;
}
2515 
2516 static void *
dri2_wl_swrast_get_frontbuffer_data(struct dri2_egl_surface * dri2_surf)2517 dri2_wl_swrast_get_frontbuffer_data(struct dri2_egl_surface *dri2_surf)
2518 {
2519    /* if there has been a resize: */
2520    if (!dri2_surf->current)
2521       return NULL;
2522 
2523    return dri2_surf->current->data;
2524 }
2525 
2526 static void *
dri2_wl_swrast_get_backbuffer_data(struct dri2_egl_surface * dri2_surf)2527 dri2_wl_swrast_get_backbuffer_data(struct dri2_egl_surface *dri2_surf)
2528 {
2529    assert(dri2_surf->back);
2530    return dri2_surf->back->data;
2531 }
2532 
2533 static EGLBoolean
dri2_wl_surface_throttle(struct dri2_egl_surface * dri2_surf)2534 dri2_wl_surface_throttle(struct dri2_egl_surface *dri2_surf)
2535 {
2536    struct dri2_egl_display *dri2_dpy =
2537       dri2_egl_display(dri2_surf->base.Resource.Display);
2538 
2539    while (dri2_surf->throttle_callback != NULL)
2540       if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
2541           -1)
2542          return EGL_FALSE;
2543 
2544    if (dri2_surf->base.SwapInterval > 0) {
2545       dri2_surf->throttle_callback =
2546          wl_surface_frame(dri2_surf->wl_surface_wrapper);
2547       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
2548                                dri2_surf);
2549    }
2550 
2551    return EGL_TRUE;
2552 }
2553 
/* Commit the attached back buffer to the compositor and make sure some
 * callback exists to pace the next buffer acquisition. */
static void
dri2_wl_swrast_commit_backbuffer(struct dri2_egl_surface *dri2_surf)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);

   /* Remember the size the compositor now has attached. */
   dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
   dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
   /* reset resize growing parameters */
   dri2_surf->dx = 0;
   dri2_surf->dy = 0;

   wl_surface_commit(dri2_surf->wl_surface_wrapper);

   /* If we're not waiting for a frame callback then we'll at least throttle
    * to a sync callback so that we always give a chance for the compositor to
    * handle the commit and send a release event before checking for a free
    * buffer */
   if (dri2_surf->throttle_callback == NULL) {
      dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
      wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
                               dri2_surf);
   }

   /* Push everything out to the compositor now. */
   wl_display_flush(dri2_dpy->wl_dpy);
}
2580 
2581 static void
dri2_wl_kopper_get_drawable_info(struct dri_drawable * draw,int * x,int * y,int * w,int * h,void * loaderPrivate)2582 dri2_wl_kopper_get_drawable_info(struct dri_drawable *draw, int *x, int *y, int *w,
2583                                  int *h, void *loaderPrivate)
2584 {
2585    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2586 
2587    kopper_update_buffers(dri2_surf);
2588    *x = 0;
2589    *y = 0;
2590    *w = dri2_surf->base.Width;
2591    *h = dri2_surf->base.Height;
2592 }
2593 
2594 static void
dri2_wl_swrast_get_drawable_info(struct dri_drawable * draw,int * x,int * y,int * w,int * h,void * loaderPrivate)2595 dri2_wl_swrast_get_drawable_info(struct dri_drawable *draw, int *x, int *y, int *w,
2596                                  int *h, void *loaderPrivate)
2597 {
2598    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2599 
2600    (void)swrast_update_buffers(dri2_surf);
2601    *x = 0;
2602    *y = 0;
2603    *w = dri2_surf->base.Width;
2604    *h = dri2_surf->base.Height;
2605 }
2606 
/* DRI swrast loader hook: copy a w x h region at (x, y) from the front
 * buffer into the caller's tightly-packed destination.  If no front buffer
 * exists yet the destination is zero-filled. */
static void
dri2_wl_swrast_get_image(struct dri_drawable *read, int x, int y, int w, int h,
                         char *data, void *loaderPrivate)
{
   struct dri2_egl_surface *dri2_surf = loaderPrivate;
   /* All widths/offsets below are converted from pixels to bytes. */
   int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
   int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
   int src_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
                                                         dri2_surf->base.Width);
   int dst_stride = copy_width;
   char *src, *dst;

   src = dri2_wl_swrast_get_frontbuffer_data(dri2_surf);
   /* this is already the most up-to-date buffer */
   if (src == data)
      return;
   if (!src) {
      memset(data, 0, copy_width * h);
      return;
   }

   assert(copy_width <= src_stride);

   /* Advance to the requested (x, y) origin in the source. */
   src += x_offset;
   src += y * src_stride;
   dst = data;

   /* Clamp the copy so it never reads past the source buffer. */
   if (copy_width > src_stride - x_offset)
      copy_width = src_stride - x_offset;
   if (h > dri2_surf->base.Height - y)
      h = dri2_surf->base.Height - y;

   for (; h > 0; h--) {
      memcpy(dst, src, copy_width);
      src += src_stride;
      dst += dst_stride;
   }
}
2645 
/* DRI swrast loader hook: copy a w x h region from the caller's buffer
 * (with the given byte stride) into the back buffer at (x, y). */
static void
dri2_wl_swrast_put_image2(struct dri_drawable *draw, int op, int x, int y, int w,
                          int h, int stride, char *data, void *loaderPrivate)
{
   struct dri2_egl_surface *dri2_surf = loaderPrivate;
   /* clamp to surface size */
   w = MIN2(w, dri2_surf->base.Width);
   h = MIN2(h, dri2_surf->base.Height);
   /* Convert pixel widths/offsets to bytes. */
   int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
   int dst_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
                                                         dri2_surf->base.Width);
   int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
   char *src, *dst;

   assert(copy_width <= stride);

   dst = dri2_wl_swrast_get_backbuffer_data(dri2_surf);

   /* Advance to the requested (x, y) origin in the destination. */
   dst += x_offset;
   dst += y * dst_stride;

   src = data;

   /* drivers expect we do these checks (and some rely on it) */
   if (copy_width > dst_stride - x_offset)
      copy_width = dst_stride - x_offset;
   if (h > dri2_surf->base.Height - y)
      h = dri2_surf->base.Height - y;

   for (; h > 0; h--) {
      memcpy(dst, src, copy_width);
      src += stride;
      dst += dst_stride;
   }
}
2681 
2682 static void
dri2_wl_swrast_put_image(struct dri_drawable * draw,int op,int x,int y,int w,int h,char * data,void * loaderPrivate)2683 dri2_wl_swrast_put_image(struct dri_drawable *draw, int op, int x, int y, int w,
2684                          int h, char *data, void *loaderPrivate)
2685 {
2686    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2687    int stride;
2688 
2689    stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2690    dri2_wl_swrast_put_image2(draw, op, x, y, w, h, stride, data, loaderPrivate);
2691 }
2692 
2693 static EGLBoolean
dri2_wl_kopper_swap_buffers_with_damage(_EGLDisplay * disp,_EGLSurface * draw,const EGLint * rects,EGLint n_rects)2694 dri2_wl_kopper_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
2695                                         const EGLint *rects, EGLint n_rects)
2696 {
2697    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2698    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
2699 
2700    if (!dri2_surf->wl_win)
2701       return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
2702 
2703    if (!dri2_wl_surface_throttle(dri2_surf))
2704       return EGL_FALSE;
2705 
2706    if (n_rects) {
2707       if (dri2_dpy->kopper)
2708          kopperSwapBuffersWithDamage(dri2_surf->dri_drawable, __DRI2_FLUSH_INVALIDATE_ANCILLARY, n_rects, rects);
2709       else
2710          driSwapBuffersWithDamage(dri2_surf->dri_drawable, n_rects, rects);
2711    } else {
2712       if (dri2_dpy->kopper)
2713          kopperSwapBuffers(dri2_surf->dri_drawable, __DRI2_FLUSH_INVALIDATE_ANCILLARY);
2714       else
2715          driSwapBuffers(dri2_surf->dri_drawable);
2716    }
2717 
2718    dri2_surf->current = dri2_surf->back;
2719    dri2_surf->back = NULL;
2720 
2721    return EGL_TRUE;
2722 }
2723 
2724 static EGLBoolean
dri2_wl_kopper_swap_buffers(_EGLDisplay * disp,_EGLSurface * draw)2725 dri2_wl_kopper_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
2726 {
2727    dri2_wl_kopper_swap_buffers_with_damage(disp, draw, NULL, 0);
2728    return EGL_TRUE;
2729 }
2730 
/* Present the software-rendered back buffer, optionally with a damage
 * region (EGL_EXT_swap_buffers_with_damage), then commit to the
 * compositor.  The back buffer becomes the new front buffer. */
static EGLBoolean
dri2_wl_swrast_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
                                        const EGLint *rects, EGLint n_rects)
{
   struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);

   if (!dri2_surf->wl_win)
      return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");

   /* Make sure a locked, correctly-sized back buffer exists. */
   (void)swrast_update_buffers(dri2_surf);

   if (dri2_wl_surface_throttle(dri2_surf))
      wl_surface_attach(dri2_surf->wl_surface_wrapper,
         /* 'back' here will be promoted to 'current' */
         dri2_surf->back->wl_buffer, dri2_surf->dx,
         dri2_surf->dy);

   /* If the compositor doesn't support damage_buffer, we deliberately
    * ignore the damage region and post maximum damage, due to
    * https://bugs.freedesktop.org/78190 */
   if (!n_rects || !try_damage_buffer(dri2_surf, rects, n_rects))
      wl_surface_damage(dri2_surf->wl_surface_wrapper, 0, 0, INT32_MAX,
                        INT32_MAX);

   /* guarantee full copy for partial update */
   /* NOTE(review): only the single-rect case derives a width here; any other
    * rect count yields w == 0 and therefore a full old-content copy below —
    * confirm this matches the intended partial-update contract. */
   int w = n_rects == 1 ? (rects[2] - rects[0]) : 0;
   int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
   int dst_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
                                                         dri2_surf->base.Width);
   char *dst = dri2_wl_swrast_get_backbuffer_data(dri2_surf);

   /* partial copy, copy old content */
   if (copy_width < dst_stride)
      dri2_wl_swrast_get_image(NULL, 0, 0, dri2_surf->base.Width,
                                 dri2_surf->base.Height, dst, dri2_surf);

   if (n_rects)
      driSwapBuffersWithDamage(dri2_surf->dri_drawable, n_rects, rects);
   else
      driSwapBuffers(dri2_surf->dri_drawable);

   /* Promote back -> current. */
   dri2_surf->current = dri2_surf->back;
   dri2_surf->back = NULL;

   dri2_wl_swrast_commit_backbuffer(dri2_surf);
   return EGL_TRUE;
}
2778 
2779 static EGLBoolean
dri2_wl_swrast_swap_buffers(_EGLDisplay * disp,_EGLSurface * draw)2780 dri2_wl_swrast_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
2781 {
2782    dri2_wl_swrast_swap_buffers_with_damage(disp, draw, NULL, 0);
2783    return EGL_TRUE;
2784 }
2785 
2786 static EGLint
dri2_wl_kopper_query_buffer_age(_EGLDisplay * disp,_EGLSurface * surface)2787 dri2_wl_kopper_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
2788 {
2789    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2790    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
2791 
2792    /* This can legitimately be null for lavapipe */
2793    if (dri2_dpy->kopper)
2794       return kopperQueryBufferAge(dri2_surf->dri_drawable);
2795    else
2796       return driSWRastQueryBufferAge(dri2_surf->dri_drawable);
2797    return 0;
2798 }
2799 
/* EGL_EXT_buffer_age query (pure swrast path). */
static EGLint
dri2_wl_swrast_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
   struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);

   /* This entry point is only wired up for swrast displays. */
   assert(dri2_dpy->swrast);
   return driSWRastQueryBufferAge(dri2_surf->dri_drawable);
}
2809 
2810 static void
shm_handle_format(void * data,struct wl_shm * shm,uint32_t format)2811 shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
2812 {
2813    struct dri2_egl_display *dri2_dpy = data;
2814    int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2815 
2816    if (visual_idx == -1)
2817       return;
2818 
2819    BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
2820 }
2821 
/* Listener collecting the shm pixel formats the compositor advertises. */
static const struct wl_shm_listener shm_listener = {
   .format = shm_handle_format,
};
2825 
2826 static void
registry_handle_global_kopper(void * data,struct wl_registry * registry,uint32_t name,const char * interface,uint32_t version)2827 registry_handle_global_kopper(void *data, struct wl_registry *registry,
2828                               uint32_t name, const char *interface,
2829                               uint32_t version)
2830 {
2831    struct dri2_egl_display *dri2_dpy = data;
2832 
2833    if (strcmp(interface, wl_shm_interface.name) == 0) {
2834       dri2_dpy->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
2835       wl_shm_add_listener(dri2_dpy->wl_shm, &shm_listener, dri2_dpy);
2836    }
2837    if (strcmp(interface, wl_drm_interface.name) == 0) {
2838       dri2_dpy->wl_drm_version = MIN2(version, 2);
2839       dri2_dpy->wl_drm_name = name;
2840    } else if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 &&
2841                version >= 3) {
2842       dri2_dpy->wl_dmabuf = wl_registry_bind(
2843          registry, name, &zwp_linux_dmabuf_v1_interface,
2844          MIN2(version,
2845                ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
2846       zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
2847                                        dri2_dpy);
2848    }
2849 }
2850 
/* Registry listener used when initializing with Zink/kopper. */
static const struct wl_registry_listener registry_listener_kopper = {
   .global = registry_handle_global_kopper,
   .global_remove = registry_handle_global_remove,
};
2855 
2856 static void
registry_handle_global_swrast(void * data,struct wl_registry * registry,uint32_t name,const char * interface,uint32_t version)2857 registry_handle_global_swrast(void *data, struct wl_registry *registry,
2858                               uint32_t name, const char *interface,
2859                               uint32_t version)
2860 {
2861    struct dri2_egl_display *dri2_dpy = data;
2862 
2863    if (strcmp(interface, wl_shm_interface.name) == 0) {
2864       dri2_dpy->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
2865       wl_shm_add_listener(dri2_dpy->wl_shm, &shm_listener, dri2_dpy);
2866    }
2867 }
2868 
/* Registry listener used for the pure-swrast initialization path. */
static const struct wl_registry_listener registry_listener_swrast = {
   .global = registry_handle_global_swrast,
   .global_remove = registry_handle_global_remove,
};
2873 
/* Display vtable for software rendering over wl_shm. */
static const struct dri2_egl_display_vtbl dri2_wl_swrast_display_vtbl = {
   .authenticate = NULL,
   .create_window_surface = dri2_wl_create_window_surface,
   .create_pixmap_surface = dri2_wl_create_pixmap_surface,
   .destroy_surface = dri2_wl_destroy_surface,
   .swap_interval = dri2_wl_swap_interval,
   .create_image = dri2_create_image_khr,
   .swap_buffers = dri2_wl_swrast_swap_buffers,
   .swap_buffers_with_damage = dri2_wl_swrast_swap_buffers_with_damage,
   .get_dri_drawable = dri2_surface_get_dri_drawable,
   .query_buffer_age = dri2_wl_swrast_query_buffer_age,
};
2886 
/* Display vtable for the kopper (Zink/Vulkan WSI) path.  Note: no
 * .swap_interval entry — kopper manages swap interval internally. */
static const struct dri2_egl_display_vtbl dri2_wl_kopper_display_vtbl = {
   .authenticate = NULL,
   .create_window_surface = dri2_wl_create_window_surface,
   .create_pixmap_surface = dri2_wl_create_pixmap_surface,
   .destroy_surface = dri2_wl_destroy_surface,
   .create_image = dri2_create_image_khr,
   .swap_buffers = dri2_wl_kopper_swap_buffers,
   .swap_buffers_with_damage = dri2_wl_kopper_swap_buffers_with_damage,
   .get_dri_drawable = dri2_surface_get_dri_drawable,
   .query_buffer_age = dri2_wl_kopper_query_buffer_age,
};
2898 
/* DRI swrast loader extension for the plain software path. */
static const __DRIswrastLoaderExtension swrast_loader_extension = {
   .base = {__DRI_SWRAST_LOADER, 2},

   .getDrawableInfo = dri2_wl_swrast_get_drawable_info,
   .putImage = dri2_wl_swrast_put_image,
   .getImage = dri2_wl_swrast_get_image,
   .putImage2 = dri2_wl_swrast_put_image2,
};
2907 
/* Same as swrast_loader_extension but with the kopper drawable-info hook,
 * which skips shm buffer management. */
static const __DRIswrastLoaderExtension kopper_swrast_loader_extension = {
   .base = {__DRI_SWRAST_LOADER, 2},

   .getDrawableInfo = dri2_wl_kopper_get_drawable_info,
   .putImage = dri2_wl_swrast_put_image,
   .getImage = dri2_wl_swrast_get_image,
   .putImage2 = dri2_wl_swrast_put_image2,
};
2916 
/* kopperSetSurfaceCreateInfo() below writes a VkWaylandSurfaceCreateInfoKHR
 * into the opaque storage blob; make sure the blob is large enough. */
static_assert(sizeof(struct kopper_vk_surface_create_storage) >=
                 sizeof(VkWaylandSurfaceCreateInfoKHR),
              "");
2920 
/* Kopper loader hook: fill in the Vulkan Wayland surface create info for a
 * drawable, plus present-opaque and fixed-rate-compression settings. */
static void
kopperSetSurfaceCreateInfo(void *_draw, struct kopper_loader_info *out)
{
   struct dri2_egl_surface *dri2_surf = _draw;
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);
   /* out->bos is an opaque blob large enough for the create info (see the
    * static_assert above). */
   VkWaylandSurfaceCreateInfoKHR *wlsci =
      (VkWaylandSurfaceCreateInfoKHR *)&out->bos;

   wlsci->sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
   wlsci->pNext = NULL;
   wlsci->flags = 0;
   wlsci->display = dri2_dpy->wl_dpy;
   /* Pass the original wl_surface through to Vulkan WSI.  If we pass the
    * proxy wrapper, kopper won't be able to properly de-duplicate surfaces
    * and we may end up creating two VkSurfaceKHRs for the same underlying
    * wl_surface.  Vulkan WSI (which kopper calls into) will make its own
    * queues and proxy wrappers.
    */
   wlsci->surface = get_wl_surface(dri2_surf->base.NativeSurface);
   out->present_opaque = dri2_surf->base.PresentOpaque;
   /* convert to vulkan constants */
   switch (dri2_surf->base.CompressionRate) {
   case EGL_SURFACE_COMPRESSION_FIXED_RATE_NONE_EXT:
      out->compression = 0;
      break;
   case EGL_SURFACE_COMPRESSION_FIXED_RATE_DEFAULT_EXT:
      /* UINT32_MAX means "driver picks the default rate". */
      out->compression = UINT32_MAX;
      break;
/* Map each EGL fixed-rate value onto the matching Vulkan bit. */
#define EGL_VK_COMP(NUM) \
   case EGL_SURFACE_COMPRESSION_FIXED_RATE_##NUM##BPC_EXT: \
      out->compression = VK_IMAGE_COMPRESSION_FIXED_RATE_##NUM##BPC_BIT_EXT; \
      break
   EGL_VK_COMP(1);
   EGL_VK_COMP(2);
   EGL_VK_COMP(3);
   EGL_VK_COMP(4);
   EGL_VK_COMP(5);
   EGL_VK_COMP(6);
   EGL_VK_COMP(7);
   EGL_VK_COMP(8);
   EGL_VK_COMP(9);
   EGL_VK_COMP(10);
   EGL_VK_COMP(11);
   EGL_VK_COMP(12);
#undef EGL_VK_COMP
   default:
      unreachable("unknown compression rate");
   }
}
2971 
/* Kopper loader extension: lets the driver query Vulkan surface info. */
static const __DRIkopperLoaderExtension kopper_loader_extension = {
   .base = {__DRI_KOPPER_LOADER, 1},

   .SetSurfaceCreateInfo = kopperSetSurfaceCreateInfo,
};
/* Loader extension list for the pure software path. */
static const __DRIextension *swrast_loader_extensions[] = {
   &swrast_loader_extension.base,
   &image_lookup_extension.base,
   NULL,
};
/* Loader extension list for the kopper (Zink) path. */
static const __DRIextension *kopper_swrast_loader_extensions[] = {
   &kopper_swrast_loader_extension.base,
   &image_lookup_extension.base,
   &kopper_loader_extension.base,
   &use_invalidate.base,
   NULL,
};
2989 
/* Initialize an EGL display for software rendering (wl_shm) or Zink/kopper.
 *
 * Connects to the compositor (or wraps a caller-provided display), gathers
 * the needed globals, loads the driver, creates the DRI screen, and fills
 * in configs/extensions.  Returns EGL_FALSE on any failure; allocated
 * resources are torn down elsewhere (dri2_teardown_wayland) by the caller.
 */
static EGLBoolean
dri2_initialize_wayland_swrast(_EGLDisplay *disp)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);

   if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
      goto cleanup;

   /* Either connect ourselves or reuse the display the app passed in. */
   if (disp->PlatformDisplay == NULL) {
      dri2_dpy->wl_dpy = wl_display_connect(NULL);
      if (dri2_dpy->wl_dpy == NULL)
         goto cleanup;
      dri2_dpy->own_device = true;
   } else {
      dri2_dpy->wl_dpy = disp->PlatformDisplay;
   }

   /* Use a private event queue so EGL's protocol traffic doesn't interfere
    * with the application's own Wayland event handling. */
   dri2_dpy->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
                                                          "mesa egl swrast display queue");

   dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
   if (dri2_dpy->wl_dpy_wrapper == NULL)
      goto cleanup;

   wl_proxy_set_queue((struct wl_proxy *)dri2_dpy->wl_dpy_wrapper,
                      dri2_dpy->wl_queue);

   if (dri2_dpy->own_device)
      wl_display_dispatch_pending(dri2_dpy->wl_dpy);

   /* Zink also wants wl_drm/dmabuf globals; plain swrast only wl_shm. */
   dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
   if (disp->Options.Zink)
      wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_kopper,
                              dri2_dpy);
   else
      wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_swrast,
                              dri2_dpy);

   /* First roundtrip: collect globals; second: collect shm formats. */
   if (roundtrip(dri2_dpy) < 0 || dri2_dpy->wl_shm == NULL)
      goto cleanup;

   if (roundtrip(dri2_dpy) < 0 ||
       !BITSET_TEST_RANGE(dri2_dpy->formats.formats_bitmap, 0,
                          dri2_dpy->formats.num_formats))
      goto cleanup;

   if (disp->Options.Zink) {
      if (!dri2_initialize_wayland_drm_extensions(dri2_dpy) && !disp->Options.ForceSoftware)
         goto cleanup;

      if (!disp->Options.ForceSoftware) {
         loader_get_user_preferred_fd(&dri2_dpy->fd_render_gpu,
                                       &dri2_dpy->fd_display_gpu);

         if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
            free(dri2_dpy->device_name);
            dri2_dpy->device_name =
               loader_get_device_name_for_fd(dri2_dpy->fd_render_gpu);
            if (!dri2_dpy->device_name) {
               _eglError(EGL_BAD_ALLOC, "wayland-egl: failed to get device name "
                                          "for requested GPU");
               goto cleanup;
            }
         }

         /* we have to do the check now, because loader_get_user_preferred_fd
            * will return a render-node when the requested gpu is different
            * to the server, but also if the client asks for the same gpu than
            * the server by requesting its pci-id */
         dri2_dpy->is_render_node =
            drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER;
      }
   }

   dri2_dpy->driver_name = strdup(disp->Options.Zink ? "zink" : "swrast");
   if (!dri2_load_driver(disp))
      goto cleanup;

   dri2_dpy->loader_extensions = disp->Options.Zink ? kopper_swrast_loader_extensions : swrast_loader_extensions;

   if (!dri2_create_screen(disp))
      goto cleanup;

   if (!dri2_setup_device(disp, disp->Options.ForceSoftware)) {
      _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to setup EGLDevice");
      goto cleanup;
   }

   dri2_setup_screen(disp);

   dri2_wl_setup_swap_interval(disp);

   dri2_wl_add_configs_for_visuals(disp);

   /* WL_bind_wayland_display needs a real GPU fd and a buffer-sharing
    * protocol (wl_drm or dmabuf), so it's Zink-only here. */
   if (disp->Options.Zink && dri2_dpy->fd_render_gpu >= 0 &&
       (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm))
      dri2_set_WL_bind_wayland_display(disp);
   disp->Extensions.EXT_buffer_age = EGL_TRUE;
   disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;
   disp->Extensions.EXT_present_opaque = EGL_TRUE;

   /* Fill vtbl last to prevent accidentally calling virtual function during
    * initialization.
    */
   dri2_dpy->vtbl = disp->Options.Zink ? &dri2_wl_kopper_display_vtbl : &dri2_wl_swrast_display_vtbl;

   return EGL_TRUE;

cleanup:
   return EGL_FALSE;
}
3101 
3102 EGLBoolean
dri2_initialize_wayland(_EGLDisplay * disp)3103 dri2_initialize_wayland(_EGLDisplay *disp)
3104 {
3105    if (disp->Options.ForceSoftware || disp->Options.Zink)
3106       return dri2_initialize_wayland_swrast(disp);
3107    else
3108       return dri2_initialize_wayland_drm(disp);
3109 }
3110 
/* Release all Wayland-side resources held by the display.  Proxies are
 * destroyed before the display connection; the connection itself is only
 * closed when we opened it ourselves (own_device). */
void
dri2_teardown_wayland(struct dri2_egl_display *dri2_dpy)
{
   dri2_wl_formats_fini(&dri2_dpy->formats);
   if (dri2_dpy->wl_drm)
      wl_drm_destroy(dri2_dpy->wl_drm);
   if (dri2_dpy->wl_dmabuf)
      zwp_linux_dmabuf_v1_destroy(dri2_dpy->wl_dmabuf);
   if (dri2_dpy->wl_shm)
      wl_shm_destroy(dri2_dpy->wl_shm);
   if (dri2_dpy->wl_registry)
      wl_registry_destroy(dri2_dpy->wl_registry);
   if (dri2_dpy->wl_dpy_wrapper)
      wl_proxy_wrapper_destroy(dri2_dpy->wl_dpy_wrapper);
   if (dri2_dpy->wl_queue)
      wl_event_queue_destroy(dri2_dpy->wl_queue);

   if (dri2_dpy->own_device)
      wl_display_disconnect(dri2_dpy->wl_dpy);
}
3131