/*
 * Copyright © 2011-2012 Intel Corporation
 * Copyright © 2012 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Kristian Høgsberg <krh@bitplanet.net>
 *    Benjamin Franzke <benjaminfranzke@googlemail.com>
 */

#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
#include <sys/mman.h>
#include <vulkan/vulkan_core.h>
#include <vulkan/vulkan_wayland.h>

#include "util/anon_file.h"
#include "util/u_vector.h"
#include "util/format/u_formats.h"
#include "main/glconfig.h"
#include "pipe/p_screen.h"
#include "egl_dri2.h"
#include "eglglobals.h"
#include "kopper_interface.h"
#include "loader.h"
#include "loader_dri_helper.h"
#include "dri_screen.h"
#include "dri_util.h"
#include <loader_wayland_helper.h>

#include "linux-dmabuf-unstable-v1-client-protocol.h"
#include "wayland-drm-client-protocol.h"
#include <wayland-client.h>
#include <wayland-egl-backend.h>

/*
 * The index of an entry in this table is used as its bit position in
 * dri2_dpy->formats.formats_bitmap, which tracks the formats supported
 * by our server.
 */
static const struct dri2_wl_visual {
   uint32_t wl_drm_format;
   int pipe_format;
   /* alt_pipe_format is a substitute wl_buffer format to use when the
    * wl server does not support pipe_format, i.e. some other pipe_format in
    * the table of the same precision but with different channel ordering, or
    * PIPE_FORMAT_NONE if an alternate format is not needed or supported.
    * The code checks whether alt_pipe_format can be used as a fallback for a
    * pipe_format for a given wl-server implementation.
    */
   int alt_pipe_format;
   int opaque_wl_drm_format;
} dri2_wl_visuals[] = {
   {
      WL_DRM_FORMAT_ABGR16F,
      PIPE_FORMAT_R16G16B16A16_FLOAT,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XBGR16F,
   },
   {
      WL_DRM_FORMAT_XBGR16F,
      PIPE_FORMAT_R16G16B16X16_FLOAT,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XBGR16F,
   },
   {
      WL_DRM_FORMAT_XRGB2101010,
      PIPE_FORMAT_B10G10R10X2_UNORM,
      PIPE_FORMAT_R10G10B10X2_UNORM,
      WL_DRM_FORMAT_XRGB2101010,
   },
   {
      WL_DRM_FORMAT_ARGB2101010,
      PIPE_FORMAT_B10G10R10A2_UNORM,
      PIPE_FORMAT_R10G10B10A2_UNORM,
      WL_DRM_FORMAT_XRGB2101010,
   },
   {
      WL_DRM_FORMAT_XBGR2101010,
      PIPE_FORMAT_R10G10B10X2_UNORM,
      PIPE_FORMAT_B10G10R10X2_UNORM,
      WL_DRM_FORMAT_XBGR2101010,
   },
   {
      WL_DRM_FORMAT_ABGR2101010,
      PIPE_FORMAT_R10G10B10A2_UNORM,
      PIPE_FORMAT_B10G10R10A2_UNORM,
      WL_DRM_FORMAT_XBGR2101010,
   },
   {
      WL_DRM_FORMAT_XRGB8888,
      PIPE_FORMAT_BGRX8888_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XRGB8888,
   },
   {
      WL_DRM_FORMAT_ARGB8888,
      PIPE_FORMAT_BGRA8888_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XRGB8888,
   },
   {
      WL_DRM_FORMAT_ABGR8888,
      PIPE_FORMAT_RGBA8888_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XBGR8888,
   },
   {
      WL_DRM_FORMAT_XBGR8888,
      PIPE_FORMAT_RGBX8888_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XBGR8888,
   },
   {
      WL_DRM_FORMAT_RGB565,
      PIPE_FORMAT_B5G6R5_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_RGB565,
   },
   {
      WL_DRM_FORMAT_ARGB1555,
      PIPE_FORMAT_B5G5R5A1_UNORM,
      PIPE_FORMAT_R5G5B5A1_UNORM,
      WL_DRM_FORMAT_XRGB1555,
   },
   {
      WL_DRM_FORMAT_XRGB1555,
      PIPE_FORMAT_B5G5R5X1_UNORM,
      PIPE_FORMAT_R5G5B5X1_UNORM,
      WL_DRM_FORMAT_XRGB1555,
   },
   {
      WL_DRM_FORMAT_ARGB4444,
      PIPE_FORMAT_B4G4R4A4_UNORM,
      PIPE_FORMAT_R4G4B4A4_UNORM,
      WL_DRM_FORMAT_XRGB4444,
   },
   {
      WL_DRM_FORMAT_XRGB4444,
      PIPE_FORMAT_B4G4R4X4_UNORM,
      PIPE_FORMAT_R4G4B4X4_UNORM,
      WL_DRM_FORMAT_XRGB4444,
   },
};

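/* Lookup helpers for the table above. Each maps a format in one namespace
 * (pipe format, dri_config, wl_drm/DRM fourcc, wl_shm) to its index in
 * dri2_wl_visuals[], returning -1 when the format is not in the table.
 * An illustrative use, as seen later in this file:
 *
 *    int idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
 *    if (idx >= 0 && BITSET_TEST(dri2_dpy->formats.formats_bitmap, idx))
 *       ... the server supports the surface's format ...
 */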
static int
dri2_wl_visual_idx_from_pipe_format(enum pipe_format pipe_format)
{
   if (util_format_is_srgb(pipe_format))
      pipe_format = util_format_linear(pipe_format);

   for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
      if (dri2_wl_visuals[i].pipe_format == pipe_format)
         return i;
   }

   return -1;
}

static int
dri2_wl_visual_idx_from_config(const struct dri_config *config)
{
   struct gl_config *gl_config = (struct gl_config *) config;

   return dri2_wl_visual_idx_from_pipe_format(gl_config->color_format);
}

static int
dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)
{
   for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
      /* wl_drm format codes overlap with DRIImage FourCC codes for all formats
       * we support. */
      if (dri2_wl_visuals[i].wl_drm_format == fourcc)
         return i;
   }

   return -1;
}

static int
dri2_wl_shm_format_from_visual_idx(int idx)
{
   uint32_t fourcc = dri2_wl_visuals[idx].wl_drm_format;

   if (fourcc == WL_DRM_FORMAT_ARGB8888)
      return WL_SHM_FORMAT_ARGB8888;
   else if (fourcc == WL_DRM_FORMAT_XRGB8888)
      return WL_SHM_FORMAT_XRGB8888;
   else
      return fourcc;
}

static int
dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)
{
   uint32_t fourcc;

   if (shm_format == WL_SHM_FORMAT_ARGB8888)
      fourcc = WL_DRM_FORMAT_ARGB8888;
   else if (shm_format == WL_SHM_FORMAT_XRGB8888)
      fourcc = WL_DRM_FORMAT_XRGB8888;
   else
      fourcc = shm_format;

   return dri2_wl_visual_idx_from_fourcc(fourcc);
}

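/* Report whether a wl_drm/fourcc format is usable on this display, i.e. it
 * maps to an entry in dri2_wl_visuals[] and at least one of the driver's
 * configs renders in that visual. */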
bool
dri2_wl_is_format_supported(void *user_data, uint32_t format)
{
   _EGLDisplay *disp = (_EGLDisplay *)user_data;
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
   int j = dri2_wl_visual_idx_from_fourcc(format);

   if (j == -1)
      return false;

   for (int i = 0; dri2_dpy->driver_configs[i]; i++)
      if (j == dri2_wl_visual_idx_from_config(dri2_dpy->driver_configs[i]))
         return true;

   return false;
}

static bool
server_supports_format(struct dri2_wl_formats *formats, int idx)
{
   return idx >= 0 && BITSET_TEST(formats->formats_bitmap, idx);
}

static bool
server_supports_pipe_format(struct dri2_wl_formats *formats,
                            enum pipe_format format)
{
   return server_supports_format(formats,
                                 dri2_wl_visual_idx_from_pipe_format(format));
}

static bool
server_supports_fourcc(struct dri2_wl_formats *formats, uint32_t fourcc)
{
   return server_supports_format(formats, dri2_wl_visual_idx_from_fourcc(fourcc));
}

static int
roundtrip(struct dri2_egl_display *dri2_dpy)
{
   return wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_dpy->wl_queue);
}

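/* wl_buffer.release handler: the compositor is done with the buffer. Mark the
 * matching color buffer as unlocked so it can be reused or, if a deferred
 * destroy was requested while it was still locked (wl_release), destroy the
 * wl_buffer now. */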
static void
wl_buffer_release(void *data, struct wl_buffer *buffer)
{
   struct dri2_egl_surface *dri2_surf = data;
   int i;

   for (i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); ++i)
      if (dri2_surf->color_buffers[i].wl_buffer == buffer)
         break;

   assert(i < ARRAY_SIZE(dri2_surf->color_buffers));

   if (dri2_surf->color_buffers[i].wl_release) {
      wl_buffer_destroy(buffer);
      dri2_surf->color_buffers[i].wl_release = false;
      dri2_surf->color_buffers[i].wl_buffer = NULL;
      dri2_surf->color_buffers[i].age = 0;
   }

   dri2_surf->color_buffers[i].locked = false;
}

static const struct wl_buffer_listener wl_buffer_listener = {
   .release = wl_buffer_release,
};

static void
dri2_wl_formats_fini(struct dri2_wl_formats *formats)
{
   unsigned int i;

   for (i = 0; i < formats->num_formats; i++)
      u_vector_finish(&formats->modifiers[i]);

   free(formats->modifiers);
   free(formats->formats_bitmap);
}

static int
dri2_wl_formats_init(struct dri2_wl_formats *formats)
{
   unsigned int i, j;

   /* formats->formats_bitmap tells us whether a format in dri2_wl_visuals is
    * present or not, so we must compute the number of bitset words needed to
    * represent all the formats of dri2_wl_visuals. We use BITSET_WORDS for
    * this task. */
   formats->num_formats = ARRAY_SIZE(dri2_wl_visuals);
   formats->formats_bitmap = calloc(BITSET_WORDS(formats->num_formats),
                                    sizeof(*formats->formats_bitmap));
   if (!formats->formats_bitmap)
      goto err;

   /* Here we have an array of u_vectors to store the modifiers supported by
    * each format in the bitmask. */
   formats->modifiers =
      calloc(formats->num_formats, sizeof(*formats->modifiers));
   if (!formats->modifiers)
      goto err_modifier;

   for (i = 0; i < formats->num_formats; i++)
      if (!u_vector_init_pow2(&formats->modifiers[i], 4, sizeof(uint64_t))) {
         j = i;
         goto err_vector_init;
      }

   return 0;

err_vector_init:
   for (i = 0; i < j; i++)
      u_vector_finish(&formats->modifiers[i]);
   free(formats->modifiers);
err_modifier:
   free(formats->formats_bitmap);
err:
   _eglError(EGL_BAD_ALLOC, "dri2_wl_formats_init");
   return -1;
}

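/* dma-buf feedback (zwp_linux_dmabuf_feedback_v1) bookkeeping. The compositor
 * shares a format/modifier table through a file descriptor that we mmap
 * read-only, then sends tranches of indices into that table in decreasing
 * order of preference. */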
static void
dmabuf_feedback_format_table_fini(
   struct dmabuf_feedback_format_table *format_table)
{
   if (format_table->data && format_table->data != MAP_FAILED)
      munmap(format_table->data, format_table->size);
}

static void
dmabuf_feedback_format_table_init(
   struct dmabuf_feedback_format_table *format_table)
{
   memset(format_table, 0, sizeof(*format_table));
}

static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
{
   dri2_wl_formats_fini(&tranche->formats);
}

static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
{
   memset(tranche, 0, sizeof(*tranche));

   if (dri2_wl_formats_init(&tranche->formats) < 0)
      return -1;

   return 0;
}

static void
dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
{
   dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);

   util_dynarray_foreach (&dmabuf_feedback->tranches,
                          struct dmabuf_feedback_tranche, tranche)
      dmabuf_feedback_tranche_fini(tranche);
   util_dynarray_fini(&dmabuf_feedback->tranches);

   dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
}

static int
dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
{
   memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));

   if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
      return -1;

   util_dynarray_init(&dmabuf_feedback->tranches, NULL);

   dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);

   return 0;
}

static void
resize_callback(struct wl_egl_window *wl_win, void *data)
{
   struct dri2_egl_surface *dri2_surf = data;

   if (dri2_surf->base.Width == wl_win->width &&
       dri2_surf->base.Height == wl_win->height)
      return;

   dri2_surf->resized = true;

   /* Update the surface size as soon as the native window is resized; from
    * the user's point of view, the resize then appears to take effect
    * immediately after the native window resize, without waiting for the
    * first draw.
    *
    * A more detailed and lengthy explanation can be found at
    * https://lists.freedesktop.org/archives/mesa-dev/2018-June/196474.html
    */
   if (!dri2_surf->back) {
      dri2_surf->base.Width = wl_win->width;
      dri2_surf->base.Height = wl_win->height;
   }
   dri_invalidate_drawable(dri2_surf->dri_drawable);
}

static void
destroy_window_callback(void *data)
{
   struct dri2_egl_surface *dri2_surf = data;
   dri2_surf->wl_win = NULL;
}

static struct wl_surface *
get_wl_surface_proxy(struct wl_egl_window *window)
{
   /* Version 3 of wl_egl_window introduced a version field at the same
    * location where a pointer to wl_surface was stored. Thus, if
    * window->version is dereferenceable, we've been given an older version of
    * wl_egl_window, and window->version points to wl_surface */
   if (_eglPointerIsDereferenceable((void *)(window->version))) {
      return wl_proxy_create_wrapper((void *)(window->version));
   }
   return wl_proxy_create_wrapper(window->surface);
}

static void
surface_dmabuf_feedback_format_table(
   void *data,
   struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
   int32_t fd, uint32_t size)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   feedback->format_table.size = size;
   feedback->format_table.data =
      mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);

   close(fd);
}

static void
surface_dmabuf_feedback_main_device(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *device)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));

   /* Compositors may support switching render devices and change the main
    * device of the dma-buf feedback. In this case, when we reallocate the
    * buffers of the surface we must ensure that they are not allocated in
    * memory that is only visible to the GPU that EGL is using, as the
    * compositor will have to import them to the render device it is using.
    *
    * TODO: we still don't know how to allocate such buffers.
    */
   if (dri2_surf->dmabuf_feedback.main_device != 0 &&
       (feedback->main_device != dri2_surf->dmabuf_feedback.main_device))
      dri2_surf->compositor_using_another_device = true;
   else
      dri2_surf->compositor_using_another_device = false;
}

static void
surface_dmabuf_feedback_tranche_target_device(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *device)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   memcpy(&feedback->pending_tranche.target_device, device->data,
          sizeof(feedback->pending_tranche.target_device));
}

static void
surface_dmabuf_feedback_tranche_flags(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   uint32_t flags)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   feedback->pending_tranche.flags = flags;
}

static void
surface_dmabuf_feedback_tranche_formats(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *indices)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
   uint32_t present_format = dri2_surf->format;
   uint64_t *modifier_ptr, modifier;
   uint32_t format;
   uint16_t *index;
   int visual_idx;

   if (dri2_surf->base.PresentOpaque) {
      visual_idx = dri2_wl_visual_idx_from_fourcc(present_format);
      if (visual_idx != -1)
         present_format = dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
   }

   /* The compositor may or may not advertise a format table. If it does, we
    * use it. Otherwise, we steal the most recently advertised format table.
    * If there is no previously advertised format table either, the compositor
    * did something wrong. */
   if (feedback->format_table.data == NULL) {
      feedback->format_table = dri2_surf->dmabuf_feedback.format_table;
      dmabuf_feedback_format_table_init(
         &dri2_surf->dmabuf_feedback.format_table);
   }
   if (feedback->format_table.data == MAP_FAILED) {
      _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
                            "so we won't be able to use this batch of dma-buf "
                            "feedback events.");
      return;
   }
   if (feedback->format_table.data == NULL) {
      _eglLog(_EGL_WARNING,
              "wayland-egl: compositor didn't advertise a format "
              "table, so we won't be able to use this batch of dma-buf "
              "feedback events.");
      return;
   }

   wl_array_for_each (index, indices) {
      format = feedback->format_table.data[*index].format;
      modifier = feedback->format_table.data[*index].modifier;

      /* Skip formats that are not the one the surface is already using. We
       * can't switch to another format. */
      if (format != present_format)
         continue;

      /* We are sure that the format is supported because of the check above. */
      visual_idx = dri2_wl_visual_idx_from_fourcc(format);
      assert(visual_idx != -1);

      BITSET_SET(feedback->pending_tranche.formats.formats_bitmap, visual_idx);
      modifier_ptr =
         u_vector_add(&feedback->pending_tranche.formats.modifiers[visual_idx]);
      if (modifier_ptr)
         *modifier_ptr = modifier;
   }
}

static void
surface_dmabuf_feedback_tranche_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   /* Add tranche to array of tranches. */
   util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
                        feedback->pending_tranche);

   dmabuf_feedback_tranche_init(&feedback->pending_tranche);
}

static void
surface_dmabuf_feedback_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct dri2_egl_surface *dri2_surf = data;

   /* The dma-buf feedback protocol states that surface dma-buf feedback should
    * be sent by the compositor only if its buffers are using a suboptimal pair
    * of format and modifier. We can't change the buffer format, but we can
    * reallocate with another modifier. So we raise this flag in order to force
    * buffer reallocation based on the dma-buf feedback sent. */
   dri2_surf->received_dmabuf_feedback = true;

   dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
   dri2_surf->dmabuf_feedback = dri2_surf->pending_dmabuf_feedback;
   dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback);
}

static const struct zwp_linux_dmabuf_feedback_v1_listener
   surface_dmabuf_feedback_listener = {
      .format_table = surface_dmabuf_feedback_format_table,
      .main_device = surface_dmabuf_feedback_main_device,
      .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
      .tranche_flags = surface_dmabuf_feedback_tranche_flags,
      .tranche_formats = surface_dmabuf_feedback_tranche_formats,
      .tranche_done = surface_dmabuf_feedback_tranche_done,
      .done = surface_dmabuf_feedback_done,
};

static bool
dri2_wl_modifiers_have_common(struct u_vector *modifiers1,
                              struct u_vector *modifiers2)
{
   uint64_t *mod1, *mod2;

   /* If both modifier vectors are empty, assume there is a compatible
    * implicit modifier. */
   if (u_vector_length(modifiers1) == 0 && u_vector_length(modifiers2) == 0)
       return true;

   u_vector_foreach(mod1, modifiers1)
   {
      u_vector_foreach(mod2, modifiers2)
      {
         if (*mod1 == *mod2)
            return true;
      }
   }

   return false;
}

/**
 * Called via eglCreateWindowSurface(), drv->CreateWindowSurface().
 */
static _EGLSurface *
dri2_wl_create_window_surface(_EGLDisplay *disp, _EGLConfig *conf,
                              void *native_window, const EGLint *attrib_list)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
   struct dri2_egl_config *dri2_conf = dri2_egl_config(conf);
   struct wl_egl_window *window = native_window;
   struct dri2_egl_surface *dri2_surf;
   struct zwp_linux_dmabuf_v1 *dmabuf_wrapper;
   int visual_idx;
   const struct dri_config *config;

   if (!window) {
      _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_create_surface");
      return NULL;
   }

   if (window->driver_private) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      return NULL;
   }

   dri2_surf = calloc(1, sizeof *dri2_surf);
   if (!dri2_surf) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      return NULL;
   }

   if (!dri2_init_surface(&dri2_surf->base, disp, EGL_WINDOW_BIT, conf,
                          attrib_list, false, native_window))
      goto cleanup_surf;

   config = dri2_get_dri_config(dri2_conf, EGL_WINDOW_BIT,
                                dri2_surf->base.GLColorspace);

   if (!config) {
      _eglError(EGL_BAD_MATCH,
                "Unsupported surfacetype/colorspace configuration");
      goto cleanup_surf;
   }

   dri2_surf->base.Width = window->width;
   dri2_surf->base.Height = window->height;

   visual_idx = dri2_wl_visual_idx_from_config(config);
   assert(visual_idx != -1);
   assert(dri2_wl_visuals[visual_idx].pipe_format != PIPE_FORMAT_NONE);

   if (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm) {
      dri2_surf->format = dri2_wl_visuals[visual_idx].wl_drm_format;
   } else {
      assert(dri2_dpy->wl_shm);
      dri2_surf->format = dri2_wl_shm_format_from_visual_idx(visual_idx);
   }

   if (dri2_surf->base.PresentOpaque) {
      uint32_t opaque_fourcc =
         dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
      int opaque_visual_idx = dri2_wl_visual_idx_from_fourcc(opaque_fourcc);

      if (!server_supports_format(&dri2_dpy->formats, opaque_visual_idx) ||
          !dri2_wl_modifiers_have_common(
               &dri2_dpy->formats.modifiers[visual_idx],
               &dri2_dpy->formats.modifiers[opaque_visual_idx])) {
         _eglError(EGL_BAD_MATCH, "Unsupported opaque format");
         goto cleanup_surf;
      }
   }

   dri2_surf->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
                                                           "mesa egl surface queue");
   if (!dri2_surf->wl_queue) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_surf;
   }

   if (dri2_dpy->wl_drm) {
      dri2_surf->wl_drm_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_drm);
      if (!dri2_surf->wl_drm_wrapper) {
         _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
         goto cleanup_queue;
      }
      wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_drm_wrapper,
                         dri2_surf->wl_queue);
   }

   dri2_surf->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
   if (!dri2_surf->wl_dpy_wrapper) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_drm;
   }
   wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_dpy_wrapper,
                      dri2_surf->wl_queue);

   dri2_surf->wl_surface_wrapper = get_wl_surface_proxy(window);
   if (!dri2_surf->wl_surface_wrapper) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_dpy_wrapper;
   }
   wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_surface_wrapper,
                      dri2_surf->wl_queue);

   if (dri2_dpy->wl_dmabuf &&
       zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
          ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
      dmabuf_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dmabuf);
      if (!dmabuf_wrapper) {
         _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
         goto cleanup_surf_wrapper;
      }
      wl_proxy_set_queue((struct wl_proxy *)dmabuf_wrapper,
                         dri2_surf->wl_queue);
      dri2_surf->wl_dmabuf_feedback = zwp_linux_dmabuf_v1_get_surface_feedback(
         dmabuf_wrapper, dri2_surf->wl_surface_wrapper);
      wl_proxy_wrapper_destroy(dmabuf_wrapper);

      zwp_linux_dmabuf_feedback_v1_add_listener(
         dri2_surf->wl_dmabuf_feedback, &surface_dmabuf_feedback_listener,
         dri2_surf);

      if (dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback) < 0) {
         zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
         goto cleanup_surf_wrapper;
      }
      if (dmabuf_feedback_init(&dri2_surf->dmabuf_feedback) < 0) {
         dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
         zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
         goto cleanup_surf_wrapper;
      }

      if (roundtrip(dri2_dpy) < 0)
         goto cleanup_dmabuf_feedback;
   }

   dri2_surf->wl_win = window;
   dri2_surf->wl_win->driver_private = dri2_surf;
   dri2_surf->wl_win->destroy_window_callback = destroy_window_callback;
   if (!dri2_dpy->swrast_not_kms)
      dri2_surf->wl_win->resize_callback = resize_callback;

   if (!dri2_create_drawable(dri2_dpy, config, dri2_surf, dri2_surf))
      goto cleanup_dmabuf_feedback;

   dri2_surf->base.SwapInterval = dri2_dpy->default_swap_interval;

   return &dri2_surf->base;

cleanup_dmabuf_feedback:
   if (dri2_surf->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
   }
cleanup_surf_wrapper:
   wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
cleanup_dpy_wrapper:
   wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
cleanup_drm:
   if (dri2_surf->wl_drm_wrapper)
      wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
cleanup_queue:
   wl_event_queue_destroy(dri2_surf->wl_queue);
cleanup_surf:
   free(dri2_surf);

   return NULL;
}

static _EGLSurface *
dri2_wl_create_pixmap_surface(_EGLDisplay *disp, _EGLConfig *conf,
                              void *native_window, const EGLint *attrib_list)
{
   /* From the EGL_EXT_platform_wayland spec, version 3:
    *
    *   It is not valid to call eglCreatePlatformPixmapSurfaceEXT with a <dpy>
    *   that belongs to Wayland. Any such call fails and generates
    *   EGL_BAD_PARAMETER.
    */
   _eglError(EGL_BAD_PARAMETER, "cannot create EGL pixmap surfaces on "
                                "Wayland");
   return NULL;
}

/**
 * Called via eglDestroySurface(), drv->DestroySurface().
 */
static EGLBoolean
dri2_wl_destroy_surface(_EGLDisplay *disp, _EGLSurface *surf)
{
   struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);

   driDestroyDrawable(dri2_surf->dri_drawable);

   for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
      if (dri2_surf->color_buffers[i].wl_buffer)
         wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
      if (dri2_surf->color_buffers[i].dri_image)
         dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
      if (dri2_surf->color_buffers[i].linear_copy)
         dri2_destroy_image(dri2_surf->color_buffers[i].linear_copy);
      if (dri2_surf->color_buffers[i].data)
         munmap(dri2_surf->color_buffers[i].data,
                dri2_surf->color_buffers[i].data_size);
   }

   if (dri2_surf->throttle_callback)
      wl_callback_destroy(dri2_surf->throttle_callback);

   if (dri2_surf->wl_win) {
      dri2_surf->wl_win->driver_private = NULL;
      dri2_surf->wl_win->resize_callback = NULL;
      dri2_surf->wl_win->destroy_window_callback = NULL;
   }

   wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
   wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
   if (dri2_surf->wl_drm_wrapper)
      wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
   if (dri2_surf->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
   }
   wl_event_queue_destroy(dri2_surf->wl_queue);

   dri2_fini_surface(surf);
   free(surf);

   return EGL_TRUE;
}

static EGLBoolean
dri2_wl_swap_interval(_EGLDisplay *disp, _EGLSurface *surf, EGLint interval)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
   struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);

   if (dri2_dpy->kopper)
      kopperSetSwapInterval(dri2_surf->dri_drawable, interval);

   return EGL_TRUE;
}

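/* Drop all buffers attached to the surface, e.g. on resize or when new
 * dma-buf feedback arrives: wl_buffers still held by the compositor are
 * flagged for destruction in the release handler, the rest are destroyed
 * immediately along with their dri images and shm mappings. */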
static void
dri2_wl_release_buffers(struct dri2_egl_surface *dri2_surf)
{
   for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
      if (dri2_surf->color_buffers[i].wl_buffer) {
         if (dri2_surf->color_buffers[i].locked) {
            dri2_surf->color_buffers[i].wl_release = true;
         } else {
            wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
            dri2_surf->color_buffers[i].wl_buffer = NULL;
         }
      }
      if (dri2_surf->color_buffers[i].dri_image)
         dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
      if (dri2_surf->color_buffers[i].linear_copy)
         dri2_destroy_image(dri2_surf->color_buffers[i].linear_copy);
      if (dri2_surf->color_buffers[i].data)
         munmap(dri2_surf->color_buffers[i].data,
                dri2_surf->color_buffers[i].data_size);

      dri2_surf->color_buffers[i].dri_image = NULL;
      dri2_surf->color_buffers[i].linear_copy = NULL;
      dri2_surf->color_buffers[i].data = NULL;
      dri2_surf->color_buffers[i].age = 0;
   }
}

/* Return list of modifiers that should be used to restrict the list of
 * modifiers actually supported by the surface. As of now, it is only used
 * to get the set of modifiers used for fixed-rate compression. */
static uint64_t *
get_surface_specific_modifiers(struct dri2_egl_surface *dri2_surf,
                               int *modifiers_count)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);
   int rate = dri2_surf->base.CompressionRate;
   uint64_t *modifiers;

   if (rate == EGL_SURFACE_COMPRESSION_FIXED_RATE_NONE_EXT ||
       !dri2_surf->wl_win)
      return NULL;

   if (!dri2_query_compression_modifiers(
          dri2_dpy->dri_screen_render_gpu, dri2_surf->format, rate,
          0, NULL, modifiers_count))
      return NULL;

   modifiers = malloc(*modifiers_count * sizeof(uint64_t));
   if (!modifiers)
      return NULL;

   if (!dri2_query_compression_modifiers(
          dri2_dpy->dri_screen_render_gpu, dri2_surf->format, rate,
          *modifiers_count, modifiers, modifiers_count)) {
      free(modifiers);
      return NULL;
   }

   return modifiers;
}

static void
update_surface(struct dri2_egl_surface *dri2_surf, struct dri_image *dri_img)
{
   int compression_rate;

   if (!dri_img)
      return;

   /* Update the surface with the actual compression rate */
   dri2_query_image(dri_img, __DRI_IMAGE_ATTRIB_COMPRESSION_RATE,
                    &compression_rate);
   dri2_surf->base.CompressionRate = compression_rate;
}

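/* Store in 'subset' every modifier that appears both in 'set' and in the
 * other_modifiers array. The caller owns 'subset' and must u_vector_finish()
 * it; returns false only if the vector allocation fails. */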
static bool
intersect_modifiers(struct u_vector *subset, struct u_vector *set,
                    uint64_t *other_modifiers, int other_modifiers_count)
{
   if (!u_vector_init_pow2(subset, 4, sizeof(uint64_t)))
      return false;

   uint64_t *modifier_ptr, *mod;
   u_vector_foreach(mod, set) {
      for (int i = 0; i < other_modifiers_count; ++i) {
         if (other_modifiers[i] != *mod)
            continue;
         modifier_ptr = u_vector_add(subset);
         if (modifier_ptr)
            *modifier_ptr = *mod;
      }
   }

   return true;
}

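/* Allocate dri2_surf->back->dri_image for the surface, restricted to
 * modifiers advertised by the server ('formats'), optionally intersected with
 * the surface-specific (fixed-rate compression) modifiers. For
 * EGL_EXT_present_opaque surfaces the modifier set of the matching opaque
 * format is used instead. */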
static void
create_dri_image(struct dri2_egl_surface *dri2_surf,
                 enum pipe_format pipe_format, uint32_t use_flags,
                 uint64_t *surf_modifiers, int surf_modifiers_count,
                 struct dri2_wl_formats *formats)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);
   int visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
   struct u_vector modifiers_subset;
   struct u_vector modifiers_subset_opaque;
   uint64_t *modifiers;
   unsigned int num_modifiers;
   struct u_vector *modifiers_present;
   bool implicit_mod_supported;

   assert(visual_idx != -1);

   if (dri2_surf->base.PresentOpaque) {
      uint32_t opaque_fourcc =
            dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
      int opaque_visual_idx = dri2_wl_visual_idx_from_fourcc(opaque_fourcc);
      struct u_vector *modifiers_dpy = &dri2_dpy->formats.modifiers[visual_idx];
      /* Surface creation would have failed if we didn't support the matching
       * opaque format. */
      assert(opaque_visual_idx != -1);

      if (!BITSET_TEST(formats->formats_bitmap, opaque_visual_idx))
         return;

      if (!intersect_modifiers(&modifiers_subset_opaque,
                               &formats->modifiers[opaque_visual_idx],
                               u_vector_tail(modifiers_dpy),
                               u_vector_length(modifiers_dpy)))
         return;

      modifiers_present = &modifiers_subset_opaque;
   } else {
      if (!BITSET_TEST(formats->formats_bitmap, visual_idx))
         return;
      modifiers_present = &formats->modifiers[visual_idx];
   }

   if (surf_modifiers_count > 0) {
      if (!intersect_modifiers(&modifiers_subset, modifiers_present,
                               surf_modifiers, surf_modifiers_count))
         goto cleanup_present;
      modifiers = u_vector_tail(&modifiers_subset);
      num_modifiers = u_vector_length(&modifiers_subset);
   } else {
      modifiers = u_vector_tail(modifiers_present);
      num_modifiers = u_vector_length(modifiers_present);
   }

   if (!dri2_dpy->dri_screen_render_gpu->base.screen->resource_create_with_modifiers &&
       dri2_dpy->wl_dmabuf) {
      /* We don't support explicit modifiers, check if the compositor supports
       * implicit modifiers. */
      implicit_mod_supported = false;
      for (unsigned int i = 0; i < num_modifiers; i++) {
         if (modifiers[i] == DRM_FORMAT_MOD_INVALID) {
            implicit_mod_supported = true;
            break;
         }
      }

      if (!implicit_mod_supported) {
         return;
      }

      num_modifiers = 0;
      modifiers = NULL;
   }

   /* For the purposes of this function, an INVALID modifier on
    * its own means the modifiers aren't supported. */
   if (num_modifiers == 0 ||
       (num_modifiers == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID)) {
      num_modifiers = 0;
      modifiers = NULL;
   }

   dri2_surf->back->dri_image = dri_create_image_with_modifiers(
      dri2_dpy->dri_screen_render_gpu, dri2_surf->base.Width,
      dri2_surf->base.Height, pipe_format,
      (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) ? 0 : use_flags,
      modifiers, num_modifiers, NULL);

   if (surf_modifiers_count > 0) {
      u_vector_finish(&modifiers_subset);
      update_surface(dri2_surf, dri2_surf->back->dri_image);
   }

cleanup_present:
   if (modifiers_present == &modifiers_subset_opaque)
      u_vector_finish(&modifiers_subset_opaque);
}

static void
create_dri_image_from_dmabuf_feedback(struct dri2_egl_surface *dri2_surf,
                                      enum pipe_format pipe_format,
                                      uint32_t use_flags,
                                      uint64_t *surf_modifiers,
                                      int surf_modifiers_count)
{
   uint32_t flags;

   /* We don't have valid dma-buf feedback, so return */
   if (dri2_surf->dmabuf_feedback.main_device == 0)
      return;

   /* Iterates through the dma-buf feedback to pick a new set of modifiers. The
    * tranches are sent in descending order of preference by the compositor, so
    * the first set that we can pick is the best one. For now we still can't
    * specify the target device in order to make the render device try its best
    * to allocate memory that can be directly scanned out by the KMS device. But
    * in the future this may change (newer versions of
    * createImageWithModifiers). Also, we are safe to pick modifiers from
    * tranches whose target device differs from the main device, as compositors
    * do not expose (in dma-buf feedback tranches) formats/modifiers that are
    * incompatible with the main device. */
   util_dynarray_foreach (&dri2_surf->dmabuf_feedback.tranches,
                          struct dmabuf_feedback_tranche, tranche) {
      flags = use_flags;
      if (tranche->flags & ZWP_LINUX_DMABUF_FEEDBACK_V1_TRANCHE_FLAGS_SCANOUT)
         flags |= __DRI_IMAGE_USE_SCANOUT;

      create_dri_image(dri2_surf, pipe_format, flags, surf_modifiers,
                       surf_modifiers_count, &tranche->formats);

      if (dri2_surf->back->dri_image)
         return;
   }
}

static void
create_dri_image_from_formats(struct dri2_egl_surface *dri2_surf,
                              enum pipe_format pipe_format, uint32_t use_flags,
                              uint64_t *surf_modifiers,
                              int surf_modifiers_count)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);
   create_dri_image(dri2_surf, pipe_format, use_flags, surf_modifiers,
                    surf_modifiers_count, &dri2_dpy->formats);
}

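/* Pick or allocate the next back buffer: reuse the unlocked color buffer with
 * the lowest non-zero age (preferring ones that already have a dri_image),
 * blocking on compositor roundtrips until a buffer is released, then lazily
 * create its dri_image. When rendering and display happen on different GPUs,
 * a linear copy that both devices can access is allocated as well. */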
static int
get_back_bo(struct dri2_egl_surface *dri2_surf)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);
   int use_flags;
   int visual_idx;
   unsigned int pipe_format;
   unsigned int linear_pipe_format;

   visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
   assert(visual_idx != -1);
   pipe_format = dri2_wl_visuals[visual_idx].pipe_format;
   linear_pipe_format = pipe_format;

   /* Substitute dri image format if server does not support original format */
   if (!BITSET_TEST(dri2_dpy->formats.formats_bitmap, visual_idx))
      linear_pipe_format = dri2_wl_visuals[visual_idx].alt_pipe_format;

   /* These asserts hold, as long as dri2_wl_visuals[] is self-consistent and
    * the PRIME substitution logic in dri2_wl_add_configs_for_visuals() is free
    * of bugs.
    */
   assert(linear_pipe_format != PIPE_FORMAT_NONE);
   assert(BITSET_TEST(
      dri2_dpy->formats.formats_bitmap,
      dri2_wl_visual_idx_from_pipe_format(linear_pipe_format)));

   /* There might be a buffer release already queued that wasn't processed */
   wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);

   while (dri2_surf->back == NULL) {
      for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
         /* Get an unlocked buffer, preferably one with a dri_buffer
          * already allocated and with minimum age.
          */
         if (dri2_surf->color_buffers[i].locked)
            continue;

         if (!dri2_surf->back || !dri2_surf->back->dri_image ||
             (dri2_surf->color_buffers[i].age > 0 &&
              dri2_surf->color_buffers[i].age < dri2_surf->back->age))
            dri2_surf->back = &dri2_surf->color_buffers[i];
      }

      if (dri2_surf->back)
         break;

      /* If we don't have a buffer, then block on the server to release one for
       * us, and try again. wl_display_dispatch_queue will process any pending
       * events, however not all servers flush on issuing a buffer release
       * event. So, we spam the server with roundtrips as they always cause a
       * client flush.
       */
      if (wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_surf->wl_queue) < 0)
         return -1;
   }

   if (dri2_surf->back == NULL)
      return -1;

   use_flags = __DRI_IMAGE_USE_SHARE | __DRI_IMAGE_USE_BACKBUFFER;

   if (dri2_surf->base.ProtectedContent) {
      /* Protected buffers can't be read from another GPU */
      if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
         return -1;
      use_flags |= __DRI_IMAGE_USE_PROTECTED;
   }

   if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu &&
       dri2_surf->back->linear_copy == NULL) {
      uint64_t linear_mod = DRM_FORMAT_MOD_LINEAR;
      const uint64_t *render_modifiers = NULL, *display_modifiers = NULL;
      unsigned int render_num_modifiers = 0, display_num_modifiers = 0;
      struct dri_image *linear_copy_display_gpu_image = NULL;

      if (dri2_dpy->dri_screen_render_gpu->base.screen->resource_create_with_modifiers) {
         render_modifiers = &linear_mod;
         render_num_modifiers = 1;
      }
      if (dri2_dpy->dri_screen_display_gpu->base.screen->resource_create_with_modifiers) {
         display_modifiers = &linear_mod;
         display_num_modifiers = 1;
      }

      if (dri2_dpy->dri_screen_display_gpu) {
         linear_copy_display_gpu_image = dri_create_image_with_modifiers(
            dri2_dpy->dri_screen_display_gpu,
            dri2_surf->base.Width, dri2_surf->base.Height,
            linear_pipe_format, use_flags | __DRI_IMAGE_USE_LINEAR,
            display_modifiers, display_num_modifiers, NULL);

         if (linear_copy_display_gpu_image) {
            int i, ret = 1;
            int fourcc;
            int num_planes = 0;
            int buffer_fds[4];
            int strides[4];
            int offsets[4];
            unsigned error;

            if (!dri2_query_image(linear_copy_display_gpu_image,
                                  __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                  &num_planes))
               num_planes = 1;

            for (i = 0; i < num_planes; i++) {
               struct dri_image *image = dri2_from_planar(
                  linear_copy_display_gpu_image, i, NULL);

               if (!image) {
                  assert(i == 0);
                  image = linear_copy_display_gpu_image;
               }

               buffer_fds[i] = -1;
               ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD,
                                       &buffer_fds[i]);
               ret &= dri2_query_image(
                  image, __DRI_IMAGE_ATTRIB_STRIDE, &strides[i]);
               ret &= dri2_query_image(
                  image, __DRI_IMAGE_ATTRIB_OFFSET, &offsets[i]);

               if (image != linear_copy_display_gpu_image)
                  dri2_destroy_image(image);

               if (!ret) {
                  do {
                     if (buffer_fds[i] != -1)
                        close(buffer_fds[i]);
                  } while (--i >= 0);
                  dri2_destroy_image(linear_copy_display_gpu_image);
                  return -1;
               }
            }

            ret &= dri2_query_image(linear_copy_display_gpu_image,
                                    __DRI_IMAGE_ATTRIB_FOURCC,
                                    &fourcc);
            if (!ret) {
               do {
                  if (buffer_fds[i] != -1)
                     close(buffer_fds[i]);
               } while (--i >= 0);
               dri2_destroy_image(linear_copy_display_gpu_image);
               return -1;
            }

            /* The linear buffer was created in the display GPU's vram, so we
             * need to make it visible to render GPU
             */
            dri2_surf->back->linear_copy =
               dri2_from_dma_bufs(
                  dri2_dpy->dri_screen_render_gpu,
                  dri2_surf->base.Width, dri2_surf->base.Height,
                  fourcc, linear_mod,
                  &buffer_fds[0], num_planes, &strides[0], &offsets[0],
                  __DRI_YUV_COLOR_SPACE_UNDEFINED,
                  __DRI_YUV_RANGE_UNDEFINED, __DRI_YUV_CHROMA_SITING_UNDEFINED,
                  __DRI_YUV_CHROMA_SITING_UNDEFINED, __DRI_IMAGE_PRIME_LINEAR_BUFFER,
                  &error, dri2_surf->back);

            for (i = 0; i < num_planes; ++i) {
               if (buffer_fds[i] != -1)
                  close(buffer_fds[i]);
            }
            dri2_destroy_image(linear_copy_display_gpu_image);
         }
      }

      if (!dri2_surf->back->linear_copy) {
         dri2_surf->back->linear_copy = dri_create_image_with_modifiers(
            dri2_dpy->dri_screen_render_gpu,
            dri2_surf->base.Width, dri2_surf->base.Height,
            linear_pipe_format, use_flags | __DRI_IMAGE_USE_LINEAR,
            render_modifiers, render_num_modifiers, NULL);
      }

      if (dri2_surf->back->linear_copy == NULL)
         return -1;
   }

   if (dri2_surf->back->dri_image == NULL) {
      int modifiers_count = 0;
      uint64_t *modifiers =
         get_surface_specific_modifiers(dri2_surf, &modifiers_count);

      if (dri2_surf->wl_dmabuf_feedback)
         create_dri_image_from_dmabuf_feedback(
            dri2_surf, pipe_format, use_flags, modifiers, modifiers_count);
      if (dri2_surf->back->dri_image == NULL)
         create_dri_image_from_formats(dri2_surf, pipe_format, use_flags,
                                       modifiers, modifiers_count);

      free(modifiers);
      dri2_surf->back->age = 0;
   }

   if (dri2_surf->back->dri_image == NULL)
      return -1;

   dri2_surf->back->locked = true;

   return 0;
}

static void
back_bo_to_dri_buffer(struct dri2_egl_surface *dri2_surf, __DRIbuffer *buffer)
{
   struct dri_image *image;
   int name, pitch;

   image = dri2_surf->back->dri_image;

   dri2_query_image(image, __DRI_IMAGE_ATTRIB_NAME, &name);
   dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE, &pitch);

   buffer->attachment = __DRI_BUFFER_BACK_LEFT;
   buffer->name = name;
   buffer->pitch = pitch;
   buffer->cpp = 4;
   buffer->flags = 0;
}

/* Value chosen empirically as a compromise between avoiding frequent
 * reallocations and extended time of increased memory consumption due to
 * unused buffers being kept.
 */
#define BUFFER_TRIM_AGE_HYSTERESIS 20

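/* Called (via update_buffers_if_needed) when a back buffer is needed: pick up
 * any pending native-window resize, drop the buffers if the surface was
 * resized or new dma-buf feedback arrived, make sure a back buffer is
 * available, and trim unlocked buffers that have not been used for a while
 * (see BUFFER_TRIM_AGE_HYSTERESIS above). */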
1377 static int
update_buffers(struct dri2_egl_surface * dri2_surf)1378 update_buffers(struct dri2_egl_surface *dri2_surf)
1379 {
1380    struct dri2_egl_display *dri2_dpy =
1381       dri2_egl_display(dri2_surf->base.Resource.Display);
1382 
1383    if (dri2_surf->wl_win &&
1384        (dri2_surf->base.Width != dri2_surf->wl_win->width ||
1385         dri2_surf->base.Height != dri2_surf->wl_win->height)) {
1386 
1387       dri2_surf->base.Width = dri2_surf->wl_win->width;
1388       dri2_surf->base.Height = dri2_surf->wl_win->height;
1389       dri2_surf->dx = dri2_surf->wl_win->dx;
1390       dri2_surf->dy = dri2_surf->wl_win->dy;
1391    }
1392 
1393    if (dri2_surf->resized || dri2_surf->received_dmabuf_feedback) {
1394       dri2_wl_release_buffers(dri2_surf);
1395       dri2_surf->resized = false;
1396       dri2_surf->received_dmabuf_feedback = false;
1397    }
1398 
1399    if (get_back_bo(dri2_surf) < 0) {
1400       _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
1401       return -1;
1402    }
1403 
1404    /* If we have an extra unlocked buffer at this point, we had to do triple
1405     * buffering for a while, but now can go back to just double buffering.
1406     * That means we can free any unlocked buffer now. To avoid toggling between
1407     * going back to double buffering and needing to allocate another buffer too
1408     * fast we let the unneeded buffer sit around for a short while. */
1409    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
1410       if (!dri2_surf->color_buffers[i].locked &&
1411           dri2_surf->color_buffers[i].wl_buffer &&
1412           dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
1413          wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
1414          dri2_destroy_image(dri2_surf->color_buffers[i].dri_image);
1415          if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1416             dri2_destroy_image(
1417                dri2_surf->color_buffers[i].linear_copy);
1418          dri2_surf->color_buffers[i].wl_buffer = NULL;
1419          dri2_surf->color_buffers[i].dri_image = NULL;
1420          dri2_surf->color_buffers[i].linear_copy = NULL;
1421          dri2_surf->color_buffers[i].age = 0;
1422       }
1423    }
1424 
1425    return 0;
1426 }
1427 
1428 static int
1429 update_buffers_if_needed(struct dri2_egl_surface *dri2_surf)
1430 {
1431    if (dri2_surf->back != NULL)
1432       return 0;
1433 
1434    return update_buffers(dri2_surf);
1435 }
1436 
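/* DRI image-loader getBuffers hook: ensure a back buffer is allocated and
 * report it to the driver as the only available image.
 */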
1437 static int
1438 image_get_buffers(struct dri_drawable *driDrawable, unsigned int format,
1439                   uint32_t *stamp, void *loaderPrivate, uint32_t buffer_mask,
1440                   struct __DRIimageList *buffers)
1441 {
1442    struct dri2_egl_surface *dri2_surf = loaderPrivate;
1443 
1444    if (update_buffers_if_needed(dri2_surf) < 0)
1445       return 0;
1446 
1447    buffers->image_mask = __DRI_IMAGE_BUFFER_BACK;
1448    buffers->back = dri2_surf->back->dri_image;
1449 
1450    return 1;
1451 }
1452 
1453 static void
1454 dri2_wl_flush_front_buffer(struct dri_drawable *driDrawable, void *loaderPrivate)
1455 {
1456    (void)driDrawable;
1457    (void)loaderPrivate;
1458 }
1459 
1460 static unsigned
1461 dri2_wl_get_capability(void *loaderPrivate, enum dri_loader_cap cap)
1462 {
1463    switch (cap) {
1464    case DRI_LOADER_CAP_FP16:
1465       return 1;
1466    case DRI_LOADER_CAP_RGBA_ORDERING:
1467       return 1;
1468    default:
1469       return 0;
1470    }
1471 }
1472 
1473 static const __DRIimageLoaderExtension image_loader_extension = {
1474    .base = {__DRI_IMAGE_LOADER, 2},
1475 
1476    .getBuffers = image_get_buffers,
1477    .flushFrontBuffer = dri2_wl_flush_front_buffer,
1478    .getCapability = dri2_wl_get_capability,
1479 };
1480 
1481 static void
1482 wayland_throttle_callback(void *data, struct wl_callback *callback,
1483                           uint32_t time)
1484 {
1485    struct dri2_egl_surface *dri2_surf = data;
1486 
1487    dri2_surf->throttle_callback = NULL;
1488    wl_callback_destroy(callback);
1489 }
1490 
1491 static const struct wl_callback_listener throttle_listener = {
1492    .done = wayland_throttle_callback,
1493 };
1494 
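/* Wrap a dri_image in a wl_buffer. Prefer zwp_linux_dmabuf_v1 when the
 * image's modifier (or DRM_FORMAT_MOD_INVALID) is advertised by the
 * compositor; otherwise fall back to a single-plane wl_drm prime buffer.
 */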
1495 static struct wl_buffer *
1496 create_wl_buffer(struct dri2_egl_display *dri2_dpy,
1497                  struct dri2_egl_surface *dri2_surf, struct dri_image *image)
1498 {
1499    struct wl_buffer *ret = NULL;
1500    EGLBoolean query;
1501    int width, height, fourcc, num_planes;
1502    uint64_t modifier = DRM_FORMAT_MOD_INVALID;
1503    int mod_hi, mod_lo;
1504 
1505    query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_WIDTH, &width);
1506    query &=
1507       dri2_query_image(image, __DRI_IMAGE_ATTRIB_HEIGHT, &height);
1508    query &=
1509       dri2_query_image(image, __DRI_IMAGE_ATTRIB_FOURCC, &fourcc);
1510    if (!query)
1511       return NULL;
1512 
1513    query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_NUM_PLANES,
1514                                        &num_planes);
1515    if (!query)
1516       num_planes = 1;
1517 
1518    query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
1519                                        &mod_hi);
1520    query &= dri2_query_image(
1521       image, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod_lo);
1522    if (query) {
1523       modifier = combine_u32_into_u64(mod_hi, mod_lo);
1524    }
1525 
1526    bool supported_modifier = false;
1527    bool mod_invalid_supported = false;
1528    int visual_idx = dri2_wl_visual_idx_from_fourcc(fourcc);
1529    assert(visual_idx != -1);
1530 
1531    uint64_t *mod;
1532    u_vector_foreach(mod, &dri2_dpy->formats.modifiers[visual_idx])
1533    {
1534       if (*mod == DRM_FORMAT_MOD_INVALID) {
1535          mod_invalid_supported = true;
1536       }
1537       if (*mod == modifier) {
1538          supported_modifier = true;
1539          break;
1540       }
1541    }
1542    if (!supported_modifier && mod_invalid_supported) {
1543       /* If the server has advertised DRM_FORMAT_MOD_INVALID then we trust
1544        * that the client has allocated the buffer with the right implicit
1545        * modifier for the format, even though it's allocated a buffer the
1546        * server hasn't explicitly claimed to support. */
1547       modifier = DRM_FORMAT_MOD_INVALID;
1548       supported_modifier = true;
1549    }
1550 
1551    if (dri2_dpy->wl_dmabuf && supported_modifier) {
1552       struct zwp_linux_buffer_params_v1 *params;
1553       int i;
1554 
1555       /* We don't need a wrapper for wl_dmabuf objects, because we have to
1556        * create the intermediate params object; we can set the queue on this,
1557        * and the wl_buffer inherits it race-free. */
1558       params = zwp_linux_dmabuf_v1_create_params(dri2_dpy->wl_dmabuf);
1559       if (dri2_surf)
1560          wl_proxy_set_queue((struct wl_proxy *)params, dri2_surf->wl_queue);
1561 
1562       for (i = 0; i < num_planes; i++) {
1563          struct dri_image *p_image;
1564          int stride, offset;
1565          int fd = -1;
1566 
1567          p_image = dri2_from_planar(image, i, NULL);
1568          if (!p_image) {
1569             assert(i == 0);
1570             p_image = image;
1571          }
1572 
1573          query =
1574             dri2_query_image(p_image, __DRI_IMAGE_ATTRIB_FD, &fd);
1575          query &= dri2_query_image(
1576             p_image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
1577          query &= dri2_query_image(
1578             p_image, __DRI_IMAGE_ATTRIB_OFFSET, &offset);
1579          if (image != p_image)
1580             dri2_destroy_image(p_image);
1581 
1582          if (!query) {
1583             if (fd >= 0)
1584                close(fd);
1585             zwp_linux_buffer_params_v1_destroy(params);
1586             return NULL;
1587          }
1588 
1589          zwp_linux_buffer_params_v1_add(params, fd, i, offset, stride,
1590                                         modifier >> 32, modifier & 0xffffffff);
1591          close(fd);
1592       }
1593 
1594       if (dri2_surf && dri2_surf->base.PresentOpaque)
1595          fourcc = dri2_wl_visuals[visual_idx].opaque_wl_drm_format;
1596 
1597       ret = zwp_linux_buffer_params_v1_create_immed(params, width, height,
1598                                                     fourcc, 0);
1599       zwp_linux_buffer_params_v1_destroy(params);
1600    } else if (dri2_dpy->wl_drm) {
1601       struct wl_drm *wl_drm =
1602          dri2_surf ? dri2_surf->wl_drm_wrapper : dri2_dpy->wl_drm;
1603       int fd = -1, stride;
1604 
1605       /* wl_drm doesn't support explicit modifiers, so ideally we should bail
1606        * out if modifier != DRM_FORMAT_MOD_INVALID. However many drivers will
1607        * return a valid modifier when querying the DRIImage even if a buffer
1608        * was allocated without explicit modifiers.
1609        * XXX: bail out if the buffer was allocated without explicit modifiers
1610        */
1611       if (num_planes > 1)
1612          return NULL;
1613 
1614       query = dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD, &fd);
1615       query &=
1616          dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
1617       if (!query) {
1618          if (fd >= 0)
1619             close(fd);
1620          return NULL;
1621       }
1622 
1623       ret = wl_drm_create_prime_buffer(wl_drm, fd, width, height, fourcc, 0,
1624                                        stride, 0, 0, 0, 0);
1625       close(fd);
1626    }
1627 
1628    return ret;
1629 }
1630 
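/* Post damage in buffer coordinates via wl_surface.damage_buffer, flipping Y
 * because EGL rects use a bottom-left origin. Returns EGL_FALSE if the
 * compositor is too old to support damage_buffer.
 */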
1631 static EGLBoolean
1632 try_damage_buffer(struct dri2_egl_surface *dri2_surf, const EGLint *rects,
1633                   EGLint n_rects)
1634 {
1635    if (wl_proxy_get_version((struct wl_proxy *)dri2_surf->wl_surface_wrapper) <
1636        WL_SURFACE_DAMAGE_BUFFER_SINCE_VERSION)
1637       return EGL_FALSE;
1638 
1639    for (int i = 0; i < n_rects; i++) {
1640       const int *rect = &rects[i * 4];
1641 
1642       wl_surface_damage_buffer(dri2_surf->wl_surface_wrapper, rect[0],
1643                                dri2_surf->base.Height - rect[1] - rect[3],
1644                                rect[2], rect[3]);
1645    }
1646    return EGL_TRUE;
1647 }
1648 
1649 /**
1650  * Called via eglSwapBuffers(), drv->SwapBuffers().
1651  */
1652 static EGLBoolean
1653 dri2_wl_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
1654                                  const EGLint *rects, EGLint n_rects)
1655 {
1656    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1657    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
1658 
1659    if (!dri2_surf->wl_win)
1660       return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
1661 
1662    /* Flush (and finish glthread) before:
1663     *   - update_buffers_if_needed because the unmarshalling thread
1664     *     may be running currently, and we would concurrently alloc/free
1665     *     the back bo.
1666     *   - swapping current/back because flushing may free the buffer and
1667     *     dri_image and reallocate them using get_back_bo (which causes a
1668     *     crash because 'current' becomes NULL).
1669     *   - using any wl_* function because accessing them from this thread
1670     *     and glthread causes troubles (see #7624 and #8136)
1671     */
1672    dri2_flush_drawable_for_swapbuffers(disp, draw);
1673    dri_invalidate_drawable(dri2_surf->dri_drawable);
1674 
1675    while (dri2_surf->throttle_callback != NULL)
1676       if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
1677           -1)
1678          return -1;
1679 
1680    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++)
1681       if (dri2_surf->color_buffers[i].age > 0)
1682          dri2_surf->color_buffers[i].age++;
1683 
1684    /* Make sure we have a back buffer in case we're swapping without ever
1685     * rendering. */
1686    if (update_buffers_if_needed(dri2_surf) < 0)
1687       return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1688 
1689    if (draw->SwapInterval > 0) {
1690       dri2_surf->throttle_callback =
1691          wl_surface_frame(dri2_surf->wl_surface_wrapper);
1692       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
1693                                dri2_surf);
1694    }
1695 
1696    dri2_surf->back->age = 1;
1697    dri2_surf->current = dri2_surf->back;
1698    dri2_surf->back = NULL;
1699 
1700    if (!dri2_surf->current->wl_buffer) {
1701       struct dri_image *image;
1702 
1703       if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1704          image = dri2_surf->current->linear_copy;
1705       else
1706          image = dri2_surf->current->dri_image;
1707 
1708       dri2_surf->current->wl_buffer =
1709          create_wl_buffer(dri2_dpy, dri2_surf, image);
1710 
1711       if (dri2_surf->current->wl_buffer == NULL)
1712          return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1713 
1714       dri2_surf->current->wl_release = false;
1715 
1716       wl_buffer_add_listener(dri2_surf->current->wl_buffer, &wl_buffer_listener,
1717                              dri2_surf);
1718    }
1719 
1720    wl_surface_attach(dri2_surf->wl_surface_wrapper,
1721                      dri2_surf->current->wl_buffer, dri2_surf->dx,
1722                      dri2_surf->dy);
1723 
1724    dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
1725    dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
1726    /* reset resize growing parameters */
1727    dri2_surf->dx = 0;
1728    dri2_surf->dy = 0;
1729 
1730    /* If the compositor doesn't support damage_buffer, we deliberately
1731     * ignore the damage region and post maximum damage, due to
1732     * https://bugs.freedesktop.org/78190 */
1733    if (!n_rects || !try_damage_buffer(dri2_surf, rects, n_rects))
1734       wl_surface_damage(dri2_surf->wl_surface_wrapper, 0, 0, INT32_MAX,
1735                         INT32_MAX);
1736 
1737    if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
1738       _EGLContext *ctx = _eglGetCurrentContext();
1739       struct dri2_egl_context *dri2_ctx = dri2_egl_context(ctx);
1740       struct dri_drawable *dri_drawable = dri2_dpy->vtbl->get_dri_drawable(draw);
1741       dri2_blit_image(
1742          dri2_ctx->dri_context, dri2_surf->current->linear_copy,
1743          dri2_surf->current->dri_image, 0, 0, dri2_surf->base.Width,
1744          dri2_surf->base.Height, 0, 0, dri2_surf->base.Width,
1745          dri2_surf->base.Height, 0);
1746       dri_flush_drawable(dri_drawable);
1747    }
1748 
1749    wl_surface_commit(dri2_surf->wl_surface_wrapper);
1750 
1751    /* If we're not waiting for a frame callback then we'll at least throttle
1752     * to a sync callback so that we always give a chance for the compositor to
1753     * handle the commit and send a release event before checking for a free
1754     * buffer */
1755    if (dri2_surf->throttle_callback == NULL) {
1756       dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
1757       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
1758                                dri2_surf);
1759    }
1760 
1761    wl_display_flush(dri2_dpy->wl_dpy);
1762 
1763    return EGL_TRUE;
1764 }
1765 
1766 static EGLint
1767 dri2_wl_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
1768 {
1769    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
1770 
1771    if (update_buffers_if_needed(dri2_surf) < 0) {
1772       _eglError(EGL_BAD_ALLOC, "dri2_query_buffer_age");
1773       return -1;
1774    }
1775 
1776    return dri2_surf->back->age;
1777 }
1778 
1779 static EGLBoolean
1780 dri2_wl_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
1781 {
1782    return dri2_wl_swap_buffers_with_damage(disp, draw, NULL, 0);
1783 }
1784 
1785 static struct wl_buffer *
1786 dri2_wl_create_wayland_buffer_from_image(_EGLDisplay *disp, _EGLImage *img)
1787 {
1788    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1789    struct dri2_egl_image *dri2_img = dri2_egl_image(img);
1790    struct dri_image *image = dri2_img->dri_image;
1791    struct wl_buffer *buffer;
1792    int fourcc;
1793 
1794    /* Check the upstream display supports this buffer's format. */
1795    dri2_query_image(image, __DRI_IMAGE_ATTRIB_FOURCC, &fourcc);
1796    if (!server_supports_fourcc(&dri2_dpy->formats, fourcc))
1797       goto bad_format;
1798 
1799    buffer = create_wl_buffer(dri2_dpy, NULL, image);
1800 
1801    /* The buffer object will have been created with our internal event queue
1802     * because it is using wl_dmabuf/wl_drm as a proxy factory. We want the
1803     * buffer to be used by the application so we'll reset it to the display's
1804     * default event queue. This isn't actually racy, as the only event the
1805     * buffer can get is a buffer release, which doesn't happen with an explicit
1806     * attach. */
1807    if (buffer)
1808       wl_proxy_set_queue((struct wl_proxy *)buffer, NULL);
1809 
1810    return buffer;
1811 
1812 bad_format:
1813    _eglError(EGL_BAD_MATCH, "unsupported image format");
1814    return NULL;
1815 }
1816 
1817 static int
1818 dri2_wl_authenticate(_EGLDisplay *disp, uint32_t id)
1819 {
1820    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1821    int ret = 0;
1822 
1823    if (dri2_dpy->is_render_node) {
1824       _eglLog(_EGL_WARNING, "wayland-egl: client asks server to "
1825                             "authenticate for render-nodes");
1826       return 0;
1827    }
1828    dri2_dpy->authenticated = false;
1829 
1830    wl_drm_authenticate(dri2_dpy->wl_drm, id);
1831    if (roundtrip(dri2_dpy) < 0)
1832       ret = -1;
1833 
1834    if (!dri2_dpy->authenticated)
1835       ret = -1;
1836 
1837    /* reset authenticated */
1838    dri2_dpy->authenticated = true;
1839 
1840    return ret;
1841 }
1842 
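/* wl_drm 'device' event: open the advertised DRM node. Render nodes need no
 * authentication; primary nodes get authenticated with our DRM magic.
 */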
1843 static void
1844 drm_handle_device(void *data, struct wl_drm *drm, const char *device)
1845 {
1846    struct dri2_egl_display *dri2_dpy = data;
1847    drm_magic_t magic;
1848 
1849    dri2_dpy->device_name = strdup(device);
1850    if (!dri2_dpy->device_name)
1851       return;
1852 
1853    dri2_dpy->fd_render_gpu = loader_open_device(dri2_dpy->device_name);
1854    if (dri2_dpy->fd_render_gpu == -1) {
1855       _eglLog(_EGL_WARNING, "wayland-egl: could not open %s (%s)",
1856               dri2_dpy->device_name, strerror(errno));
1857       free(dri2_dpy->device_name);
1858       dri2_dpy->device_name = NULL;
1859       return;
1860    }
1861 
1862    if (drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER) {
1863       dri2_dpy->authenticated = true;
1864    } else {
1865       if (drmGetMagic(dri2_dpy->fd_render_gpu, &magic)) {
1866          close(dri2_dpy->fd_render_gpu);
1867          dri2_dpy->fd_render_gpu = -1;
1868          free(dri2_dpy->device_name);
1869          dri2_dpy->device_name = NULL;
1870          _eglLog(_EGL_WARNING, "wayland-egl: drmGetMagic failed");
1871          return;
1872       }
1873       wl_drm_authenticate(dri2_dpy->wl_drm, magic);
1874    }
1875 }
1876 
1877 static void
1878 drm_handle_format(void *data, struct wl_drm *drm, uint32_t format)
1879 {
1880    struct dri2_egl_display *dri2_dpy = data;
1881    int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1882 
1883    if (visual_idx == -1)
1884       return;
1885 
1886    BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1887 }
1888 
1889 static void
1890 drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t value)
1891 {
1892    struct dri2_egl_display *dri2_dpy = data;
1893 
1894    dri2_dpy->capabilities = value;
1895 }
1896 
1897 static void
1898 drm_handle_authenticated(void *data, struct wl_drm *drm)
1899 {
1900    struct dri2_egl_display *dri2_dpy = data;
1901 
1902    dri2_dpy->authenticated = true;
1903 }
1904 
1905 static const struct wl_drm_listener drm_listener = {
1906    .device = drm_handle_device,
1907    .format = drm_handle_format,
1908    .authenticated = drm_handle_authenticated,
1909    .capabilities = drm_handle_capabilities,
1910 };
1911 
1912 static void
1913 dmabuf_ignore_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
1914                      uint32_t format)
1915 {
1916    /* formats are implicitly advertised by the 'modifier' event, so ignore */
1917 }
1918 
1919 static void
1920 dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
1921                        uint32_t format, uint32_t modifier_hi,
1922                        uint32_t modifier_lo)
1923 {
1924    struct dri2_egl_display *dri2_dpy = data;
1925    int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1926    uint64_t *mod;
1927 
1928    /* Ignore this if the compositor advertised dma-buf feedback. From version 4
1929     * onwards (when dma-buf feedback was introduced), the compositor should not
1930     * advertise this event anymore, but let's keep this for safety. */
1931    if (dri2_dpy->wl_dmabuf_feedback)
1932       return;
1933 
1934    if (visual_idx == -1)
1935       return;
1936 
1937    BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1938 
1939    mod = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
1940    if (mod)
1941       *mod = combine_u32_into_u64(modifier_hi, modifier_lo);
1942 }
1943 
1944 static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
1945    .format = dmabuf_ignore_format,
1946    .modifier = dmabuf_handle_modifier,
1947 };
1948 
1949 static void
1950 wl_drm_bind(struct dri2_egl_display *dri2_dpy)
1951 {
1952    dri2_dpy->wl_drm =
1953       wl_registry_bind(dri2_dpy->wl_registry, dri2_dpy->wl_drm_name,
1954                        &wl_drm_interface, dri2_dpy->wl_drm_version);
1955    wl_drm_add_listener(dri2_dpy->wl_drm, &drm_listener, dri2_dpy);
1956 }
1957 
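/* dma-buf feedback 'format_table' event: map the compositor's shared
 * (format, modifier) table read-only so tranche_formats can index into it.
 */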
1958 static void
1959 default_dmabuf_feedback_format_table(
1960    void *data,
1961    struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
1962    int32_t fd, uint32_t size)
1963 {
1964    struct dri2_egl_display *dri2_dpy = data;
1965 
1966    dri2_dpy->format_table.size = size;
1967    dri2_dpy->format_table.data =
1968       mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
1969 
1970    close(fd);
1971 }
1972 
1973 static void
1974 default_dmabuf_feedback_main_device(
1975    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1976    struct wl_array *device)
1977 {
1978    struct dri2_egl_display *dri2_dpy = data;
1979    char *node;
1980    int fd;
1981    dev_t dev;
1982 
1983    /* Given the device, look for a render node and try to open it. */
1984    memcpy(&dev, device->data, sizeof(dev));
1985    node = loader_get_render_node(dev);
1986    if (!node)
1987       return;
1988    fd = loader_open_device(node);
1989    if (fd == -1) {
1990       free(node);
1991       return;
1992    }
1993 
1994    dri2_dpy->device_name = node;
1995    dri2_dpy->fd_render_gpu = fd;
1996    dri2_dpy->authenticated = true;
1997 }
1998 
1999 static void
2000 default_dmabuf_feedback_tranche_target_device(
2001    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
2002    struct wl_array *device)
2003 {
2004    /* ignore this event */
2005 }
2006 
2007 static void
2008 default_dmabuf_feedback_tranche_flags(
2009    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
2010    uint32_t flags)
2011 {
2012    /* ignore this event */
2013 }
2014 
2015 static void
2016 default_dmabuf_feedback_tranche_formats(
2017    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
2018    struct wl_array *indices)
2019 {
2020    struct dri2_egl_display *dri2_dpy = data;
2021    uint64_t *modifier_ptr, modifier;
2022    uint32_t format;
2023    uint16_t *index;
2024    int visual_idx;
2025 
2026    if (dri2_dpy->format_table.data == MAP_FAILED) {
2027       _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
2028                             "so we won't be able to use this batch of dma-buf "
2029                             "feedback events.");
2030       return;
2031    }
2032    if (dri2_dpy->format_table.data == NULL) {
2033       _eglLog(_EGL_WARNING,
2034               "wayland-egl: compositor didn't advertise a format "
2035               "table, so we won't be able to use this batch of dma-buf "
2036               "feedback events.");
2037       return;
2038    }
2039 
2040    wl_array_for_each (index, indices) {
2041       format = dri2_dpy->format_table.data[*index].format;
2042       modifier = dri2_dpy->format_table.data[*index].modifier;
2043 
2044       /* skip formats that we don't support */
2045       visual_idx = dri2_wl_visual_idx_from_fourcc(format);
2046       if (visual_idx == -1)
2047          continue;
2048 
2049       BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
2050       modifier_ptr = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
2051       if (modifier_ptr)
2052          *modifier_ptr = modifier;
2053    }
2054 }
2055 
2056 static void
2057 default_dmabuf_feedback_tranche_done(
2058    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
2059 {
2060    /* ignore this event */
2061 }
2062 
2063 static void
2064 default_dmabuf_feedback_done(
2065    void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
2066 {
2067    /* ignore this event */
2068 }
2069 
2070 static const struct zwp_linux_dmabuf_feedback_v1_listener
2071    dmabuf_feedback_listener = {
2072       .format_table = default_dmabuf_feedback_format_table,
2073       .main_device = default_dmabuf_feedback_main_device,
2074       .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
2075       .tranche_flags = default_dmabuf_feedback_tranche_flags,
2076       .tranche_formats = default_dmabuf_feedback_tranche_formats,
2077       .tranche_done = default_dmabuf_feedback_tranche_done,
2078       .done = default_dmabuf_feedback_done,
2079 };
2080 
2081 static void
2082 registry_handle_global_drm(void *data, struct wl_registry *registry,
2083                            uint32_t name, const char *interface,
2084                            uint32_t version)
2085 {
2086    struct dri2_egl_display *dri2_dpy = data;
2087 
2088    if (strcmp(interface, wl_drm_interface.name) == 0) {
2089       dri2_dpy->wl_drm_version = MIN2(version, 2);
2090       dri2_dpy->wl_drm_name = name;
2091    } else if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 &&
2092               version >= 3) {
2093       dri2_dpy->wl_dmabuf = wl_registry_bind(
2094          registry, name, &zwp_linux_dmabuf_v1_interface,
2095          MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
2096       zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
2097                                        dri2_dpy);
2098    }
2099 }
2100 
2101 static void
2102 registry_handle_global_remove(void *data, struct wl_registry *registry,
2103                               uint32_t name)
2104 {
2105 }
2106 
2107 static const struct wl_registry_listener registry_listener_drm = {
2108    .global = registry_handle_global_drm,
2109    .global_remove = registry_handle_global_remove,
2110 };
2111 
2112 static void
2113 dri2_wl_setup_swap_interval(_EGLDisplay *disp)
2114 {
2115    /* We can't use values greater than 1 on Wayland because we are using the
2116     * frame callback to synchronise the frame, and the only way we can be sure to
2117     * get a frame callback is to attach a new buffer. Therefore we can't just
2118     * sit drawing nothing to wait until the next ‘n’ frame callbacks */
2119 
2120    dri2_setup_swap_interval(disp, 1);
2121 }
2122 
2123 static const struct dri2_egl_display_vtbl dri2_wl_display_vtbl = {
2124    .authenticate = dri2_wl_authenticate,
2125    .create_window_surface = dri2_wl_create_window_surface,
2126    .create_pixmap_surface = dri2_wl_create_pixmap_surface,
2127    .destroy_surface = dri2_wl_destroy_surface,
2128    .swap_interval = dri2_wl_swap_interval,
2129    .create_image = dri2_create_image_khr,
2130    .swap_buffers = dri2_wl_swap_buffers,
2131    .swap_buffers_with_damage = dri2_wl_swap_buffers_with_damage,
2132    .query_buffer_age = dri2_wl_query_buffer_age,
2133    .create_wayland_buffer_from_image = dri2_wl_create_wayland_buffer_from_image,
2134    .get_dri_drawable = dri2_surface_get_dri_drawable,
2135 };
2136 
2137 static const __DRIextension *dri2_loader_extensions[] = {
2138    &image_loader_extension.base,
2139    &image_lookup_extension.base,
2140    &use_invalidate.base,
2141    NULL,
2142 };
2143 
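/* Create an EGLConfig for every driver config whose format the compositor
 * supports natively, or that can be reached through the alt_pipe_format
 * conversion in the multi-GPU (PRIME blit) path.
 */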
2144 static void
2145 dri2_wl_add_configs_for_visuals(_EGLDisplay *disp)
2146 {
2147    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2148    unsigned int format_count[ARRAY_SIZE(dri2_wl_visuals)] = {0};
2149 
2150    /* Try to create an EGLConfig for every config the driver declares */
2151    for (unsigned i = 0; dri2_dpy->driver_configs[i]; i++) {
2152       struct dri2_egl_config *dri2_conf;
2153       bool conversion = false;
2154       int idx = dri2_wl_visual_idx_from_config(dri2_dpy->driver_configs[i]);
2155 
2156       if (idx < 0)
2157          continue;
2158 
2159       /* Check if the server natively supports the colour buffer format */
2160       if (!server_supports_format(&dri2_dpy->formats, idx)) {
2161          /* In multi-GPU scenarios, we usually have a different buffer, so a
2162           * format conversion is easy compared to the overhead of the copy */
2163          if (dri2_dpy->fd_render_gpu == dri2_dpy->fd_display_gpu)
2164             continue;
2165 
2166          /* Check if the server supports the alternate format */
2167          if (!server_supports_pipe_format(&dri2_dpy->formats,
2168                                           dri2_wl_visuals[idx].alt_pipe_format)) {
2169             continue;
2170          }
2171 
2172          conversion = true;
2173       }
2174 
2175       /* The format is supported one way or another; add the EGLConfig */
2176       dri2_conf = dri2_add_config(disp, dri2_dpy->driver_configs[i],
2177                                   EGL_WINDOW_BIT, NULL);
2178       if (!dri2_conf)
2179          continue;
2180 
2181       format_count[idx]++;
2182 
2183       if (conversion && format_count[idx] == 1) {
2184          _eglLog(_EGL_DEBUG, "Client format %s converted via PRIME blitImage.",
2185                  util_format_name(dri2_wl_visuals[idx].pipe_format));
2186       }
2187    }
2188 
2189    for (unsigned i = 0; i < ARRAY_SIZE(format_count); i++) {
2190       if (!format_count[i]) {
2191          _eglLog(_EGL_DEBUG, "No DRI config supports native format %s",
2192                  util_format_name(dri2_wl_visuals[i].pipe_format));
2193       }
2194    }
2195 }
2196 
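/* Discover the render device: prefer the main device reported by the default
 * dma-buf feedback, and fall back to binding wl_drm (including
 * authentication) when no device could be obtained that way.
 */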
2197 static bool
2198 dri2_initialize_wayland_drm_extensions(struct dri2_egl_display *dri2_dpy)
2199 {
2200    /* Get default dma-buf feedback */
2201    if (dri2_dpy->wl_dmabuf &&
2202        zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
2203           ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
2204       dmabuf_feedback_format_table_init(&dri2_dpy->format_table);
2205       dri2_dpy->wl_dmabuf_feedback =
2206          zwp_linux_dmabuf_v1_get_default_feedback(dri2_dpy->wl_dmabuf);
2207       zwp_linux_dmabuf_feedback_v1_add_listener(
2208          dri2_dpy->wl_dmabuf_feedback, &dmabuf_feedback_listener, dri2_dpy);
2209    }
2210 
2211    if (roundtrip(dri2_dpy) < 0)
2212       return false;
2213 
2214    /* Destroy the default dma-buf feedback and the format table. */
2215    if (dri2_dpy->wl_dmabuf_feedback) {
2216       zwp_linux_dmabuf_feedback_v1_destroy(dri2_dpy->wl_dmabuf_feedback);
2217       dri2_dpy->wl_dmabuf_feedback = NULL;
2218       dmabuf_feedback_format_table_fini(&dri2_dpy->format_table);
2219    }
2220 
2221    /* We couldn't retrieve a render node from the dma-buf feedback (or the
2222     * feedback was not advertised at all), so we must fall back to wl_drm. */
2223    if (dri2_dpy->fd_render_gpu == -1) {
2224       /* wl_drm not advertised by compositor, so can't continue */
2225       if (dri2_dpy->wl_drm_name == 0)
2226          return false;
2227       wl_drm_bind(dri2_dpy);
2228 
2229       if (dri2_dpy->wl_drm == NULL)
2230          return false;
2231       if (roundtrip(dri2_dpy) < 0 || dri2_dpy->fd_render_gpu == -1)
2232          return false;
2233 
2234       if (!dri2_dpy->authenticated &&
2235           (roundtrip(dri2_dpy) < 0 || !dri2_dpy->authenticated))
2236          return false;
2237    }
2238    return true;
2239 }
2240 
2241 static EGLBoolean
2242 dri2_initialize_wayland_drm(_EGLDisplay *disp)
2243 {
2244    struct dri2_egl_display *dri2_dpy = dri2_display_create();
2245    if (!dri2_dpy)
2246       return EGL_FALSE;
2247 
2248    disp->DriverData = (void *)dri2_dpy;
2249 
2250    if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
2251       goto cleanup;
2252 
2253    if (disp->PlatformDisplay == NULL) {
2254       dri2_dpy->wl_dpy = wl_display_connect(NULL);
2255       if (dri2_dpy->wl_dpy == NULL)
2256          goto cleanup;
2257       dri2_dpy->own_device = true;
2258    } else {
2259       dri2_dpy->wl_dpy = disp->PlatformDisplay;
2260    }
2261 
2262    dri2_dpy->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
2263                                                           "mesa egl display queue");
2264 
2265    dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
2266    if (dri2_dpy->wl_dpy_wrapper == NULL)
2267       goto cleanup;
2268 
2269    wl_proxy_set_queue((struct wl_proxy *)dri2_dpy->wl_dpy_wrapper,
2270                       dri2_dpy->wl_queue);
2271 
2272    if (dri2_dpy->own_device)
2273       wl_display_dispatch_pending(dri2_dpy->wl_dpy);
2274 
2275    dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
2276    wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_drm,
2277                             dri2_dpy);
2278 
2279    if (roundtrip(dri2_dpy) < 0)
2280       goto cleanup;
2281 
2282    if (!dri2_initialize_wayland_drm_extensions(dri2_dpy))
2283       goto cleanup;
2284 
2285    loader_get_user_preferred_fd(&dri2_dpy->fd_render_gpu,
2286                                 &dri2_dpy->fd_display_gpu);
2287 
2288    if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
2289       free(dri2_dpy->device_name);
2290       dri2_dpy->device_name =
2291          loader_get_device_name_for_fd(dri2_dpy->fd_render_gpu);
2292       if (!dri2_dpy->device_name) {
2293          _eglError(EGL_BAD_ALLOC, "wayland-egl: failed to get device name "
2294                                   "for requested GPU");
2295          goto cleanup;
2296       }
2297    }
2298 
2299    /* we have to do the check now, because loader_get_user_preferred_fd
2300     * will return a render-node when the requested gpu is different from
2301     * the server's, but also when the client asks for the same gpu as the
2302     * server by requesting its pci-id */
2303    dri2_dpy->is_render_node =
2304       drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER;
2305 
2306    dri2_dpy->driver_name = loader_get_driver_for_fd(dri2_dpy->fd_render_gpu);
2307    if (dri2_dpy->driver_name == NULL) {
2308       _eglError(EGL_BAD_ALLOC, "DRI2: failed to get driver name");
2309       goto cleanup;
2310    }
2311 
2312    dri2_dpy->loader_extensions = dri2_loader_extensions;
2313    if (!dri2_load_driver(disp)) {
2314       _eglError(EGL_BAD_ALLOC, "DRI2: failed to load driver");
2315       goto cleanup;
2316    }
2317 
2318    if (!dri2_create_screen(disp))
2319       goto cleanup;
2320 
2321    if (!dri2_setup_device(disp, false)) {
2322       _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to setup EGLDevice");
2323       goto cleanup;
2324    }
2325 
2326    dri2_setup_screen(disp);
2327 
2328    dri2_wl_setup_swap_interval(disp);
2329 
2330    if (dri2_dpy->wl_drm) {
2331       /* To use Prime, we must have _DRI_IMAGE v7 at least.
2332        * createImageFromDmaBufs support indicates that Prime export/import is
2333        * supported by the driver. We deprecated support for the GEM names API, so
2334        * we bail out if the driver does not support Prime. */
2335       if (!(dri2_dpy->capabilities & WL_DRM_CAPABILITY_PRIME) ||
2336           !dri2_dpy->has_dmabuf_import) {
2337          _eglLog(_EGL_WARNING, "wayland-egl: display does not support prime");
2338          goto cleanup;
2339       }
2340    }
2341 
2342    dri2_wl_add_configs_for_visuals(disp);
2343 
2344    dri2_set_WL_bind_wayland_display(disp);
2345    /* We cannot convert an EGLImage into a wl_buffer when rendering on a
2346     * different gpu, because the EGLImage's buffer likely has a tiling mode
2347     * the server gpu won't support. There is no way to check for this yet, so
2348     * do not advertise the extension. */
2349    if (dri2_dpy->fd_render_gpu == dri2_dpy->fd_display_gpu)
2350       disp->Extensions.WL_create_wayland_buffer_from_image = EGL_TRUE;
2351 
2352    disp->Extensions.EXT_buffer_age = EGL_TRUE;
2353 
2354    disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;
2355 
2356    disp->Extensions.EXT_present_opaque = EGL_TRUE;
2357 
2358    /* Fill vtbl last to prevent accidentally calling virtual function during
2359     * initialization.
2360     */
2361    dri2_dpy->vtbl = &dri2_wl_display_vtbl;
2362 
2363    return EGL_TRUE;
2364 
2365 cleanup:
2366    dri2_display_destroy(disp);
2367    return EGL_FALSE;
2368 }
2369 
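/* swrast path: row stride in bytes for a wl_shm format, i.e. width times the
 * block size of the matching pipe format.
 */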
2370 static int
2371 dri2_wl_swrast_get_stride_for_format(int format, int w)
2372 {
2373    int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2374 
2375    assume(visual_idx != -1);
2376 
2377    return w * util_format_get_blocksize(dri2_wl_visuals[visual_idx].pipe_format);
2378 }
2379 
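/* Allocate a CPU-accessible color buffer: back it with an anonymous file,
 * mmap it for our own rendering, and share the same pages with the
 * compositor through a wl_shm pool and wl_buffer.
 */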
2380 static EGLBoolean
2381 dri2_wl_swrast_allocate_buffer(struct dri2_egl_surface *dri2_surf, int format,
2382                                int w, int h, void **data, int *size,
2383                                struct wl_buffer **buffer)
2384 {
2385    struct dri2_egl_display *dri2_dpy =
2386       dri2_egl_display(dri2_surf->base.Resource.Display);
2387    struct wl_shm_pool *pool;
2388    int fd, stride, size_map;
2389    void *data_map;
2390 
2391    assert(!*buffer);
2392 
2393    stride = dri2_wl_swrast_get_stride_for_format(format, w);
2394    size_map = h * stride;
2395 
2396    /* Create a shareable buffer */
2397    fd = os_create_anonymous_file(size_map, NULL);
2398    if (fd < 0)
2399       return EGL_FALSE;
2400 
2401    data_map = mmap(NULL, size_map, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2402    if (data_map == MAP_FAILED) {
2403       close(fd);
2404       return EGL_FALSE;
2405    }
2406 
2407    /* Share it in a wl_buffer */
2408    pool = wl_shm_create_pool(dri2_dpy->wl_shm, fd, size_map);
2409    wl_proxy_set_queue((struct wl_proxy *)pool, dri2_surf->wl_queue);
2410    *buffer = wl_shm_pool_create_buffer(pool, 0, w, h, stride, format);
2411    wl_shm_pool_destroy(pool);
2412    close(fd);
2413 
2414    *data = data_map;
2415    *size = size_map;
2416    return EGL_TRUE;
2417 }
2418 
2419 static void
2420 kopper_update_buffers(struct dri2_egl_surface *dri2_surf)
2421 {
2422    /* we need to do the following operations only once per frame */
2423    if (dri2_surf->back)
2424       return;
2425 
2426    if (dri2_surf->wl_win &&
2427        (dri2_surf->base.Width != dri2_surf->wl_win->width ||
2428         dri2_surf->base.Height != dri2_surf->wl_win->height)) {
2429 
2430       dri2_surf->base.Width = dri2_surf->wl_win->width;
2431       dri2_surf->base.Height = dri2_surf->wl_win->height;
2432       dri2_surf->dx = dri2_surf->wl_win->dx;
2433       dri2_surf->dy = dri2_surf->wl_win->dy;
2434       dri2_surf->current = NULL;
2435    }
2436 }
2437 
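/* Pick (or allocate) a free shm back buffer, waiting for the compositor to
 * release one if all are locked, then trim buffers that have gone unused for
 * more than BUFFER_TRIM_AGE_HYSTERESIS swaps.
 */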
2438 static int
2439 swrast_update_buffers(struct dri2_egl_surface *dri2_surf)
2440 {
2441    struct dri2_egl_display *dri2_dpy =
2442       dri2_egl_display(dri2_surf->base.Resource.Display);
2443 
2444    /* we need to do the following operations only once per frame */
2445    if (dri2_surf->back)
2446       return 0;
2447 
2448    if (dri2_surf->wl_win &&
2449        (dri2_surf->base.Width != dri2_surf->wl_win->width ||
2450         dri2_surf->base.Height != dri2_surf->wl_win->height)) {
2451 
2452       dri2_wl_release_buffers(dri2_surf);
2453 
2454       dri2_surf->base.Width = dri2_surf->wl_win->width;
2455       dri2_surf->base.Height = dri2_surf->wl_win->height;
2456       dri2_surf->dx = dri2_surf->wl_win->dx;
2457       dri2_surf->dy = dri2_surf->wl_win->dy;
2458       dri2_surf->current = NULL;
2459    }
2460 
2461    /* find back buffer */
2462    /* There might be a buffer release already queued that wasn't processed */
2463    wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);
2464 
2465    /* else choose any other free location */
2466    while (!dri2_surf->back) {
2467       for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
2468          if (!dri2_surf->color_buffers[i].locked) {
2469             dri2_surf->back = &dri2_surf->color_buffers[i];
2470             if (dri2_surf->back->wl_buffer)
2471                break;
2472 
2473             if (!dri2_wl_swrast_allocate_buffer(
2474                    dri2_surf, dri2_surf->format, dri2_surf->base.Width,
2475                    dri2_surf->base.Height, &dri2_surf->back->data,
2476                    &dri2_surf->back->data_size, &dri2_surf->back->wl_buffer)) {
2477                _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
2478                return -1;
2479             }
2480             wl_buffer_add_listener(dri2_surf->back->wl_buffer,
2481                                    &wl_buffer_listener, dri2_surf);
2482             break;
2483          }
2484       }
2485 
2486       /* wait for the compositor to release a buffer */
2487       if (!dri2_surf->back) {
2488          if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
2489              -1) {
2490             _eglError(EGL_BAD_ALLOC, "waiting for a free buffer failed");
2491             return -1;
2492          }
2493       }
2494    }
2495 
2496    dri2_surf->back->locked = true;
2497 
2498    /* If we have an extra unlocked buffer at this point, we had to do triple
2499     * buffering for a while, but now can go back to just double buffering.
2500     * That means we can free any unlocked buffer now. To avoid toggling between
2501     * going back to double buffering and needing to allocate another buffer too
2502     * fast we let the unneeded buffer sit around for a short while. */
2503    for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
2504       if (!dri2_surf->color_buffers[i].locked &&
2505           dri2_surf->color_buffers[i].wl_buffer &&
2506           dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
2507          wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
2508          munmap(dri2_surf->color_buffers[i].data,
2509                 dri2_surf->color_buffers[i].data_size);
2510          dri2_surf->color_buffers[i].wl_buffer = NULL;
2511          dri2_surf->color_buffers[i].data = NULL;
2512          dri2_surf->color_buffers[i].age = 0;
2513       }
2514    }
2515 
2516    return 0;
2517 }
2518 
2519 static void *
2520 dri2_wl_swrast_get_frontbuffer_data(struct dri2_egl_surface *dri2_surf)
2521 {
2522    /* if there has been a resize: */
2523    if (!dri2_surf->current)
2524       return NULL;
2525 
2526    return dri2_surf->current->data;
2527 }
2528 
2529 static void *
2530 dri2_wl_swrast_get_backbuffer_data(struct dri2_egl_surface *dri2_surf)
2531 {
2532    assert(dri2_surf->back);
2533    return dri2_surf->back->data;
2534 }
2535 
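/* Wait for any pending frame/sync callback, then, if the swap interval is
 * non-zero, install a new frame callback so the next swap is throttled to
 * the compositor's repaint cycle.
 */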
2536 static EGLBoolean
2537 dri2_wl_surface_throttle(struct dri2_egl_surface *dri2_surf)
2538 {
2539    struct dri2_egl_display *dri2_dpy =
2540       dri2_egl_display(dri2_surf->base.Resource.Display);
2541 
2542    while (dri2_surf->throttle_callback != NULL)
2543       if (loader_wayland_dispatch(dri2_dpy->wl_dpy, dri2_surf->wl_queue, NULL) ==
2544           -1)
2545          return EGL_FALSE;
2546 
2547    if (dri2_surf->base.SwapInterval > 0) {
2548       dri2_surf->throttle_callback =
2549          wl_surface_frame(dri2_surf->wl_surface_wrapper);
2550       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
2551                                dri2_surf);
2552    }
2553 
2554    return EGL_TRUE;
2555 }
2556 
2557 static void
2558 dri2_wl_swrast_commit_backbuffer(struct dri2_egl_surface *dri2_surf)
2559 {
2560    struct dri2_egl_display *dri2_dpy =
2561       dri2_egl_display(dri2_surf->base.Resource.Display);
2562 
2563    dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
2564    dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
2565    /* reset resize growing parameters */
2566    dri2_surf->dx = 0;
2567    dri2_surf->dy = 0;
2568 
2569    wl_surface_commit(dri2_surf->wl_surface_wrapper);
2570 
2571    /* If we're not waiting for a frame callback then we'll at least throttle
2572     * to a sync callback so that we always give a chance for the compositor to
2573     * handle the commit and send a release event before checking for a free
2574     * buffer */
2575    if (dri2_surf->throttle_callback == NULL) {
2576       dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
2577       wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
2578                                dri2_surf);
2579    }
2580 
2581    wl_display_flush(dri2_dpy->wl_dpy);
2582 }
2583 
2584 static void
2585 dri2_wl_kopper_get_drawable_info(struct dri_drawable *draw, int *x, int *y, int *w,
2586                                  int *h, void *loaderPrivate)
2587 {
2588    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2589 
2590    kopper_update_buffers(dri2_surf);
2591    *x = 0;
2592    *y = 0;
2593    *w = dri2_surf->base.Width;
2594    *h = dri2_surf->base.Height;
2595 }
2596 
2597 static void
2598 dri2_wl_swrast_get_drawable_info(struct dri_drawable *draw, int *x, int *y, int *w,
2599                                  int *h, void *loaderPrivate)
2600 {
2601    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2602 
2603    (void)swrast_update_buffers(dri2_surf);
2604    *x = 0;
2605    *y = 0;
2606    *w = dri2_surf->base.Width;
2607    *h = dri2_surf->base.Height;
2608 }
2609 
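/* Copy a rectangle from the current front buffer into the caller's buffer,
 * clamping it to the surface; zero-fill when no front buffer exists yet
 * (e.g. right after a resize).
 */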
2610 static void
2611 dri2_wl_swrast_get_image(struct dri_drawable *read, int x, int y, int w, int h,
2612                          char *data, void *loaderPrivate)
2613 {
2614    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2615    int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2616    int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
2617    int src_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
2618                                                          dri2_surf->base.Width);
2619    int dst_stride = copy_width;
2620    char *src, *dst;
2621 
2622    src = dri2_wl_swrast_get_frontbuffer_data(dri2_surf);
2623    /* this is already the most up-to-date buffer */
2624    if (src == data)
2625       return;
2626    if (!src) {
2627       memset(data, 0, copy_width * h);
2628       return;
2629    }
2630 
2631    assert(copy_width <= src_stride);
2632 
2633    src += x_offset;
2634    src += y * src_stride;
2635    dst = data;
2636 
2637    if (copy_width > src_stride - x_offset)
2638       copy_width = src_stride - x_offset;
2639    if (h > dri2_surf->base.Height - y)
2640       h = dri2_surf->base.Height - y;
2641 
2642    for (; h > 0; h--) {
2643       memcpy(dst, src, copy_width);
2644       src += src_stride;
2645       dst += dst_stride;
2646    }
2647 }
2648 
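/* Copy the rendered rows into the shm back buffer, clamping the rectangle to
 * the surface size as drivers expect.
 */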
2649 static void
2650 dri2_wl_swrast_put_image2(struct dri_drawable *draw, int op, int x, int y, int w,
2651                           int h, int stride, char *data, void *loaderPrivate)
2652 {
2653    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2654    /* clamp to surface size */
2655    w = MIN2(w, dri2_surf->base.Width);
2656    h = MIN2(h, dri2_surf->base.Height);
2657    int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2658    int dst_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
2659                                                          dri2_surf->base.Width);
2660    int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
2661    char *src, *dst;
2662 
2663    assert(copy_width <= stride);
2664 
2665    dst = dri2_wl_swrast_get_backbuffer_data(dri2_surf);
2666 
2667    dst += x_offset;
2668    dst += y * dst_stride;
2669 
2670    src = data;
2671 
2672    /* drivers expect us to do these checks (and some rely on it) */
2673    if (copy_width > dst_stride - x_offset)
2674       copy_width = dst_stride - x_offset;
2675    if (h > dri2_surf->base.Height - y)
2676       h = dri2_surf->base.Height - y;
2677 
2678    for (; h > 0; h--) {
2679       memcpy(dst, src, copy_width);
2680       src += stride;
2681       dst += dst_stride;
2682    }
2683 }
2684 
2685 static void
2686 dri2_wl_swrast_put_image(struct dri_drawable *draw, int op, int x, int y, int w,
2687                          int h, char *data, void *loaderPrivate)
2688 {
2689    struct dri2_egl_surface *dri2_surf = loaderPrivate;
2690    int stride;
2691 
2692    stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2693    dri2_wl_swrast_put_image2(draw, op, x, y, w, h, stride, data, loaderPrivate);
2694 }
2695 
2696 static EGLBoolean
2697 dri2_wl_kopper_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
2698                                         const EGLint *rects, EGLint n_rects)
2699 {
2700    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2701    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
2702 
2703    if (!dri2_surf->wl_win)
2704       return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
2705 
2706    if (!dri2_wl_surface_throttle(dri2_surf))
2707       return EGL_FALSE;
2708 
2709    if (n_rects) {
2710       if (dri2_dpy->kopper)
2711          kopperSwapBuffersWithDamage(dri2_surf->dri_drawable, __DRI2_FLUSH_INVALIDATE_ANCILLARY, n_rects, rects);
2712       else
2713          driSwapBuffersWithDamage(dri2_surf->dri_drawable, n_rects, rects);
2714    } else {
2715       if (dri2_dpy->kopper)
2716          kopperSwapBuffers(dri2_surf->dri_drawable, __DRI2_FLUSH_INVALIDATE_ANCILLARY);
2717       else
2718          driSwapBuffers(dri2_surf->dri_drawable);
2719    }
2720 
2721    dri2_surf->current = dri2_surf->back;
2722    dri2_surf->back = NULL;
2723 
2724    return EGL_TRUE;
2725 }
2726 
2727 static EGLBoolean
2728 dri2_wl_kopper_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
2729 {
2730    dri2_wl_kopper_swap_buffers_with_damage(disp, draw, NULL, 0);
2731    return EGL_TRUE;
2732 }
2733 
2734 static EGLBoolean
2735 dri2_wl_swrast_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
2736                                         const EGLint *rects, EGLint n_rects)
2737 {
2738    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
2739 
2740    if (!dri2_surf->wl_win)
2741       return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
2742 
2743    (void)swrast_update_buffers(dri2_surf);
2744 
2745    if (dri2_wl_surface_throttle(dri2_surf))
2746       wl_surface_attach(dri2_surf->wl_surface_wrapper,
2747          /* 'back' here will be promoted to 'current' */
2748          dri2_surf->back->wl_buffer, dri2_surf->dx,
2749          dri2_surf->dy);
2750 
2751    /* If the compositor doesn't support damage_buffer, we deliberately
2752     * ignore the damage region and post maximum damage, due to
2753     * https://bugs.freedesktop.org/78190 */
2754    if (!n_rects || !try_damage_buffer(dri2_surf, rects, n_rects))
2755       wl_surface_damage(dri2_surf->wl_surface_wrapper, 0, 0, INT32_MAX,
2756                         INT32_MAX);
2757 
2758    /* guarantee full copy for partial update */
2759    int w = n_rects == 1 ? (rects[2] - rects[0]) : 0;
2760    int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2761    int dst_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
2762                                                          dri2_surf->base.Width);
2763    char *dst = dri2_wl_swrast_get_backbuffer_data(dri2_surf);
2764 
2765    /* partial copy, copy old content */
2766    if (copy_width < dst_stride)
2767       dri2_wl_swrast_get_image(NULL, 0, 0, dri2_surf->base.Width,
2768                                  dri2_surf->base.Height, dst, dri2_surf);
2769 
2770    if (n_rects)
2771       driSwapBuffersWithDamage(dri2_surf->dri_drawable, n_rects, rects);
2772    else
2773       driSwapBuffers(dri2_surf->dri_drawable);
2774 
2775    dri2_surf->current = dri2_surf->back;
2776    dri2_surf->back = NULL;
2777 
2778    dri2_wl_swrast_commit_backbuffer(dri2_surf);
2779    return EGL_TRUE;
2780 }
2781 
2782 static EGLBoolean
2783 dri2_wl_swrast_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
2784 {
2785    dri2_wl_swrast_swap_buffers_with_damage(disp, draw, NULL, 0);
2786    return EGL_TRUE;
2787 }
2788 
2789 static EGLint
2790 dri2_wl_kopper_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
2791 {
2792    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2793    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
2794 
2795    /* This can legitimately be null for lavapipe */
2796    if (dri2_dpy->kopper)
2797       return kopperQueryBufferAge(dri2_surf->dri_drawable);
2798    else
2799       return driSWRastQueryBufferAge(dri2_surf->dri_drawable);
2800    return 0;
2801 }
2802 
2803 static EGLint
2804 dri2_wl_swrast_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
2805 {
2806    struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2807    struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
2808 
2809    assert(dri2_dpy->swrast);
2810    return driSWRastQueryBufferAge(dri2_surf->dri_drawable);
2811 }
2812 
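/* wl_shm advertises each supported pixel format via a format event at bind
 * time; record the ones we also have a visual for in the display's format
 * bitmap.
 */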
static void
shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
{
   struct dri2_egl_display *dri2_dpy = data;
   int visual_idx = dri2_wl_visual_idx_from_shm_format(format);

   if (visual_idx == -1)
      return;

   BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
}

static const struct wl_shm_listener shm_listener = {
   .format = shm_handle_format,
};

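/* Registry listener for the kopper (Zink) path: bind wl_shm immediately,
 * remember the wl_drm global's name and version for later binding, and bind
 * zwp_linux_dmabuf_v1 (version 3+) so dmabuf format/modifier support can be
 * queried.
 */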
static void
registry_handle_global_kopper(void *data, struct wl_registry *registry,
                              uint32_t name, const char *interface,
                              uint32_t version)
{
   struct dri2_egl_display *dri2_dpy = data;

   if (strcmp(interface, wl_shm_interface.name) == 0) {
      dri2_dpy->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
      wl_shm_add_listener(dri2_dpy->wl_shm, &shm_listener, dri2_dpy);
   }
   if (strcmp(interface, wl_drm_interface.name) == 0) {
      dri2_dpy->wl_drm_version = MIN2(version, 2);
      dri2_dpy->wl_drm_name = name;
   } else if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 &&
              version >= 3) {
      dri2_dpy->wl_dmabuf = wl_registry_bind(
         registry, name, &zwp_linux_dmabuf_v1_interface,
         MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
      zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
                                       dri2_dpy);
   }
}

static const struct wl_registry_listener registry_listener_kopper = {
   .global = registry_handle_global_kopper,
   .global_remove = registry_handle_global_remove,
};

static void
registry_handle_global_swrast(void *data, struct wl_registry *registry,
                              uint32_t name, const char *interface,
                              uint32_t version)
{
   struct dri2_egl_display *dri2_dpy = data;

   if (strcmp(interface, wl_shm_interface.name) == 0) {
      dri2_dpy->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
      wl_shm_add_listener(dri2_dpy->wl_shm, &shm_listener, dri2_dpy);
   }
}

static const struct wl_registry_listener registry_listener_swrast = {
   .global = registry_handle_global_swrast,
   .global_remove = registry_handle_global_remove,
};

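/* Display vtbls for the two software-backed paths: plain swrast over wl_shm,
 * and kopper, which routes swaps and buffer-age queries through the Vulkan
 * WSI when Zink is in use.
 */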
static const struct dri2_egl_display_vtbl dri2_wl_swrast_display_vtbl = {
   .authenticate = NULL,
   .create_window_surface = dri2_wl_create_window_surface,
   .create_pixmap_surface = dri2_wl_create_pixmap_surface,
   .destroy_surface = dri2_wl_destroy_surface,
   .swap_interval = dri2_wl_swap_interval,
   .create_image = dri2_create_image_khr,
   .swap_buffers = dri2_wl_swrast_swap_buffers,
   .swap_buffers_with_damage = dri2_wl_swrast_swap_buffers_with_damage,
   .get_dri_drawable = dri2_surface_get_dri_drawable,
   .query_buffer_age = dri2_wl_swrast_query_buffer_age,
};

static const struct dri2_egl_display_vtbl dri2_wl_kopper_display_vtbl = {
   .authenticate = NULL,
   .create_window_surface = dri2_wl_create_window_surface,
   .create_pixmap_surface = dri2_wl_create_pixmap_surface,
   .destroy_surface = dri2_wl_destroy_surface,
   .create_image = dri2_create_image_khr,
   .swap_buffers = dri2_wl_kopper_swap_buffers,
   .swap_buffers_with_damage = dri2_wl_kopper_swap_buffers_with_damage,
   .get_dri_drawable = dri2_surface_get_dri_drawable,
   .query_buffer_age = dri2_wl_kopper_query_buffer_age,
};

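/* __DRI_SWRAST_LOADER implementations: the callbacks the software rasterizer
 * uses to query drawable geometry and to read/write pixels in the wl_shm
 * back buffer. The kopper variant differs only in getDrawableInfo.
 */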
static const __DRIswrastLoaderExtension swrast_loader_extension = {
   .base = {__DRI_SWRAST_LOADER, 2},

   .getDrawableInfo = dri2_wl_swrast_get_drawable_info,
   .putImage = dri2_wl_swrast_put_image,
   .getImage = dri2_wl_swrast_get_image,
   .putImage2 = dri2_wl_swrast_put_image2,
};

static const __DRIswrastLoaderExtension kopper_swrast_loader_extension = {
   .base = {__DRI_SWRAST_LOADER, 2},

   .getDrawableInfo = dri2_wl_kopper_get_drawable_info,
   .putImage = dri2_wl_swrast_put_image,
   .getImage = dri2_wl_swrast_get_image,
   .putImage2 = dri2_wl_swrast_put_image2,
};

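/* The kopper loader info embeds opaque storage (out->bos below) that is
 * reinterpreted as a VkWaylandSurfaceCreateInfoKHR, so make sure it is large
 * enough to hold one.
 */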
static_assert(sizeof(struct kopper_vk_surface_create_storage) >=
                 sizeof(VkWaylandSurfaceCreateInfoKHR),
              "");

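/* Fill in the VkWaylandSurfaceCreateInfoKHR kopper uses to create the
 * VkSurfaceKHR for the wrapped wl_surface, carry over the
 * EGL_EXT_present_opaque setting, and translate the EGL fixed-rate
 * compression attribute into the matching
 * VK_IMAGE_COMPRESSION_FIXED_RATE_*_BIT_EXT value.
 */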
static void
kopperSetSurfaceCreateInfo(void *_draw, struct kopper_loader_info *out)
{
   struct dri2_egl_surface *dri2_surf = _draw;
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);
   VkWaylandSurfaceCreateInfoKHR *wlsci =
      (VkWaylandSurfaceCreateInfoKHR *)&out->bos;

   wlsci->sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
   wlsci->pNext = NULL;
   wlsci->flags = 0;
   wlsci->display = dri2_dpy->wl_dpy;
   wlsci->surface = dri2_surf->wl_surface_wrapper;
   out->present_opaque = dri2_surf->base.PresentOpaque;
   /* convert to vulkan constants */
   switch (dri2_surf->base.CompressionRate) {
   case EGL_SURFACE_COMPRESSION_FIXED_RATE_NONE_EXT:
      out->compression = 0;
      break;
   case EGL_SURFACE_COMPRESSION_FIXED_RATE_DEFAULT_EXT:
      out->compression = UINT32_MAX;
      break;
#define EGL_VK_COMP(NUM) \
   case EGL_SURFACE_COMPRESSION_FIXED_RATE_##NUM##BPC_EXT: \
      out->compression = VK_IMAGE_COMPRESSION_FIXED_RATE_##NUM##BPC_BIT_EXT; \
      break
   EGL_VK_COMP(1);
   EGL_VK_COMP(2);
   EGL_VK_COMP(3);
   EGL_VK_COMP(4);
   EGL_VK_COMP(5);
   EGL_VK_COMP(6);
   EGL_VK_COMP(7);
   EGL_VK_COMP(8);
   EGL_VK_COMP(9);
   EGL_VK_COMP(10);
   EGL_VK_COMP(11);
   EGL_VK_COMP(12);
#undef EGL_VK_COMP
   default:
      unreachable("unknown compression rate");
   }
}

static const __DRIkopperLoaderExtension kopper_loader_extension = {
   .base = {__DRI_KOPPER_LOADER, 1},

   .SetSurfaceCreateInfo = kopperSetSurfaceCreateInfo,
};

static const __DRIextension *swrast_loader_extensions[] = {
   &swrast_loader_extension.base,
   &image_lookup_extension.base,
   NULL,
};

static const __DRIextension *kopper_swrast_loader_extensions[] = {
   &kopper_swrast_loader_extension.base,
   &image_lookup_extension.base,
   &kopper_loader_extension.base,
   &use_invalidate.base,
   NULL,
};

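/* Initialize the Wayland platform for the software and Zink paths: connect
 * to (or wrap) the wl_display on a private event queue, listen on the
 * registry, roundtrip once for the globals and once more for the wl_shm
 * format events, optionally set up wl_drm/dmabuf and pick a render GPU for
 * Zink, then load the driver, create the DRI screen and advertise configs
 * and extensions.
 */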
static EGLBoolean
dri2_initialize_wayland_swrast(_EGLDisplay *disp)
{
   struct dri2_egl_display *dri2_dpy = dri2_display_create();
   if (!dri2_dpy)
      return EGL_FALSE;

   disp->DriverData = (void *)dri2_dpy;

   if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
      goto cleanup;

   if (disp->PlatformDisplay == NULL) {
      dri2_dpy->wl_dpy = wl_display_connect(NULL);
      if (dri2_dpy->wl_dpy == NULL)
         goto cleanup;
      dri2_dpy->own_device = true;
   } else {
      dri2_dpy->wl_dpy = disp->PlatformDisplay;
   }

   dri2_dpy->wl_queue = wl_display_create_queue_with_name(
      dri2_dpy->wl_dpy, "mesa egl swrast display queue");

   dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
   if (dri2_dpy->wl_dpy_wrapper == NULL)
      goto cleanup;

   wl_proxy_set_queue((struct wl_proxy *)dri2_dpy->wl_dpy_wrapper,
                      dri2_dpy->wl_queue);

   if (dri2_dpy->own_device)
      wl_display_dispatch_pending(dri2_dpy->wl_dpy);

   dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
   if (disp->Options.Zink)
      wl_registry_add_listener(dri2_dpy->wl_registry,
                               &registry_listener_kopper, dri2_dpy);
   else
      wl_registry_add_listener(dri2_dpy->wl_registry,
                               &registry_listener_swrast, dri2_dpy);

   if (roundtrip(dri2_dpy) < 0 || dri2_dpy->wl_shm == NULL)
      goto cleanup;

   if (roundtrip(dri2_dpy) < 0 ||
       !BITSET_TEST_RANGE(dri2_dpy->formats.formats_bitmap, 0,
                          dri2_dpy->formats.num_formats))
      goto cleanup;

   if (disp->Options.Zink) {
      if (!dri2_initialize_wayland_drm_extensions(dri2_dpy) &&
          !disp->Options.ForceSoftware)
         goto cleanup;

      if (!disp->Options.ForceSoftware) {
         loader_get_user_preferred_fd(&dri2_dpy->fd_render_gpu,
                                      &dri2_dpy->fd_display_gpu);

         if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
            free(dri2_dpy->device_name);
            dri2_dpy->device_name =
               loader_get_device_name_for_fd(dri2_dpy->fd_render_gpu);
            if (!dri2_dpy->device_name) {
               _eglError(EGL_BAD_ALLOC, "wayland-egl: failed to get device name "
                                        "for requested GPU");
               goto cleanup;
            }
         }

         /* we have to do the check now, because loader_get_user_preferred_fd
          * will return a render-node when the requested gpu is different
          * from the server, but also if the client asks for the same gpu as
          * the server by requesting its pci-id */
         dri2_dpy->is_render_node =
            drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER;
      }
   }

   dri2_dpy->driver_name = strdup(disp->Options.Zink ? "zink" : "swrast");
   if (!dri2_load_driver(disp))
      goto cleanup;

   dri2_dpy->loader_extensions = disp->Options.Zink
                                    ? kopper_swrast_loader_extensions
                                    : swrast_loader_extensions;

   if (!dri2_create_screen(disp))
      goto cleanup;

   if (!dri2_setup_device(disp, disp->Options.ForceSoftware)) {
      _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to setup EGLDevice");
      goto cleanup;
   }

   dri2_setup_screen(disp);

   dri2_wl_setup_swap_interval(disp);

   dri2_wl_add_configs_for_visuals(disp);

   if (disp->Options.Zink && dri2_dpy->fd_render_gpu >= 0 &&
       (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm))
      dri2_set_WL_bind_wayland_display(disp);
   disp->Extensions.EXT_buffer_age = EGL_TRUE;
   disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;
   disp->Extensions.EXT_present_opaque = EGL_TRUE;

   /* Fill vtbl last to prevent accidentally calling virtual function during
    * initialization.
    */
   dri2_dpy->vtbl = disp->Options.Zink ? &dri2_wl_kopper_display_vtbl
                                       : &dri2_wl_swrast_display_vtbl;

   return EGL_TRUE;

cleanup:
   dri2_display_destroy(disp);
   return EGL_FALSE;
}

EGLBoolean
dri2_initialize_wayland(_EGLDisplay *disp)
{
   if (disp->Options.ForceSoftware || disp->Options.Zink)
      return dri2_initialize_wayland_swrast(disp);
   else
      return dri2_initialize_wayland_drm(disp);
}

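/* Destroy the Wayland protocol objects created at init time and drop the
 * display connection if we opened it ourselves.
 */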
void
dri2_teardown_wayland(struct dri2_egl_display *dri2_dpy)
{
   dri2_wl_formats_fini(&dri2_dpy->formats);
   if (dri2_dpy->wl_drm)
      wl_drm_destroy(dri2_dpy->wl_drm);
   if (dri2_dpy->wl_dmabuf)
      zwp_linux_dmabuf_v1_destroy(dri2_dpy->wl_dmabuf);
   if (dri2_dpy->wl_shm)
      wl_shm_destroy(dri2_dpy->wl_shm);
   if (dri2_dpy->wl_registry)
      wl_registry_destroy(dri2_dpy->wl_registry);
   if (dri2_dpy->wl_dpy_wrapper)
      wl_proxy_wrapper_destroy(dri2_dpy->wl_dpy_wrapper);
   if (dri2_dpy->wl_queue)
      wl_event_queue_destroy(dri2_dpy->wl_queue);

   if (dri2_dpy->own_device)
      wl_display_disconnect(dri2_dpy->wl_dpy);
}