1 /*
2 * Copyright © 2011-2012 Intel Corporation
3 * Copyright © 2012 Collabora, Ltd.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
20 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Kristian Høgsberg <krh@bitplanet.net>
27 * Benjamin Franzke <benjaminfranzke@googlemail.com>
28 */
29
30 #include <dlfcn.h>
31 #include <errno.h>
32 #include <fcntl.h>
33 #include <limits.h>
34 #include <stdint.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38 #include <xf86drm.h>
39 #include "drm-uapi/drm_fourcc.h"
40 #include <sys/mman.h>
41 #include <vulkan/vulkan_core.h>
42 #include <vulkan/vulkan_wayland.h>
43
44 #include "util/anon_file.h"
45 #include "util/u_vector.h"
46 #include "util/format/u_formats.h"
47 #include "main/glconfig.h"
48 #include "egl_dri2.h"
49 #include "eglglobals.h"
50 #include "kopper_interface.h"
51 #include "loader.h"
52 #include "loader_dri_helper.h"
53 #include <loader_wayland_helper.h>
54
55 #include "linux-dmabuf-unstable-v1-client-protocol.h"
56 #include "wayland-drm-client-protocol.h"
57 #include <wayland-client.h>
58 #include <wayland-egl-backend.h>
59
60 /*
61 * The index of entries in this table is used as a bitmask in
62 * dri2_dpy->formats.formats_bitmap, which tracks the formats supported
63 * by our server.
64 */
static const struct dri2_wl_visual {
   /* wl_drm format code; doubles as the DRM fourcc for every format here. */
   uint32_t wl_drm_format;
   /* Renderer-side pipe format for this wl_drm format. */
   int pipe_format;
   /* alt_pipe_format is a substitute wl_buffer format to use for a
    * wl-server unsupported pipe_format, ie. some other pipe_format in
    * the table, of the same precision but with different channel ordering, or
    * PIPE_FORMAT_NONE if an alternate format is not needed or supported.
    * The code checks if alt_pipe_format can be used as a fallback for a
    * pipe_format for a given wl-server implementation.
    */
   int alt_pipe_format;
   /* wl_drm format used for opaque variants; alpha formats map to a
    * matching X (alpha-ignored) format where one exists in the table. */
   int opaque_wl_drm_format;
} dri2_wl_visuals[] = {
   /* NOTE: entry order is significant — the index into this table is the
    * bit position used in dri2_dpy->formats.formats_bitmap (see the comment
    * above this table). Do not reorder entries. */
   {
      WL_DRM_FORMAT_ABGR16F,
      PIPE_FORMAT_R16G16B16A16_FLOAT,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XBGR16F,
   },
   {
      WL_DRM_FORMAT_XBGR16F,
      PIPE_FORMAT_R16G16B16X16_FLOAT,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XBGR16F,
   },
   {
      WL_DRM_FORMAT_XRGB2101010,
      PIPE_FORMAT_B10G10R10X2_UNORM,
      PIPE_FORMAT_R10G10B10X2_UNORM,
      WL_DRM_FORMAT_XRGB2101010,
   },
   {
      WL_DRM_FORMAT_ARGB2101010,
      PIPE_FORMAT_B10G10R10A2_UNORM,
      PIPE_FORMAT_R10G10B10A2_UNORM,
      WL_DRM_FORMAT_XRGB2101010,
   },
   {
      WL_DRM_FORMAT_XBGR2101010,
      PIPE_FORMAT_R10G10B10X2_UNORM,
      PIPE_FORMAT_B10G10R10X2_UNORM,
      WL_DRM_FORMAT_XBGR2101010,
   },
   {
      WL_DRM_FORMAT_ABGR2101010,
      PIPE_FORMAT_R10G10B10A2_UNORM,
      PIPE_FORMAT_B10G10R10A2_UNORM,
      WL_DRM_FORMAT_XBGR2101010,
   },
   {
      WL_DRM_FORMAT_XRGB8888,
      PIPE_FORMAT_BGRX8888_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XRGB8888,
   },
   {
      WL_DRM_FORMAT_ARGB8888,
      PIPE_FORMAT_BGRA8888_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XRGB8888,
   },
   {
      WL_DRM_FORMAT_ABGR8888,
      PIPE_FORMAT_RGBA8888_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XBGR8888,
   },
   {
      WL_DRM_FORMAT_XBGR8888,
      PIPE_FORMAT_RGBX8888_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_XBGR8888,
   },
   {
      WL_DRM_FORMAT_RGB565,
      PIPE_FORMAT_B5G6R5_UNORM,
      PIPE_FORMAT_NONE,
      WL_DRM_FORMAT_RGB565,
   },
   {
      WL_DRM_FORMAT_ARGB1555,
      PIPE_FORMAT_B5G5R5A1_UNORM,
      PIPE_FORMAT_R5G5B5A1_UNORM,
      WL_DRM_FORMAT_XRGB1555,
   },
   {
      WL_DRM_FORMAT_XRGB1555,
      PIPE_FORMAT_B5G5R5X1_UNORM,
      PIPE_FORMAT_R5G5B5X1_UNORM,
      WL_DRM_FORMAT_XRGB1555,
   },
   {
      WL_DRM_FORMAT_ARGB4444,
      PIPE_FORMAT_B4G4R4A4_UNORM,
      PIPE_FORMAT_R4G4B4A4_UNORM,
      WL_DRM_FORMAT_XRGB4444,
   },
   {
      WL_DRM_FORMAT_XRGB4444,
      PIPE_FORMAT_B4G4R4X4_UNORM,
      PIPE_FORMAT_R4G4B4X4_UNORM,
      WL_DRM_FORMAT_XRGB4444,
   },
};
169
170 static int
dri2_wl_visual_idx_from_pipe_format(enum pipe_format pipe_format)171 dri2_wl_visual_idx_from_pipe_format(enum pipe_format pipe_format)
172 {
173 for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
174 if (dri2_wl_visuals[i].pipe_format == pipe_format)
175 return i;
176 }
177
178 return -1;
179 }
180
181 static int
dri2_wl_visual_idx_from_config(struct dri2_egl_display * dri2_dpy,const __DRIconfig * config)182 dri2_wl_visual_idx_from_config(struct dri2_egl_display *dri2_dpy,
183 const __DRIconfig *config)
184 {
185 struct gl_config *gl_config = (struct gl_config *) config;
186
187 return dri2_wl_visual_idx_from_pipe_format(gl_config->color_format);
188 }
189
190 static int
dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)191 dri2_wl_visual_idx_from_fourcc(uint32_t fourcc)
192 {
193 for (int i = 0; i < ARRAY_SIZE(dri2_wl_visuals); i++) {
194 /* wl_drm format codes overlap with DRIImage FourCC codes for all formats
195 * we support. */
196 if (dri2_wl_visuals[i].wl_drm_format == fourcc)
197 return i;
198 }
199
200 return -1;
201 }
202
203 static int
dri2_wl_shm_format_from_visual_idx(int idx)204 dri2_wl_shm_format_from_visual_idx(int idx)
205 {
206 uint32_t fourcc = dri2_wl_visuals[idx].wl_drm_format;
207
208 if (fourcc == WL_DRM_FORMAT_ARGB8888)
209 return WL_SHM_FORMAT_ARGB8888;
210 else if (fourcc == WL_DRM_FORMAT_XRGB8888)
211 return WL_SHM_FORMAT_XRGB8888;
212 else
213 return fourcc;
214 }
215
216 static int
dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)217 dri2_wl_visual_idx_from_shm_format(uint32_t shm_format)
218 {
219 uint32_t fourcc;
220
221 if (shm_format == WL_SHM_FORMAT_ARGB8888)
222 fourcc = WL_DRM_FORMAT_ARGB8888;
223 else if (shm_format == WL_SHM_FORMAT_XRGB8888)
224 fourcc = WL_DRM_FORMAT_XRGB8888;
225 else
226 fourcc = shm_format;
227
228 return dri2_wl_visual_idx_from_fourcc(fourcc);
229 }
230
231 bool
dri2_wl_is_format_supported(void * user_data,uint32_t format)232 dri2_wl_is_format_supported(void *user_data, uint32_t format)
233 {
234 _EGLDisplay *disp = (_EGLDisplay *)user_data;
235 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
236 int j = dri2_wl_visual_idx_from_fourcc(format);
237
238 if (j == -1)
239 return false;
240
241 for (int i = 0; dri2_dpy->driver_configs[i]; i++)
242 if (j == dri2_wl_visual_idx_from_config(
243 dri2_dpy, dri2_dpy->driver_configs[i]))
244 return true;
245
246 return false;
247 }
248
249 static bool
server_supports_format(struct dri2_wl_formats * formats,int idx)250 server_supports_format(struct dri2_wl_formats *formats, int idx)
251 {
252 return idx >= 0 && BITSET_TEST(formats->formats_bitmap, idx);
253 }
254
255 static bool
server_supports_pipe_format(struct dri2_wl_formats * formats,enum pipe_format format)256 server_supports_pipe_format(struct dri2_wl_formats *formats,
257 enum pipe_format format)
258 {
259 return server_supports_format(formats,
260 dri2_wl_visual_idx_from_pipe_format(format));
261 }
262
263 static bool
server_supports_fourcc(struct dri2_wl_formats * formats,uint32_t fourcc)264 server_supports_fourcc(struct dri2_wl_formats *formats, uint32_t fourcc)
265 {
266 return server_supports_format(formats, dri2_wl_visual_idx_from_fourcc(fourcc));
267 }
268
/* Block until the compositor has processed all pending requests on our
 * private event queue. Returns the result of wl_display_roundtrip_queue()
 * (negative on error). */
static int
roundtrip(struct dri2_egl_display *dri2_dpy)
{
   return wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_dpy->wl_queue);
}
274
275 static void
wl_buffer_release(void * data,struct wl_buffer * buffer)276 wl_buffer_release(void *data, struct wl_buffer *buffer)
277 {
278 struct dri2_egl_surface *dri2_surf = data;
279 int i;
280
281 for (i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); ++i)
282 if (dri2_surf->color_buffers[i].wl_buffer == buffer)
283 break;
284
285 assert(i < ARRAY_SIZE(dri2_surf->color_buffers));
286
287 if (dri2_surf->color_buffers[i].wl_release) {
288 wl_buffer_destroy(buffer);
289 dri2_surf->color_buffers[i].wl_release = false;
290 dri2_surf->color_buffers[i].wl_buffer = NULL;
291 dri2_surf->color_buffers[i].age = 0;
292 }
293
294 dri2_surf->color_buffers[i].locked = false;
295 }
296
/* Listener attached to every wl_buffer we create for a surface. */
static const struct wl_buffer_listener wl_buffer_listener = {
   .release = wl_buffer_release,
};
300
301 static void
dri2_wl_formats_fini(struct dri2_wl_formats * formats)302 dri2_wl_formats_fini(struct dri2_wl_formats *formats)
303 {
304 unsigned int i;
305
306 for (i = 0; i < formats->num_formats; i++)
307 u_vector_finish(&formats->modifiers[i]);
308
309 free(formats->modifiers);
310 free(formats->formats_bitmap);
311 }
312
/* Allocate the format bitmap and per-format modifier vectors. Returns 0 on
 * success, -1 on allocation failure (with EGL_BAD_ALLOC raised). On failure
 * everything allocated so far is unwound. */
static int
dri2_wl_formats_init(struct dri2_wl_formats *formats)
{
   unsigned int i, j;

   /* formats->formats_bitmap tells us if a format in dri2_wl_visuals is present
    * or not. So we must compute the amount of unsigned int's needed to
    * represent all the formats of dri2_wl_visuals. We use BITSET_WORDS for
    * this task. */
   formats->num_formats = ARRAY_SIZE(dri2_wl_visuals);
   formats->formats_bitmap = calloc(BITSET_WORDS(formats->num_formats),
                                    sizeof(*formats->formats_bitmap));
   if (!formats->formats_bitmap)
      goto err;

   /* Here we have an array of u_vector's to store the modifiers supported by
    * each format in the bitmask. */
   formats->modifiers =
      calloc(formats->num_formats, sizeof(*formats->modifiers));
   if (!formats->modifiers)
      goto err_modifier;

   for (i = 0; i < formats->num_formats; i++)
      if (!u_vector_init_pow2(&formats->modifiers[i], 4, sizeof(uint64_t))) {
         /* Record how many vectors were initialized so the unwind path
          * finishes only those. */
         j = i;
         goto err_vector_init;
      }

   return 0;

err_vector_init:
   for (i = 0; i < j; i++)
      u_vector_finish(&formats->modifiers[i]);
   free(formats->modifiers);
err_modifier:
   free(formats->formats_bitmap);
err:
   _eglError(EGL_BAD_ALLOC, "dri2_wl_formats_init");
   return -1;
}
353
354 static void
dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table * format_table)355 dmabuf_feedback_format_table_fini(
356 struct dmabuf_feedback_format_table *format_table)
357 {
358 if (format_table->data && format_table->data != MAP_FAILED)
359 munmap(format_table->data, format_table->size);
360 }
361
362 static void
dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table * format_table)363 dmabuf_feedback_format_table_init(
364 struct dmabuf_feedback_format_table *format_table)
365 {
366 memset(format_table, 0, sizeof(*format_table));
367 }
368
/* Release the format/modifier storage owned by a tranche. */
static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
{
   dri2_wl_formats_fini(&tranche->formats);
}
374
375 static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche * tranche)376 dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
377 {
378 memset(tranche, 0, sizeof(*tranche));
379
380 if (dri2_wl_formats_init(&tranche->formats) < 0)
381 return -1;
382
383 return 0;
384 }
385
/* Tear down a dma-buf feedback object: the pending tranche, every completed
 * tranche, the tranche array itself, and the mapped format table. */
static void
dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
{
   dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);

   util_dynarray_foreach (&dmabuf_feedback->tranches,
                          struct dmabuf_feedback_tranche, tranche)
      dmabuf_feedback_tranche_fini(tranche);
   util_dynarray_fini(&dmabuf_feedback->tranches);

   dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
}
398
399 static int
dmabuf_feedback_init(struct dmabuf_feedback * dmabuf_feedback)400 dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
401 {
402 memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));
403
404 if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
405 return -1;
406
407 util_dynarray_init(&dmabuf_feedback->tranches, NULL);
408
409 dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);
410
411 return 0;
412 }
413
/* Called by libwayland-egl when the client resizes the wl_egl_window.
 * Records the resize and invalidates the drawable so new buffers get
 * allocated at the new size. */
static void
resize_callback(struct wl_egl_window *wl_win, void *data)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);

   /* No-op if the size didn't actually change. */
   if (dri2_surf->base.Width == wl_win->width &&
       dri2_surf->base.Height == wl_win->height)
      return;

   dri2_surf->resized = true;

   /* Update the surface size as soon as native window is resized; from user
    * pov, this makes the effect that resize is done immediately after native
    * window resize, without requiring to wait until the first draw.
    *
    * A more detailed and lengthy explanation can be found at
    * https://lists.freedesktop.org/archives/mesa-dev/2018-June/196474.html
    */
   if (!dri2_surf->back) {
      dri2_surf->base.Width = wl_win->width;
      dri2_surf->base.Height = wl_win->height;
   }
   dri2_dpy->flush->invalidate(dri2_surf->dri_drawable);
}
440
441 static void
destroy_window_callback(void * data)442 destroy_window_callback(void *data)
443 {
444 struct dri2_egl_surface *dri2_surf = data;
445 dri2_surf->wl_win = NULL;
446 }
447
/* Create a wrapper proxy for the window's wl_surface, coping with both the
 * old and new wl_egl_window layouts. */
static struct wl_surface *
get_wl_surface_proxy(struct wl_egl_window *window)
{
   /* Version 3 of wl_egl_window introduced a version field at the same
    * location where a pointer to wl_surface was stored. Thus, if
    * window->version is dereferenceable, we've been given an older version of
    * wl_egl_window, and window->version points to wl_surface */
   if (_eglPointerIsDereferenceable((void *)(window->version))) {
      return wl_proxy_create_wrapper((void *)(window->version));
   }
   return wl_proxy_create_wrapper(window->surface);
}
460
461 static void
surface_dmabuf_feedback_format_table(void * data,struct zwp_linux_dmabuf_feedback_v1 * zwp_linux_dmabuf_feedback_v1,int32_t fd,uint32_t size)462 surface_dmabuf_feedback_format_table(
463 void *data,
464 struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
465 int32_t fd, uint32_t size)
466 {
467 struct dri2_egl_surface *dri2_surf = data;
468 struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
469
470 feedback->format_table.size = size;
471 feedback->format_table.data =
472 mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
473
474 close(fd);
475 }
476
477 static void
surface_dmabuf_feedback_main_device(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * device)478 surface_dmabuf_feedback_main_device(
479 void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
480 struct wl_array *device)
481 {
482 struct dri2_egl_surface *dri2_surf = data;
483 struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
484
485 memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
486
487 /* Compositors may support switching render devices and change the main
488 * device of the dma-buf feedback. In this case, when we reallocate the
489 * buffers of the surface we must ensure that it is not allocated in memory
490 * that is only visible to the GPU that EGL is using, as the compositor will
491 * have to import them to the render device it is using.
492 *
493 * TODO: we still don't know how to allocate such buffers.
494 */
495 if (dri2_surf->dmabuf_feedback.main_device != 0 &&
496 (feedback->main_device != dri2_surf->dmabuf_feedback.main_device))
497 dri2_surf->compositor_using_another_device = true;
498 else
499 dri2_surf->compositor_using_another_device = false;
500 }
501
/* dma-buf feedback "tranche_target_device" event: record the target device
 * of the tranche currently being assembled. */
static void
surface_dmabuf_feedback_tranche_target_device(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *device)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   memcpy(&feedback->pending_tranche.target_device, device->data,
          sizeof(feedback->pending_tranche.target_device));
}
513
/* dma-buf feedback "tranche_flags" event: record the flags (e.g. SCANOUT)
 * of the tranche currently being assembled. */
static void
surface_dmabuf_feedback_tranche_flags(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   uint32_t flags)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   feedback->pending_tranche.flags = flags;
}
524
/* dma-buf feedback "tranche_formats" event: collect, into the pending
 * tranche, every (format, modifier) pair from the format table that matches
 * the format this surface already uses. */
static void
surface_dmabuf_feedback_tranche_formats(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *indices)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;
   uint64_t *modifier_ptr, modifier;
   uint32_t format;
   uint16_t *index;
   int visual_idx;

   /* Compositor may advertise or not a format table. If it does, we use it.
    * Otherwise, we steal the most recent advertised format table. If we don't
    * have a most recent advertised format table, compositor did something
    * wrong. */
   if (feedback->format_table.data == NULL) {
      feedback->format_table = dri2_surf->dmabuf_feedback.format_table;
      dmabuf_feedback_format_table_init(
         &dri2_surf->dmabuf_feedback.format_table);
   }
   if (feedback->format_table.data == MAP_FAILED) {
      _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
                            "so we won't be able to use this batch of dma-buf "
                            "feedback events.");
      return;
   }
   /* Still NULL here means the stolen table was also empty, i.e. the
    * compositor never advertised one. */
   if (feedback->format_table.data == NULL) {
      _eglLog(_EGL_WARNING,
              "wayland-egl: compositor didn't advertise a format "
              "table, so we won't be able to use this batch of dma-buf "
              "feedback events.");
      return;
   }

   wl_array_for_each (index, indices) {
      format = feedback->format_table.data[*index].format;
      modifier = feedback->format_table.data[*index].modifier;

      /* Skip formats that are not the one the surface is already using. We
       * can't switch to another format. */
      if (format != dri2_surf->format)
         continue;

      /* We are sure that the format is supported because of the check above. */
      visual_idx = dri2_wl_visual_idx_from_fourcc(format);
      assert(visual_idx != -1);

      BITSET_SET(feedback->pending_tranche.formats.formats_bitmap, visual_idx);
      /* u_vector_add may fail on OOM; the modifier is then silently dropped
       * (best effort). */
      modifier_ptr =
         u_vector_add(&feedback->pending_tranche.formats.modifiers[visual_idx]);
      if (modifier_ptr)
         *modifier_ptr = modifier;
   }
}
580
/* dma-buf feedback "tranche_done" event: the pending tranche is complete;
 * move it into the tranche array and start a fresh pending tranche. */
static void
surface_dmabuf_feedback_tranche_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct dri2_egl_surface *dri2_surf = data;
   struct dmabuf_feedback *feedback = &dri2_surf->pending_dmabuf_feedback;

   /* Add tranche to array of tranches. */
   util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
                        feedback->pending_tranche);

   /* NOTE(review): the -1/OOM return of tranche_init is ignored here, which
    * would leave pending_tranche with no format storage — presumably later
    * events tolerate that; verify against the event handlers above. */
   dmabuf_feedback_tranche_init(&feedback->pending_tranche);
}
594
/* dma-buf feedback "done" event: the whole batch is complete. Promote the
 * pending feedback to current and reset the pending one for the next batch. */
static void
surface_dmabuf_feedback_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct dri2_egl_surface *dri2_surf = data;

   /* The dma-buf feedback protocol states that surface dma-buf feedback should
    * be sent by the compositor only if its buffers are using a suboptimal pair
    * of format and modifier. We can't change the buffer format, but we can
    * reallocate with another modifier. So we raise this flag in order to force
    * buffer reallocation based on the dma-buf feedback sent. */
   dri2_surf->received_dmabuf_feedback = true;

   /* Struct copy transfers ownership of the pending feedback's allocations;
    * re-init gives the pending object fresh storage. */
   dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
   dri2_surf->dmabuf_feedback = dri2_surf->pending_dmabuf_feedback;
   dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback);
}
612
/* Listener for per-surface dma-buf feedback events (linux-dmabuf v4+). */
static const struct zwp_linux_dmabuf_feedback_v1_listener
   surface_dmabuf_feedback_listener = {
      .format_table = surface_dmabuf_feedback_format_table,
      .main_device = surface_dmabuf_feedback_main_device,
      .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
      .tranche_flags = surface_dmabuf_feedback_tranche_flags,
      .tranche_formats = surface_dmabuf_feedback_tranche_formats,
      .tranche_done = surface_dmabuf_feedback_tranche_done,
      .done = surface_dmabuf_feedback_done,
};
623
624 /**
625 * Called via eglCreateWindowSurface(), drv->CreateWindowSurface().
626 */
/**
 * Called via eglCreateWindowSurface(), drv->CreateWindowSurface().
 *
 * Builds a dri2_egl_surface around a wl_egl_window: a private event queue,
 * queue-bound wrapper proxies for the display/drm/surface objects, optional
 * per-surface dma-buf feedback, and finally the DRI drawable. On any failure
 * the goto chain unwinds exactly what was created so far and returns NULL
 * with an EGL error set.
 */
static _EGLSurface *
dri2_wl_create_window_surface(_EGLDisplay *disp, _EGLConfig *conf,
                              void *native_window, const EGLint *attrib_list)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
   struct dri2_egl_config *dri2_conf = dri2_egl_config(conf);
   struct wl_egl_window *window = native_window;
   struct dri2_egl_surface *dri2_surf;
   struct zwp_linux_dmabuf_v1 *dmabuf_wrapper;
   int visual_idx;
   const __DRIconfig *config;

   if (!window) {
      _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_create_surface");
      return NULL;
   }

   /* A non-NULL driver_private means another EGL surface already owns this
    * native window. */
   if (window->driver_private) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      return NULL;
   }

   dri2_surf = calloc(1, sizeof *dri2_surf);
   if (!dri2_surf) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      return NULL;
   }

   if (!dri2_init_surface(&dri2_surf->base, disp, EGL_WINDOW_BIT, conf,
                          attrib_list, false, native_window))
      goto cleanup_surf;

   config = dri2_get_dri_config(dri2_conf, EGL_WINDOW_BIT,
                                dri2_surf->base.GLColorspace);

   if (!config) {
      _eglError(EGL_BAD_MATCH,
                "Unsupported surfacetype/colorspace configuration");
      goto cleanup_surf;
   }

   dri2_surf->base.Width = window->width;
   dri2_surf->base.Height = window->height;

   visual_idx = dri2_wl_visual_idx_from_config(dri2_dpy, config);
   assert(visual_idx != -1);
   assert(dri2_wl_visuals[visual_idx].pipe_format != PIPE_FORMAT_NONE);

   /* Pick the buffer format namespace: wl_drm/fourcc codes when buffers go
    * through drm/dmabuf, wl_shm codes when they go through shared memory. */
   if (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm) {
      dri2_surf->format = dri2_wl_visuals[visual_idx].wl_drm_format;
   } else {
      assert(dri2_dpy->wl_shm);
      dri2_surf->format = dri2_wl_shm_format_from_visual_idx(visual_idx);
   }

   /* Per-surface event queue so surface events don't interleave with the
    * application's main queue. */
   dri2_surf->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
                                                           "mesa egl surface queue");
   if (!dri2_surf->wl_queue) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_surf;
   }

   if (dri2_dpy->wl_drm) {
      dri2_surf->wl_drm_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_drm);
      if (!dri2_surf->wl_drm_wrapper) {
         _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
         goto cleanup_queue;
      }
      wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_drm_wrapper,
                         dri2_surf->wl_queue);
   }

   dri2_surf->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
   if (!dri2_surf->wl_dpy_wrapper) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_drm;
   }
   wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_dpy_wrapper,
                      dri2_surf->wl_queue);

   dri2_surf->wl_surface_wrapper = get_wl_surface_proxy(window);
   if (!dri2_surf->wl_surface_wrapper) {
      _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
      goto cleanup_dpy_wrapper;
   }
   wl_proxy_set_queue((struct wl_proxy *)dri2_surf->wl_surface_wrapper,
                      dri2_surf->wl_queue);

   /* Subscribe to per-surface dma-buf feedback when the compositor's
    * linux-dmabuf version supports it. */
   if (dri2_dpy->wl_dmabuf &&
       zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
          ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
      dmabuf_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dmabuf);
      if (!dmabuf_wrapper) {
         _eglError(EGL_BAD_ALLOC, "dri2_create_surface");
         goto cleanup_surf_wrapper;
      }
      wl_proxy_set_queue((struct wl_proxy *)dmabuf_wrapper,
                         dri2_surf->wl_queue);
      dri2_surf->wl_dmabuf_feedback = zwp_linux_dmabuf_v1_get_surface_feedback(
         dmabuf_wrapper, dri2_surf->wl_surface_wrapper);
      /* The wrapper was only needed to route the request to our queue. */
      wl_proxy_wrapper_destroy(dmabuf_wrapper);

      zwp_linux_dmabuf_feedback_v1_add_listener(
         dri2_surf->wl_dmabuf_feedback, &surface_dmabuf_feedback_listener,
         dri2_surf);

      if (dmabuf_feedback_init(&dri2_surf->pending_dmabuf_feedback) < 0) {
         zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
         goto cleanup_surf_wrapper;
      }
      if (dmabuf_feedback_init(&dri2_surf->dmabuf_feedback) < 0) {
         dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
         zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
         goto cleanup_surf_wrapper;
      }

      /* Pump the queue so the initial feedback batch is processed now. */
      if (roundtrip(dri2_dpy) < 0)
         goto cleanup_dmabuf_feedback;
   }

   dri2_surf->wl_win = window;
   dri2_surf->wl_win->driver_private = dri2_surf;
   dri2_surf->wl_win->destroy_window_callback = destroy_window_callback;
   if (dri2_dpy->flush)
      dri2_surf->wl_win->resize_callback = resize_callback;

   if (!dri2_create_drawable(dri2_dpy, config, dri2_surf, dri2_surf))
      goto cleanup_dmabuf_feedback;

   dri2_surf->base.SwapInterval = dri2_dpy->default_swap_interval;

   return &dri2_surf->base;

cleanup_dmabuf_feedback:
   if (dri2_surf->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
   }
cleanup_surf_wrapper:
   wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
cleanup_dpy_wrapper:
   wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
cleanup_drm:
   if (dri2_surf->wl_drm_wrapper)
      wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
cleanup_queue:
   wl_event_queue_destroy(dri2_surf->wl_queue);
cleanup_surf:
   free(dri2_surf);

   return NULL;
}
780
/* Pixmap surfaces are not a Wayland concept; this entry point exists only
 * to report the mandated error. */
static _EGLSurface *
dri2_wl_create_pixmap_surface(_EGLDisplay *disp, _EGLConfig *conf,
                              void *native_window, const EGLint *attrib_list)
{
   /* From the EGL_EXT_platform_wayland spec, version 3:
    *
    *   It is not valid to call eglCreatePlatformPixmapSurfaceEXT with a <dpy>
    *   that belongs to Wayland. Any such call fails and generates
    *   EGL_BAD_PARAMETER.
    */
   _eglError(EGL_BAD_PARAMETER, "cannot create EGL pixmap surfaces on "
                                "Wayland");
   return NULL;
}
795
796 /**
797 * Called via eglDestroySurface(), drv->DestroySurface().
798 */
/**
 * Called via eglDestroySurface(), drv->DestroySurface().
 *
 * Tears the surface down in the reverse order of creation: the DRI drawable,
 * every color buffer (wl_buffer, DRI images, shm mapping), local buffers,
 * the throttle callback, the native-window hooks, the wrapper proxies, the
 * dma-buf feedback state, and finally the private event queue.
 */
static EGLBoolean
dri2_wl_destroy_surface(_EGLDisplay *disp, _EGLSurface *surf)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
   struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);

   dri2_dpy->core->destroyDrawable(dri2_surf->dri_drawable);

   for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
      if (dri2_surf->color_buffers[i].wl_buffer)
         wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
      if (dri2_surf->color_buffers[i].dri_image)
         dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].dri_image);
      if (dri2_surf->color_buffers[i].linear_copy)
         dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].linear_copy);
      if (dri2_surf->color_buffers[i].data)
         munmap(dri2_surf->color_buffers[i].data,
                dri2_surf->color_buffers[i].data_size);
   }

   /* Local (DRI2 protocol) buffers exist only on the dri2 path. */
   if (dri2_dpy->dri2)
      dri2_egl_surface_free_local_buffers(dri2_surf);

   if (dri2_surf->throttle_callback)
      wl_callback_destroy(dri2_surf->throttle_callback);

   /* Unhook from the native window, if it still exists (it may already have
    * been destroyed, see destroy_window_callback). */
   if (dri2_surf->wl_win) {
      dri2_surf->wl_win->driver_private = NULL;
      dri2_surf->wl_win->resize_callback = NULL;
      dri2_surf->wl_win->destroy_window_callback = NULL;
   }

   wl_proxy_wrapper_destroy(dri2_surf->wl_surface_wrapper);
   wl_proxy_wrapper_destroy(dri2_surf->wl_dpy_wrapper);
   if (dri2_surf->wl_drm_wrapper)
      wl_proxy_wrapper_destroy(dri2_surf->wl_drm_wrapper);
   if (dri2_surf->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(dri2_surf->wl_dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->dmabuf_feedback);
      dmabuf_feedback_fini(&dri2_surf->pending_dmabuf_feedback);
   }
   wl_event_queue_destroy(dri2_surf->wl_queue);

   dri2_fini_surface(surf);
   free(surf);

   return EGL_TRUE;
}
847
848 static EGLBoolean
dri2_wl_swap_interval(_EGLDisplay * disp,_EGLSurface * surf,EGLint interval)849 dri2_wl_swap_interval(_EGLDisplay *disp, _EGLSurface *surf, EGLint interval)
850 {
851 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
852 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surf);
853
854 if (dri2_dpy->kopper)
855 dri2_dpy->kopper->setSwapInterval(dri2_surf->dri_drawable, interval);
856
857 return EGL_TRUE;
858 }
859
/* Drop every color buffer of the surface (used e.g. on resize). Buffers the
 * compositor still holds are flagged for deferred destruction instead of
 * being destroyed immediately (see wl_buffer_release). */
static void
dri2_wl_release_buffers(struct dri2_egl_surface *dri2_surf)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);

   for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
      if (dri2_surf->color_buffers[i].wl_buffer) {
         if (dri2_surf->color_buffers[i].locked) {
            /* Compositor still holds it; destroy on release instead. */
            dri2_surf->color_buffers[i].wl_release = true;
         } else {
            wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
            dri2_surf->color_buffers[i].wl_buffer = NULL;
         }
      }
      if (dri2_surf->color_buffers[i].dri_image)
         dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].dri_image);
      if (dri2_surf->color_buffers[i].linear_copy)
         dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].linear_copy);
      if (dri2_surf->color_buffers[i].data)
         munmap(dri2_surf->color_buffers[i].data,
                dri2_surf->color_buffers[i].data_size);

      dri2_surf->color_buffers[i].dri_image = NULL;
      dri2_surf->color_buffers[i].linear_copy = NULL;
      dri2_surf->color_buffers[i].data = NULL;
      dri2_surf->color_buffers[i].age = 0;
   }

   if (dri2_dpy->dri2)
      dri2_egl_surface_free_local_buffers(dri2_surf);
}
892
/* Allocate dri2_surf->back->dri_image using modifiers taken from the
 * surface's dma-buf feedback (zwp_linux_dmabuf_feedback_v1).
 *
 * On success dri2_surf->back->dri_image is set; on failure (or when no
 * valid feedback exists) it is left untouched so the caller can fall back
 * to create_dri_image(). */
static void
create_dri_image_from_dmabuf_feedback(struct dri2_egl_surface *dri2_surf,
                                      enum pipe_format pipe_format,
                                      uint32_t use_flags)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);
   int visual_idx;
   uint64_t *modifiers;
   unsigned int num_modifiers;
   uint32_t flags;

   /* We don't have valid dma-buf feedback, so return */
   if (dri2_surf->dmabuf_feedback.main_device == 0)
      return;

   visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
   assert(visual_idx != -1);

   /* Iterates through the dma-buf feedback to pick a new set of modifiers. The
    * tranches are sent in descending order of preference by the compositor, so
    * the first set that we can pick is the best one. For now we still can't
    * specify the target device in order to make the render device try its best
    * to allocate memory that can be directly scanned out by the KMS device. But
    * in the future this may change (newer versions of
    * createImageWithModifiers). Also, we are safe to pick modifiers from
    * tranches whose target device differs from the main device, as compositors
    * do not expose (in dma-buf feedback tranches) formats/modifiers that are
    * incompatible with the main device. */
   util_dynarray_foreach (&dri2_surf->dmabuf_feedback.tranches,
                          struct dmabuf_feedback_tranche, tranche) {
      /* Ignore tranches that do not contain dri2_surf->format */
      if (!BITSET_TEST(tranche->formats.formats_bitmap, visual_idx))
         continue;
      modifiers = u_vector_tail(&tranche->formats.modifiers[visual_idx]);
      num_modifiers = u_vector_length(&tranche->formats.modifiers[visual_idx]);

      /* For the purposes of this function, an INVALID modifier on
       * its own means the modifiers aren't supported. */
      if (num_modifiers == 0 ||
          (num_modifiers == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID)) {
         num_modifiers = 0;
         modifiers = NULL;
      }

      /* Request scanout-capable memory when the tranche is flagged for it. */
      flags = use_flags;
      if (tranche->flags & ZWP_LINUX_DMABUF_FEEDBACK_V1_TRANCHE_FLAGS_SCANOUT)
         flags |= __DRI_IMAGE_USE_SCANOUT;

      /* On split render/display GPU setups the use flags are dropped here;
       * they are applied to the linear copy instead. */
      dri2_surf->back->dri_image = loader_dri_create_image(
         dri2_dpy->dri_screen_render_gpu, dri2_dpy->image,
         dri2_surf->base.Width, dri2_surf->base.Height, pipe_format,
         (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) ? 0 : flags,
         modifiers, num_modifiers, NULL);

      /* First tranche that yields an image wins (best preference). */
      if (dri2_surf->back->dri_image)
         return;
   }
}
952
953 static void
create_dri_image(struct dri2_egl_surface * dri2_surf,enum pipe_format pipe_format,uint32_t use_flags)954 create_dri_image(struct dri2_egl_surface *dri2_surf,
955 enum pipe_format pipe_format, uint32_t use_flags)
956 {
957 struct dri2_egl_display *dri2_dpy =
958 dri2_egl_display(dri2_surf->base.Resource.Display);
959 int visual_idx;
960 uint64_t *modifiers;
961 unsigned int num_modifiers;
962
963 visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
964 modifiers = u_vector_tail(&dri2_dpy->formats.modifiers[visual_idx]);
965 num_modifiers = u_vector_length(&dri2_dpy->formats.modifiers[visual_idx]);
966
967 /* For the purposes of this function, an INVALID modifier on
968 * its own means the modifiers aren't supported. */
969 if (num_modifiers == 0 ||
970 (num_modifiers == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID)) {
971 num_modifiers = 0;
972 modifiers = NULL;
973 }
974
975 /* If our DRIImage implementation does not support createImageWithModifiers,
976 * then fall back to the old createImage, and hope it allocates an image
977 * which is acceptable to the winsys. */
978 dri2_surf->back->dri_image = loader_dri_create_image(
979 dri2_dpy->dri_screen_render_gpu, dri2_dpy->image, dri2_surf->base.Width,
980 dri2_surf->base.Height, pipe_format,
981 (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) ? 0 : use_flags,
982 modifiers, num_modifiers, NULL);
983 }
984
985 static int
get_back_bo(struct dri2_egl_surface * dri2_surf)986 get_back_bo(struct dri2_egl_surface *dri2_surf)
987 {
988 struct dri2_egl_display *dri2_dpy =
989 dri2_egl_display(dri2_surf->base.Resource.Display);
990 int use_flags;
991 int visual_idx;
992 unsigned int pipe_format;
993 unsigned int linear_pipe_format;
994
995 visual_idx = dri2_wl_visual_idx_from_fourcc(dri2_surf->format);
996 assert(visual_idx != -1);
997 pipe_format = dri2_wl_visuals[visual_idx].pipe_format;
998 linear_pipe_format = pipe_format;
999
1000 /* Substitute dri image format if server does not support original format */
1001 if (!BITSET_TEST(dri2_dpy->formats.formats_bitmap, visual_idx))
1002 linear_pipe_format = dri2_wl_visuals[visual_idx].alt_pipe_format;
1003
1004 /* These asserts hold, as long as dri2_wl_visuals[] is self-consistent and
1005 * the PRIME substitution logic in dri2_wl_add_configs_for_visuals() is free
1006 * of bugs.
1007 */
1008 assert(linear_pipe_format != PIPE_FORMAT_NONE);
1009 assert(BITSET_TEST(
1010 dri2_dpy->formats.formats_bitmap,
1011 dri2_wl_visual_idx_from_pipe_format(linear_pipe_format)));
1012
1013 /* There might be a buffer release already queued that wasn't processed */
1014 wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);
1015
1016 while (dri2_surf->back == NULL) {
1017 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
1018 /* Get an unlocked buffer, preferably one with a dri_buffer
1019 * already allocated and with minimum age.
1020 */
1021 if (dri2_surf->color_buffers[i].locked)
1022 continue;
1023
1024 if (!dri2_surf->back || !dri2_surf->back->dri_image ||
1025 (dri2_surf->color_buffers[i].age > 0 &&
1026 dri2_surf->color_buffers[i].age < dri2_surf->back->age))
1027 dri2_surf->back = &dri2_surf->color_buffers[i];
1028 }
1029
1030 if (dri2_surf->back)
1031 break;
1032
1033 /* If we don't have a buffer, then block on the server to release one for
1034 * us, and try again. wl_display_dispatch_queue will process any pending
1035 * events, however not all servers flush on issuing a buffer release
1036 * event. So, we spam the server with roundtrips as they always cause a
1037 * client flush.
1038 */
1039 if (wl_display_roundtrip_queue(dri2_dpy->wl_dpy, dri2_surf->wl_queue) < 0)
1040 return -1;
1041 }
1042
1043 if (dri2_surf->back == NULL)
1044 return -1;
1045
1046 use_flags = __DRI_IMAGE_USE_SHARE | __DRI_IMAGE_USE_BACKBUFFER;
1047
1048 if (dri2_surf->base.ProtectedContent) {
1049 /* Protected buffers can't be read from another GPU */
1050 if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1051 return -1;
1052 use_flags |= __DRI_IMAGE_USE_PROTECTED;
1053 }
1054
1055 if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu &&
1056 dri2_surf->back->linear_copy == NULL) {
1057 uint64_t linear_mod = DRM_FORMAT_MOD_LINEAR;
1058 __DRIimage *linear_copy_display_gpu_image = NULL;
1059
1060 if (dri2_dpy->dri_screen_display_gpu) {
1061 linear_copy_display_gpu_image = loader_dri_create_image(
1062 dri2_dpy->dri_screen_display_gpu, dri2_dpy->image,
1063 dri2_surf->base.Width, dri2_surf->base.Height,
1064 linear_pipe_format, use_flags | __DRI_IMAGE_USE_LINEAR,
1065 &linear_mod, 1, NULL);
1066
1067 if (linear_copy_display_gpu_image) {
1068 int i, ret = 1;
1069 int fourcc;
1070 int num_planes = 0;
1071 int buffer_fds[4];
1072 int strides[4];
1073 int offsets[4];
1074 unsigned error;
1075
1076 if (!dri2_dpy->image->queryImage(linear_copy_display_gpu_image,
1077 __DRI_IMAGE_ATTRIB_NUM_PLANES,
1078 &num_planes))
1079 num_planes = 1;
1080
1081 for (i = 0; i < num_planes; i++) {
1082 __DRIimage *image = dri2_dpy->image->fromPlanar(
1083 linear_copy_display_gpu_image, i, NULL);
1084
1085 if (!image) {
1086 assert(i == 0);
1087 image = linear_copy_display_gpu_image;
1088 }
1089
1090 buffer_fds[i] = -1;
1091 ret &= dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
1092 &buffer_fds[i]);
1093 ret &= dri2_dpy->image->queryImage(
1094 image, __DRI_IMAGE_ATTRIB_STRIDE, &strides[i]);
1095 ret &= dri2_dpy->image->queryImage(
1096 image, __DRI_IMAGE_ATTRIB_OFFSET, &offsets[i]);
1097
1098 if (image != linear_copy_display_gpu_image)
1099 dri2_dpy->image->destroyImage(image);
1100
1101 if (!ret) {
1102 do {
1103 if (buffer_fds[i] != -1)
1104 close(buffer_fds[i]);
1105 } while (--i >= 0);
1106 dri2_dpy->image->destroyImage(linear_copy_display_gpu_image);
1107 return -1;
1108 }
1109 }
1110
1111 ret &= dri2_dpy->image->queryImage(linear_copy_display_gpu_image,
1112 __DRI_IMAGE_ATTRIB_FOURCC,
1113 &fourcc);
1114 if (!ret) {
1115 do {
1116 if (buffer_fds[i] != -1)
1117 close(buffer_fds[i]);
1118 } while (--i >= 0);
1119 dri2_dpy->image->destroyImage(linear_copy_display_gpu_image);
1120 return -1;
1121 }
1122
1123 /* The linear buffer was created in the display GPU's vram, so we
1124 * need to make it visible to render GPU
1125 */
1126 dri2_surf->back->linear_copy =
1127 dri2_dpy->image->createImageFromDmaBufs3(
1128 dri2_dpy->dri_screen_render_gpu,
1129 dri2_surf->base.Width, dri2_surf->base.Height,
1130 fourcc, linear_mod,
1131 &buffer_fds[0], num_planes, &strides[0], &offsets[0],
1132 __DRI_YUV_COLOR_SPACE_UNDEFINED,
1133 __DRI_YUV_RANGE_UNDEFINED, __DRI_YUV_CHROMA_SITING_UNDEFINED,
1134 __DRI_YUV_CHROMA_SITING_UNDEFINED, __DRI_IMAGE_PRIME_LINEAR_BUFFER,
1135 &error, dri2_surf->back);
1136
1137 for (i = 0; i < num_planes; ++i) {
1138 if (buffer_fds[i] != -1)
1139 close(buffer_fds[i]);
1140 }
1141 dri2_dpy->image->destroyImage(linear_copy_display_gpu_image);
1142 }
1143 }
1144
1145 if (!dri2_surf->back->linear_copy) {
1146 dri2_surf->back->linear_copy = loader_dri_create_image(
1147 dri2_dpy->dri_screen_render_gpu, dri2_dpy->image,
1148 dri2_surf->base.Width, dri2_surf->base.Height,
1149 linear_pipe_format, use_flags | __DRI_IMAGE_USE_LINEAR,
1150 &linear_mod, 1, NULL);
1151 }
1152
1153 if (dri2_surf->back->linear_copy == NULL)
1154 return -1;
1155 }
1156
1157 if (dri2_surf->back->dri_image == NULL) {
1158 if (dri2_surf->wl_dmabuf_feedback)
1159 create_dri_image_from_dmabuf_feedback(dri2_surf, pipe_format,
1160 use_flags);
1161 if (dri2_surf->back->dri_image == NULL)
1162 create_dri_image(dri2_surf, pipe_format, use_flags);
1163 dri2_surf->back->age = 0;
1164 }
1165
1166 if (dri2_surf->back->dri_image == NULL)
1167 return -1;
1168
1169 dri2_surf->back->locked = true;
1170
1171 return 0;
1172 }
1173
1174 static void
back_bo_to_dri_buffer(struct dri2_egl_surface * dri2_surf,__DRIbuffer * buffer)1175 back_bo_to_dri_buffer(struct dri2_egl_surface *dri2_surf, __DRIbuffer *buffer)
1176 {
1177 struct dri2_egl_display *dri2_dpy =
1178 dri2_egl_display(dri2_surf->base.Resource.Display);
1179 __DRIimage *image;
1180 int name, pitch;
1181
1182 image = dri2_surf->back->dri_image;
1183
1184 dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_NAME, &name);
1185 dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &pitch);
1186
1187 buffer->attachment = __DRI_BUFFER_BACK_LEFT;
1188 buffer->name = name;
1189 buffer->pitch = pitch;
1190 buffer->cpp = 4;
1191 buffer->flags = 0;
1192 }
1193
1194 /* Value chosen empirically as a compromise between avoiding frequent
1195 * reallocations and extended time of increased memory consumption due to
1196 * unused buffers being kept.
1197 */
1198 #define BUFFER_TRIM_AGE_HYSTERESIS 20
1199
/* Synchronize the surface's buffers with the current window state.
 *
 * Handles pending resizes and dma-buf feedback changes (which force a
 * full buffer release), acquires a back buffer, and trims long-unused
 * spare buffers.  Returns 0 on success, -1 on allocation failure (with
 * EGL_BAD_ALLOC recorded). */
static int
update_buffers(struct dri2_egl_surface *dri2_surf)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);

   /* Latch the native window's current size and accumulated resize offset. */
   if (dri2_surf->wl_win &&
       (dri2_surf->base.Width != dri2_surf->wl_win->width ||
        dri2_surf->base.Height != dri2_surf->wl_win->height)) {

      dri2_surf->base.Width = dri2_surf->wl_win->width;
      dri2_surf->base.Height = dri2_surf->wl_win->height;
      dri2_surf->dx = dri2_surf->wl_win->dx;
      dri2_surf->dy = dri2_surf->wl_win->dy;
   }

   /* Old buffers have the wrong size/format after a resize or new dma-buf
    * feedback, so drop them before allocating the new back buffer. */
   if (dri2_surf->resized || dri2_surf->received_dmabuf_feedback) {
      dri2_wl_release_buffers(dri2_surf);
      dri2_surf->resized = false;
      dri2_surf->received_dmabuf_feedback = false;
   }

   if (get_back_bo(dri2_surf) < 0) {
      _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
      return -1;
   }

   /* If we have an extra unlocked buffer at this point, we had to do triple
    * buffering for a while, but now can go back to just double buffering.
    * That means we can free any unlocked buffer now. To avoid toggling between
    * going back to double buffering and needing to allocate another buffer too
    * fast we let the unneeded buffer sit around for a short while. */
   for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
      if (!dri2_surf->color_buffers[i].locked &&
          dri2_surf->color_buffers[i].wl_buffer &&
          dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
         wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
         dri2_dpy->image->destroyImage(dri2_surf->color_buffers[i].dri_image);
         /* linear_copy only exists on PRIME (split-GPU) configurations. */
         if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
            dri2_dpy->image->destroyImage(
               dri2_surf->color_buffers[i].linear_copy);
         dri2_surf->color_buffers[i].wl_buffer = NULL;
         dri2_surf->color_buffers[i].dri_image = NULL;
         dri2_surf->color_buffers[i].linear_copy = NULL;
         dri2_surf->color_buffers[i].age = 0;
      }
   }

   return 0;
}
1250
1251 static int
update_buffers_if_needed(struct dri2_egl_surface * dri2_surf)1252 update_buffers_if_needed(struct dri2_egl_surface *dri2_surf)
1253 {
1254 if (dri2_surf->back != NULL)
1255 return 0;
1256
1257 return update_buffers(dri2_surf);
1258 }
1259
1260 static int
image_get_buffers(__DRIdrawable * driDrawable,unsigned int format,uint32_t * stamp,void * loaderPrivate,uint32_t buffer_mask,struct __DRIimageList * buffers)1261 image_get_buffers(__DRIdrawable *driDrawable, unsigned int format,
1262 uint32_t *stamp, void *loaderPrivate, uint32_t buffer_mask,
1263 struct __DRIimageList *buffers)
1264 {
1265 struct dri2_egl_surface *dri2_surf = loaderPrivate;
1266
1267 if (update_buffers_if_needed(dri2_surf) < 0)
1268 return 0;
1269
1270 buffers->image_mask = __DRI_IMAGE_BUFFER_BACK;
1271 buffers->back = dri2_surf->back->dri_image;
1272
1273 return 1;
1274 }
1275
/* Front-buffer rendering is not a thing on Wayland, so flushing the front
 * buffer is deliberately a no-op. */
static void
dri2_wl_flush_front_buffer(__DRIdrawable *driDrawable, void *loaderPrivate)
{
   (void)driDrawable;
   (void)loaderPrivate;
}
1282
1283 static unsigned
dri2_wl_get_capability(void * loaderPrivate,enum dri_loader_cap cap)1284 dri2_wl_get_capability(void *loaderPrivate, enum dri_loader_cap cap)
1285 {
1286 switch (cap) {
1287 case DRI_LOADER_CAP_FP16:
1288 return 1;
1289 case DRI_LOADER_CAP_RGBA_ORDERING:
1290 return 1;
1291 default:
1292 return 0;
1293 }
1294 }
1295
/* DRI image-loader vtable handed to the driver: buffer acquisition,
 * front-buffer flush (no-op) and capability queries. */
static const __DRIimageLoaderExtension image_loader_extension = {
   .base = {__DRI_IMAGE_LOADER, 2},

   .getBuffers = image_get_buffers,
   .flushFrontBuffer = dri2_wl_flush_front_buffer,
   .getCapability = dri2_wl_get_capability,
};
1303
1304 static void
wayland_throttle_callback(void * data,struct wl_callback * callback,uint32_t time)1305 wayland_throttle_callback(void *data, struct wl_callback *callback,
1306 uint32_t time)
1307 {
1308 struct dri2_egl_surface *dri2_surf = data;
1309
1310 dri2_surf->throttle_callback = NULL;
1311 wl_callback_destroy(callback);
1312 }
1313
/* Listener for the frame/sync callbacks used to throttle swaps. */
static const struct wl_callback_listener throttle_listener = {
   .done = wayland_throttle_callback,
};
1317
/* Wrap a __DRIimage in a wl_buffer, preferring zwp_linux_dmabuf_v1 when
 * the image's modifier is acceptable to the compositor, otherwise falling
 * back to wl_drm PRIME buffers.
 *
 * dri2_surf may be NULL (e.g. for eglCreateWaylandBufferFromImageWL); in
 * that case the display-level wl_drm proxy and default queue are used.
 * Returns NULL on failure. */
static struct wl_buffer *
create_wl_buffer(struct dri2_egl_display *dri2_dpy,
                 struct dri2_egl_surface *dri2_surf, __DRIimage *image)
{
   struct wl_buffer *ret = NULL;
   EGLBoolean query;
   int width, height, fourcc, num_planes;
   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
   int mod_hi, mod_lo;

   query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_WIDTH, &width);
   query &=
      dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_HEIGHT, &height);
   query &=
      dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_FOURCC, &fourcc);
   if (!query)
      return NULL;

   /* NUM_PLANES may be unsupported by the driver; assume single-planar. */
   query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                       &num_planes);
   if (!query)
      num_planes = 1;

   /* The modifier is reported as two 32-bit halves; if the query fails the
    * image keeps DRM_FORMAT_MOD_INVALID (implicit modifier). */
   query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER,
                                       &mod_hi);
   query &= dri2_dpy->image->queryImage(
      image, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod_lo);
   if (query) {
      modifier = combine_u32_into_u64(mod_hi, mod_lo);
   }

   /* Check whether the compositor advertised this modifier for the format. */
   bool supported_modifier = false;
   bool mod_invalid_supported = false;
   int visual_idx = dri2_wl_visual_idx_from_fourcc(fourcc);
   assert(visual_idx != -1);

   uint64_t *mod;
   u_vector_foreach(mod, &dri2_dpy->formats.modifiers[visual_idx])
   {
      if (*mod == DRM_FORMAT_MOD_INVALID) {
         mod_invalid_supported = true;
      }
      if (*mod == modifier) {
         supported_modifier = true;
         break;
      }
   }
   if (!supported_modifier && mod_invalid_supported) {
      /* If the server has advertised DRM_FORMAT_MOD_INVALID then we trust
       * that the client has allocated the buffer with the right implicit
       * modifier for the format, even though it's allocated a buffer the
       * server hasn't explicitly claimed to support. */
      modifier = DRM_FORMAT_MOD_INVALID;
      supported_modifier = true;
   }

   if (dri2_dpy->wl_dmabuf && supported_modifier) {
      struct zwp_linux_buffer_params_v1 *params;
      int i;

      /* We don't need a wrapper for wl_dmabuf objects, because we have to
       * create the intermediate params object; we can set the queue on this,
       * and the wl_buffer inherits it race-free. */
      params = zwp_linux_dmabuf_v1_create_params(dri2_dpy->wl_dmabuf);
      if (dri2_surf)
         wl_proxy_set_queue((struct wl_proxy *)params, dri2_surf->wl_queue);

      /* Export every plane as an fd and add it to the params object. */
      for (i = 0; i < num_planes; i++) {
         __DRIimage *p_image;
         int stride, offset;
         int fd = -1;

         p_image = dri2_dpy->image->fromPlanar(image, i, NULL);
         if (!p_image) {
            /* fromPlanar may return NULL for single-planar images. */
            assert(i == 0);
            p_image = image;
         }

         query =
            dri2_dpy->image->queryImage(p_image, __DRI_IMAGE_ATTRIB_FD, &fd);
         query &= dri2_dpy->image->queryImage(
            p_image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
         query &= dri2_dpy->image->queryImage(
            p_image, __DRI_IMAGE_ATTRIB_OFFSET, &offset);
         if (image != p_image)
            dri2_dpy->image->destroyImage(p_image);

         if (!query) {
            if (fd >= 0)
               close(fd);
            zwp_linux_buffer_params_v1_destroy(params);
            return NULL;
         }

         /* The protocol dup()s the fd, so ours can be closed right away. */
         zwp_linux_buffer_params_v1_add(params, fd, i, offset, stride,
                                        modifier >> 32, modifier & 0xffffffff);
         close(fd);
      }

      /* EGL_PRESENT_OPAQUE_EXT: advertise the X-channel variant instead. */
      if (dri2_surf && dri2_surf->base.PresentOpaque)
         fourcc = dri2_wl_visuals[visual_idx].opaque_wl_drm_format;

      ret = zwp_linux_buffer_params_v1_create_immed(params, width, height,
                                                    fourcc, 0);
      zwp_linux_buffer_params_v1_destroy(params);
   } else {
      struct wl_drm *wl_drm =
         dri2_surf ? dri2_surf->wl_drm_wrapper : dri2_dpy->wl_drm;
      int fd = -1, stride;

      /* wl_drm's prime path only handles single-planar buffers. */
      if (num_planes > 1)
         return NULL;

      query = dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD, &fd);
      query &=
         dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride);
      if (!query) {
         if (fd >= 0)
            close(fd);
         return NULL;
      }

      ret = wl_drm_create_prime_buffer(wl_drm, fd, width, height, fourcc, 0,
                                       stride, 0, 0, 0, 0);
      close(fd);
   }

   return ret;
}
1447
1448 static EGLBoolean
try_damage_buffer(struct dri2_egl_surface * dri2_surf,const EGLint * rects,EGLint n_rects)1449 try_damage_buffer(struct dri2_egl_surface *dri2_surf, const EGLint *rects,
1450 EGLint n_rects)
1451 {
1452 if (wl_proxy_get_version((struct wl_proxy *)dri2_surf->wl_surface_wrapper) <
1453 WL_SURFACE_DAMAGE_BUFFER_SINCE_VERSION)
1454 return EGL_FALSE;
1455
1456 for (int i = 0; i < n_rects; i++) {
1457 const int *rect = &rects[i * 4];
1458
1459 wl_surface_damage_buffer(dri2_surf->wl_surface_wrapper, rect[0],
1460 dri2_surf->base.Height - rect[1] - rect[3],
1461 rect[2], rect[3]);
1462 }
1463 return EGL_TRUE;
1464 }
1465
1466 /**
1467 * Called via eglSwapBuffers(), drv->SwapBuffers().
1468 */
1469 static EGLBoolean
dri2_wl_swap_buffers_with_damage(_EGLDisplay * disp,_EGLSurface * draw,const EGLint * rects,EGLint n_rects)1470 dri2_wl_swap_buffers_with_damage(_EGLDisplay *disp, _EGLSurface *draw,
1471 const EGLint *rects, EGLint n_rects)
1472 {
1473 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1474 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
1475
1476 if (!dri2_surf->wl_win)
1477 return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
1478
1479 /* Flush (and finish glthread) before:
1480 * - update_buffers_if_needed because the unmarshalling thread
1481 * may be running currently, and we would concurrently alloc/free
1482 * the back bo.
1483 * - swapping current/back because flushing may free the buffer and
1484 * dri_image and reallocate them using get_back_bo (which causes a
1485 * a crash because 'current' becomes NULL).
1486 * - using any wl_* function because accessing them from this thread
1487 * and glthread causes troubles (see #7624 and #8136)
1488 */
1489 dri2_flush_drawable_for_swapbuffers(disp, draw);
1490 dri2_dpy->flush->invalidate(dri2_surf->dri_drawable);
1491
1492 while (dri2_surf->throttle_callback != NULL)
1493 if (wl_display_dispatch_queue(dri2_dpy->wl_dpy, dri2_surf->wl_queue) ==
1494 -1)
1495 return -1;
1496
1497 for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++)
1498 if (dri2_surf->color_buffers[i].age > 0)
1499 dri2_surf->color_buffers[i].age++;
1500
1501 /* Make sure we have a back buffer in case we're swapping without ever
1502 * rendering. */
1503 if (update_buffers_if_needed(dri2_surf) < 0)
1504 return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1505
1506 if (draw->SwapInterval > 0) {
1507 dri2_surf->throttle_callback =
1508 wl_surface_frame(dri2_surf->wl_surface_wrapper);
1509 wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
1510 dri2_surf);
1511 }
1512
1513 dri2_surf->back->age = 1;
1514 dri2_surf->current = dri2_surf->back;
1515 dri2_surf->back = NULL;
1516
1517 if (!dri2_surf->current->wl_buffer) {
1518 __DRIimage *image;
1519
1520 if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu)
1521 image = dri2_surf->current->linear_copy;
1522 else
1523 image = dri2_surf->current->dri_image;
1524
1525 dri2_surf->current->wl_buffer =
1526 create_wl_buffer(dri2_dpy, dri2_surf, image);
1527
1528 if (dri2_surf->current->wl_buffer == NULL)
1529 return _eglError(EGL_BAD_ALLOC, "dri2_swap_buffers");
1530
1531 dri2_surf->current->wl_release = false;
1532
1533 wl_buffer_add_listener(dri2_surf->current->wl_buffer, &wl_buffer_listener,
1534 dri2_surf);
1535 }
1536
1537 wl_surface_attach(dri2_surf->wl_surface_wrapper,
1538 dri2_surf->current->wl_buffer, dri2_surf->dx,
1539 dri2_surf->dy);
1540
1541 dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
1542 dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
1543 /* reset resize growing parameters */
1544 dri2_surf->dx = 0;
1545 dri2_surf->dy = 0;
1546
1547 /* If the compositor doesn't support damage_buffer, we deliberately
1548 * ignore the damage region and post maximum damage, due to
1549 * https://bugs.freedesktop.org/78190 */
1550 if (!n_rects || !try_damage_buffer(dri2_surf, rects, n_rects))
1551 wl_surface_damage(dri2_surf->wl_surface_wrapper, 0, 0, INT32_MAX,
1552 INT32_MAX);
1553
1554 if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
1555 _EGLContext *ctx = _eglGetCurrentContext();
1556 struct dri2_egl_context *dri2_ctx = dri2_egl_context(ctx);
1557 dri2_dpy->image->blitImage(
1558 dri2_ctx->dri_context, dri2_surf->current->linear_copy,
1559 dri2_surf->current->dri_image, 0, 0, dri2_surf->base.Width,
1560 dri2_surf->base.Height, 0, 0, dri2_surf->base.Width,
1561 dri2_surf->base.Height, 0);
1562
1563 if (dri2_dpy->flush) {
1564 __DRIdrawable *dri_drawable = dri2_dpy->vtbl->get_dri_drawable(draw);
1565
1566 dri2_dpy->flush->flush(dri_drawable);
1567 }
1568 }
1569
1570 wl_surface_commit(dri2_surf->wl_surface_wrapper);
1571
1572 /* If we're not waiting for a frame callback then we'll at least throttle
1573 * to a sync callback so that we always give a chance for the compositor to
1574 * handle the commit and send a release event before checking for a free
1575 * buffer */
1576 if (dri2_surf->throttle_callback == NULL) {
1577 dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
1578 wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
1579 dri2_surf);
1580 }
1581
1582 wl_display_flush(dri2_dpy->wl_dpy);
1583
1584 return EGL_TRUE;
1585 }
1586
1587 static EGLint
dri2_wl_query_buffer_age(_EGLDisplay * disp,_EGLSurface * surface)1588 dri2_wl_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
1589 {
1590 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);
1591
1592 if (update_buffers_if_needed(dri2_surf) < 0) {
1593 _eglError(EGL_BAD_ALLOC, "dri2_query_buffer_age");
1594 return -1;
1595 }
1596
1597 return dri2_surf->back->age;
1598 }
1599
/* eglSwapBuffers() without a damage region: delegate with full damage. */
static EGLBoolean
dri2_wl_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
{
   return dri2_wl_swap_buffers_with_damage(disp, draw, NULL, 0);
}
1605
1606 static struct wl_buffer *
dri2_wl_create_wayland_buffer_from_image(_EGLDisplay * disp,_EGLImage * img)1607 dri2_wl_create_wayland_buffer_from_image(_EGLDisplay *disp, _EGLImage *img)
1608 {
1609 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1610 struct dri2_egl_image *dri2_img = dri2_egl_image(img);
1611 __DRIimage *image = dri2_img->dri_image;
1612 struct wl_buffer *buffer;
1613 int fourcc;
1614
1615 /* Check the upstream display supports this buffer's format. */
1616 dri2_dpy->image->queryImage(image, __DRI_IMAGE_ATTRIB_FOURCC, &fourcc);
1617 if (!server_supports_fourcc(&dri2_dpy->formats, fourcc))
1618 goto bad_format;
1619
1620 buffer = create_wl_buffer(dri2_dpy, NULL, image);
1621
1622 /* The buffer object will have been created with our internal event queue
1623 * because it is using wl_dmabuf/wl_drm as a proxy factory. We want the
1624 * buffer to be used by the application so we'll reset it to the display's
1625 * default event queue. This isn't actually racy, as the only event the
1626 * buffer can get is a buffer release, which doesn't happen with an explicit
1627 * attach. */
1628 if (buffer)
1629 wl_proxy_set_queue((struct wl_proxy *)buffer, NULL);
1630
1631 return buffer;
1632
1633 bad_format:
1634 _eglError(EGL_BAD_MATCH, "unsupported image format");
1635 return NULL;
1636 }
1637
1638 static int
dri2_wl_authenticate(_EGLDisplay * disp,uint32_t id)1639 dri2_wl_authenticate(_EGLDisplay *disp, uint32_t id)
1640 {
1641 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1642 int ret = 0;
1643
1644 if (dri2_dpy->is_render_node) {
1645 _eglLog(_EGL_WARNING, "wayland-egl: client asks server to "
1646 "authenticate for render-nodes");
1647 return 0;
1648 }
1649 dri2_dpy->authenticated = false;
1650
1651 wl_drm_authenticate(dri2_dpy->wl_drm, id);
1652 if (roundtrip(dri2_dpy) < 0)
1653 ret = -1;
1654
1655 if (!dri2_dpy->authenticated)
1656 ret = -1;
1657
1658 /* reset authenticated */
1659 dri2_dpy->authenticated = true;
1660
1661 return ret;
1662 }
1663
/* wl_drm "device" event: open the DRM device the compositor names and, if
 * it is a primary (card) node, start magic-based authentication.  On any
 * failure all partially-acquired state is rolled back so the display
 * setup code sees a clean "no device" result. */
static void
drm_handle_device(void *data, struct wl_drm *drm, const char *device)
{
   struct dri2_egl_display *dri2_dpy = data;
   drm_magic_t magic;

   dri2_dpy->device_name = strdup(device);
   if (!dri2_dpy->device_name)
      return;

   dri2_dpy->fd_render_gpu = loader_open_device(dri2_dpy->device_name);
   if (dri2_dpy->fd_render_gpu == -1) {
      _eglLog(_EGL_WARNING, "wayland-egl: could not open %s (%s)",
              dri2_dpy->device_name, strerror(errno));
      free(dri2_dpy->device_name);
      dri2_dpy->device_name = NULL;
      return;
   }

   if (drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER) {
      /* Render nodes need no authentication. */
      dri2_dpy->authenticated = true;
   } else {
      /* Primary node: fetch a magic and ask the compositor to authenticate
       * it; the "authenticated" event completes the handshake. */
      if (drmGetMagic(dri2_dpy->fd_render_gpu, &magic)) {
         close(dri2_dpy->fd_render_gpu);
         dri2_dpy->fd_render_gpu = -1;
         free(dri2_dpy->device_name);
         dri2_dpy->device_name = NULL;
         _eglLog(_EGL_WARNING, "wayland-egl: drmGetMagic failed");
         return;
      }
      wl_drm_authenticate(dri2_dpy->wl_drm, magic);
   }
}
1697
1698 static void
drm_handle_format(void * data,struct wl_drm * drm,uint32_t format)1699 drm_handle_format(void *data, struct wl_drm *drm, uint32_t format)
1700 {
1701 struct dri2_egl_display *dri2_dpy = data;
1702 int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1703
1704 if (visual_idx == -1)
1705 return;
1706
1707 BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1708 }
1709
/* wl_drm "capabilities" event: record the server's capability bitmask. */
static void
drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t value)
{
   struct dri2_egl_display *dri2_dpy = data;

   dri2_dpy->capabilities = value;
}
1717
/* wl_drm "authenticated" event: the magic-based handshake succeeded. */
static void
drm_handle_authenticated(void *data, struct wl_drm *drm)
{
   struct dri2_egl_display *dri2_dpy = data;

   dri2_dpy->authenticated = true;
}
1725
/* Event handlers for the compositor's wl_drm global. */
static const struct wl_drm_listener drm_listener = {
   .device = drm_handle_device,
   .format = drm_handle_format,
   .authenticated = drm_handle_authenticated,
   .capabilities = drm_handle_capabilities,
};
1732
/* zwp_linux_dmabuf_v1 "format" event handler (pre-v3 style). */
static void
dmabuf_ignore_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                     uint32_t format)
{
   /* formats are implicitly advertised by the 'modifier' event, so ignore */
}
1739
1740 static void
dmabuf_handle_modifier(void * data,struct zwp_linux_dmabuf_v1 * dmabuf,uint32_t format,uint32_t modifier_hi,uint32_t modifier_lo)1741 dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
1742 uint32_t format, uint32_t modifier_hi,
1743 uint32_t modifier_lo)
1744 {
1745 struct dri2_egl_display *dri2_dpy = data;
1746 int visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1747 uint64_t *mod;
1748
1749 /* Ignore this if the compositor advertised dma-buf feedback. From version 4
1750 * onwards (when dma-buf feedback was introduced), the compositor should not
1751 * advertise this event anymore, but let's keep this for safety. */
1752 if (dri2_dpy->wl_dmabuf_feedback)
1753 return;
1754
1755 if (visual_idx == -1)
1756 return;
1757
1758 BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1759
1760 mod = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
1761 if (mod)
1762 *mod = combine_u32_into_u64(modifier_hi, modifier_lo);
1763 }
1764
/* Event handlers for the zwp_linux_dmabuf_v1 global (pre-feedback path). */
static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
   .format = dmabuf_ignore_format,
   .modifier = dmabuf_handle_modifier,
};
1769
/* Bind the previously discovered wl_drm global at its advertised version
 * and install our event handlers. */
static void
wl_drm_bind(struct dri2_egl_display *dri2_dpy)
{
   dri2_dpy->wl_drm =
      wl_registry_bind(dri2_dpy->wl_registry, dri2_dpy->wl_drm_name,
                       &wl_drm_interface, dri2_dpy->wl_drm_version);
   wl_drm_add_listener(dri2_dpy->wl_drm, &drm_listener, dri2_dpy);
}
1778
/* dma-buf feedback "format_table" event: map the compositor's shared
 * format/modifier table read-only.  A failed mmap leaves data ==
 * MAP_FAILED, which the tranche_formats handler checks before use.
 * The fd is ours to close once mapped. */
static void
default_dmabuf_feedback_format_table(
   void *data,
   struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
   int32_t fd, uint32_t size)
{
   struct dri2_egl_display *dri2_dpy = data;

   dri2_dpy->format_table.size = size;
   dri2_dpy->format_table.data =
      mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);

   close(fd);
}
1793
1794 static void
default_dmabuf_feedback_main_device(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * device)1795 default_dmabuf_feedback_main_device(
1796 void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1797 struct wl_array *device)
1798 {
1799 struct dri2_egl_display *dri2_dpy = data;
1800 char *node;
1801 int fd;
1802 dev_t dev;
1803
1804 /* Given the device, look for a render node and try to open it. */
1805 memcpy(&dev, device->data, sizeof(dev));
1806 node = loader_get_render_node(dev);
1807 if (!node)
1808 return;
1809 fd = loader_open_device(node);
1810 if (fd == -1) {
1811 free(node);
1812 return;
1813 }
1814
1815 dri2_dpy->device_name = node;
1816 dri2_dpy->fd_render_gpu = fd;
1817 dri2_dpy->authenticated = true;
1818 }
1819
/* dma-buf feedback "tranche_target_device" event: intentionally unused — we
 * only consume the main device and the tranche formats. */
static void
default_dmabuf_feedback_tranche_target_device(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   struct wl_array *device)
{
   /* ignore this event */
}
1827
/* dma-buf feedback "tranche_flags" event: intentionally unused (e.g. the
 * scanout flag is not acted upon here). */
static void
default_dmabuf_feedback_tranche_flags(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
   uint32_t flags)
{
   /* ignore this event */
}
1835
1836 static void
default_dmabuf_feedback_tranche_formats(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback,struct wl_array * indices)1837 default_dmabuf_feedback_tranche_formats(
1838 void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
1839 struct wl_array *indices)
1840 {
1841 struct dri2_egl_display *dri2_dpy = data;
1842 uint64_t *modifier_ptr, modifier;
1843 uint32_t format;
1844 uint16_t *index;
1845 int visual_idx;
1846
1847 if (dri2_dpy->format_table.data == MAP_FAILED) {
1848 _eglLog(_EGL_WARNING, "wayland-egl: we could not map the format table "
1849 "so we won't be able to use this batch of dma-buf "
1850 "feedback events.");
1851 return;
1852 }
1853 if (dri2_dpy->format_table.data == NULL) {
1854 _eglLog(_EGL_WARNING,
1855 "wayland-egl: compositor didn't advertise a format "
1856 "table, so we won't be able to use this batch of dma-buf "
1857 "feedback events.");
1858 return;
1859 }
1860
1861 wl_array_for_each (index, indices) {
1862 format = dri2_dpy->format_table.data[*index].format;
1863 modifier = dri2_dpy->format_table.data[*index].modifier;
1864
1865 /* skip formats that we don't support */
1866 visual_idx = dri2_wl_visual_idx_from_fourcc(format);
1867 if (visual_idx == -1)
1868 continue;
1869
1870 BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
1871 modifier_ptr = u_vector_add(&dri2_dpy->formats.modifiers[visual_idx]);
1872 if (modifier_ptr)
1873 *modifier_ptr = modifier;
1874 }
1875 }
1876
/* dma-buf feedback "tranche_done" event: no per-tranche state to flush. */
static void
default_dmabuf_feedback_tranche_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}
1883
/* dma-buf feedback "done" event: the caller drives completion via a
 * roundtrip, so nothing to do here. */
static void
default_dmabuf_feedback_done(
   void *data, struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}
1890
/* Listener for the default (per-display) dma-buf feedback object used during
 * display initialization; only format_table, main_device and tranche_formats
 * carry real logic. */
static const struct zwp_linux_dmabuf_feedback_v1_listener
   dmabuf_feedback_listener = {
      .format_table = default_dmabuf_feedback_format_table,
      .main_device = default_dmabuf_feedback_main_device,
      .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
      .tranche_flags = default_dmabuf_feedback_tranche_flags,
      .tranche_formats = default_dmabuf_feedback_tranche_formats,
      .tranche_done = default_dmabuf_feedback_tranche_done,
      .done = default_dmabuf_feedback_done,
};
1901
1902 static void
registry_handle_global_drm(void * data,struct wl_registry * registry,uint32_t name,const char * interface,uint32_t version)1903 registry_handle_global_drm(void *data, struct wl_registry *registry,
1904 uint32_t name, const char *interface,
1905 uint32_t version)
1906 {
1907 struct dri2_egl_display *dri2_dpy = data;
1908
1909 if (strcmp(interface, wl_drm_interface.name) == 0) {
1910 dri2_dpy->wl_drm_version = MIN2(version, 2);
1911 dri2_dpy->wl_drm_name = name;
1912 } else if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 &&
1913 version >= 3) {
1914 dri2_dpy->wl_dmabuf = wl_registry_bind(
1915 registry, name, &zwp_linux_dmabuf_v1_interface,
1916 MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
1917 zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
1918 dri2_dpy);
1919 }
1920 }
1921
/* Registry "global_remove" event: globals are not tracked after bind, so
 * removal needs no handling. */
static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{
}
1927
/* wl_registry listener used by the hardware (DRM) initialization path. */
static const struct wl_registry_listener registry_listener_drm = {
   .global = registry_handle_global_drm,
   .global_remove = registry_handle_global_remove,
};
1932
/* Configure the display's swap-interval bounds; Wayland is capped at 1. */
static void
dri2_wl_setup_swap_interval(_EGLDisplay *disp)
{
   /* We can't use values greater than 1 on Wayland because we are using the
    * frame callback to synchronise the frame and the only way we be sure to
    * get a frame callback is to attach a new buffer. Therefore we can't just
    * sit drawing nothing to wait until the next ‘n’ frame callbacks */

   dri2_setup_swap_interval(disp, 1);
}
1943
/* Display vtable for the hardware-accelerated Wayland path. */
static const struct dri2_egl_display_vtbl dri2_wl_display_vtbl = {
   .authenticate = dri2_wl_authenticate,
   .create_window_surface = dri2_wl_create_window_surface,
   .create_pixmap_surface = dri2_wl_create_pixmap_surface,
   .destroy_surface = dri2_wl_destroy_surface,
   .swap_interval = dri2_wl_swap_interval,
   .create_image = dri2_create_image_khr,
   .swap_buffers = dri2_wl_swap_buffers,
   .swap_buffers_with_damage = dri2_wl_swap_buffers_with_damage,
   .query_buffer_age = dri2_wl_query_buffer_age,
   .create_wayland_buffer_from_image = dri2_wl_create_wayland_buffer_from_image,
   .get_dri_drawable = dri2_surface_get_dri_drawable,
};
1957
/* DRI loader extensions handed to the driver in the hardware path
 * (NULL-terminated). */
static const __DRIextension *dri2_loader_extensions[] = {
   &image_loader_extension.base,
   &image_lookup_extension.base,
   &use_invalidate.base,
   NULL,
};
1964
1965 static void
dri2_wl_add_configs_for_visuals(_EGLDisplay * disp)1966 dri2_wl_add_configs_for_visuals(_EGLDisplay *disp)
1967 {
1968 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
1969 unsigned int format_count[ARRAY_SIZE(dri2_wl_visuals)] = {0};
1970
1971 /* Try to create an EGLConfig for every config the driver declares */
1972 for (unsigned i = 0; dri2_dpy->driver_configs[i]; i++) {
1973 struct dri2_egl_config *dri2_conf;
1974 bool conversion = false;
1975 int idx = dri2_wl_visual_idx_from_config(dri2_dpy,
1976 dri2_dpy->driver_configs[i]);
1977
1978 /* Check if the server natively supports the colour buffer format */
1979 if (!server_supports_format(&dri2_dpy->formats, idx)) {
1980 /* In multi-GPU scenarios, we usually have a different buffer, so a
1981 * format conversion is easy compared to the overhead of the copy */
1982 if (dri2_dpy->fd_render_gpu == dri2_dpy->fd_display_gpu)
1983 continue;
1984
1985 /* Check if the server supports the alternate format */
1986 if (!server_supports_pipe_format(&dri2_dpy->formats,
1987 dri2_wl_visuals[idx].alt_pipe_format)) {
1988 continue;
1989 }
1990
1991 conversion = true;
1992 }
1993
1994 /* The format is supported one way or another; add the EGLConfig */
1995 dri2_conf = dri2_add_config(disp, dri2_dpy->driver_configs[i],
1996 EGL_WINDOW_BIT, NULL);
1997 if (!dri2_conf)
1998 continue;
1999
2000 format_count[idx]++;
2001
2002 if (conversion && format_count[idx] == 1) {
2003 _eglLog(_EGL_DEBUG, "Client format %s converted via PRIME blitImage.",
2004 util_format_name(dri2_wl_visuals[idx].pipe_format));
2005 }
2006 }
2007
2008 for (unsigned i = 0; i < ARRAY_SIZE(format_count); i++) {
2009 if (!format_count[i]) {
2010 _eglLog(_EGL_DEBUG, "No DRI config supports native format %s",
2011 util_format_name(dri2_wl_visuals[i].pipe_format));
2012 }
2013 }
2014 }
2015
/* Acquire a render-GPU fd for the display: prefer the default dma-buf
 * feedback (zwp_linux_dmabuf v4+), falling back to binding wl_drm and
 * authenticating through it.  Returns true when fd_render_gpu is valid
 * (and, on the wl_drm path, the display is authenticated). */
static bool
dri2_initialize_wayland_drm_extensions(struct dri2_egl_display *dri2_dpy)
{
   /* Get default dma-buf feedback */
   if (dri2_dpy->wl_dmabuf &&
       zwp_linux_dmabuf_v1_get_version(dri2_dpy->wl_dmabuf) >=
          ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
      dmabuf_feedback_format_table_init(&dri2_dpy->format_table);
      dri2_dpy->wl_dmabuf_feedback =
         zwp_linux_dmabuf_v1_get_default_feedback(dri2_dpy->wl_dmabuf);
      zwp_linux_dmabuf_feedback_v1_add_listener(
         dri2_dpy->wl_dmabuf_feedback, &dmabuf_feedback_listener, dri2_dpy);
   }

   /* Process the feedback events (format table, main device, tranches). */
   if (roundtrip(dri2_dpy) < 0)
      return false;

   /* Destroy the default dma-buf feedback and the format table. */
   if (dri2_dpy->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(dri2_dpy->wl_dmabuf_feedback);
      dri2_dpy->wl_dmabuf_feedback = NULL;
      dmabuf_feedback_format_table_fini(&dri2_dpy->format_table);
   }

   /* We couldn't retrieve a render node from the dma-buf feedback (or the
    * feedback was not advertised at all), so we must fallback to wl_drm. */
   if (dri2_dpy->fd_render_gpu == -1) {
      /* wl_drm not advertised by compositor, so can't continue */
      if (dri2_dpy->wl_drm_name == 0)
         return false;
      wl_drm_bind(dri2_dpy);

      if (dri2_dpy->wl_drm == NULL)
         return false;
      /* First roundtrip: wl_drm events should produce the device fd. */
      if (roundtrip(dri2_dpy) < 0 || dri2_dpy->fd_render_gpu == -1)
         return false;

      /* Second roundtrip (only if still unauthenticated) completes the
       * wl_drm authentication handshake. */
      if (!dri2_dpy->authenticated &&
          (roundtrip(dri2_dpy) < 0 || !dri2_dpy->authenticated))
         return false;
   }
   return true;
}
2059
/* Initialize the hardware-accelerated Wayland platform: connect to the
 * compositor, discover wl_drm / zwp_linux_dmabuf, choose a GPU fd, load the
 * DRI driver and advertise the extensions this path supports.  On any
 * failure the partially-initialized display is torn down. */
static EGLBoolean
dri2_initialize_wayland_drm(_EGLDisplay *disp)
{
   struct dri2_egl_display *dri2_dpy = dri2_display_create();
   if (!dri2_dpy)
      return EGL_FALSE;

   disp->DriverData = (void *)dri2_dpy;

   if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
      goto cleanup;

   /* Use the application's wl_display if one was passed in; otherwise open
    * our own connection and remember that we own it. */
   if (disp->PlatformDisplay == NULL) {
      dri2_dpy->wl_dpy = wl_display_connect(NULL);
      if (dri2_dpy->wl_dpy == NULL)
         goto cleanup;
      dri2_dpy->own_device = true;
   } else {
      dri2_dpy->wl_dpy = disp->PlatformDisplay;
   }

   /* All EGL-internal protocol traffic goes through a private event queue so
    * we never steal events from the application's dispatch loop. */
   dri2_dpy->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
                                                          "mesa egl display queue");

   dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
   if (dri2_dpy->wl_dpy_wrapper == NULL)
      goto cleanup;

   wl_proxy_set_queue((struct wl_proxy *)dri2_dpy->wl_dpy_wrapper,
                      dri2_dpy->wl_queue);

   if (dri2_dpy->own_device)
      wl_display_dispatch_pending(dri2_dpy->wl_dpy);

   dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
   wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_drm,
                            dri2_dpy);

   /* Collect the globals the compositor advertises. */
   if (roundtrip(dri2_dpy) < 0)
      goto cleanup;

   /* Obtain fd_render_gpu via dma-buf feedback or the wl_drm fallback. */
   if (!dri2_initialize_wayland_drm_extensions(dri2_dpy))
      goto cleanup;

   /* Apply any user preference for which GPU to render on; this may swap
    * fd_render_gpu away from the compositor's device. */
   loader_get_user_preferred_fd(&dri2_dpy->fd_render_gpu,
                                &dri2_dpy->fd_display_gpu);

   if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu) {
      free(dri2_dpy->device_name);
      dri2_dpy->device_name =
         loader_get_device_name_for_fd(dri2_dpy->fd_render_gpu);
      if (!dri2_dpy->device_name) {
         _eglError(EGL_BAD_ALLOC, "wayland-egl: failed to get device name "
                                  "for requested GPU");
         goto cleanup;
      }
   }

   /* we have to do the check now, because loader_get_user_preferred_fd
    * will return a render-node when the requested gpu is different
    * to the server, but also if the client asks for the same gpu than
    * the server by requesting its pci-id */
   dri2_dpy->is_render_node =
      drmGetNodeTypeFromFd(dri2_dpy->fd_render_gpu) == DRM_NODE_RENDER;

   dri2_dpy->driver_name = loader_get_driver_for_fd(dri2_dpy->fd_render_gpu);
   if (dri2_dpy->driver_name == NULL) {
      _eglError(EGL_BAD_ALLOC, "DRI2: failed to get driver name");
      goto cleanup;
   }

   dri2_dpy->loader_extensions = dri2_loader_extensions;
   if (!dri2_load_driver_dri3(disp)) {
      _eglError(EGL_BAD_ALLOC, "DRI2: failed to load driver");
      goto cleanup;
   }

   if (!dri2_create_screen(disp))
      goto cleanup;

   if (!dri2_setup_extensions(disp))
      goto cleanup;

   if (!dri2_setup_device(disp, false)) {
      _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to setup EGLDevice");
      goto cleanup;
   }

   dri2_setup_screen(disp);

   dri2_wl_setup_swap_interval(disp);

   if (dri2_dpy->wl_drm) {
      /* To use Prime, we must have _DRI_IMAGE v7 at least. createImageFromFds
       * support indicates that Prime export/import is supported by the driver.
       * We deprecated the support to GEM names API, so we bail out if the
       * driver does not support Prime. */
      if (!(dri2_dpy->capabilities & WL_DRM_CAPABILITY_PRIME) ||
          (dri2_dpy->image->createImageFromFds == NULL)) {
         _eglLog(_EGL_WARNING, "wayland-egl: display does not support prime");
         goto cleanup;
      }
   }

   /* Cross-GPU presentation needs blitImage for the copy between render and
    * display GPU buffers. */
   if (dri2_dpy->fd_render_gpu != dri2_dpy->fd_display_gpu &&
       dri2_dpy->image->blitImage == NULL) {
      _eglLog(_EGL_WARNING, "wayland-egl: Different GPU selected, but the "
                            "Image extension in the driver is not "
                            "compatible. blitImage() is required");
      goto cleanup;
   }

   dri2_wl_add_configs_for_visuals(disp);

   dri2_set_WL_bind_wayland_display(disp);
   /* When cannot convert EGLImage to wl_buffer when on a different gpu,
    * because the buffer of the EGLImage has likely a tiling mode the server
    * gpu won't support. These is no way to check for now. Thus do not support
    * the extension */
   if (dri2_dpy->fd_render_gpu == dri2_dpy->fd_display_gpu)
      disp->Extensions.WL_create_wayland_buffer_from_image = EGL_TRUE;

   disp->Extensions.EXT_buffer_age = EGL_TRUE;

   disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;

   disp->Extensions.EXT_present_opaque = EGL_TRUE;

   /* Fill vtbl last to prevent accidentally calling virtual function during
    * initialization.
    */
   dri2_dpy->vtbl = &dri2_wl_display_vtbl;

   return EGL_TRUE;

cleanup:
   dri2_display_destroy(disp);
   return EGL_FALSE;
}
2199
2200 static int
dri2_wl_swrast_get_stride_for_format(int format,int w)2201 dri2_wl_swrast_get_stride_for_format(int format, int w)
2202 {
2203 int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2204
2205 assume(visual_idx != -1);
2206
2207 return w * util_format_get_blocksize(dri2_wl_visuals[visual_idx].pipe_format);
2208 }
2209
2210 static EGLBoolean
dri2_wl_swrast_allocate_buffer(struct dri2_egl_surface * dri2_surf,int format,int w,int h,void ** data,int * size,struct wl_buffer ** buffer)2211 dri2_wl_swrast_allocate_buffer(struct dri2_egl_surface *dri2_surf, int format,
2212 int w, int h, void **data, int *size,
2213 struct wl_buffer **buffer)
2214 {
2215 struct dri2_egl_display *dri2_dpy =
2216 dri2_egl_display(dri2_surf->base.Resource.Display);
2217 struct wl_shm_pool *pool;
2218 int fd, stride, size_map;
2219 void *data_map;
2220
2221 assert(!*buffer);
2222
2223 stride = dri2_wl_swrast_get_stride_for_format(format, w);
2224 size_map = h * stride;
2225
2226 /* Create a shareable buffer */
2227 fd = os_create_anonymous_file(size_map, NULL);
2228 if (fd < 0)
2229 return EGL_FALSE;
2230
2231 data_map = mmap(NULL, size_map, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2232 if (data_map == MAP_FAILED) {
2233 close(fd);
2234 return EGL_FALSE;
2235 }
2236
2237 /* Share it in a wl_buffer */
2238 pool = wl_shm_create_pool(dri2_dpy->wl_shm, fd, size_map);
2239 wl_proxy_set_queue((struct wl_proxy *)pool, dri2_surf->wl_queue);
2240 *buffer = wl_shm_pool_create_buffer(pool, 0, w, h, stride, format);
2241 wl_shm_pool_destroy(pool);
2242 close(fd);
2243
2244 *data = data_map;
2245 *size = size_map;
2246 return EGL_TRUE;
2247 }
2248
/* Ensure dri2_surf has a locked back buffer to draw into, handling window
 * resizes and compositor buffer releases.  Returns 0 on success, -1 on
 * allocation/dispatch failure.  For Zink only the size bookkeeping is done
 * (no shm buffers are managed here). */
static int
swrast_update_buffers(struct dri2_egl_surface *dri2_surf)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);
   bool zink = dri2_surf->base.Resource.Display->Options.Zink;

   /* we need to do the following operations only once per frame */
   if (dri2_surf->back)
      return 0;

   /* Window was resized: drop old buffers and adopt the new size/offset. */
   if (dri2_surf->wl_win &&
       (dri2_surf->base.Width != dri2_surf->wl_win->width ||
        dri2_surf->base.Height != dri2_surf->wl_win->height)) {

      if (!zink)
         dri2_wl_release_buffers(dri2_surf);

      dri2_surf->base.Width = dri2_surf->wl_win->width;
      dri2_surf->base.Height = dri2_surf->wl_win->height;
      dri2_surf->dx = dri2_surf->wl_win->dx;
      dri2_surf->dy = dri2_surf->wl_win->dy;
      /* No committed buffer matches the new size anymore. */
      dri2_surf->current = NULL;
   }

   /* find back buffer */
   if (zink)
      return 0;

   /* There might be a buffer release already queued that wasn't processed */
   wl_display_dispatch_queue_pending(dri2_dpy->wl_dpy, dri2_surf->wl_queue);

   /* else choose any another free location */
   while (!dri2_surf->back) {
      for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
         if (!dri2_surf->color_buffers[i].locked) {
            dri2_surf->back = &dri2_surf->color_buffers[i];
            /* Reuse an existing wl_buffer when the slot already has one. */
            if (dri2_surf->back->wl_buffer)
               break;

            /* Otherwise allocate a fresh shm buffer for this slot. */
            if (!dri2_wl_swrast_allocate_buffer(
                   dri2_surf, dri2_surf->format, dri2_surf->base.Width,
                   dri2_surf->base.Height, &dri2_surf->back->data,
                   &dri2_surf->back->data_size, &dri2_surf->back->wl_buffer)) {
               _eglError(EGL_BAD_ALLOC, "failed to allocate color buffer");
               return -1;
            }
            wl_buffer_add_listener(dri2_surf->back->wl_buffer,
                                   &wl_buffer_listener, dri2_surf);
            break;
         }
      }

      /* wait for the compositor to release a buffer */
      if (!dri2_surf->back) {
         if (wl_display_dispatch_queue(dri2_dpy->wl_dpy, dri2_surf->wl_queue) ==
             -1) {
            _eglError(EGL_BAD_ALLOC, "waiting for a free buffer failed");
            return -1;
         }
      }
   }

   dri2_surf->back->locked = true;

   /* If we have an extra unlocked buffer at this point, we had to do triple
    * buffering for a while, but now can go back to just double buffering.
    * That means we can free any unlocked buffer now. To avoid toggling between
    * going back to double buffering and needing to allocate another buffer too
    * fast we let the unneeded buffer sit around for a short while. */
   for (int i = 0; i < ARRAY_SIZE(dri2_surf->color_buffers); i++) {
      if (!dri2_surf->color_buffers[i].locked &&
          dri2_surf->color_buffers[i].wl_buffer &&
          dri2_surf->color_buffers[i].age > BUFFER_TRIM_AGE_HYSTERESIS) {
         wl_buffer_destroy(dri2_surf->color_buffers[i].wl_buffer);
         munmap(dri2_surf->color_buffers[i].data,
                dri2_surf->color_buffers[i].data_size);
         dri2_surf->color_buffers[i].wl_buffer = NULL;
         dri2_surf->color_buffers[i].data = NULL;
         dri2_surf->color_buffers[i].age = 0;
      }
   }

   return 0;
}
2334
2335 static void *
dri2_wl_swrast_get_frontbuffer_data(struct dri2_egl_surface * dri2_surf)2336 dri2_wl_swrast_get_frontbuffer_data(struct dri2_egl_surface *dri2_surf)
2337 {
2338 /* if there has been a resize: */
2339 if (!dri2_surf->current)
2340 return NULL;
2341
2342 return dri2_surf->current->data;
2343 }
2344
2345 static void *
dri2_wl_swrast_get_backbuffer_data(struct dri2_egl_surface * dri2_surf)2346 dri2_wl_swrast_get_backbuffer_data(struct dri2_egl_surface *dri2_surf)
2347 {
2348 assert(dri2_surf->back);
2349 return dri2_surf->back->data;
2350 }
2351
/* Present the back buffer: throttle against outstanding frame/sync
 * callbacks, promote back→current, then attach/damage/commit the surface
 * and flush the connection. */
static void
dri2_wl_swrast_commit_backbuffer(struct dri2_egl_surface *dri2_surf)
{
   struct dri2_egl_display *dri2_dpy =
      dri2_egl_display(dri2_surf->base.Resource.Display);

   /* Block until any previous throttle callback has fired (it is cleared by
    * its listener); bail silently if dispatching fails. */
   while (dri2_surf->throttle_callback != NULL)
      if (wl_display_dispatch_queue(dri2_dpy->wl_dpy, dri2_surf->wl_queue) ==
          -1)
         return;

   /* With vsync enabled, throttle the next frame to the frame callback. */
   if (dri2_surf->base.SwapInterval > 0) {
      dri2_surf->throttle_callback =
         wl_surface_frame(dri2_surf->wl_surface_wrapper);
      wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
                               dri2_surf);
   }

   dri2_surf->current = dri2_surf->back;
   dri2_surf->back = NULL;

   wl_surface_attach(dri2_surf->wl_surface_wrapper,
                     dri2_surf->current->wl_buffer, dri2_surf->dx,
                     dri2_surf->dy);

   dri2_surf->wl_win->attached_width = dri2_surf->base.Width;
   dri2_surf->wl_win->attached_height = dri2_surf->base.Height;
   /* reset resize growing parameters */
   dri2_surf->dx = 0;
   dri2_surf->dy = 0;

   /* Damage everything; the compositor clips to the surface size. */
   wl_surface_damage(dri2_surf->wl_surface_wrapper, 0, 0, INT32_MAX, INT32_MAX);
   wl_surface_commit(dri2_surf->wl_surface_wrapper);

   /* If we're not waiting for a frame callback then we'll at least throttle
    * to a sync callback so that we always give a chance for the compositor to
    * handle the commit and send a release event before checking for a free
    * buffer */
   if (dri2_surf->throttle_callback == NULL) {
      dri2_surf->throttle_callback = wl_display_sync(dri2_surf->wl_dpy_wrapper);
      wl_callback_add_listener(dri2_surf->throttle_callback, &throttle_listener,
                               dri2_surf);
   }

   wl_display_flush(dri2_dpy->wl_dpy);
}
2398
/* __DRIswrastLoaderExtension.getDrawableInfo: report the drawable's origin
 * and current size, refreshing the size from the wl_egl_window first. */
static void
dri2_wl_swrast_get_drawable_info(__DRIdrawable *draw, int *x, int *y, int *w,
                                 int *h, void *loaderPrivate)
{
   struct dri2_egl_surface *dri2_surf = loaderPrivate;

   /* Only the dimensions matter here, so an allocation failure inside
    * swrast_update_buffers is deliberately ignored. */
   (void)swrast_update_buffers(dri2_surf);
   *x = 0;
   *y = 0;
   *w = dri2_surf->base.Width;
   *h = dri2_surf->base.Height;
}
2411
/* __DRIswrastLoaderExtension.getImage: copy a w×h region at (x, y) from the
 * front buffer into the tightly-packed destination `data`.  Rows are copied
 * one by one because source and destination strides differ. */
static void
dri2_wl_swrast_get_image(__DRIdrawable *read, int x, int y, int w, int h,
                         char *data, void *loaderPrivate)
{
   struct dri2_egl_surface *dri2_surf = loaderPrivate;
   /* All widths/offsets below are in bytes, converted via the format's
    * bytes-per-pixel. */
   int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
   int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
   int src_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
                                                         dri2_surf->base.Width);
   int dst_stride = copy_width;
   char *src, *dst;

   src = dri2_wl_swrast_get_frontbuffer_data(dri2_surf);
   /* No front buffer yet (e.g. after a resize): return zeroed pixels. */
   if (!src) {
      memset(data, 0, copy_width * h);
      return;
   }

   assert(data != src);
   assert(copy_width <= src_stride);

   src += x_offset;
   src += y * src_stride;
   dst = data;

   /* Clamp the copy to the buffer's right and bottom edges. */
   if (copy_width > src_stride - x_offset)
      copy_width = src_stride - x_offset;
   if (h > dri2_surf->base.Height - y)
      h = dri2_surf->base.Height - y;

   for (; h > 0; h--) {
      memcpy(dst, src, copy_width);
      src += src_stride;
      dst += dst_stride;
   }
}
2448
/* __DRIswrastLoaderExtension.putImage2: copy a w×h region at (x, y) with the
 * given source stride into the back buffer, then commit it to the
 * compositor. */
static void
dri2_wl_swrast_put_image2(__DRIdrawable *draw, int op, int x, int y, int w,
                          int h, int stride, char *data, void *loaderPrivate)
{
   struct dri2_egl_surface *dri2_surf = loaderPrivate;
   /* Byte widths/offsets derived from the surface's pixel format. */
   int copy_width = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
   int dst_stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format,
                                                         dri2_surf->base.Width);
   int x_offset = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, x);
   char *src, *dst;

   assert(copy_width <= stride);

   /* Make sure we hold a locked back buffer to write into. */
   (void)swrast_update_buffers(dri2_surf);
   dst = dri2_wl_swrast_get_backbuffer_data(dri2_surf);

   /* partial copy, copy old content */
   if (copy_width < dst_stride)
      dri2_wl_swrast_get_image(draw, 0, 0, dri2_surf->base.Width,
                               dri2_surf->base.Height, dst, loaderPrivate);

   dst += x_offset;
   dst += y * dst_stride;

   src = data;

   /* drivers expect we do these checks (and some rely on it) */
   if (copy_width > dst_stride - x_offset)
      copy_width = dst_stride - x_offset;
   if (h > dri2_surf->base.Height - y)
      h = dri2_surf->base.Height - y;

   for (; h > 0; h--) {
      memcpy(dst, src, copy_width);
      src += stride;
      dst += dst_stride;
   }
   /* Present the updated back buffer. */
   dri2_wl_swrast_commit_backbuffer(dri2_surf);
}
2488
2489 static void
dri2_wl_swrast_put_image(__DRIdrawable * draw,int op,int x,int y,int w,int h,char * data,void * loaderPrivate)2490 dri2_wl_swrast_put_image(__DRIdrawable *draw, int op, int x, int y, int w,
2491 int h, char *data, void *loaderPrivate)
2492 {
2493 struct dri2_egl_surface *dri2_surf = loaderPrivate;
2494 int stride;
2495
2496 stride = dri2_wl_swrast_get_stride_for_format(dri2_surf->format, w);
2497 dri2_wl_swrast_put_image2(draw, op, x, y, w, h, stride, data, loaderPrivate);
2498 }
2499
2500 static EGLBoolean
dri2_wl_swrast_swap_buffers(_EGLDisplay * disp,_EGLSurface * draw)2501 dri2_wl_swrast_swap_buffers(_EGLDisplay *disp, _EGLSurface *draw)
2502 {
2503 struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
2504 struct dri2_egl_surface *dri2_surf = dri2_egl_surface(draw);
2505
2506 if (!dri2_surf->wl_win)
2507 return _eglError(EGL_BAD_NATIVE_WINDOW, "dri2_swap_buffers");
2508
2509 dri2_dpy->core->swapBuffers(dri2_surf->dri_drawable);
2510 if (disp->Options.Zink) {
2511 dri2_surf->current = dri2_surf->back;
2512 dri2_surf->back = NULL;
2513 }
2514 return EGL_TRUE;
2515 }
2516
/* EGL_EXT_buffer_age query for the software path: delegate to the swrast
 * loader's queryBufferAge. */
static EGLint
dri2_wl_swrast_query_buffer_age(_EGLDisplay *disp, _EGLSurface *surface)
{
   struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
   struct dri2_egl_surface *dri2_surf = dri2_egl_surface(surface);

   assert(dri2_dpy->swrast);
   return dri2_dpy->swrast->queryBufferAge(dri2_surf->dri_drawable);
}
2526
2527 static void
shm_handle_format(void * data,struct wl_shm * shm,uint32_t format)2528 shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
2529 {
2530 struct dri2_egl_display *dri2_dpy = data;
2531 int visual_idx = dri2_wl_visual_idx_from_shm_format(format);
2532
2533 if (visual_idx == -1)
2534 return;
2535
2536 BITSET_SET(dri2_dpy->formats.formats_bitmap, visual_idx);
2537 }
2538
/* Listener collecting the wl_shm formats advertised by the compositor. */
static const struct wl_shm_listener shm_listener = {
   .format = shm_handle_format,
};
2542
2543 static void
registry_handle_global_swrast(void * data,struct wl_registry * registry,uint32_t name,const char * interface,uint32_t version)2544 registry_handle_global_swrast(void *data, struct wl_registry *registry,
2545 uint32_t name, const char *interface,
2546 uint32_t version)
2547 {
2548 struct dri2_egl_display *dri2_dpy = data;
2549
2550 if (strcmp(interface, wl_shm_interface.name) == 0) {
2551 dri2_dpy->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
2552 wl_shm_add_listener(dri2_dpy->wl_shm, &shm_listener, dri2_dpy);
2553 }
2554 if (dri2_dpy->fd_render_gpu != -1 || dri2_dpy->fd_display_gpu != -1) {
2555 if (strcmp(interface, wl_drm_interface.name) == 0) {
2556 dri2_dpy->wl_drm_version = MIN2(version, 2);
2557 dri2_dpy->wl_drm_name = name;
2558 } else if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 &&
2559 version >= 3) {
2560 dri2_dpy->wl_dmabuf = wl_registry_bind(
2561 registry, name, &zwp_linux_dmabuf_v1_interface,
2562 MIN2(version,
2563 ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
2564 zwp_linux_dmabuf_v1_add_listener(dri2_dpy->wl_dmabuf, &dmabuf_listener,
2565 dri2_dpy);
2566 }
2567 }
2568 }
2569
/* wl_registry listener used by the software (swrast/Zink) path. */
static const struct wl_registry_listener registry_listener_swrast = {
   .global = registry_handle_global_swrast,
   .global_remove = registry_handle_global_remove,
};
2574
/* Display vtable for the software path; no DRM authentication needed. */
static const struct dri2_egl_display_vtbl dri2_wl_swrast_display_vtbl = {
   .authenticate = NULL,
   .create_window_surface = dri2_wl_create_window_surface,
   .create_pixmap_surface = dri2_wl_create_pixmap_surface,
   .destroy_surface = dri2_wl_destroy_surface,
   .create_image = dri2_create_image_khr,
   .swap_buffers = dri2_wl_swrast_swap_buffers,
   .get_dri_drawable = dri2_surface_get_dri_drawable,
   .query_buffer_age = dri2_wl_swrast_query_buffer_age,
};
2585
/* Swrast loader extension (v2): lets the software driver query drawable
 * size and move pixels in/out of our shm buffers. */
static const __DRIswrastLoaderExtension swrast_loader_extension = {
   .base = {__DRI_SWRAST_LOADER, 2},

   .getDrawableInfo = dri2_wl_swrast_get_drawable_info,
   .putImage = dri2_wl_swrast_put_image,
   .getImage = dri2_wl_swrast_get_image,
   .putImage2 = dri2_wl_swrast_put_image2,
};
2594
/* kopperSetSurfaceCreateInfo() stores a VkWaylandSurfaceCreateInfoKHR inside
 * kopper_vk_surface_create_storage; make sure it actually fits. */
static_assert(sizeof(struct kopper_vk_surface_create_storage) >=
                 sizeof(VkWaylandSurfaceCreateInfoKHR),
              "");
2598
2599 static void
kopperSetSurfaceCreateInfo(void * _draw,struct kopper_loader_info * out)2600 kopperSetSurfaceCreateInfo(void *_draw, struct kopper_loader_info *out)
2601 {
2602 struct dri2_egl_surface *dri2_surf = _draw;
2603 struct dri2_egl_display *dri2_dpy =
2604 dri2_egl_display(dri2_surf->base.Resource.Display);
2605 VkWaylandSurfaceCreateInfoKHR *wlsci =
2606 (VkWaylandSurfaceCreateInfoKHR *)&out->bos;
2607
2608 wlsci->sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
2609 wlsci->pNext = NULL;
2610 wlsci->flags = 0;
2611 wlsci->display = dri2_dpy->wl_dpy;
2612 wlsci->surface = dri2_surf->wl_surface_wrapper;
2613 }
2614
/* Kopper (Vulkan WSI) loader extension used by the Zink path. */
static const __DRIkopperLoaderExtension kopper_loader_extension = {
   .base = {__DRI_KOPPER_LOADER, 1},

   .SetSurfaceCreateInfo = kopperSetSurfaceCreateInfo,
};
/* Loader extensions handed to the driver in the software/Zink path
 * (NULL-terminated). */
static const __DRIextension *swrast_loader_extensions[] = {
   &swrast_loader_extension.base,
   &image_lookup_extension.base,
   &kopper_loader_extension.base,
   NULL,
};
2626
/* Initialize the software (swrast) or Zink Wayland platform: connect to the
 * compositor, bind wl_shm (mandatory) plus optionally the DRM globals for
 * Zink, then load the driver and advertise supported extensions.  On any
 * failure the partially-initialized display is torn down. */
static EGLBoolean
dri2_initialize_wayland_swrast(_EGLDisplay *disp)
{
   struct dri2_egl_display *dri2_dpy = dri2_display_create();
   if (!dri2_dpy)
      return EGL_FALSE;

   disp->DriverData = (void *)dri2_dpy;

   if (dri2_wl_formats_init(&dri2_dpy->formats) < 0)
      goto cleanup;

   /* Use the application's wl_display if one was passed in; otherwise open
    * our own connection and remember that we own it. */
   if (disp->PlatformDisplay == NULL) {
      dri2_dpy->wl_dpy = wl_display_connect(NULL);
      if (dri2_dpy->wl_dpy == NULL)
         goto cleanup;
      dri2_dpy->own_device = true;
   } else {
      dri2_dpy->wl_dpy = disp->PlatformDisplay;
   }

   /* Private queue so EGL-internal events don't mix with the app's. */
   dri2_dpy->wl_queue = wl_display_create_queue_with_name(dri2_dpy->wl_dpy,
                                                          "mesa egl swrast display queue");

   dri2_dpy->wl_dpy_wrapper = wl_proxy_create_wrapper(dri2_dpy->wl_dpy);
   if (dri2_dpy->wl_dpy_wrapper == NULL)
      goto cleanup;

   wl_proxy_set_queue((struct wl_proxy *)dri2_dpy->wl_dpy_wrapper,
                      dri2_dpy->wl_queue);

   if (dri2_dpy->own_device)
      wl_display_dispatch_pending(dri2_dpy->wl_dpy);

   dri2_dpy->wl_registry = wl_display_get_registry(dri2_dpy->wl_dpy_wrapper);
   wl_registry_add_listener(dri2_dpy->wl_registry, &registry_listener_swrast,
                            dri2_dpy);

   /* First roundtrip: collect globals; wl_shm is required for this path. */
   if (roundtrip(dri2_dpy) < 0 || dri2_dpy->wl_shm == NULL)
      goto cleanup;

   /* Second roundtrip: receive the wl_shm format events; require at least
    * one supported format. */
   if (roundtrip(dri2_dpy) < 0 ||
       !BITSET_TEST_RANGE(dri2_dpy->formats.formats_bitmap, 0,
                          dri2_dpy->formats.num_formats))
      goto cleanup;

   /* Zink can additionally use dma-buf/wl_drm; failure here is non-fatal. */
   if (disp->Options.Zink)
      dri2_initialize_wayland_drm_extensions(dri2_dpy);

   dri2_dpy->driver_name = strdup(disp->Options.Zink ? "zink" : "swrast");
   if (!dri2_load_driver_swrast(disp))
      goto cleanup;

   dri2_dpy->loader_extensions = swrast_loader_extensions;

   if (!dri2_create_screen(disp))
      goto cleanup;

   if (!dri2_setup_extensions(disp))
      goto cleanup;

   if (!dri2_setup_device(disp, true)) {
      _eglError(EGL_NOT_INITIALIZED, "DRI2: failed to setup EGLDevice");
      goto cleanup;
   }

   dri2_setup_screen(disp);

   dri2_wl_setup_swap_interval(disp);

   dri2_wl_add_configs_for_visuals(disp);

   /* Binding wl_display needs a GPU plus one of the buffer-sharing
    * protocols — only possible on the Zink variant. */
   if (disp->Options.Zink && dri2_dpy->fd_render_gpu >= 0 &&
       (dri2_dpy->wl_dmabuf || dri2_dpy->wl_drm))
      dri2_set_WL_bind_wayland_display(disp);
   disp->Extensions.EXT_buffer_age = EGL_TRUE;
   disp->Extensions.EXT_swap_buffers_with_damage = EGL_TRUE;
   disp->Extensions.EXT_present_opaque = EGL_TRUE;

   /* Fill vtbl last to prevent accidentally calling virtual function during
    * initialization.
    */
   dri2_dpy->vtbl = &dri2_wl_swrast_display_vtbl;

   return EGL_TRUE;

cleanup:
   dri2_display_destroy(disp);
   return EGL_FALSE;
}
2717
2718 EGLBoolean
dri2_initialize_wayland(_EGLDisplay * disp)2719 dri2_initialize_wayland(_EGLDisplay *disp)
2720 {
2721 if (disp->Options.ForceSoftware || disp->Options.Zink)
2722 return dri2_initialize_wayland_swrast(disp);
2723 else
2724 return dri2_initialize_wayland_drm(disp);
2725 }
2726
2727 void
dri2_teardown_wayland(struct dri2_egl_display * dri2_dpy)2728 dri2_teardown_wayland(struct dri2_egl_display *dri2_dpy)
2729 {
2730 dri2_wl_formats_fini(&dri2_dpy->formats);
2731 if (dri2_dpy->wl_drm)
2732 wl_drm_destroy(dri2_dpy->wl_drm);
2733 if (dri2_dpy->wl_dmabuf)
2734 zwp_linux_dmabuf_v1_destroy(dri2_dpy->wl_dmabuf);
2735 if (dri2_dpy->wl_shm)
2736 wl_shm_destroy(dri2_dpy->wl_shm);
2737 if (dri2_dpy->wl_registry)
2738 wl_registry_destroy(dri2_dpy->wl_registry);
2739 if (dri2_dpy->wl_dpy_wrapper)
2740 wl_proxy_wrapper_destroy(dri2_dpy->wl_dpy_wrapper);
2741 if (dri2_dpy->wl_queue)
2742 wl_event_queue_destroy(dri2_dpy->wl_queue);
2743
2744 if (dri2_dpy->own_device)
2745 wl_display_disconnect(dri2_dpy->wl_dpy);
2746 }
2747