/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <wayland-client.h>

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "drm-uapi/drm_fourcc.h"

#include "vk_instance.h"
#include "vk_physical_device.h"
#include "vk_util.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "linux-dmabuf-unstable-v1-client-protocol.h"
#include "presentation-time-client-protocol.h"
#include "tearing-control-v1-client-protocol.h"

#include <util/compiler.h>
#include <util/hash_table.h>
#include <util/timespec.h>
#include <util/u_endian.h>
#include <util/u_vector.h>
#include <util/u_dynarray.h>
#include <util/anon_file.h>
#include <util/os_time.h>

#include <loader/loader_wayland_helper.h>

#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif

struct wsi_wayland;

struct wsi_wl_format {
   VkFormat vk_format;
   uint32_t flags;
   struct u_vector modifiers;
};

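/* In-memory view of the dma-buf feedback format table: per the linux-dmabuf
 * protocol, the compositor shares a mmap'ed array of 16-byte entries, each
 * pairing a DRM format code with a modifier (plus 4 bytes of padding). */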
struct dmabuf_feedback_format_table {
   unsigned int size;
   struct {
      uint32_t format;
      uint32_t padding; /* unused */
      uint64_t modifier;
   } *data;
};

struct dmabuf_feedback_tranche {
   dev_t target_device;
   uint32_t flags;
   struct u_vector formats;
};

struct dmabuf_feedback {
   dev_t main_device;
   struct dmabuf_feedback_format_table format_table;
   struct util_dynarray tranches;
   struct dmabuf_feedback_tranche pending_tranche;
};

struct wsi_wl_display {
   /* The real wl_display */
   struct wl_display *wl_display;
   /* Actually a proxy wrapper around the event queue */
   struct wl_display *wl_display_wrapper;
   struct wl_event_queue *queue;

   struct wl_shm *wl_shm;
   struct zwp_linux_dmabuf_v1 *wl_dmabuf;
   struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
   struct wp_tearing_control_manager_v1 *tearing_control_manager;

   struct dmabuf_feedback_format_table format_table;

   /* users want per-chain wsi_wl_swapchain->present_ids.wp_presentation */
   struct wp_presentation *wp_presentation_notwrapped;

   struct wsi_wayland *wsi_wl;

   /* Formats populated by zwp_linux_dmabuf_v1 or wl_shm interfaces */
   struct u_vector formats;

   bool sw;

   dev_t main_device;
   bool same_gpu;
};

struct wsi_wayland {
   struct wsi_interface base;

   struct wsi_device *wsi;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;
};

struct wsi_wl_image {
   struct wsi_image base;
   struct wl_buffer *buffer;
   bool busy;
   int shm_fd;
   void *shm_ptr;
   unsigned shm_size;
};

enum wsi_wl_buffer_type {
   WSI_WL_BUFFER_NATIVE,
   WSI_WL_BUFFER_GPU_SHM,
   WSI_WL_BUFFER_SHM_MEMCPY,
};

struct wsi_wl_surface {
   VkIcdSurfaceWayland base;

   struct wsi_wl_swapchain *chain;
   struct wl_surface *surface;
   struct wsi_wl_display *display;

   struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
   struct dmabuf_feedback dmabuf_feedback, pending_dmabuf_feedback;
};

struct wsi_wl_swapchain {
   struct wsi_swapchain base;

   struct wsi_wl_surface *wsi_wl_surface;
   struct wp_tearing_control_v1 *tearing_control;

   struct wl_callback *frame;

   VkExtent2D extent;
   VkFormat vk_format;
   enum wsi_wl_buffer_type buffer_type;
   uint32_t drm_format;
   enum wl_shm_format shm_format;

   bool suboptimal;

   uint32_t num_drm_modifiers;
   const uint64_t *drm_modifiers;

   VkPresentModeKHR present_mode;
   bool fifo_ready;

   struct {
      pthread_mutex_t lock; /* protects all members */
      uint64_t max_completed;
      struct wl_list outstanding_list;
      pthread_cond_t list_advanced;
      struct wl_event_queue *queue;
      struct wp_presentation *wp_presentation;
      /* Fallback when wp_presentation is not supported */
      struct wl_surface *surface;
      bool dispatch_in_progress;
   } present_ids;

   struct wsi_wl_image images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)

enum wsi_wl_fmt_flag {
   WSI_WL_FMT_ALPHA = 1 << 0,
   WSI_WL_FMT_OPAQUE = 1 << 1,
};

static struct wsi_wl_format *
find_format(struct u_vector *formats, VkFormat format)
{
   struct wsi_wl_format *f;

   u_vector_foreach(f, formats)
      if (f->vk_format == format)
         return f;

   return NULL;
}

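/* Add a Vulkan format to the list, or extend its flags if it is already
 * present. Formats the driver cannot render to with optimal tiling are
 * rejected, since they could never back a swapchain image. */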
static struct wsi_wl_format *
wsi_wl_display_add_vk_format(struct wsi_wl_display *display,
                             struct u_vector *formats,
                             VkFormat format, uint32_t flags)
{
   assert(flags & (WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE));

   /* Don't add a format that's already in the list */
   struct wsi_wl_format *f = find_format(formats, format);
   if (f) {
      f->flags |= flags;
      return f;
   }

   /* Don't add formats that aren't renderable. */
   VkFormatProperties props;

   display->wsi_wl->wsi->GetPhysicalDeviceFormatProperties(display->wsi_wl->physical_device,
                                                           format, &props);
   if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
      return NULL;

   struct u_vector modifiers;
   if (!u_vector_init_pow2(&modifiers, 4, sizeof(uint64_t)))
      return NULL;

   f = u_vector_add(formats);
   if (!f) {
      u_vector_finish(&modifiers);
      return NULL;
   }

   f->vk_format = format;
   f->flags = flags;
   f->modifiers = modifiers;

   return f;
}

static void
wsi_wl_format_add_modifier(struct wsi_wl_format *format, uint64_t modifier)
{
   uint64_t *mod;

   if (modifier == DRM_FORMAT_MOD_INVALID)
      return;

   u_vector_foreach(mod, &format->modifiers)
      if (*mod == modifier)
         return;

   mod = u_vector_add(&format->modifiers);
   if (mod)
      *mod = modifier;
}

static void
wsi_wl_display_add_vk_format_modifier(struct wsi_wl_display *display,
                                      struct u_vector *formats,
                                      VkFormat vk_format, uint32_t flags,
                                      uint64_t modifier)
{
   struct wsi_wl_format *format;

   format = wsi_wl_display_add_vk_format(display, formats, vk_format, flags);
   if (format)
      wsi_wl_format_add_modifier(format, modifier);
}

static void
wsi_wl_display_add_drm_format_modifier(struct wsi_wl_display *display,
                                       struct u_vector *formats,
                                       uint32_t drm_format, uint64_t modifier)
{
   switch (drm_format) {
#if 0
   /* TODO: These are only available when VK_EXT_4444_formats is enabled, so
    * we probably need to make their use conditional on this extension. */
   case DRM_FORMAT_ARGB4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4R4G4B4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4R4G4B4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4B4G4R4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A4B4G4R4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
#endif

   /* Vulkan _PACKN formats have the same component order as DRM formats
    * on little endian systems, on big endian there exists no analog. */
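   /* For example, on little endian both DRM_FORMAT_RGB565 and
    * VK_FORMAT_R5G6B5_UNORM_PACK16 describe a 16-bit word with R in the high
    * 5 bits, G in the middle 6 and B in the low 5. */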
#if UTIL_ARCH_LITTLE_ENDIAN
   case DRM_FORMAT_RGBA4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R4G4B4A4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_RGBX4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R4G4B4A4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_BGRA4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B4G4R4A4_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_BGRX4444:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B4G4R4A4_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_RGB565:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G6B5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      break;
   case DRM_FORMAT_BGR565:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G6R5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      break;
   case DRM_FORMAT_ARGB1555:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A1R5G5B5_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB1555:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A1R5G5B5_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_RGBA5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G5B5A1_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_RGBX5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R5G5B5A1_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_BGRA5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G5R5A1_UNORM_PACK16,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_BGRX5551:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B5G5R5A1_UNORM_PACK16,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ARGB2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2R10G10B10_UNORM_PACK32,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2R10G10B10_UNORM_PACK32,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2B10G10R10_UNORM_PACK32,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR2101010:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_A2B10G10R10_UNORM_PACK32,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;

   /* Vulkan 16-bits-per-channel formats have an inverted channel order
    * compared to DRM formats, just like the 8-bits-per-channel ones.
    * On little endian systems the memory representation of each channel
    * matches the DRM formats'. */
   case DRM_FORMAT_ABGR16161616:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR16161616:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR16161616F:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_SFLOAT,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XBGR16161616F:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R16G16B16A16_SFLOAT,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
#endif

   /* Non-packed 8-bit formats have an inverted channel order compared to the
    * little endian DRM formats, because the DRM channel ordering is high->low
    * but the vulkan channel ordering is in memory byte order
    *
    * For all UNORM formats which have a SRGB variant, we must support both if
    * we can. SRGB in this context means that rendering to it will result in a
    * linear -> nonlinear SRGB colorspace conversion before the data is stored.
    * The inverse function is applied when sampling from SRGB images.
    * From Wayland's perspective nothing changes, the difference is just how
    * Vulkan interprets the pixel data. */
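   /* For example, little endian DRM_FORMAT_ABGR8888 stores bytes R,G,B,A in
    * memory order, which is exactly VK_FORMAT_R8G8B8A8_*, whereas
    * DRM_FORMAT_ARGB8888 stores B,G,R,A and therefore maps to B8G8R8A8. */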
   case DRM_FORMAT_XBGR8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8_SRGB,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8_UNORM,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_SRGB,
                                            WSI_WL_FMT_OPAQUE, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ABGR8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_SRGB,
                                            WSI_WL_FMT_ALPHA, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_R8G8B8A8_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   case DRM_FORMAT_XRGB8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8_SRGB,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8_UNORM,
                                            WSI_WL_FMT_ALPHA | WSI_WL_FMT_OPAQUE,
                                            modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_SRGB,
                                            WSI_WL_FMT_OPAQUE, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_UNORM,
                                            WSI_WL_FMT_OPAQUE, modifier);
      break;
   case DRM_FORMAT_ARGB8888:
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_SRGB,
                                            WSI_WL_FMT_ALPHA, modifier);
      wsi_wl_display_add_vk_format_modifier(display, formats,
                                            VK_FORMAT_B8G8R8A8_UNORM,
                                            WSI_WL_FMT_ALPHA, modifier);
      break;
   }
}

static uint32_t
drm_format_for_wl_shm_format(enum wl_shm_format shm_format)
{
   /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
   switch (shm_format) {
   case WL_SHM_FORMAT_ARGB8888:
      return DRM_FORMAT_ARGB8888;
   case WL_SHM_FORMAT_XRGB8888:
      return DRM_FORMAT_XRGB8888;
   default:
      return shm_format;
   }
}

static void
wsi_wl_display_add_wl_shm_format(struct wsi_wl_display *display,
                                 struct u_vector *formats,
                                 enum wl_shm_format shm_format)
{
   uint32_t drm_format = drm_format_for_wl_shm_format(shm_format);

   wsi_wl_display_add_drm_format_modifier(display, formats, drm_format,
                                          DRM_FORMAT_MOD_INVALID);
}

static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   switch (vk_format) {
#if 0
   case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ARGB4444 : DRM_FORMAT_XRGB4444;
   case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ABGR4444 : DRM_FORMAT_XBGR4444;
#endif
#if UTIL_ARCH_LITTLE_ENDIAN
   case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_RGBA4444 : DRM_FORMAT_RGBX4444;
   case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
      return alpha ? DRM_FORMAT_BGRA4444 : DRM_FORMAT_BGRX4444;
   case VK_FORMAT_R5G6B5_UNORM_PACK16:
      return DRM_FORMAT_RGB565;
   case VK_FORMAT_B5G6R5_UNORM_PACK16:
      return DRM_FORMAT_BGR565;
   case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
      return alpha ? DRM_FORMAT_ARGB1555 : DRM_FORMAT_XRGB1555;
   case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
      return alpha ? DRM_FORMAT_RGBA5551 : DRM_FORMAT_RGBX5551;
   case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
      return alpha ? DRM_FORMAT_BGRA5551 : DRM_FORMAT_BGRX5551;
   case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
      return alpha ? DRM_FORMAT_ARGB2101010 : DRM_FORMAT_XRGB2101010;
   case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
      return alpha ? DRM_FORMAT_ABGR2101010 : DRM_FORMAT_XBGR2101010;
   case VK_FORMAT_R16G16B16A16_UNORM:
      return alpha ? DRM_FORMAT_ABGR16161616 : DRM_FORMAT_XBGR16161616;
   case VK_FORMAT_R16G16B16A16_SFLOAT:
      return alpha ? DRM_FORMAT_ABGR16161616F : DRM_FORMAT_XBGR16161616F;
#endif
   case VK_FORMAT_R8G8B8_UNORM:
   case VK_FORMAT_R8G8B8_SRGB:
      return DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R8G8B8A8_UNORM:
   case VK_FORMAT_R8G8B8A8_SRGB:
      return alpha ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_XBGR8888;
   case VK_FORMAT_B8G8R8_UNORM:
   case VK_FORMAT_B8G8R8_SRGB:
      return DRM_FORMAT_BGRX8888;
   case VK_FORMAT_B8G8R8A8_UNORM:
   case VK_FORMAT_B8G8R8A8_SRGB:
      return alpha ? DRM_FORMAT_ARGB8888 : DRM_FORMAT_XRGB8888;

   default:
      assert(!"Unsupported Vulkan format");
      return DRM_FORMAT_INVALID;
   }
}

static enum wl_shm_format
wl_shm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   uint32_t drm_format = wl_drm_format_for_vk_format(vk_format, alpha);
   if (drm_format == DRM_FORMAT_INVALID) {
      return 0;
   }

   /* wl_shm formats are identical to DRM, except ARGB8888 and XRGB8888 */
   switch (drm_format) {
   case DRM_FORMAT_ARGB8888:
      return WL_SHM_FORMAT_ARGB8888;
   case DRM_FORMAT_XRGB8888:
      return WL_SHM_FORMAT_XRGB8888;
   default:
      return drm_format;
   }
}

static void
dmabuf_handle_format(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                     uint32_t format)
{
   /* Formats are implicitly advertised by the modifier event, so we ignore
    * them here. */
}

static void
dmabuf_handle_modifier(void *data, struct zwp_linux_dmabuf_v1 *dmabuf,
                       uint32_t format, uint32_t modifier_hi,
                       uint32_t modifier_lo)
{
   struct wsi_wl_display *display = data;
   uint64_t modifier;

   /* Ignore this if the compositor advertised dma-buf feedback. From version 4
    * onwards (when dma-buf feedback was introduced), the compositor should not
    * advertise this event anymore, but let's keep this for safety. */
   if (display->wl_dmabuf_feedback)
      return;

   modifier = ((uint64_t) modifier_hi << 32) | modifier_lo;
   wsi_wl_display_add_drm_format_modifier(display, &display->formats,
                                          format, modifier);
}

static const struct zwp_linux_dmabuf_v1_listener dmabuf_listener = {
   dmabuf_handle_format,
   dmabuf_handle_modifier,
};

static void
dmabuf_feedback_format_table_fini(struct dmabuf_feedback_format_table *format_table)
{
   if (format_table->data && format_table->data != MAP_FAILED)
      munmap(format_table->data, format_table->size);
}

static void
dmabuf_feedback_format_table_init(struct dmabuf_feedback_format_table *format_table)
{
   memset(format_table, 0, sizeof(*format_table));
}

static void
dmabuf_feedback_tranche_fini(struct dmabuf_feedback_tranche *tranche)
{
   struct wsi_wl_format *format;

   u_vector_foreach(format, &tranche->formats)
      u_vector_finish(&format->modifiers);

   u_vector_finish(&tranche->formats);
}

static int
dmabuf_feedback_tranche_init(struct dmabuf_feedback_tranche *tranche)
{
   memset(tranche, 0, sizeof(*tranche));

   if (!u_vector_init(&tranche->formats, 8, sizeof(struct wsi_wl_format)))
      return -1;

   return 0;
}

static void
dmabuf_feedback_fini(struct dmabuf_feedback *dmabuf_feedback)
{
   dmabuf_feedback_tranche_fini(&dmabuf_feedback->pending_tranche);

   util_dynarray_foreach(&dmabuf_feedback->tranches,
                         struct dmabuf_feedback_tranche, tranche)
      dmabuf_feedback_tranche_fini(tranche);
   util_dynarray_fini(&dmabuf_feedback->tranches);

   dmabuf_feedback_format_table_fini(&dmabuf_feedback->format_table);
}

static int
dmabuf_feedback_init(struct dmabuf_feedback *dmabuf_feedback)
{
   memset(dmabuf_feedback, 0, sizeof(*dmabuf_feedback));

   if (dmabuf_feedback_tranche_init(&dmabuf_feedback->pending_tranche) < 0)
      return -1;

   util_dynarray_init(&dmabuf_feedback->tranches, NULL);

   dmabuf_feedback_format_table_init(&dmabuf_feedback->format_table);

   return 0;
}

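/* Listener for the default (per-display) dma-buf feedback object. It is only
 * used at display initialization to learn the main device and collect the
 * supported format/modifier pairs; the per-tranche device and flag events
 * are deliberately ignored. */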
static void
default_dmabuf_feedback_format_table(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
                                     int32_t fd, uint32_t size)
{
   struct wsi_wl_display *display = data;

   display->format_table.size = size;
   display->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);

   close(fd);
}

static void
default_dmabuf_feedback_main_device(void *data,
                                    struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                    struct wl_array *device)
{
   struct wsi_wl_display *display = data;

   assert(device->size == sizeof(dev_t));
   memcpy(&display->main_device, device->data, device->size);
}

static void
default_dmabuf_feedback_tranche_target_device(void *data,
                                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                              struct wl_array *device)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_tranche_flags(void *data,
                                      struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                      uint32_t flags)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_tranche_formats(void *data,
                                        struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                        struct wl_array *indices)
{
   struct wsi_wl_display *display = data;
   uint32_t format;
   uint64_t modifier;
   uint16_t *index;

   /* We couldn't map the format table or the compositor didn't advertise it,
    * so we have to ignore the feedback. */
   if (display->format_table.data == MAP_FAILED ||
       display->format_table.data == NULL)
      return;

   wl_array_for_each(index, indices) {
      format = display->format_table.data[*index].format;
      modifier = display->format_table.data[*index].modifier;
      wsi_wl_display_add_drm_format_modifier(display, &display->formats,
                                             format, modifier);
   }
}

static void
default_dmabuf_feedback_tranche_done(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}

static void
default_dmabuf_feedback_done(void *data,
                             struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   /* ignore this event */
}

static const struct zwp_linux_dmabuf_feedback_v1_listener
dmabuf_feedback_listener = {
   .format_table = default_dmabuf_feedback_format_table,
   .main_device = default_dmabuf_feedback_main_device,
   .tranche_target_device = default_dmabuf_feedback_tranche_target_device,
   .tranche_flags = default_dmabuf_feedback_tranche_flags,
   .tranche_formats = default_dmabuf_feedback_tranche_formats,
   .tranche_done = default_dmabuf_feedback_tranche_done,
   .done = default_dmabuf_feedback_done,
};

static void
shm_handle_format(void *data, struct wl_shm *shm, uint32_t format)
{
   struct wsi_wl_display *display = data;

   wsi_wl_display_add_wl_shm_format(display, &display->formats, format);
}

static const struct wl_shm_listener shm_listener = {
   .format = shm_handle_format
};

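/* Registry listener: in software mode we only bind wl_shm; otherwise we bind
 * zwp_linux_dmabuf_v1 (version 3+, capped at the feedback revision we
 * understand). wp_presentation and tearing control are bound in either mode
 * when the compositor offers them. */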
static void
registry_handle_global(void *data, struct wl_registry *registry,
                       uint32_t name, const char *interface, uint32_t version)
{
   struct wsi_wl_display *display = data;

   if (display->sw) {
      if (strcmp(interface, wl_shm_interface.name) == 0) {
         display->wl_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
         wl_shm_add_listener(display->wl_shm, &shm_listener, display);
      }
   } else {
      if (strcmp(interface, zwp_linux_dmabuf_v1_interface.name) == 0 && version >= 3) {
         display->wl_dmabuf =
            wl_registry_bind(registry, name, &zwp_linux_dmabuf_v1_interface,
                             MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
         zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
                                          &dmabuf_listener, display);
      }
   }

   if (strcmp(interface, wp_presentation_interface.name) == 0) {
      display->wp_presentation_notwrapped =
         wl_registry_bind(registry, name, &wp_presentation_interface, 1);
   } else if (strcmp(interface, wp_tearing_control_manager_v1_interface.name) == 0) {
      display->tearing_control_manager =
         wl_registry_bind(registry, name, &wp_tearing_control_manager_v1_interface, 1);
   }
}

static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{ /* No-op */ }

static const struct wl_registry_listener registry_listener = {
   registry_handle_global,
   registry_handle_global_remove
};

static void
wsi_wl_display_finish(struct wsi_wl_display *display)
{
   struct wsi_wl_format *f;
   u_vector_foreach(f, &display->formats)
      u_vector_finish(&f->modifiers);
   u_vector_finish(&display->formats);
   if (display->wl_shm)
      wl_shm_destroy(display->wl_shm);
   if (display->wl_dmabuf)
      zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
   if (display->wp_presentation_notwrapped)
      wp_presentation_destroy(display->wp_presentation_notwrapped);
   if (display->tearing_control_manager)
      wp_tearing_control_manager_v1_destroy(display->tearing_control_manager);
   if (display->wl_display_wrapper)
      wl_proxy_wrapper_destroy(display->wl_display_wrapper);
   if (display->queue)
      wl_event_queue_destroy(display->queue);
}

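/* Connect a wsi_wl_display to the compositor: create a private event queue
 * and a wrapped display proxy, bind globals from the registry, then
 * round-trip once for the globals, once for the default dma-buf feedback
 * (main device) and once more for the format/modifier lists. */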
static VkResult
wsi_wl_display_init(struct wsi_wayland *wsi_wl,
                    struct wsi_wl_display *display,
                    struct wl_display *wl_display,
                    bool get_format_list, bool sw)
{
   VkResult result = VK_SUCCESS;
   memset(display, 0, sizeof(*display));

   if (!u_vector_init(&display->formats, 8, sizeof(struct wsi_wl_format)))
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   display->wsi_wl = wsi_wl;
   display->wl_display = wl_display;
   display->sw = sw;

   display->queue = wl_display_create_queue_with_name(wl_display,
                                                      "mesa vk display queue");
   if (!display->queue) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   display->wl_display_wrapper = wl_proxy_create_wrapper(wl_display);
   if (!display->wl_display_wrapper) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_proxy_set_queue((struct wl_proxy *) display->wl_display_wrapper,
                      display->queue);

   struct wl_registry *registry =
      wl_display_get_registry(display->wl_display_wrapper);
   if (!registry) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_registry_add_listener(registry, &registry_listener, display);

   /* Round-trip to get wl_shm and zwp_linux_dmabuf_v1 globals */
   wl_display_roundtrip_queue(display->wl_display, display->queue);
   if (!display->wl_dmabuf && !display->wl_shm) {
      result = VK_ERROR_SURFACE_LOST_KHR;
      goto fail_registry;
   }

   /* Caller doesn't expect us to query formats/modifiers, so return */
   if (!get_format_list)
      goto out;

   /* Default assumption */
   display->same_gpu = true;

   /* Get the default dma-buf feedback */
   if (display->wl_dmabuf && zwp_linux_dmabuf_v1_get_version(display->wl_dmabuf) >=
                             ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION) {
      dmabuf_feedback_format_table_init(&display->format_table);
      display->wl_dmabuf_feedback =
         zwp_linux_dmabuf_v1_get_default_feedback(display->wl_dmabuf);
      zwp_linux_dmabuf_feedback_v1_add_listener(display->wl_dmabuf_feedback,
                                                &dmabuf_feedback_listener, display);

      /* Round-trip again to fetch dma-buf feedback */
      wl_display_roundtrip_queue(display->wl_display, display->queue);

      if (wsi_wl->wsi->drm_info.hasRender ||
          wsi_wl->wsi->drm_info.hasPrimary) {
         /* Apparently some Wayland compositors do not send the render
          * device node but the primary one, so test against both.
          */
         display->same_gpu =
            (wsi_wl->wsi->drm_info.hasRender &&
             major(display->main_device) == wsi_wl->wsi->drm_info.renderMajor &&
             minor(display->main_device) == wsi_wl->wsi->drm_info.renderMinor) ||
            (wsi_wl->wsi->drm_info.hasPrimary &&
             major(display->main_device) == wsi_wl->wsi->drm_info.primaryMajor &&
             minor(display->main_device) == wsi_wl->wsi->drm_info.primaryMinor);
      }
   }

   /* Round-trip again to get formats and modifiers */
   wl_display_roundtrip_queue(display->wl_display, display->queue);

   if (wsi_wl->wsi->force_bgra8_unorm_first) {
      /* Find BGRA8_UNORM in the list and swap it to the first position if we
       * can find it. Some apps get confused if SRGB is first in the list.
       */
      struct wsi_wl_format *first_fmt = u_vector_tail(&display->formats);
      struct wsi_wl_format *f, tmp_fmt;
      f = find_format(&display->formats, VK_FORMAT_B8G8R8A8_UNORM);
      if (f) {
         tmp_fmt = *f;
         *f = *first_fmt;
         *first_fmt = tmp_fmt;
      }
   }

out:
   /* We don't need this anymore */
   wl_registry_destroy(registry);

   /* Destroy default dma-buf feedback object and format table */
   if (display->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(display->wl_dmabuf_feedback);
      display->wl_dmabuf_feedback = NULL;
      dmabuf_feedback_format_table_fini(&display->format_table);
   }

   return VK_SUCCESS;

fail_registry:
   if (registry)
      wl_registry_destroy(registry);

fail:
   wsi_wl_display_finish(display);
   return result;
}

static VkResult
wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display,
                      bool sw,
                      struct wsi_wl_display **display_out)
{
   struct wsi_wl_display *display =
      vk_alloc(wsi->alloc, sizeof(*display), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!display)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   VkResult result = wsi_wl_display_init(wsi, display, wl_display, true,
                                         sw);
   if (result != VK_SUCCESS) {
      vk_free(wsi->alloc, display);
      return result;
   }

   *display_out = display;

   return result;
}

static void
wsi_wl_display_destroy(struct wsi_wl_display *display)
{
   struct wsi_wayland *wsi = display->wsi_wl;
   wsi_wl_display_finish(display);
   vk_free(wsi->alloc, display);
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                   uint32_t queueFamilyIndex,
                                                   struct wl_display *wl_display)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
   struct wsi_device *wsi_device = pdevice->wsi_device;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   if (!(wsi_device->queue_supports_blit & BITFIELD64_BIT(queueFamilyIndex)))
      return false;

   struct wsi_wl_display display;
   VkResult ret = wsi_wl_display_init(wsi, &display, wl_display, false,
                                      wsi_device->sw);
   if (ret == VK_SUCCESS)
      wsi_wl_display_finish(&display);

   return ret == VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
                           struct wsi_device *wsi_device,
                           uint32_t queueFamilyIndex,
                           VkBool32* pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

static uint32_t
wsi_wl_surface_get_min_image_count(const VkSurfacePresentModeEXT *present_mode)
{
   if (present_mode && (present_mode->presentMode == VK_PRESENT_MODE_FIFO_KHR ||
                        present_mode->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)) {
      /* If we receive a FIFO present mode, only 2 images are required for
       * forward progress. Performance with 2 images will be questionable, but
       * we only allow it for applications using the new API, so we don't risk
       * breaking any existing apps this way. Other ICDs expose 2 images here
       * already. */
      return 2;
   } else {
      /* For true mailbox mode, we need at least 4 images:
       *  1) One to scan out from
       *  2) One to have queued for scan-out
       *  3) One to be currently held by the Wayland compositor
       *  4) One to render to
       */
      return 4;
   }
}

static uint32_t
wsi_wl_surface_get_min_image_count_for_mode_group(const VkSwapchainPresentModesCreateInfoEXT *modes)
{
   /* If we aren't given the PresentModesCreateInfo struct, we must be
    * backwards compatible and assume that minImageCount is the default one,
    * i.e. 4, which supports both FIFO and MAILBOX. */
   if (!modes) {
      return wsi_wl_surface_get_min_image_count(NULL);
   }

   uint32_t max_required = 0;
   for (uint32_t i = 0; i < modes->presentModeCount; i++) {
      const VkSurfacePresentModeEXT mode = {
         VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT,
         NULL,
         modes->pPresentModes[i]
      };
      max_required = MAX2(max_required, wsi_wl_surface_get_min_image_count(&mode));
   }

   return max_required;
}

static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
                                struct wsi_device *wsi_device,
                                const VkSurfacePresentModeEXT *present_mode,
                                VkSurfaceCapabilitiesKHR* caps)
{
   caps->minImageCount = wsi_wl_surface_get_min_image_count(present_mode);
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   caps->maxImageExtent = (VkExtent2D) {
      wsi_device->maxImageDimension2D,
      wsi_device->maxImageDimension2D,
   };

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags = wsi_caps_get_image_usage();

   VK_FROM_HANDLE(vk_physical_device, pdevice, wsi_device->pdevice);
   if (pdevice->supported_extensions.EXT_attachment_feedback_loop_layout)
      caps->supportedUsageFlags |= VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT;

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
                                 struct wsi_device *wsi_device,
                                 const void *info_next,
                                 VkSurfaceCapabilities2KHR* caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   const VkSurfacePresentModeEXT *present_mode = vk_find_struct_const(info_next, SURFACE_PRESENT_MODE_EXT);

   VkResult result =
      wsi_wl_surface_get_capabilities(surface, wsi_device, present_mode,
                                      &caps->surfaceCapabilities);

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT: {
         /* Unsupported. */
         VkSurfacePresentScalingCapabilitiesEXT *scaling = (void *)ext;
         scaling->supportedPresentScaling = 0;
         scaling->supportedPresentGravityX = 0;
         scaling->supportedPresentGravityY = 0;
         scaling->minScaledImageExtent = caps->surfaceCapabilities.minImageExtent;
         scaling->maxScaledImageExtent = caps->surfaceCapabilities.maxImageExtent;
         break;
      }

      case VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT: {
         /* Can easily toggle between FIFO and MAILBOX on Wayland. */
         VkSurfacePresentModeCompatibilityEXT *compat = (void *)ext;
         if (compat->pPresentModes) {
            assert(present_mode);
            VK_OUTARRAY_MAKE_TYPED(VkPresentModeKHR, modes, compat->pPresentModes, &compat->presentModeCount);
            /* Must always return the queried present mode even when truncating. */
            vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
               *mode = present_mode->presentMode;
            }
            switch (present_mode->presentMode) {
            case VK_PRESENT_MODE_MAILBOX_KHR:
               vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                  *mode = VK_PRESENT_MODE_FIFO_KHR;
               }
               break;
            case VK_PRESENT_MODE_FIFO_KHR:
               vk_outarray_append_typed(VkPresentModeKHR, &modes, mode) {
                  *mode = VK_PRESENT_MODE_MAILBOX_KHR;
               }
               break;
            default:
               break;
            }
         } else {
            if (!present_mode) {
               wsi_common_vk_warn_once("Use of VkSurfacePresentModeCompatibilityEXT "
                                       "without a VkSurfacePresentModeEXT set. This is an "
                                       "application bug.\n");
               compat->presentModeCount = 1;
            } else {
               switch (present_mode->presentMode) {
               case VK_PRESENT_MODE_MAILBOX_KHR:
               case VK_PRESENT_MODE_FIFO_KHR:
                  compat->presentModeCount = 2;
                  break;
               default:
                  compat->presentModeCount = 1;
                  break;
               }
            }
         }
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}

static VkResult
wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                           struct wsi_device *wsi_device,
                           uint32_t* pSurfaceFormatCount,
                           VkSurfaceFormatKHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw))
      return VK_ERROR_SURFACE_LOST_KHR;

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   struct wsi_wl_format *disp_fmt;
   u_vector_foreach(disp_fmt, &display.formats) {
      /* Skip formats for which we can't support both the alpha and the
       * opaque variants.
       */
      if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
          !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
         continue;

      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, out_fmt) {
         out_fmt->format = disp_fmt->vk_format;
         out_fmt->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   wsi_wl_display_finish(&display);

   return vk_outarray_status(&out);
}

static VkResult
wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
                            struct wsi_device *wsi_device,
                            const void *info_next,
                            uint32_t* pSurfaceFormatCount,
                            VkSurfaceFormat2KHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw))
      return VK_ERROR_SURFACE_LOST_KHR;

   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   struct wsi_wl_format *disp_fmt;
   u_vector_foreach(disp_fmt, &display.formats) {
      /* Skip formats for which we can't support both the alpha and the
       * opaque variants.
       */
      if (!(disp_fmt->flags & WSI_WL_FMT_ALPHA) ||
          !(disp_fmt->flags & WSI_WL_FMT_OPAQUE))
         continue;

      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, out_fmt) {
         out_fmt->surfaceFormat.format = disp_fmt->vk_format;
         out_fmt->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   wsi_wl_display_finish(&display);

   return vk_outarray_status(&out);
}

static VkResult
wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *icd_surface,
                                 struct wsi_device *wsi_device,
                                 uint32_t* pPresentModeCount,
                                 VkPresentModeKHR* pPresentModes)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true,
                           wsi_device->sw))
      return VK_ERROR_SURFACE_LOST_KHR;

   VkPresentModeKHR present_modes[3];
   uint32_t present_modes_count = 0;

   /* The following two modes are always supported */
   present_modes[present_modes_count++] = VK_PRESENT_MODE_MAILBOX_KHR;
   present_modes[present_modes_count++] = VK_PRESENT_MODE_FIFO_KHR;

   if (display.tearing_control_manager)
      present_modes[present_modes_count++] = VK_PRESENT_MODE_IMMEDIATE_KHR;

   assert(present_modes_count <= ARRAY_SIZE(present_modes));
   wsi_wl_display_finish(&display);

   if (pPresentModes == NULL) {
      *pPresentModeCount = present_modes_count;
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, present_modes_count);
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   if (*pPresentModeCount < present_modes_count)
      return VK_INCOMPLETE;
   else
      return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_present_rectangles(VkIcdSurfaceBase *surface,
                                      struct wsi_device *wsi_device,
                                      uint32_t* pRectCount,
                                      VkRect2D* pRects)
{
   VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);

   vk_outarray_append_typed(VkRect2D, &out, rect) {
      /* We don't know a size so just return the usual "I don't know." */
      *rect = (VkRect2D) {
         .offset = { 0, 0 },
         .extent = { UINT32_MAX, UINT32_MAX },
      };
   }

   return vk_outarray_status(&out);
}

void
wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
                       const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   struct wsi_wl_surface *wsi_wl_surface =
      wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);

   if (wsi_wl_surface->wl_dmabuf_feedback) {
      zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
      dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
      dmabuf_feedback_fini(&wsi_wl_surface->pending_dmabuf_feedback);
   }

   if (wsi_wl_surface->surface)
      wl_proxy_wrapper_destroy(wsi_wl_surface->surface);

   if (wsi_wl_surface->display)
      wsi_wl_display_destroy(wsi_wl_surface->display);

   vk_free2(&instance->alloc, pAllocator, wsi_wl_surface);
}

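/* Per the linux-dmabuf protocol, tranches arrive ordered from most to least
 * preferred, so the first tranche containing the requested format wins. */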
static struct wsi_wl_format *
pick_format_from_surface_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface,
                                         VkFormat vk_format)
{
   struct wsi_wl_format *f = NULL;

   /* If the main_device was not advertised, we don't have valid feedback */
   if (wsi_wl_surface->dmabuf_feedback.main_device == 0)
      return NULL;

   util_dynarray_foreach(&wsi_wl_surface->dmabuf_feedback.tranches,
                         struct dmabuf_feedback_tranche, tranche) {
      f = find_format(&tranche->formats, vk_format);
      if (f)
         break;
   }

   return f;
}

static void
surface_dmabuf_feedback_format_table(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *zwp_linux_dmabuf_feedback_v1,
                                     int32_t fd, uint32_t size)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   feedback->format_table.size = size;
   feedback->format_table.data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);

   close(fd);
}

static void
surface_dmabuf_feedback_main_device(void *data,
                                    struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                    struct wl_array *device)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   memcpy(&feedback->main_device, device->data, sizeof(feedback->main_device));
}

static void
surface_dmabuf_feedback_tranche_target_device(void *data,
                                              struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                              struct wl_array *device)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   memcpy(&feedback->pending_tranche.target_device, device->data,
          sizeof(feedback->pending_tranche.target_device));
}

static void
surface_dmabuf_feedback_tranche_flags(void *data,
                                      struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                      uint32_t flags)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   feedback->pending_tranche.flags = flags;
}

static void
surface_dmabuf_feedback_tranche_formats(void *data,
                                        struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback,
                                        struct wl_array *indices)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;
   uint32_t format;
   uint64_t modifier;
   uint16_t *index;

   /* The compositor may or may not advertise a format table. If it does, we
    * use it. Otherwise, we steal the most recently advertised format table.
    * If there is no previously advertised format table either, the compositor
    * did something wrong. */
   if (feedback->format_table.data == NULL) {
      feedback->format_table = wsi_wl_surface->dmabuf_feedback.format_table;
      dmabuf_feedback_format_table_init(&wsi_wl_surface->dmabuf_feedback.format_table);
   }
   if (feedback->format_table.data == MAP_FAILED ||
       feedback->format_table.data == NULL)
      return;

   wl_array_for_each(index, indices) {
      format = feedback->format_table.data[*index].format;
      modifier = feedback->format_table.data[*index].modifier;

      wsi_wl_display_add_drm_format_modifier(wsi_wl_surface->display,
                                             &wsi_wl_surface->pending_dmabuf_feedback.pending_tranche.formats,
                                             format, modifier);
   }
}

static void
surface_dmabuf_feedback_tranche_done(void *data,
                                     struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
{
   struct wsi_wl_surface *wsi_wl_surface = data;
   struct dmabuf_feedback *feedback = &wsi_wl_surface->pending_dmabuf_feedback;

   /* Add the tranche to the array of tranches. */
   util_dynarray_append(&feedback->tranches, struct dmabuf_feedback_tranche,
                        feedback->pending_tranche);

   dmabuf_feedback_tranche_init(&feedback->pending_tranche);
}

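/* Order-insensitive comparison of two modifier sets; neither list needs to
 * be sorted, but entries are assumed to be unique within each set. */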
1466 static bool
sets_of_modifiers_are_the_same(uint32_t num_drm_modifiers_A,const uint64_t * modifiers_A,uint32_t num_drm_modifiers_B,const uint64_t * modifiers_B)1467 sets_of_modifiers_are_the_same(uint32_t num_drm_modifiers_A, const uint64_t *modifiers_A,
1468 uint32_t num_drm_modifiers_B, const uint64_t *modifiers_B)
1469 {
1470 uint32_t i, j;
1471 bool mod_found;
1472
1473 if (num_drm_modifiers_A != num_drm_modifiers_B)
1474 return false;
1475
1476 for (i = 0; i < num_drm_modifiers_A; i++) {
1477 mod_found = false;
1478 for (j = 0; j < num_drm_modifiers_B; j++) {
1479 if (modifiers_A[i] == modifiers_B[j]) {
1480 mod_found = true;
1481 break;
1482 }
1483 }
1484 if (!mod_found)
1485 return false;
1486 }
1487
1488 return true;
1489 }
1490
1491 static void
surface_dmabuf_feedback_done(void * data,struct zwp_linux_dmabuf_feedback_v1 * dmabuf_feedback)1492 surface_dmabuf_feedback_done(void *data,
1493 struct zwp_linux_dmabuf_feedback_v1 *dmabuf_feedback)
1494 {
1495 struct wsi_wl_surface *wsi_wl_surface = data;
1496 struct wsi_wl_swapchain *chain = wsi_wl_surface->chain;
1497 struct wsi_wl_format *f;
1498
1499 dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
1500 wsi_wl_surface->dmabuf_feedback = wsi_wl_surface->pending_dmabuf_feedback;
1501 dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback);
1502
1503 /* It's not just because we received dma-buf feedback that re-allocation is a
1504 * good idea. In order to know if we should re-allocate or not, we must
1505 * compare the most recent parameters that we used to allocate with the ones
1506 * from the feedback we just received.
1507 *
1508 * The allocation parameters are: the format, its set of modifiers and the
1509 * tranche flags. On WSI we are not using the tranche flags for anything, so
1510 * we disconsider this. As we can't switch to another format (it is selected
1511 * by the client), we just need to compare the set of modifiers.
1512 *
1513 * So we just look for the vk_format in the tranches (respecting their
1514 * preferences), and compare its set of modifiers with the set of modifiers
1515 * we've used to allocate previously. If they differ, we are using suboptimal
1516 * parameters and should re-allocate.
1517 */
1518 f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface, chain->vk_format);
1519 if (f && !sets_of_modifiers_are_the_same(u_vector_length(&f->modifiers),
1520 u_vector_tail(&f->modifiers),
1521 chain->num_drm_modifiers,
1522 chain->drm_modifiers))
1523 wsi_wl_surface->chain->suboptimal = true;
1524 }
1525
1526 static const struct zwp_linux_dmabuf_feedback_v1_listener
1527 surface_dmabuf_feedback_listener = {
1528 .format_table = surface_dmabuf_feedback_format_table,
1529 .main_device = surface_dmabuf_feedback_main_device,
1530 .tranche_target_device = surface_dmabuf_feedback_tranche_target_device,
1531 .tranche_flags = surface_dmabuf_feedback_tranche_flags,
1532 .tranche_formats = surface_dmabuf_feedback_tranche_formats,
1533 .tranche_done = surface_dmabuf_feedback_tranche_done,
1534 .done = surface_dmabuf_feedback_done,
1535 };
1536
static VkResult wsi_wl_surface_bind_to_dmabuf_feedback(struct wsi_wl_surface *wsi_wl_surface)
{
   wsi_wl_surface->wl_dmabuf_feedback =
      zwp_linux_dmabuf_v1_get_surface_feedback(wsi_wl_surface->display->wl_dmabuf,
                                               wsi_wl_surface->surface);

   zwp_linux_dmabuf_feedback_v1_add_listener(wsi_wl_surface->wl_dmabuf_feedback,
                                             &surface_dmabuf_feedback_listener,
                                             wsi_wl_surface);

   if (dmabuf_feedback_init(&wsi_wl_surface->dmabuf_feedback) < 0)
      goto fail;
   if (dmabuf_feedback_init(&wsi_wl_surface->pending_dmabuf_feedback) < 0)
      goto fail_pending;

   return VK_SUCCESS;

fail_pending:
   dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
fail:
   zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
   wsi_wl_surface->wl_dmabuf_feedback = NULL;
   return VK_ERROR_OUT_OF_HOST_MEMORY;
}

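/* Lazily connect the surface to its wsi_wl_display and private event queue.
 * This runs on first swapchain creation rather than at surface creation, so
 * a VkSurfaceKHR stays cheap until it is actually used. The wl_surface is
 * wrapped with wl_proxy_create_wrapper() so that events for objects created
 * from it land on our queue instead of the application's default queue. */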
static VkResult wsi_wl_surface_init(struct wsi_wl_surface *wsi_wl_surface,
                                    struct wsi_device *wsi_device)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
   VkResult result;

   /* wsi_wl_surface has already been initialized. */
   if (wsi_wl_surface->display)
      return VK_SUCCESS;

   result = wsi_wl_display_create(wsi, wsi_wl_surface->base.display,
                                  wsi_device->sw, &wsi_wl_surface->display);
   if (result != VK_SUCCESS)
      goto fail;

   wsi_wl_surface->surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
   if (!wsi_wl_surface->surface) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }
   wl_proxy_set_queue((struct wl_proxy *) wsi_wl_surface->surface,
                      wsi_wl_surface->display->queue);

   /* Bind wsi_wl_surface to dma-buf feedback. */
   if (wsi_wl_surface->display->wl_dmabuf &&
       zwp_linux_dmabuf_v1_get_version(wsi_wl_surface->display->wl_dmabuf) >=
       ZWP_LINUX_DMABUF_V1_GET_SURFACE_FEEDBACK_SINCE_VERSION) {
      result = wsi_wl_surface_bind_to_dmabuf_feedback(wsi_wl_surface);
      if (result != VK_SUCCESS)
         goto fail;

      wl_display_roundtrip_queue(wsi_wl_surface->display->wl_display,
                                 wsi_wl_surface->display->queue);
   }

   return VK_SUCCESS;

fail:
   if (wsi_wl_surface->surface)
      wl_proxy_wrapper_destroy(wsi_wl_surface->surface);

   if (wsi_wl_surface->display)
      wsi_wl_display_destroy(wsi_wl_surface->display);
   return result;
}

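/* Entry point for vkCreateWaylandSurfaceKHR. A typical application-side
 * call looks roughly like the sketch below (illustrative only; the
 * wl_display and wl_surface are assumed to come from the application's own
 * Wayland setup):
 *
 *    VkWaylandSurfaceCreateInfoKHR info = {
 *       .sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
 *       .display = wl_display,
 *       .surface = wl_surface,
 *    };
 *    VkSurfaceKHR surface;
 *    vkCreateWaylandSurfaceKHR(instance, &info, NULL, &surface);
 *
 * Note that this only records the display/surface pointers; the Wayland
 * connection state is set up later by wsi_wl_surface_init(). */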
VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateWaylandSurfaceKHR(VkInstance _instance,
                            const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   struct wsi_wl_surface *wsi_wl_surface;
   VkIcdSurfaceWayland *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);

   wsi_wl_surface = vk_zalloc2(&instance->alloc, pAllocator, sizeof *wsi_wl_surface,
                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (wsi_wl_surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface = &wsi_wl_surface->base;

   surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
   surface->display = pCreateInfo->display;
   surface->surface = pCreateInfo->surface;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

struct wsi_wl_present_id {
   struct wp_presentation_feedback *feedback;
   /* Fallback when wp_presentation is not supported.
    * Using a frame callback is not the intended way to achieve this, but it
    * is the best-effort alternative when the proper interface is not
    * available. This approach also matches Xwayland, which uses a frame
    * callback to signal DRI3 COMPLETE. */
   struct wl_callback *frame;
   uint64_t present_id;
   const VkAllocationCallbacks *alloc;
   struct wsi_wl_swapchain *chain;
   struct wl_list link;
};

static struct wsi_image *
wsi_wl_swapchain_get_wsi_image(struct wsi_swapchain *wsi_chain,
                               uint32_t image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

static VkResult
wsi_wl_swapchain_release_images(struct wsi_swapchain *wsi_chain,
                                uint32_t count, const uint32_t *indices)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   for (uint32_t i = 0; i < count; i++) {
      uint32_t index = indices[i];
      assert(chain->images[index].busy);
      chain->images[index].busy = false;
   }
   return VK_SUCCESS;
}

static void
wsi_wl_swapchain_set_present_mode(struct wsi_swapchain *wsi_chain,
                                  VkPresentModeKHR mode)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   chain->base.present_mode = mode;
}

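/* Implements vkWaitForPresentKHR. Multiple threads may wait concurrently;
 * exactly one of them at a time becomes the dispatcher for the present_ids
 * event queue (tracked by dispatch_in_progress), while the others sleep on
 * the list_advanced condition variable until max_completed catches up with
 * their present_id or the timeout expires. */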
static VkResult
wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
                                  uint64_t present_id,
                                  uint64_t timeout)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   struct wl_display *wl_display = chain->wsi_wl_surface->display->wl_display;
   struct timespec end_time;
   int wl_fd = wl_display_get_fd(wl_display);
   VkResult ret;
   int err;

   uint64_t atimeout;
   if (timeout == 0 || timeout == UINT64_MAX)
      atimeout = timeout;
   else
      atimeout = os_time_get_absolute_timeout(timeout);

   /* Need to observe that the swapchain semaphore has been unsignalled,
    * as this is guaranteed when a present is complete. */
   VkResult result = wsi_swapchain_wait_for_present_semaphore(
      &chain->base, present_id, timeout);
   if (result != VK_SUCCESS)
      return result;

   /* When falling back to frame callbacks, guard against situations where
    * the frame callback never fires, e.g. when the surface is not visible.
    * If rendering has completed on the GPU and we still haven't received a
    * callback after 100ms, unblock the application. 100ms is chosen
    * arbitrarily: the queue depth in the Wayland WSI is just one frame due
    * to the frame callback in FIFO mode, so the time from when a frame
    * completes rendering to when it should be considered presented will
    * not exceed 100ms except in contrived edge cases. */
   uint64_t assumed_success_at = UINT64_MAX;
   if (!chain->present_ids.wp_presentation)
      assumed_success_at = os_time_get_absolute_timeout(100 * 1000 * 1000);

   /* If the app's timeout is beyond the deadline we set for the reply,
    * always treat the timeout as successful. */
   VkResult timeout_result = assumed_success_at < atimeout ? VK_SUCCESS : VK_TIMEOUT;
   timespec_from_nsec(&end_time, MIN2(atimeout, assumed_success_at));

   /* PresentWait can be called concurrently.
    * If there is contention on this mutex, it means there is currently a
    * dispatcher in flight holding the lock. The lock is only held while
    * there is forward progress processing events from Wayland, so there
    * should be no problem locking without timeout.
    * We would like to be able to support timeout = 0 to query the current
    * max_completed count. A timedlock with no timeout can be problematic in
    * that scenario. */
   err = pthread_mutex_lock(&chain->present_ids.lock);
   if (err != 0)
      return VK_ERROR_OUT_OF_DATE_KHR;

   if (chain->present_ids.max_completed >= present_id) {
      pthread_mutex_unlock(&chain->present_ids.lock);
      return VK_SUCCESS;
   }

   /* Someone else is dispatching events; wait for them to update the chain
    * status and wake us up. */
   while (chain->present_ids.dispatch_in_progress) {
      /* We only own the lock when the wait succeeds. */
      err = pthread_cond_timedwait(&chain->present_ids.list_advanced,
                                   &chain->present_ids.lock, &end_time);

      if (err == ETIMEDOUT) {
         pthread_mutex_unlock(&chain->present_ids.lock);
         return timeout_result;
      } else if (err != 0) {
         pthread_mutex_unlock(&chain->present_ids.lock);
         return VK_ERROR_OUT_OF_DATE_KHR;
      }

      if (chain->present_ids.max_completed >= present_id) {
         pthread_mutex_unlock(&chain->present_ids.lock);
         return VK_SUCCESS;
      }

      /* Whoever was previously dispatching the events isn't anymore, so we
       * will take over and fall through below. */
      if (!chain->present_ids.dispatch_in_progress)
         break;
   }

   assert(!chain->present_ids.dispatch_in_progress);
   chain->present_ids.dispatch_in_progress = true;

   /* Whether or not we were dispatching the events before, we are now: pull
    * all the new events from our event queue, post them, and wake up
    * everyone else who might be waiting. */
   while (1) {
      ret = wl_display_dispatch_queue_pending(wl_display, chain->present_ids.queue);
      if (ret < 0) {
         ret = VK_ERROR_OUT_OF_DATE_KHR;
         goto relinquish_dispatch;
      }

      /* Some events dispatched: check the new completions. */
      if (ret > 0) {
         /* Completed our own present; stop our own dispatching and let
          * someone else pick it up. */
         if (chain->present_ids.max_completed >= present_id) {
            ret = VK_SUCCESS;
            goto relinquish_dispatch;
         }

         /* Wake up other waiters who may have been unblocked by the events
          * we just read. */
         pthread_cond_broadcast(&chain->present_ids.list_advanced);
      }

      /* Check for timeout, and relinquish the dispatch to another thread
       * if we're over our budget. */
      uint64_t current_time_nsec = os_time_get_nano();
      if (current_time_nsec > atimeout) {
         ret = timeout_result;
         goto relinquish_dispatch;
      }

      /* To poll and read from the Wayland fd safely, we must be cooperative.
       * See wl_display_prepare_read_queue in
       * https://wayland.freedesktop.org/docs/html/apb.html */

      /* Try to read events from the server. */
      ret = wl_display_prepare_read_queue(wl_display, chain->present_ids.queue);
      if (ret < 0) {
         /* Another thread might have read events for our queue already. Go
          * back to dispatch them.
          */
         if (errno == EAGAIN)
            continue;
         ret = VK_ERROR_OUT_OF_DATE_KHR;
         goto relinquish_dispatch;
      }

      /* Drop the lock around poll, so people can wait whilst we sleep. */
      pthread_mutex_unlock(&chain->present_ids.lock);

      struct pollfd pollfd = {
         .fd = wl_fd,
         .events = POLLIN
      };
      struct timespec current_time, rel_timeout;
      timespec_from_nsec(&current_time, current_time_nsec);
      timespec_sub(&rel_timeout, &end_time, &current_time);
      ret = ppoll(&pollfd, 1, &rel_timeout, NULL);

      /* Re-lock after poll; either we're dispatching events under the lock
       * or bouncing out from an error also under the lock. We can't use
       * timedlock here because we need to acquire the lock to clear
       * dispatch_in_progress. */
      pthread_mutex_lock(&chain->present_ids.lock);

      if (ret <= 0) {
         int lerrno = errno;
         wl_display_cancel_read(wl_display);
         if (ret < 0) {
            /* If ppoll() was interrupted, try again. */
            if (lerrno == EINTR || lerrno == EAGAIN)
               continue;
            ret = VK_ERROR_OUT_OF_DATE_KHR;
            goto relinquish_dispatch;
         }
         assert(ret == 0);
         continue;
      }

      ret = wl_display_read_events(wl_display);
      if (ret < 0) {
         ret = VK_ERROR_OUT_OF_DATE_KHR;
         goto relinquish_dispatch;
      }
   }

relinquish_dispatch:
   assert(chain->present_ids.dispatch_in_progress);
   chain->present_ids.dispatch_in_progress = false;
   pthread_cond_broadcast(&chain->present_ids.list_advanced);
   pthread_mutex_unlock(&chain->present_ids.lock);
   return ret;
}

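/* Implements vkAcquireNextImageKHR for the Wayland backend. Images become
 * reusable when the compositor sends wl_buffer.release or when the frontend
 * explicitly releases them, so this simply alternates between scanning for
 * a non-busy image and dispatching events on the surface's queue until one
 * frees up or the timeout expires. */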
static VkResult
wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
                                    const VkAcquireNextImageInfoKHR *info,
                                    uint32_t *image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;
   struct timespec start_time, end_time;
   struct timespec rel_timeout;

   timespec_from_nsec(&rel_timeout, info->timeout);

   clock_gettime(CLOCK_MONOTONIC, &start_time);
   timespec_add(&end_time, &rel_timeout, &start_time);

   while (1) {
      /* Try to find a free image. */
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            *image_index = i;
            chain->images[i].busy = true;
            return (chain->suboptimal ? VK_SUBOPTIMAL_KHR : VK_SUCCESS);
         }
      }

      struct timespec current_time, remaining_timeout;
      clock_gettime(CLOCK_MONOTONIC, &current_time);
      timespec_sub_saturate(&remaining_timeout, &end_time, &current_time);

      /* Try to dispatch potential events. */
      int ret = wl_display_dispatch_queue_timeout(wsi_wl_surface->display->wl_display,
                                                  wsi_wl_surface->display->queue,
                                                  &remaining_timeout);
      if (ret == -1)
         return VK_ERROR_OUT_OF_DATE_KHR;

      /* Check for timeout. */
      if (ret == 0)
         return (info->timeout ? VK_TIMEOUT : VK_NOT_READY);
   }
}

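/* wp_presentation feedback handlers. Both "presented" and "discarded"
 * terminate a feedback object, so either outcome counts as completion for
 * the purposes of VK_KHR_present_wait; sync_output carries no information
 * we need and is deliberately a no-op. */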
static void
presentation_handle_sync_output(void *data,
                                struct wp_presentation_feedback *feedback,
                                struct wl_output *output)
{
}

static void
wsi_wl_presentation_update_present_id(struct wsi_wl_present_id *id)
{
   /* present_ids.lock already held around dispatch */
   if (id->present_id > id->chain->present_ids.max_completed)
      id->chain->present_ids.max_completed = id->present_id;

   wl_list_remove(&id->link);
   vk_free(id->alloc, id);
}

static void
presentation_handle_presented(void *data,
                              struct wp_presentation_feedback *feedback,
                              uint32_t tv_sec_hi, uint32_t tv_sec_lo,
                              uint32_t tv_nsec, uint32_t refresh,
                              uint32_t seq_hi, uint32_t seq_lo,
                              uint32_t flags)
{
   struct wsi_wl_present_id *id = data;
   wsi_wl_presentation_update_present_id(id);
   wp_presentation_feedback_destroy(feedback);
}

static void
presentation_handle_discarded(void *data,
                              struct wp_presentation_feedback *feedback)
{
   struct wsi_wl_present_id *id = data;
   wsi_wl_presentation_update_present_id(id);
   wp_presentation_feedback_destroy(feedback);
}

static const struct wp_presentation_feedback_listener
pres_feedback_listener = {
   presentation_handle_sync_output,
   presentation_handle_presented,
   presentation_handle_discarded,
};

static void
presentation_frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
{
   struct wsi_wl_present_id *id = data;
   wsi_wl_presentation_update_present_id(id);
   wl_callback_destroy(callback);
}

static const struct wl_callback_listener pres_frame_listener = {
   presentation_frame_handle_done,
};

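/* Frame callback used for FIFO pacing on the QueuePresentKHR queue. This is
 * distinct from pres_frame_listener above: that one runs on the present_ids
 * queue as a wp_presentation fallback, while this one marks the chain ready
 * for the next FIFO present. */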
static void
frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
{
   struct wsi_wl_swapchain *chain = data;

   chain->frame = NULL;
   chain->fifo_ready = true;

   wl_callback_destroy(callback);
}

static const struct wl_callback_listener frame_listener = {
   frame_handle_done,
};

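/* Implements vkQueuePresentKHR: copy to SHM if needed, wait for any pending
 * FIFO frame callback, attach the buffer, post damage, arm the next frame
 * callback (FIFO only), register presentation feedback for present_id
 * tracking, then commit and flush. */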
static VkResult
wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
                               uint32_t image_index,
                               uint64_t present_id,
                               const VkPresentRegionKHR *damage)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   struct wsi_wl_surface *wsi_wl_surface = chain->wsi_wl_surface;

   if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
      struct wsi_wl_image *image = &chain->images[image_index];
      memcpy(image->shm_ptr, image->base.cpu_map,
             image->base.row_pitches[0] * chain->extent.height);
   }

   /* For EXT_swapchain_maintenance1: we might have transitioned from FIFO
    * to MAILBOX, in which case we need to let the pending FIFO request
    * complete before presenting in MAILBOX mode. */
   while (!chain->fifo_ready) {
      int ret = wl_display_dispatch_queue(wsi_wl_surface->display->wl_display,
                                          wsi_wl_surface->display->queue);
      if (ret < 0)
         return VK_ERROR_OUT_OF_DATE_KHR;
   }

   assert(image_index < chain->base.image_count);
   wl_surface_attach(wsi_wl_surface->surface, chain->images[image_index].buffer, 0, 0);

   if (wl_surface_get_version(wsi_wl_surface->surface) >= 4 && damage &&
       damage->pRectangles && damage->rectangleCount > 0) {
      for (unsigned i = 0; i < damage->rectangleCount; i++) {
         const VkRectLayerKHR *rect = &damage->pRectangles[i];
         assert(rect->layer == 0);
         wl_surface_damage_buffer(wsi_wl_surface->surface,
                                  rect->offset.x, rect->offset.y,
                                  rect->extent.width, rect->extent.height);
      }
   } else {
      wl_surface_damage(wsi_wl_surface->surface, 0, 0, INT32_MAX, INT32_MAX);
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->frame = wl_surface_frame(wsi_wl_surface->surface);
      wl_callback_add_listener(chain->frame, &frame_listener, chain);
      chain->fifo_ready = false;
   } else {
      /* If we present MAILBOX, any subsequent presentation in FIFO can
       * replace this image. */
      chain->fifo_ready = true;
   }

   if (present_id > 0) {
      struct wsi_wl_present_id *id =
         vk_zalloc(chain->wsi_wl_surface->display->wsi_wl->alloc, sizeof(*id), sizeof(uintptr_t),
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      id->chain = chain;
      id->present_id = present_id;
      id->alloc = chain->wsi_wl_surface->display->wsi_wl->alloc;

      pthread_mutex_lock(&chain->present_ids.lock);

      if (chain->present_ids.wp_presentation) {
         id->feedback = wp_presentation_feedback(chain->present_ids.wp_presentation,
                                                 chain->wsi_wl_surface->surface);
         wp_presentation_feedback_add_listener(id->feedback,
                                               &pres_feedback_listener,
                                               id);
      } else {
         id->frame = wl_surface_frame(chain->present_ids.surface);
         wl_callback_add_listener(id->frame, &pres_frame_listener, id);
      }

      wl_list_insert(&chain->present_ids.outstanding_list, &id->link);
      pthread_mutex_unlock(&chain->present_ids.lock);
   }

   chain->images[image_index].busy = true;
   wl_surface_commit(wsi_wl_surface->surface);
   wl_display_flush(wsi_wl_surface->display->wl_display);

   return VK_SUCCESS;
}

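/* The compositor releases a wl_buffer once it no longer needs it; only then
 * may the image be handed back out by acquire_next_image. */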
static void
buffer_handle_release(void *data, struct wl_buffer *buffer)
{
   struct wsi_wl_image *image = data;

   assert(image->buffer == buffer);

   image->busy = false;
}

static const struct wl_buffer_listener buffer_listener = {
   buffer_handle_release,
};

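/* Back the image with anonymous shared memory that can be passed to the
 * compositor over wl_shm: create an anonymous file of the given size (see
 * os_create_anonymous_file), map it, and record the fd/pointer/size on the
 * image for later wl_shm_pool creation and cleanup. */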
static uint8_t *
wsi_wl_alloc_image_shm(struct wsi_image *imagew, unsigned size)
{
   struct wsi_wl_image *image = (struct wsi_wl_image *)imagew;

   /* Create a shareable buffer */
   int fd = os_create_anonymous_file(size, NULL);
   if (fd < 0)
      return NULL;

   void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
   if (ptr == MAP_FAILED) {
      close(fd);
      return NULL;
   }

   image->shm_fd = fd;
   image->shm_ptr = ptr;
   image->shm_size = size;

   return ptr;
}

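/* Create the per-image wl_buffer. Three paths exist (see the buffer_type
 * selection in wsi_wl_surface_create_swapchain below):
 *  - WSI_WL_BUFFER_SHM_MEMCPY: a private SHM staging buffer that the GPU
 *    image is memcpy'd into at present time;
 *  - WSI_WL_BUFFER_GPU_SHM: the driver presumably renders straight into
 *    host-visible SHM allocated via wsi_wl_alloc_image_shm above;
 *  - WSI_WL_BUFFER_NATIVE: a dma-buf imported through zwp_linux_dmabuf_v1
 *    with explicit format modifiers. */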
static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain *chain,
                  struct wsi_wl_image *image,
                  const VkSwapchainCreateInfoKHR *pCreateInfo,
                  const VkAllocationCallbacks* pAllocator)
{
   struct wsi_wl_display *display = chain->wsi_wl_surface->display;
   VkResult result;

   result = wsi_create_image(&chain->base, &chain->base.image_info,
                             &image->base);
   if (result != VK_SUCCESS)
      return result;

   switch (chain->buffer_type) {
   case WSI_WL_BUFFER_GPU_SHM:
   case WSI_WL_BUFFER_SHM_MEMCPY: {
      if (chain->buffer_type == WSI_WL_BUFFER_SHM_MEMCPY) {
         wsi_wl_alloc_image_shm(&image->base, image->base.row_pitches[0] *
                                              chain->extent.height);
      }
      assert(image->shm_ptr != NULL);

      /* Share it in a wl_buffer */
      struct wl_shm_pool *pool = wl_shm_create_pool(display->wl_shm,
                                                    image->shm_fd,
                                                    image->shm_size);
      wl_proxy_set_queue((struct wl_proxy *)pool, display->queue);
      image->buffer = wl_shm_pool_create_buffer(pool, 0, chain->extent.width,
                                                chain->extent.height,
                                                image->base.row_pitches[0],
                                                chain->shm_format);
      wl_shm_pool_destroy(pool);
      break;
   }

   case WSI_WL_BUFFER_NATIVE: {
      assert(display->wl_dmabuf);

      struct zwp_linux_buffer_params_v1 *params =
         zwp_linux_dmabuf_v1_create_params(display->wl_dmabuf);
      if (!params)
         goto fail_image;

      for (int i = 0; i < image->base.num_planes; i++) {
         zwp_linux_buffer_params_v1_add(params,
                                        image->base.dma_buf_fd,
                                        i,
                                        image->base.offsets[i],
                                        image->base.row_pitches[i],
                                        image->base.drm_modifier >> 32,
                                        image->base.drm_modifier & 0xffffffff);
      }

      image->buffer =
         zwp_linux_buffer_params_v1_create_immed(params,
                                                 chain->extent.width,
                                                 chain->extent.height,
                                                 chain->drm_format,
                                                 0);
      zwp_linux_buffer_params_v1_destroy(params);
      break;
   }

   default:
      unreachable("Invalid buffer type");
   }

   if (!image->buffer)
      goto fail_image;

   wl_buffer_add_listener(image->buffer, &buffer_listener, image);

   return VK_SUCCESS;

fail_image:
   wsi_destroy_image(&chain->base, &image->base);

   return VK_ERROR_OUT_OF_HOST_MEMORY;
}

static void
wsi_wl_swapchain_images_free(struct wsi_wl_swapchain *chain)
{
   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      if (chain->images[i].buffer) {
         wl_buffer_destroy(chain->images[i].buffer);
         wsi_destroy_image(&chain->base, &chain->images[i].base);
         if (chain->images[i].shm_size) {
            close(chain->images[i].shm_fd);
            munmap(chain->images[i].shm_ptr, chain->images[i].shm_size);
         }
      }
   }
}

static void
wsi_wl_swapchain_chain_free(struct wsi_wl_swapchain *chain,
                            const VkAllocationCallbacks *pAllocator)
{
   /* Force wayland-client to release the fds sent during swapchain
    * creation (see MAX_FDS_OUT) to avoid filling up VRAM with
    * released buffers.
    */
   if (chain->wsi_wl_surface)
      wl_display_flush(chain->wsi_wl_surface->display->wl_display);

   if (chain->frame)
      wl_callback_destroy(chain->frame);
   if (chain->tearing_control)
      wp_tearing_control_v1_destroy(chain->tearing_control);
   if (chain->wsi_wl_surface)
      chain->wsi_wl_surface->chain = NULL;

   assert(!chain->present_ids.dispatch_in_progress);

   /* VK_EXT_swapchain_maintenance1 does not require all present IDs to have
    * completed; waiting for the swapchain fence is enough.
    * Just clean up anything the user did not wait for. */
   struct wsi_wl_present_id *id, *tmp;
   wl_list_for_each_safe(id, tmp, &chain->present_ids.outstanding_list, link) {
      if (id->feedback)
         wp_presentation_feedback_destroy(id->feedback);
      if (id->frame)
         wl_callback_destroy(id->frame);
      wl_list_remove(&id->link);
      vk_free(id->alloc, id);
   }

   if (chain->present_ids.wp_presentation)
      wl_proxy_wrapper_destroy(chain->present_ids.wp_presentation);
   if (chain->present_ids.surface)
      wl_proxy_wrapper_destroy(chain->present_ids.surface);
   pthread_cond_destroy(&chain->present_ids.list_advanced);
   pthread_mutex_destroy(&chain->present_ids.lock);

   if (chain->present_ids.queue)
      wl_event_queue_destroy(chain->present_ids.queue);

   vk_free(pAllocator, (void *)chain->drm_modifiers);

   wsi_swapchain_finish(&chain->base);
}

static VkResult
wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
                         const VkAllocationCallbacks *pAllocator)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   wsi_wl_swapchain_images_free(chain);
   wsi_wl_swapchain_chain_free(chain, pAllocator);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

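/* Implements vkCreateSwapchainKHR for Wayland surfaces. The rough sequence:
 * take ownership of the surface from oldSwapchain (if any), lazily connect
 * the surface to the display, pick the buffer path (SHM vs. dma-buf) and
 * modifier set, initialize the base swapchain, set up the present-wait
 * machinery on its own event queue, and finally create the per-image
 * wl_buffers. */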
static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                                VkDevice device,
                                struct wsi_device *wsi_device,
                                const VkSwapchainCreateInfoKHR* pCreateInfo,
                                const VkAllocationCallbacks* pAllocator,
                                struct wsi_swapchain **swapchain_out)
{
   struct wsi_wl_surface *wsi_wl_surface =
      wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
   struct wsi_wl_swapchain *chain;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_zalloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* We are taking ownership of the wsi_wl_surface, so remove ownership from
    * oldSwapchain. If the surface is currently owned by a swapchain that is
    * not oldSwapchain we return an error.
    */
   if (wsi_wl_surface->chain &&
       wsi_swapchain_to_handle(&wsi_wl_surface->chain->base) != pCreateInfo->oldSwapchain) {
      result = VK_ERROR_NATIVE_WINDOW_IN_USE_KHR;
      goto fail;
   }
   if (pCreateInfo->oldSwapchain) {
      VK_FROM_HANDLE(wsi_wl_swapchain, old_chain, pCreateInfo->oldSwapchain);
      old_chain->wsi_wl_surface = NULL;
      if (old_chain->tearing_control) {
         wp_tearing_control_v1_destroy(old_chain->tearing_control);
         old_chain->tearing_control = NULL;
      }
   }

   /* Take ownership of the wsi_wl_surface */
   chain->wsi_wl_surface = wsi_wl_surface;
   wsi_wl_surface->chain = chain;

   result = wsi_wl_surface_init(wsi_wl_surface, wsi_device);
   if (result != VK_SUCCESS)
      goto fail;

   VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
   if (present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
      chain->tearing_control =
         wp_tearing_control_manager_v1_get_tearing_control(wsi_wl_surface->display->tearing_control_manager,
                                                           wsi_wl_surface->surface);
      if (!chain->tearing_control) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }
      wp_tearing_control_v1_set_presentation_hint(chain->tearing_control,
                                                  WP_TEARING_CONTROL_V1_PRESENTATION_HINT_ASYNC);
   }

   enum wsi_wl_buffer_type buffer_type;
   struct wsi_base_image_params *image_params = NULL;
   struct wsi_cpu_image_params cpu_image_params;
   struct wsi_drm_image_params drm_image_params;
   uint32_t num_drm_modifiers = 0;
   const uint64_t *drm_modifiers = NULL;
   if (wsi_device->sw) {
      cpu_image_params = (struct wsi_cpu_image_params) {
         .base.image_type = WSI_IMAGE_TYPE_CPU,
      };
      if (wsi_device->has_import_memory_host &&
          !(WSI_DEBUG & WSI_DEBUG_NOSHM)) {
         buffer_type = WSI_WL_BUFFER_GPU_SHM;
         cpu_image_params.alloc_shm = wsi_wl_alloc_image_shm;
      } else {
         buffer_type = WSI_WL_BUFFER_SHM_MEMCPY;
      }
      image_params = &cpu_image_params.base;
   } else {
      drm_image_params = (struct wsi_drm_image_params) {
         .base.image_type = WSI_IMAGE_TYPE_DRM,
         .same_gpu = wsi_wl_surface->display->same_gpu,
      };
      /* Use explicit DRM format modifiers when both the server and the
       * driver support them.
       */
      if (wsi_wl_surface->display->wl_dmabuf && wsi_device->supports_modifiers) {
         struct wsi_wl_format *f = NULL;
         /* Try to select modifiers for our vk_format from the surface
          * dma-buf feedback. If that doesn't work, fall back to the list of
          * formats and modifiers supported by the display. */
         if (wsi_wl_surface->wl_dmabuf_feedback)
            f = pick_format_from_surface_dmabuf_feedback(wsi_wl_surface,
                                                         pCreateInfo->imageFormat);
         if (f == NULL)
            f = find_format(&chain->wsi_wl_surface->display->formats,
                            pCreateInfo->imageFormat);
         if (f != NULL) {
            num_drm_modifiers = u_vector_length(&f->modifiers);
            drm_modifiers = u_vector_tail(&f->modifiers);
            if (num_drm_modifiers > 0)
               drm_image_params.num_modifier_lists = 1;
            else
               drm_image_params.num_modifier_lists = 0;
            drm_image_params.num_modifiers = &num_drm_modifiers;
            drm_image_params.modifiers = &drm_modifiers;
         }
      }
      buffer_type = WSI_WL_BUFFER_NATIVE;
      image_params = &drm_image_params.base;
   }

2365
2366 result = wsi_swapchain_init(wsi_device, &chain->base, device,
2367 pCreateInfo, image_params, pAllocator);
2368 if (result != VK_SUCCESS)
2369 goto fail;
2370
2371 bool alpha = pCreateInfo->compositeAlpha ==
2372 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
2373
2374 chain->base.destroy = wsi_wl_swapchain_destroy;
2375 chain->base.get_wsi_image = wsi_wl_swapchain_get_wsi_image;
2376 chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
2377 chain->base.queue_present = wsi_wl_swapchain_queue_present;
2378 chain->base.release_images = wsi_wl_swapchain_release_images;
2379 chain->base.set_present_mode = wsi_wl_swapchain_set_present_mode;
2380 chain->base.wait_for_present = wsi_wl_swapchain_wait_for_present;
2381 chain->base.present_mode = present_mode;
2382 chain->base.image_count = num_images;
2383 chain->extent = pCreateInfo->imageExtent;
2384 chain->vk_format = pCreateInfo->imageFormat;
2385 chain->buffer_type = buffer_type;
2386 if (buffer_type == WSI_WL_BUFFER_NATIVE) {
2387 chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);
2388 } else {
2389 chain->shm_format = wl_shm_format_for_vk_format(chain->vk_format, alpha);
2390 }
2391 chain->num_drm_modifiers = num_drm_modifiers;
2392 if (num_drm_modifiers) {
2393 uint64_t *drm_modifiers_copy =
2394 vk_alloc(pAllocator, sizeof(*drm_modifiers) * num_drm_modifiers, 8,
2395 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2396 if (!drm_modifiers_copy) {
2397 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2398 goto fail_free_wl_chain;
2399 }
2400
2401 typed_memcpy(drm_modifiers_copy, drm_modifiers, num_drm_modifiers);
2402 chain->drm_modifiers = drm_modifiers_copy;
2403 }

   if (!wsi_init_pthread_cond_monotonic(&chain->present_ids.list_advanced)) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_free_wl_chain;
   }
   pthread_mutex_init(&chain->present_ids.lock, NULL);

   wl_list_init(&chain->present_ids.outstanding_list);
   chain->present_ids.queue =
      wl_display_create_queue_with_name(chain->wsi_wl_surface->display->wl_display,
                                        "mesa vk swapchain queue");

   if (chain->wsi_wl_surface->display->wp_presentation_notwrapped) {
      chain->present_ids.wp_presentation =
         wl_proxy_create_wrapper(chain->wsi_wl_surface->display->wp_presentation_notwrapped);
      wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.wp_presentation,
                         chain->present_ids.queue);
   } else {
      /* Fall back to frame callbacks when the presentation protocol is not
       * available. We already have a proxy for the surface, but need another
       * one since presentID is pumped through a different queue so as not
       * to disrupt the queue used for QueuePresentKHR's frame callback. */
      chain->present_ids.surface = wl_proxy_create_wrapper(wsi_wl_surface->base.surface);
      wl_proxy_set_queue((struct wl_proxy *) chain->present_ids.surface,
                         chain->present_ids.queue);
   }

   chain->fifo_ready = true;

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      result = wsi_wl_image_init(chain, &chain->images[i],
                                 pCreateInfo, pAllocator);
      if (result != VK_SUCCESS)
         goto fail_free_wl_images;
      chain->images[i].busy = false;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_free_wl_images:
   wsi_wl_swapchain_images_free(chain);
fail_free_wl_chain:
   wsi_wl_swapchain_chain_free(chain, pAllocator);
fail:
   vk_free(pAllocator, chain);
   wsi_wl_surface->chain = NULL;

   assert(result != VK_SUCCESS);
   return result;
}

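/* Register the Wayland WSI backend for a physical device. This fills in the
 * wsi_interface vtable consumed by the common WSI code; per-display state
 * is created lazily when a surface is first used. */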
VkResult
wsi_wl_init_wsi(struct wsi_device *wsi_device,
                const VkAllocationCallbacks *alloc,
                VkPhysicalDevice physical_device)
{
   struct wsi_wayland *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wsi->physical_device = physical_device;
   wsi->alloc = alloc;
   wsi->wsi = wsi_device;

   wsi->base.get_support = wsi_wl_surface_get_support;
   wsi->base.get_capabilities2 = wsi_wl_surface_get_capabilities2;
   wsi->base.get_formats = wsi_wl_surface_get_formats;
   wsi->base.get_formats2 = wsi_wl_surface_get_formats2;
   wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
   wsi->base.get_present_rectangles = wsi_wl_surface_get_present_rectangles;
   wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;

   return VK_SUCCESS;

fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;

   return result;
}

void
wsi_wl_finish_wsi(struct wsi_device *wsi_device,
                  const VkAllocationCallbacks *alloc)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
   if (!wsi)
      return;

   vk_free(alloc, wsi);
}