/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <wayland-client.h>
#include <wayland-drm-client-protocol.h>

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>

#include "wsi_common_wayland.h"

#include <util/hash_table.h>
#include <util/u_vector.h>

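/* memcpy() wrapper that statically asserts the source and destination
 * elements have the same size, so a wrong count can't silently copy the
 * wrong number of bytes.
 */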
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

struct wsi_wayland;

struct wsi_wl_display {
   struct wl_display *display;
   struct wl_drm *drm;

   struct wsi_wayland *wsi_wl;
   /* Vector of VkFormats supported */
   struct u_vector formats;

   uint32_t capabilities;
};

struct wsi_wayland {
   struct wsi_interface base;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;

   pthread_mutex_t mutex;
   /* Hash table of wl_display -> wsi_wl_display mappings */
   struct hash_table *displays;

   const struct wsi_callbacks *cbs;
};

static void
wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
{
   /* Don't add a format that's already in the list */
   VkFormat *f;
   u_vector_foreach(f, &display->formats)
      if (*f == format)
         return;

   /* Don't add formats that aren't renderable. */
   VkFormatProperties props;

   display->wsi_wl->cbs->get_phys_device_format_properties(display->wsi_wl->physical_device,
                                                           format, &props);
   if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
      return;

   f = u_vector_add(&display->formats);
   if (f)
      *f = format;
}

static void
drm_handle_device(void *data, struct wl_drm *drm, const char *name)
{
   fprintf(stderr, "wl_drm.device(%s)\n", name);
}

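/* Maps a Vulkan format to the wl_drm (DRM fourcc) format we pass to the
 * compositor.  'alpha' selects between the ARGB variant, whose alpha channel
 * the compositor will blend with, and the XRGB variant, whose alpha channel
 * is ignored.
 */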
static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   switch (vk_format) {
   /* TODO: Figure out what all the formats mean and make this table
    * correct.
    */
#if 0
   case VK_FORMAT_R4G4B4A4_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR4444 : WL_DRM_FORMAT_XBGR4444;
   case VK_FORMAT_R5G6B5_UNORM:
      return WL_DRM_FORMAT_BGR565;
   case VK_FORMAT_R5G5B5A1_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR1555 : WL_DRM_FORMAT_XBGR1555;
   case VK_FORMAT_R8G8B8_UNORM:
      return WL_DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R8G8B8A8_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR8888 : WL_DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R10G10B10A2_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR2101010 : WL_DRM_FORMAT_XBGR2101010;
   case VK_FORMAT_B4G4R4A4_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB4444 : WL_DRM_FORMAT_XRGB4444;
   case VK_FORMAT_B5G6R5_UNORM:
      return WL_DRM_FORMAT_RGB565;
   case VK_FORMAT_B5G5R5A1_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB1555 : WL_DRM_FORMAT_XRGB1555;
#endif
   case VK_FORMAT_B8G8R8_UNORM:
   case VK_FORMAT_B8G8R8_SRGB:
      return WL_DRM_FORMAT_BGRX8888;
   case VK_FORMAT_B8G8R8A8_UNORM:
   case VK_FORMAT_B8G8R8A8_SRGB:
      return alpha ? WL_DRM_FORMAT_ARGB8888 : WL_DRM_FORMAT_XRGB8888;
#if 0
   case VK_FORMAT_B10G10R10A2_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB2101010 : WL_DRM_FORMAT_XRGB2101010;
#endif

   default:
      assert(!"Unsupported Vulkan format");
      return 0;
   }
}

static void
drm_handle_format(void *data, struct wl_drm *drm, uint32_t wl_format)
{
   struct wsi_wl_display *display = data;

   switch (wl_format) {
#if 0
   case WL_DRM_FORMAT_ABGR4444:
   case WL_DRM_FORMAT_XBGR4444:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R4G4B4A4_UNORM);
      break;
   case WL_DRM_FORMAT_BGR565:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G6B5_UNORM);
      break;
   case WL_DRM_FORMAT_ABGR1555:
   case WL_DRM_FORMAT_XBGR1555:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G5B5A1_UNORM);
      break;
   case WL_DRM_FORMAT_XBGR8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8_UNORM);
      /* fallthrough */
   case WL_DRM_FORMAT_ABGR8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8A8_UNORM);
      break;
   case WL_DRM_FORMAT_ABGR2101010:
   case WL_DRM_FORMAT_XBGR2101010:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R10G10B10A2_UNORM);
      break;
   case WL_DRM_FORMAT_ARGB4444:
   case WL_DRM_FORMAT_XRGB4444:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B4G4R4A4_UNORM);
      break;
   case WL_DRM_FORMAT_RGB565:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G6R5_UNORM);
      break;
   case WL_DRM_FORMAT_ARGB1555:
   case WL_DRM_FORMAT_XRGB1555:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G5R5A1_UNORM);
      break;
#endif
   case WL_DRM_FORMAT_XRGB8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_SRGB);
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_UNORM);
      /* fallthrough */
   case WL_DRM_FORMAT_ARGB8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_SRGB);
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_UNORM);
      break;
#if 0
   case WL_DRM_FORMAT_ARGB2101010:
   case WL_DRM_FORMAT_XRGB2101010:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B10G10R10A2_UNORM);
      break;
#endif
   }
}

static void
drm_handle_authenticated(void *data, struct wl_drm *drm)
{
}

static void
drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t capabilities)
{
   struct wsi_wl_display *display = data;

   display->capabilities = capabilities;
}

static const struct wl_drm_listener drm_listener = {
   drm_handle_device,
   drm_handle_format,
   drm_handle_authenticated,
   drm_handle_capabilities,
};

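/* Registry listener: the only global we care about is wl_drm.  We bind
 * version 2, the first version that advertises capabilities (we require
 * PRIME below) and provides create_prime_buffer.
 */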
static void
registry_handle_global(void *data, struct wl_registry *registry,
                       uint32_t name, const char *interface, uint32_t version)
{
   struct wsi_wl_display *display = data;

   if (strcmp(interface, "wl_drm") == 0) {
      assert(display->drm == NULL);

      assert(version >= 2);
      display->drm = wl_registry_bind(registry, name, &wl_drm_interface, 2);

      if (display->drm)
         wl_drm_add_listener(display->drm, &drm_listener, display);
   }
}

static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{ /* No-op */ }

static const struct wl_registry_listener registry_listener = {
   registry_handle_global,
   registry_handle_global_remove
};

static void
wsi_wl_display_destroy(struct wsi_wayland *wsi, struct wsi_wl_display *display)
{
   u_vector_finish(&display->formats);
   if (display->drm)
      wl_drm_destroy(display->drm);
   vk_free(wsi->alloc, display);
}

static struct wsi_wl_display *
wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display)
{
   struct wl_registry *registry = NULL;
   struct wsi_wl_display *display =
      vk_alloc(wsi->alloc, sizeof(*display), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!display)
      return NULL;

   memset(display, 0, sizeof(*display));

   display->display = wl_display;
   display->wsi_wl = wsi;

   if (!u_vector_init(&display->formats, sizeof(VkFormat), 8))
      goto fail;

   registry = wl_display_get_registry(wl_display);
   if (!registry)
      goto fail;

   wl_registry_add_listener(registry, &registry_listener, display);

   /* Round-trip to get the wl_drm global */
   wl_display_roundtrip(wl_display);

   if (!display->drm)
      goto fail;

   /* Round-trip to get wl_drm formats and capabilities */
   wl_display_roundtrip(wl_display);

   /* We need prime support */
   if (!(display->capabilities & WL_DRM_CAPABILITY_PRIME))
      goto fail;

   /* We don't need this anymore */
   wl_registry_destroy(registry);

   return display;

fail:
   if (registry)
      wl_registry_destroy(registry);

   wsi_wl_display_destroy(wsi, display);
   return NULL;
}

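/* Returns the wsi_wl_display for a wl_display, creating it on first use.
 * wsi_wl_display_create() makes blocking roundtrips, so it runs with the
 * mutex dropped; if two threads race to create the same display, the
 * loser's copy is destroyed and the entry already in the hash table wins.
 */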
static struct wsi_wl_display *
wsi_wl_get_display(struct wsi_device *wsi_device,
                   struct wl_display *wl_display)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->displays,
                                                      wl_display);
   if (!entry) {
      /* We're about to make a bunch of blocking calls. Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_wl_display *display = wsi_wl_display_create(wsi, wl_display);
      if (!display)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->displays, wl_display);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_wl_display_destroy(wsi, display);
      } else {
         entry = _mesa_hash_table_insert(wsi->displays, wl_display, display);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

VkBool32
wsi_wl_get_presentation_support(struct wsi_device *wsi_device,
                                struct wl_display *wl_display)
{
   return wsi_wl_get_display(wsi_device, wl_display) != NULL;
}

static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
                           struct wsi_device *wsi_device,
                           const VkAllocationCallbacks *alloc,
                           uint32_t queueFamilyIndex,
                           VkBool32* pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

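/* Present modes this backend can implement on top of wl_surface.attach and
 * commit: MAILBOX by always keeping a spare non-busy image to render to,
 * and FIFO by throttling presents on wl_surface.frame callbacks (see
 * wsi_wl_swapchain_queue_present).
 */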
static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
                                VkSurfaceCapabilitiesKHR* caps)
{
   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the Wayland compositor
    *  4) One to render to
    */
   caps->minImageCount = 4;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->currentExtent = (VkExtent2D) { -1, -1 };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   /* This is the maximum supported size on Intel */
   caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                           struct wsi_device *wsi_device,
                           uint32_t* pSurfaceFormatCount,
                           VkSurfaceFormatKHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wl_display *display =
      wsi_wl_get_display(wsi_device, surface->display);
   if (!display)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = u_vector_length(&display->formats);
      return VK_SUCCESS;
   }

   uint32_t count = 0;
   VkFormat *f;
   u_vector_foreach(f, &display->formats) {
      if (count == *pSurfaceFormatCount)
         return VK_INCOMPLETE;

      pSurfaceFormats[count++] = (VkSurfaceFormatKHR) {
         .format = *f,
         /* TODO: We should get this from the compositor somehow */
         .colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
      };
   }

   assert(count <= *pSurfaceFormatCount);
   *pSurfaceFormatCount = count;

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *surface,
                                 uint32_t* pPresentModeCount,
                                 VkPresentModeKHR* pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   if (*pPresentModeCount < ARRAY_SIZE(present_modes))
      return VK_INCOMPLETE;
   else
      return VK_SUCCESS;
}

VkResult wsi_create_wl_surface(const VkAllocationCallbacks *pAllocator,
                               const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                               VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceWayland *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
   surface->display = pCreateInfo->display;
   surface->surface = pCreateInfo->surface;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

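/* Sketch (illustrative only, not part of this file): a driver's
 * vkCreateWaylandSurfaceKHR entrypoint would typically just pick the right
 * allocator and forward to wsi_create_wl_surface() above, roughly:
 *
 *    VkResult driver_CreateWaylandSurfaceKHR(VkInstance instance,
 *                                            const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
 *                                            const VkAllocationCallbacks *pAllocator,
 *                                            VkSurfaceKHR *pSurface)
 *    {
 *       const VkAllocationCallbacks *alloc =
 *          pAllocator ? pAllocator : instance_allocator(instance);
 *       return wsi_create_wl_surface(alloc, pCreateInfo, pSurface);
 *    }
 *
 * where driver_CreateWaylandSurfaceKHR and instance_allocator() stand in for
 * whatever the driver actually uses.
 */
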
struct wsi_wl_image {
   VkImage image;
   VkDeviceMemory memory;
   struct wl_buffer *buffer;
   bool busy;
};

struct wsi_wl_swapchain {
   struct wsi_swapchain base;

   struct wsi_wl_display *display;
   struct wl_event_queue *queue;
   struct wl_surface *surface;

   VkExtent2D extent;
   VkFormat vk_format;
   uint32_t drm_format;

   VkPresentModeKHR present_mode;
   bool fifo_ready;

   uint32_t image_count;
   struct wsi_wl_image images[0];
};

static VkResult
wsi_wl_swapchain_get_images(struct wsi_swapchain *wsi_chain,
                            uint32_t *pCount, VkImage *pSwapchainImages)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   uint32_t ret_count;
   VkResult result;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   result = VK_SUCCESS;
   ret_count = chain->image_count;
   if (chain->image_count > *pCount) {
      ret_count = *pCount;
      result = VK_INCOMPLETE;
   }

   for (uint32_t i = 0; i < ret_count; i++)
      pSwapchainImages[i] = chain->images[i].image;

   return result;
}

static VkResult
wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
                                    uint64_t timeout,
                                    VkSemaphore semaphore,
                                    uint32_t *image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   int ret = wl_display_dispatch_queue_pending(chain->display->display,
                                               chain->queue);
   /* XXX: I'm not sure if out-of-date is the right error here. If
    * wl_display_dispatch_queue_pending fails it most likely means we got
    * kicked by the server so this seems more-or-less correct.
    */
   if (ret < 0)
      return VK_ERROR_OUT_OF_DATE_KHR;

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      /* This time we do a blocking dispatch because we can't go
       * anywhere until we get an event.
       */
      int ret = wl_display_roundtrip_queue(chain->display->display,
                                           chain->queue);
      if (ret < 0)
         return VK_ERROR_OUT_OF_DATE_KHR;
   }
}

static void
frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
{
   struct wsi_wl_swapchain *chain = data;

   chain->fifo_ready = true;

   wl_callback_destroy(callback);
}

static const struct wl_callback_listener frame_listener = {
   frame_handle_done,
};

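/* Presents an image.  In FIFO mode we first block until the frame callback
 * from the previous present fires, then attach/damage/commit the buffer and
 * install a new frame callback on the swapchain's queue so the next present
 * throttles on this one.
 */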
static VkResult
wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
                               uint32_t image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      while (!chain->fifo_ready) {
         int ret = wl_display_dispatch_queue(chain->display->display,
                                             chain->queue);
         if (ret < 0)
            return VK_ERROR_OUT_OF_DATE_KHR;
      }
   }

   assert(image_index < chain->image_count);
   wl_surface_attach(chain->surface, chain->images[image_index].buffer, 0, 0);
   wl_surface_damage(chain->surface, 0, 0, INT32_MAX, INT32_MAX);

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      struct wl_callback *frame = wl_surface_frame(chain->surface);
      wl_proxy_set_queue((struct wl_proxy *)frame, chain->queue);
      wl_callback_add_listener(frame, &frame_listener, chain);
      chain->fifo_ready = false;
   }

   chain->images[image_index].busy = true;
   wl_surface_commit(chain->surface);
   wl_display_flush(chain->display->display);

   return VK_SUCCESS;
}

static void
buffer_handle_release(void *data, struct wl_buffer *buffer)
{
   struct wsi_wl_image *image = data;

   assert(image->buffer == buffer);

   image->busy = false;
}

static const struct wl_buffer_listener buffer_listener = {
   buffer_handle_release,
};

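/* Initializes one swapchain image: the driver allocates an exportable image
 * and returns a dma-buf fd, which we wrap in a wl_buffer with
 * wl_drm.create_prime_buffer.  The wl_buffer's release event clears the busy
 * flag so acquire_next_image can hand the image out again.
 */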
static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain *chain,
                  struct wsi_wl_image *image,
                  const VkSwapchainCreateInfoKHR *pCreateInfo,
                  const VkAllocationCallbacks* pAllocator)
{
   VkDevice vk_device = chain->base.device;
   VkResult result;
   int fd;
   uint32_t size;
   uint32_t row_pitch;
   uint32_t offset;
   result = chain->base.image_fns->create_wsi_image(vk_device,
                                                    pCreateInfo,
                                                    pAllocator,
                                                    &image->image,
                                                    &image->memory,
                                                    &size,
                                                    &offset,
                                                    &row_pitch,
                                                    &fd);
   if (result != VK_SUCCESS)
      return result;

   image->buffer = wl_drm_create_prime_buffer(chain->display->drm,
                                              fd, /* name */
                                              chain->extent.width,
                                              chain->extent.height,
                                              chain->drm_format,
                                              offset,
                                              row_pitch,
                                              0, 0, 0, 0 /* unused */);
   wl_display_roundtrip(chain->display->display);
   close(fd);

   if (!image->buffer) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_image;
   }

   wl_proxy_set_queue((struct wl_proxy *)image->buffer, chain->queue);
   wl_buffer_add_listener(image->buffer, &buffer_listener, image);

   return VK_SUCCESS;

fail_image:
   chain->base.image_fns->free_wsi_image(vk_device, pAllocator,
                                         image->image, image->memory);

   return result;
}

static VkResult
wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
                         const VkAllocationCallbacks *pAllocator)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   for (uint32_t i = 0; i < chain->image_count; i++) {
      if (chain->images[i].buffer) {
         wl_buffer_destroy(chain->images[i].buffer);
         chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
                                               chain->images[i].image,
                                               chain->images[i].memory);
      }
   }

   if (chain->queue)
      wl_event_queue_destroy(chain->queue);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

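/* Creates the swapchain.  The wsi_wl_image array lives inline at the end of
 * the struct, and the swapchain gets its own wl_event_queue so its buffer
 * release and frame-callback events can be dispatched without consuming
 * events that belong to the application's default queue.
 */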
static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                                VkDevice device,
                                struct wsi_device *wsi_device,
                                const VkSwapchainCreateInfoKHR* pCreateInfo,
                                const VkAllocationCallbacks* pAllocator,
                                const struct wsi_image_fns *image_fns,
                                struct wsi_swapchain **swapchain_out)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wl_swapchain *chain;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   bool alpha = pCreateInfo->compositeAlpha ==
                VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   chain->base.device = device;
   chain->base.destroy = wsi_wl_swapchain_destroy;
   chain->base.get_images = wsi_wl_swapchain_get_images;
   chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
   chain->base.queue_present = wsi_wl_swapchain_queue_present;
   chain->base.image_fns = image_fns;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->surface = surface->surface;
   chain->extent = pCreateInfo->imageExtent;
   chain->vk_format = pCreateInfo->imageFormat;
   chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);

   chain->fifo_ready = true;

   chain->image_count = num_images;

   /* Mark a bunch of stuff as NULL. This way we can just call
    * destroy_swapchain for cleanup.
    */
   for (uint32_t i = 0; i < chain->image_count; i++)
      chain->images[i].buffer = NULL;
   chain->queue = NULL;

   chain->display = wsi_wl_get_display(wsi_device,
                                       surface->display);
   if (!chain->display) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail;
   }

   chain->queue = wl_display_create_queue(chain->display->display);
   if (!chain->queue) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail;
   }

   for (uint32_t i = 0; i < chain->image_count; i++) {
      result = wsi_wl_image_init(chain, &chain->images[i],
                                 pCreateInfo, pAllocator);
      if (result != VK_SUCCESS)
         goto fail;
      chain->images[i].busy = false;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail:
   wsi_wl_swapchain_destroy(&chain->base, pAllocator);

   return result;
}

VkResult
wsi_wl_init_wsi(struct wsi_device *wsi_device,
                const VkAllocationCallbacks *alloc,
                VkPhysicalDevice physical_device,
                const struct wsi_callbacks *cbs)
{
   struct wsi_wayland *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wsi->physical_device = physical_device;
   wsi->alloc = alloc;
   wsi->cbs = cbs;
   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->displays = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                           _mesa_key_pointer_equal);
   if (!wsi->displays) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = wsi_wl_surface_get_support;
   wsi->base.get_capabilities = wsi_wl_surface_get_capabilities;
   wsi->base.get_formats = wsi_wl_surface_get_formats;
   wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
   wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);

fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;

   return result;
}

void
wsi_wl_finish_wsi(struct wsi_device *wsi_device,
                  const VkAllocationCallbacks *alloc)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   if (wsi) {
      struct hash_entry *entry;
      hash_table_foreach(wsi->displays, entry)
         wsi_wl_display_destroy(wsi, entry->data);

      _mesa_hash_table_destroy(wsi->displays, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}