1 /*
2 * Copyright © 2013 Keith Packard
3 * Copyright © 2015 Boyan Ding
4 *
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that copyright
8 * notice and this permission notice appear in supporting documentation, and
9 * that the name of the copyright holders not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. The copyright holders make no representations
12 * about the suitability of this software for any purpose. It is provided "as
13 * is" without express or implied warranty.
14 *
15 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21 * OF THIS SOFTWARE.
22 */
23
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27 #include <string.h>
28
29 #include <X11/xshmfence.h>
30 #include <xcb/xcb.h>
31 #include <xcb/dri3.h>
32 #include <xcb/present.h>
33 #include <xcb/xfixes.h>
34
35 #include <X11/Xlib-xcb.h>
36
37 #include "loader_dri_helper.h"
38 #include "loader_dri3_helper.h"
39 #include "util/macros.h"
40 #include "util/simple_mtx.h"
41 #include "drm-uapi/drm_fourcc.h"
42
/**
 * A cached blit context.
 *
 * Shared, lazily-created DRI context used for blitImage operations when the
 * caller has no suitable current context. All fields are protected by \c mtx,
 * which is held for the whole get/put interval (see
 * loader_dri3_blit_context_get / loader_dri3_blit_context_put).
 */
struct loader_dri3_blit_context {
   simple_mtx_t mtx;                /* Guards the fields below */
   __DRIcontext *ctx;               /* Cached context, NULL if none created yet */
   __DRIscreen *cur_screen;         /* Screen the cached context was created for */
   const __DRIcoreExtension *core;  /* Core extension used to destroy ctx */
};

/* For simplicity we maintain the cache only for a single screen at a time */
static struct loader_dri3_blit_context blit_context = {
   SIMPLE_MTX_INITIALIZER, NULL
};
57
/* Forward declarations for helpers that are defined later in this file. */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw);

static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable *draw);
63
64 static xcb_screen_t *
get_screen_for_root(xcb_connection_t * conn,xcb_window_t root)65 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
66 {
67 xcb_screen_iterator_t screen_iter =
68 xcb_setup_roots_iterator(xcb_get_setup(conn));
69
70 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
71 if (screen_iter.data->root == root)
72 return screen_iter.data;
73 }
74
75 return NULL;
76 }
77
78 static xcb_visualtype_t *
get_xcb_visualtype_for_depth(struct loader_dri3_drawable * draw,int depth)79 get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
80 {
81 xcb_visualtype_iterator_t visual_iter;
82 xcb_screen_t *screen = draw->screen;
83 xcb_depth_iterator_t depth_iter;
84
85 if (!screen)
86 return NULL;
87
88 depth_iter = xcb_screen_allowed_depths_iterator(screen);
89 for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
90 if (depth_iter.data->depth != depth)
91 continue;
92
93 visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
94 if (visual_iter.rem)
95 return visual_iter.data;
96 }
97
98 return NULL;
99 }
100
101 /* Sets the adaptive sync window property state. */
102 static void
set_adaptive_sync_property(xcb_connection_t * conn,xcb_drawable_t drawable,uint32_t state)103 set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
104 uint32_t state)
105 {
106 static char const name[] = "_VARIABLE_REFRESH";
107 xcb_intern_atom_cookie_t cookie;
108 xcb_intern_atom_reply_t* reply;
109 xcb_void_cookie_t check;
110
111 cookie = xcb_intern_atom(conn, 0, strlen(name), name);
112 reply = xcb_intern_atom_reply(conn, cookie, NULL);
113 if (reply == NULL)
114 return;
115
116 if (state)
117 check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
118 drawable, reply->atom,
119 XCB_ATOM_CARDINAL, 32, 1, &state);
120 else
121 check = xcb_delete_property_checked(conn, drawable, reply->atom);
122
123 xcb_discard_reply(conn, check.sequence);
124 free(reply);
125 }
126
127 /* Get red channel mask for given drawable at given depth. */
128 static unsigned int
dri3_get_red_mask_for_depth(struct loader_dri3_drawable * draw,int depth)129 dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
130 {
131 xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
132
133 if (visual)
134 return visual->red_mask;
135
136 return 0;
137 }
138
139 /**
140 * Do we have blit functionality in the image blit extension?
141 *
142 * \param draw[in] The drawable intended to blit from / to.
143 * \return true if we have blit functionality. false otherwise.
144 */
loader_dri3_have_image_blit(const struct loader_dri3_drawable * draw)145 static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
146 {
147 return draw->ext->image->base.version >= 9 &&
148 draw->ext->image->blitImage != NULL;
149 }
150
/**
 * Get and lock (for use with the current thread) a dri context associated
 * with the drawable's dri screen. The context is intended to be used with
 * the dri image extension's blitImage method.
 *
 * \param draw[in] Pointer to the drawable whose dri screen we want a
 * dri context for.
 * \return A dri context or NULL if context creation failed.
 *
 * When the caller is done with the context (even if the context returned was
 * NULL), the caller must call loader_dri3_blit_context_put.
 */
static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
{
   /* The mutex stays held until loader_dri3_blit_context_put(), serializing
    * all users of the shared cached context.
    */
   simple_mtx_lock(&blit_context.mtx);

   /* The cache only covers one screen at a time; drop a context that was
    * created for a different screen.
    */
   if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen_render_gpu) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }

   if (!blit_context.ctx) {
      /* Note: cur_screen/core are recorded even if createNewContext fails;
       * ctx stays NULL in that case, so the next call simply retries.
       */
      blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen_render_gpu,
                                                           NULL, NULL, NULL);
      blit_context.cur_screen = draw->dri_screen_render_gpu;
      blit_context.core = draw->ext->core;
   }

   return blit_context.ctx;
}
182
/**
 * Release (for use with other threads) a dri context previously obtained using
 * loader_dri3_blit_context_get.
 *
 * Must be called exactly once per loader_dri3_blit_context_get() call, even
 * when that call returned NULL, as it releases the blit-context mutex.
 */
static void
loader_dri3_blit_context_put(void)
{
   simple_mtx_unlock(&blit_context.mtx);
}
192
/**
 * Blit (parts of) the contents of a DRI image to another dri image
 *
 * \param draw[in] The drawable which owns the images.
 * \param dst[in] The destination image.
 * \param src[in] The source image.
 * \param dstx0[in] Start destination coordinate.
 * \param dsty0[in] Start destination coordinate.
 * \param width[in] Blit width.
 * \param height[in] Blit height.
 * \param srcx0[in] Start source coordinate.
 * \param srcy0[in] Start source coordinate.
 * \param flush_flag[in] Image blit flush flag.
 * \return true iff successful.
 */
static bool
loader_dri3_blit_image(struct loader_dri3_drawable *draw,
                       __DRIimage *dst, __DRIimage *src,
                       int dstx0, int dsty0, int width, int height,
                       int srcx0, int srcy0, int flush_flag)
{
   __DRIcontext *dri_context;
   bool use_blit_context = false;

   if (!loader_dri3_have_image_blit(draw))
      return false;

   dri_context = draw->vtable->get_dri_context(draw);

   /* Fall back to the shared cached blit context when there is no current
    * context for this thread; in that case a flush is forced since no other
    * flush will happen on that context.
    */
   if (!dri_context || !draw->vtable->in_current_context(draw)) {
      dri_context = loader_dri3_blit_context_get(draw);
      use_blit_context = true;
      flush_flag |= __BLIT_FLAG_FLUSH;
   }

   /* Source and destination rectangles use the same width/height (1:1 blit,
    * no scaling).
    */
   if (dri_context)
      draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
                                  width, height, srcx0, srcy0,
                                  width, height, flush_flag);

   /* Must be released even if context creation failed (it holds a mutex). */
   if (use_blit_context)
      loader_dri3_blit_context_put();

   return dri_context != NULL;
}
238
/* Reset the shared-memory fence to the untriggered state.
 * The connection parameter is unused but kept for symmetry with the other
 * dri3_fence_* helpers.
 */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}
244
/* Trigger the buffer's fence locally (client side) via xshmfence. */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}
250
/* Ask the X server to trigger the buffer's sync fence. */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}
256
/* Block until the buffer's shared-memory fence is triggered.
 *
 * Flushes the connection first so any pending trigger request reaches the
 * server. If \p draw is non-NULL, pending Present events are drained
 * afterwards (under the drawable mutex).
 */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
                 struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
   if (draw) {
      mtx_lock(&draw->mtx);
      dri3_flush_present_events(draw);
      mtx_unlock(&draw->mtx);
   }
}
269
270 static void
dri3_update_max_num_back(struct loader_dri3_drawable * draw)271 dri3_update_max_num_back(struct loader_dri3_drawable *draw)
272 {
273 switch (draw->last_present_mode) {
274 case XCB_PRESENT_COMPLETE_MODE_FLIP: {
275 if (draw->swap_interval == 0)
276 draw->max_num_back = 4;
277 else
278 draw->max_num_back = 3;
279
280 assert(draw->max_num_back <= LOADER_DRI3_MAX_BACK);
281 break;
282 }
283
284 case XCB_PRESENT_COMPLETE_MODE_SKIP:
285 break;
286
287 default:
288 draw->max_num_back = 2;
289 }
290 }
291
/* Set the drawable's swap interval, draining pending swaps first if the
 * interval actually changes.
 */
void
loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
{
   /* Wait for all previous swaps to complete before changing the swap
    * interval.
    *
    * This is for preventing swap out of order in the following cases:
    * 1. Change from sync swap mode (>0) to async mode (=0), so async swap occurs
    *    before previous pending sync swap.
    * 2. Change from value A to B and A > B, so the target_msc for the previous
    *    pending swap may be bigger than newer swap.
    *
    * PS. changing from value A to B and A < B won't cause swap out of order but
    * may still get a wrong target_msc value at the beginning.
    */
   if (draw->swap_interval != interval)
      loader_dri3_swapbuffer_barrier(draw);

   draw->swap_interval = interval;
}
311
312 static void
dri3_set_render_buffer(struct loader_dri3_drawable * draw,int buf_id,struct loader_dri3_buffer * buffer)313 dri3_set_render_buffer(struct loader_dri3_drawable *draw, int buf_id,
314 struct loader_dri3_buffer *buffer)
315 {
316 if (buf_id != LOADER_DRI3_FRONT_ID && !draw->buffers[buf_id])
317 draw->cur_num_back++;
318
319 draw->buffers[buf_id] = buffer;
320 }
321
/** dri3_free_render_buffer
 *
 * Free everything associated with one render buffer including pixmap, fence
 * stuff and the driver image
 *
 * Clears the slot and decrements the live back-buffer count for back
 * buffers. Safe to call on an empty slot.
 */
static void
dri3_free_render_buffer(struct loader_dri3_drawable *draw,
                        int buf_id)
{
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];

   if (!buffer)
      return;

   /* The pixmap may be owned by the server (e.g. front buffer); only free
    * it when this buffer created it.
    */
   if (buffer->own_pixmap)
      xcb_free_pixmap(draw->conn, buffer->pixmap);
   xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
   xshmfence_unmap_shm(buffer->shm_fence);
   draw->ext->image->destroyImage(buffer->image);
   if (buffer->linear_buffer)
      draw->ext->image->destroyImage(buffer->linear_buffer);
   free(buffer);

   draw->buffers[buf_id] = NULL;

   if (buf_id != LOADER_DRI3_FRONT_ID)
      draw->cur_num_back--;
}
350
/* Tear down a drawable initialized by loader_dri3_drawable_init: destroys
 * the DRI drawable, frees all render buffers, unsubscribes from Present
 * events and destroys the synchronization primitives.
 */
void
loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
{
   int i;

   draw->ext->core->destroyDrawable(draw->dri_drawable);

   for (i = 0; i < ARRAY_SIZE(draw->buffers); i++)
      dri3_free_render_buffer(draw, i);

   if (draw->special_event) {
      /* Stop Present event delivery before unregistering; the error reply
       * (if any) is deliberately discarded.
       */
      xcb_void_cookie_t cookie =
         xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
                                          XCB_PRESENT_EVENT_MASK_NO_EVENT);

      xcb_discard_reply(draw->conn, cookie.sequence);
      xcb_unregister_for_special_event(draw->conn, draw->special_event);
   }

   if (draw->region)
      xcb_xfixes_destroy_region(draw->conn, draw->region);

   cnd_destroy(&draw->event_cnd);
   mtx_destroy(&draw->mtx);
}
376
377 int
loader_dri3_drawable_init(xcb_connection_t * conn,xcb_drawable_t drawable,enum loader_dri3_drawable_type type,__DRIscreen * dri_screen_render_gpu,__DRIscreen * dri_screen_display_gpu,bool multiplanes_available,bool prefer_back_buffer_reuse,const __DRIconfig * dri_config,struct loader_dri3_extensions * ext,const struct loader_dri3_vtable * vtable,struct loader_dri3_drawable * draw)378 loader_dri3_drawable_init(xcb_connection_t *conn,
379 xcb_drawable_t drawable,
380 enum loader_dri3_drawable_type type,
381 __DRIscreen *dri_screen_render_gpu,
382 __DRIscreen *dri_screen_display_gpu,
383 bool multiplanes_available,
384 bool prefer_back_buffer_reuse,
385 const __DRIconfig *dri_config,
386 struct loader_dri3_extensions *ext,
387 const struct loader_dri3_vtable *vtable,
388 struct loader_dri3_drawable *draw)
389 {
390 xcb_get_geometry_cookie_t cookie;
391 xcb_get_geometry_reply_t *reply;
392 xcb_generic_error_t *error;
393
394 draw->conn = conn;
395 draw->ext = ext;
396 draw->vtable = vtable;
397 draw->drawable = drawable;
398 draw->type = type;
399 draw->region = 0;
400 draw->dri_screen_render_gpu = dri_screen_render_gpu;
401 draw->dri_screen_display_gpu = dri_screen_display_gpu;
402 draw->multiplanes_available = multiplanes_available;
403 draw->prefer_back_buffer_reuse = prefer_back_buffer_reuse;
404 draw->queries_buffer_age = false;
405
406 draw->have_back = 0;
407 draw->have_fake_front = 0;
408 draw->first_init = true;
409 draw->adaptive_sync = false;
410 draw->adaptive_sync_active = false;
411 draw->block_on_depleted_buffers = false;
412
413 draw->cur_blit_source = -1;
414 draw->back_format = __DRI_IMAGE_FORMAT_NONE;
415 mtx_init(&draw->mtx, mtx_plain);
416 cnd_init(&draw->event_cnd);
417
418 if (draw->ext->config) {
419 unsigned char adaptive_sync = 0;
420 unsigned char block_on_depleted_buffers = 0;
421
422 draw->ext->config->configQueryb(draw->dri_screen_render_gpu,
423 "adaptive_sync",
424 &adaptive_sync);
425
426 draw->adaptive_sync = adaptive_sync;
427
428 draw->ext->config->configQueryb(draw->dri_screen_render_gpu,
429 "block_on_depleted_buffers",
430 &block_on_depleted_buffers);
431
432 draw->block_on_depleted_buffers = block_on_depleted_buffers;
433 }
434
435 if (!draw->adaptive_sync)
436 set_adaptive_sync_property(conn, draw->drawable, false);
437
438 draw->swap_interval = dri_get_initial_swap_interval(draw->dri_screen_render_gpu,
439 draw->ext->config);
440
441 dri3_update_max_num_back(draw);
442
443 /* Create a new drawable */
444 draw->dri_drawable =
445 draw->ext->image_driver->createNewDrawable(dri_screen_render_gpu,
446 dri_config,
447 draw);
448
449 if (!draw->dri_drawable)
450 return 1;
451
452 cookie = xcb_get_geometry(draw->conn, draw->drawable);
453 reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
454 if (reply == NULL || error != NULL) {
455 draw->ext->core->destroyDrawable(draw->dri_drawable);
456 return 1;
457 }
458
459 draw->screen = get_screen_for_root(draw->conn, reply->root);
460 draw->width = reply->width;
461 draw->height = reply->height;
462 draw->depth = reply->depth;
463 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
464 free(reply);
465
466 /*
467 * Make sure server has the same swap interval we do for the new
468 * drawable.
469 */
470 loader_dri3_set_swap_interval(draw, draw->swap_interval);
471
472 return 0;
473 }
474
475 /* XXX this belongs in presentproto */
476 #ifndef PresentWindowDestroyed
477 #define PresentWindowDestroyed (1 << 0)
478 #endif
/*
 * Process one Present event
 *
 * Takes ownership of \p ge (it is freed on every path). Returns false when
 * the window has been destroyed, true otherwise.
 */
static bool
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *ce = (void *) ge;
      if (ce->pixmap_flags & PresentWindowDestroyed) {
         free(ge);
         return false;
      }

      /* Track the new drawable size and invalidate the driver's buffers. */
      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      draw->ext->flush->invalidate(draw->dri_drawable);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;

         /* Only assume wraparound if that results in exactly the previous
          * SBC + 1, otherwise ignore received SBC > sent SBC (those are
          * probably from a previous loader_dri3_drawable instance) to avoid
          * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
          */
         if (recv_sbc <= draw->send_sbc)
            draw->recv_sbc = recv_sbc;
         else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
            draw->recv_sbc = recv_sbc - 0x100000000ULL;

         /* When moving from flip to copy, we assume that we can allocate in
          * a more optimal way if we don't need to cater for the display
          * controller.
          */
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
             draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }

         /* If the server tells us that our allocation is suboptimal, we
          * reallocate once.
          */
#ifdef HAVE_DRI3_MODIFIERS
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
             draw->last_present_mode != ce->mode) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }
#endif
         draw->last_present_mode = ce->mode;

         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else if (ce->serial == draw->eid) {
         /* NOTIFY_MSC completion for this drawable's event id. */
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      /* The server is done with this pixmap; mark the matching buffer idle. */
      for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap)
            buf->busy = 0;
      }
      break;
   }
   }
   free(ge);
   return true;
}
570
/* Wait for and process one Present special event.
 *
 * Must be called with draw->mtx held; the mutex is released while blocking
 * in xcb_wait_for_special_event() and re-acquired before returning. Only one
 * thread waits on the X connection at a time; others block on the condition
 * variable and are woken when the waiter is done.
 *
 * If \p full_sequence is non-NULL it receives the X sequence number of the
 * last processed special event. Returns false if the wait failed or the
 * window was destroyed.
 */
static bool
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
                           unsigned *full_sequence)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   xcb_flush(draw->conn);

   /* Only have one thread waiting for events at a time */
   if (draw->has_event_waiter) {
      cnd_wait(&draw->event_cnd, &draw->mtx);
      if (full_sequence)
         *full_sequence = draw->last_special_event_sequence;
      /* Another thread has updated the protected info, so retest. */
      return true;
   } else {
      draw->has_event_waiter = true;
      /* Allow other threads access to the drawable while we're waiting. */
      mtx_unlock(&draw->mtx);
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      mtx_lock(&draw->mtx);
      draw->has_event_waiter = false;
      cnd_broadcast(&draw->event_cnd);
   }
   if (!ev)
      return false;
   draw->last_special_event_sequence = ev->full_sequence;
   if (full_sequence)
      *full_sequence = ev->full_sequence;
   ge = (void *) ev;
   return dri3_handle_present_event(draw, ge);
}
604
/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 *
 * Blocks processing Present events until the NOTIFY_MSC completion for this
 * request arrives. On success fills *ust, *msc, *sbc and returns true;
 * returns false if event waiting fails (e.g. connection error).
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
                                                     draw->drawable,
                                                     draw->eid,
                                                     target_msc,
                                                     divisor,
                                                     remainder);
   unsigned full_sequence;

   mtx_lock(&draw->mtx);

   /* Wait for the event; matching the cookie's sequence number identifies
    * the completion of this particular request.
    */
   do {
      if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
         mtx_unlock(&draw->mtx);
         return false;
      }
   } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);

   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);

   return true;
}
641
/** loader_dri3_wait_for_sbc
 *
 * Wait for the completed swap buffer count to reach the specified
 * target. Presumably the application knows that this will be reached with
 * outstanding complete events, or we're going to be here awhile.
 *
 * Returns 1 on success (filling *ust, *msc, *sbc), 0 on failure.
 */
int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
                         int64_t target_sbc, int64_t *ust,
                         int64_t *msc, int64_t *sbc)
{
   /* From the GLX_OML_sync_control spec:
    *
    *     "If <target_sbc> = 0, the function will block until all previous
    *      swaps requested with glXSwapBuffersMscOML for that window have
    *      completed."
    */
   mtx_lock(&draw->mtx);
   if (!target_sbc)
      target_sbc = draw->send_sbc;

   while (draw->recv_sbc < target_sbc) {
      if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return 0;
      }
   }

   *ust = draw->ust;
   *msc = draw->msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);
   return 1;
}
676
/** dri3_find_back
 *
 * Find an idle back buffer. If there isn't one, then
 * wait for a present idle notify event from the X server
 *
 * Returns the chosen back-buffer id (also stored in draw->cur_back), or -1
 * on failure.
 */
static int
dri3_find_back(struct loader_dri3_drawable *draw, bool prefer_a_different)
{
   struct loader_dri3_buffer *buffer;
   int b;
   int max_num;
   int best_id = -1;
   uint64_t best_swap = 0;

   mtx_lock(&draw->mtx);

   if (!prefer_a_different) {
      /* Increase the likelihood of reusing current buffer */
      dri3_flush_present_events(draw);

      /* Reuse current back buffer if it's idle */
      buffer = draw->buffers[draw->cur_back];
      if (buffer && !buffer->busy) {
         best_id = draw->cur_back;
         goto unlock;
      }
   }

   /* Check whether we need to reuse the current back buffer as new back.
    * In that case, wait until it's not busy anymore.
    */
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
      max_num = 1;
      draw->cur_blit_source = -1;
   } else {
      max_num = LOADER_DRI3_MAX_BACK;
   }

   /* In a DRI_PRIME situation, if prefer_a_different is true, we first try
    * to find an idle buffer that is not the last used one.
    * This is useful if we receive a XCB_PRESENT_EVENT_IDLE_NOTIFY event
    * for a pixmap but it's not actually idle (eg: the DRI_PRIME blit is
    * still in progress).
    * Unigine Superposition hits this and this allows to use 2 back buffers
    * instead of reusing the same one all the time, causing the next frame
    * to wait for the copy to finish.
    */
   int current_back_id = draw->cur_back;
   do {
      /* Find idle buffer with lowest buffer age, or an unallocated slot */
      for (b = 0; b < max_num; b++) {
         int id = LOADER_DRI3_BACK_ID((b + current_back_id) % LOADER_DRI3_MAX_BACK);

         buffer = draw->buffers[id];
         if (buffer) {
            if (!buffer->busy &&
                (!prefer_a_different || id != current_back_id) &&
                (best_id == -1 || buffer->last_swap > best_swap)) {
               best_id = id;
               best_swap = buffer->last_swap;
            }
         } else if (best_id == -1 &&
                    draw->cur_num_back < draw->max_num_back) {
            best_id = id;
         }
      }

      /* Prefer re-using the same buffer over blocking */
      /* NOTE(review): this dereferences the current back slot without a NULL
       * check; presumably prefer_a_different implies the slot is allocated —
       * confirm against callers.
       */
      if (prefer_a_different && best_id == -1 &&
          !draw->buffers[LOADER_DRI3_BACK_ID(current_back_id)]->busy)
         best_id = current_back_id;
   } while (best_id == -1 && dri3_wait_for_event_locked(draw, NULL));

   if (best_id != -1)
      draw->cur_back = best_id;

unlock:
   mtx_unlock(&draw->mtx);
   return best_id;
}
757
758 static xcb_gcontext_t
dri3_drawable_gc(struct loader_dri3_drawable * draw)759 dri3_drawable_gc(struct loader_dri3_drawable *draw)
760 {
761 if (!draw->gc) {
762 uint32_t v = 0;
763 xcb_create_gc(draw->conn,
764 (draw->gc = xcb_generate_id(draw->conn)),
765 draw->drawable,
766 XCB_GC_GRAPHICS_EXPOSURES,
767 &v);
768 }
769 return draw->gc;
770 }
771
772
/* Return the current back buffer (may be NULL if not yet allocated). */
static struct loader_dri3_buffer *
dri3_back_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
}
778
/* Return the (fake) front buffer (may be NULL if not yet allocated). */
static struct loader_dri3_buffer *
dri3_front_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_FRONT_ID];
}
784
785 static void
dri3_copy_area(xcb_connection_t * c,xcb_drawable_t src_drawable,xcb_drawable_t dst_drawable,xcb_gcontext_t gc,int16_t src_x,int16_t src_y,int16_t dst_x,int16_t dst_y,uint16_t width,uint16_t height)786 dri3_copy_area(xcb_connection_t *c,
787 xcb_drawable_t src_drawable,
788 xcb_drawable_t dst_drawable,
789 xcb_gcontext_t gc,
790 int16_t src_x,
791 int16_t src_y,
792 int16_t dst_x,
793 int16_t dst_y,
794 uint16_t width,
795 uint16_t height)
796 {
797 xcb_void_cookie_t cookie;
798
799 cookie = xcb_copy_area_checked(c,
800 src_drawable,
801 dst_drawable,
802 gc,
803 src_x,
804 src_y,
805 dst_x,
806 dst_y,
807 width,
808 height);
809 xcb_discard_reply(c, cookie.sequence);
810 }
811
/**
 * Asks the driver to flush any queued work necessary for serializing with the
 * X command stream, and optionally the slightly more strict requirement of
 * glFlush() equivalence (which would require flushing even if nothing had
 * been drawn to a window system framebuffer, for example).
 */
void
loader_dri3_flush(struct loader_dri3_drawable *draw,
                  unsigned flags,
                  enum __DRI2throttleReason throttle_reason)
{
   /* There may be no current context for this thread, in which case
    * get_dri_context() returns NULL and the flush is skipped.
    */
   __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);

   if (dri_context) {
      draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
                                         flags, throttle_reason);
   }
}
831
/* Copy a sub-rectangle of the back buffer to the window, and keep the fake
 * front (if any) in sync. Only valid for window drawables with a back
 * buffer.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);

   back = dri3_find_back_alloc(draw);
   if (!back)
      return;

   /* Flip y: the caller passes a bottom-left origin, the X protocol uses
    * top-left.
    */
   y = draw->height - y - height;

   if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   loader_dri3_swapbuffer_barrier(draw);
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  back->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front &&
       !loader_dri3_blit_image(draw,
                               dri3_front_buffer(draw)->image,
                               back->image,
                               x, y, width, height,
                               x, y, __BLIT_FLAG_FLUSH) &&
       draw->dri_screen_render_gpu == draw->dri_screen_display_gpu) {
      /* Image blit failed (or is unsupported): fall back to a server-side
       * pixmap copy into the fake front.
       */
      dri3_fence_reset(draw->conn, dri3_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     back->pixmap,
                     dri3_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_front_buffer(draw));
      dri3_fence_await(draw->conn, NULL, dri3_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, draw, back);
}
895
/* Server-side copy of the whole drawable area from \p src to \p dest,
 * synchronized against the front buffer's fence when a front exists.
 */
void
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
                          xcb_drawable_t dest,
                          xcb_drawable_t src)
{
   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);

   struct loader_dri3_buffer *front = dri3_front_buffer(draw);
   if (front)
      dri3_fence_reset(draw->conn, front);

   dri3_copy_area(draw->conn,
                  src, dest,
                  dri3_drawable_gc(draw),
                  0, 0, 0, 0, draw->width, draw->height);

   if (front) {
      dri3_fence_trigger(draw->conn, front);
      dri3_fence_await(draw->conn, draw, front);
   }
}
917
918 void
loader_dri3_wait_x(struct loader_dri3_drawable * draw)919 loader_dri3_wait_x(struct loader_dri3_drawable *draw)
920 {
921 struct loader_dri3_buffer *front;
922
923 if (draw == NULL || !draw->have_fake_front)
924 return;
925
926 front = dri3_front_buffer(draw);
927
928 loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);
929
930 /* In the psc->is_different_gpu case, the linear buffer has been updated,
931 * but not yet the tiled buffer.
932 * Copy back to the tiled buffer we use for rendering.
933 * Note that we don't need flushing.
934 */
935 if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
936 (void) loader_dri3_blit_image(draw,
937 front->image,
938 front->linear_buffer,
939 0, 0, front->width, front->height,
940 0, 0, 0);
941 }
942
/* Flush rendering to the real front buffer (glXWaitGL semantics): copies
 * the fake front to the window. No-op when the drawable has no fake front.
 */
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_front_buffer(draw);
   /* TODO: `front` is not supposed to be NULL here, fix the actual bug
    * https://gitlab.freedesktop.org/mesa/mesa/-/issues/8982
    */
   if (!front)
      return;

   /* In the psc->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->linear_buffer,
                                    front->image,
                                    0, 0, front->width, front->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   loader_dri3_swapbuffer_barrier(draw);
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}
970
/** dri3_flush_present_events
 *
 * Process any present events that have been received from the X server
 *
 * Must be called with draw->mtx held. Does nothing while another thread is
 * blocked in xcb_wait_for_special_event (that thread will process the
 * events). Stops early if an event reports the window was destroyed.
 */
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw)
{
   /* Check to see if any configuration changes have occurred
    * since we were last invoked
    */
   if (draw->has_event_waiter)
      return;

   if (draw->special_event) {
      xcb_generic_event_t *ev;

      while ((ev = xcb_poll_for_special_event(draw->conn,
                                              draw->special_event)) != NULL) {
         xcb_present_generic_event_t *ge = (void *) ev;
         if (!dri3_handle_present_event(draw, ge))
            break;
      }
   }
}
995
/** loader_dri3_swap_buffers_msc
 *
 * Make the current back buffer visible using the present extension
 *
 * \param draw        the drawable being swapped
 * \param target_msc  earliest MSC at which to present;
 *                    target_msc == divisor == remainder == 0 requests
 *                    glXSwapBuffers() semantics
 * \param divisor     MSC divisor (GLX_OML_sync_control semantics)
 * \param remainder   MSC remainder (GLX_OML_sync_control semantics)
 * \param flush_flags forwarded to the vtable's flush_drawable hook
 * \param rects       optional damage rectangles, (x, y, w, h) quadruples in
 *                    GL's bottom-up coordinates (flipped to X's top-down
 *                    convention below)
 * \param n_rects     number of rectangles in \p rects
 * \param force_copy  preserve the current back buffer's contents across the
 *                    swap (used for EGL buffer preservation)
 * \return the swap buffer count (sbc) of this swap, or 0 when the swap is a
 *         no-op or an error occurred
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             const int *rects, int n_rects,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   int64_t ret = 0;
   bool wait_for_next_buffer = false;

   /* GLX spec:
    *   void glXSwapBuffers(Display *dpy, GLXDrawable draw);
    *   This operation is a no-op if draw was created with a non-double-buffered
    *   GLXFBConfig, or if draw is a GLXPixmap.
    *   ...
    *   GLX pixmaps may be created with a config that includes back buffers and
    *   stereoscopic buffers. However, glXSwapBuffers is ignored for these pixmaps.
    *   ...
    *   It is possible to create a pbuffer with back buffers and to swap the
    *   front and back buffers by calling glXSwapBuffers.
    *
    * EGL spec:
    *   EGLBoolean eglSwapBuffers(EGLDisplay dpy, EGLSurface surface);
    *   If surface is a back-buffered window surface, then the color buffer is
    *   copied to the native window associated with that surface. If surface is
    *   a single-buffered window, pixmap, or pbuffer surface, eglSwapBuffers has
    *   no effect.
    *
    * SwapBuffer effect:
    *       |           GLX             |           EGL            |
    *       | window | pixmap | pbuffer | window | pixmap | pbuffer|
    *-------+--------+--------+---------+--------+--------+--------+
    * single|   nop  |  nop   |   nop   |   nop  |   nop  |   nop  |
    * double|  swap  |  nop   |   swap  |  swap  |   NA   |   NA   |
    */
   if (!draw->have_back || draw->type == LOADER_DRI3_DRAWABLE_PIXMAP)
      return ret;

   draw->vtable->flush_drawable(draw, flush_flags);

   back = dri3_find_back_alloc(draw);
   /* Could only happen when error case, like display is already closed. */
   if (!back)
      return ret;

   mtx_lock(&draw->mtx);

   /* Ask the server to enable adaptive sync on the first swap, if the
    * drawable requested it.
    */
   if (draw->adaptive_sync && !draw->adaptive_sync_active) {
      set_adaptive_sync_property(draw->conn, draw->drawable, true);
      draw->adaptive_sync_active = true;
   }

   if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu) {
      /* Update the linear buffer before presenting the pixmap */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   /* If we need to preload the new back buffer, remember the source.
    * The force_copy parameter is used by EGL to attempt to preserve
    * the back buffer across a call to this function.
    */
   if (force_copy)
      draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);

   /* Exchange the back and fake front. Even though the server knows about these
    * buffers, it has no notion of back and fake front.
    */
   if (draw->have_fake_front) {
      struct loader_dri3_buffer *tmp;

      tmp = dri3_front_buffer(draw);
      draw->buffers[LOADER_DRI3_FRONT_ID] = back;
      draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;

      /* After the exchange, the data to preserve lives in the (old back,
       * now front) slot.
       */
      if (force_copy)
         draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
   }

   dri3_flush_present_events(draw);

   if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
      /* Mark the buffer busy until the server signals the fence again. */
      dri3_fence_reset(draw->conn, back);

      /* Compute when we want the frame shown by taking the last known
       * successful MSC and adding in a swap interval for each outstanding swap
       * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
       * semantic"
       */
      ++draw->send_sbc;
      if (target_msc == 0 && divisor == 0 && remainder == 0)
         target_msc = draw->msc + abs(draw->swap_interval) *
                      (draw->send_sbc - draw->recv_sbc);
      else if (divisor == 0 && remainder > 0) {
         /* From the GLX_OML_sync_control spec:
          *     "If <divisor> = 0, the swap will occur when MSC becomes
          *      greater than or equal to <target_msc>."
          *
          * Note that there's no mention of the remainder.  The Present
          * extension throws BadValue for remainder != 0 with divisor == 0, so
          * just drop the passed in value.
          */
         remainder = 0;
      }

      /* From the GLX_EXT_swap_control spec
       * and the EGL 1.4 spec (page 53):
       *
       *     "If <interval> is set to a value of 0, buffer swaps are not
       *      synchronized to a video frame."
       *
       * From GLX_EXT_swap_control_tear:
       *
       *     "If <interval> is negative, the minimum number of video frames
       *      between buffer swaps is the absolute value of <interval>. In this
       *      case, if abs(<interval>) video frames have already passed from
       *      the previous swap when the swap is ready to be performed, the
       *      swap will occur without synchronization to a video frame."
       *
       * Implementation note: It is possible to enable triple buffering
       * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
       * the default.
       */
      uint32_t options = XCB_PRESENT_OPTION_NONE;
      if (draw->swap_interval <= 0)
         options |= XCB_PRESENT_OPTION_ASYNC;

      /* If we need to populate the new back, but need to reuse the back
       * buffer slot due to lack of local blit capabilities, make sure
       * the server doesn't flip and we deadlock.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1)
         options |= XCB_PRESENT_OPTION_COPY;
#ifdef HAVE_DRI3_MODIFIERS
      if (draw->multiplanes_available)
         options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
      back->busy = 1;
      back->last_swap = draw->send_sbc;

      /* Lazily create the XFixes region object reused for damage updates. */
      if (!draw->region) {
         draw->region = xcb_generate_id(draw->conn);
         xcb_xfixes_create_region(draw->conn, draw->region, 0, NULL);
      }

      xcb_xfixes_region_t region = 0;
      xcb_rectangle_t xcb_rects[64];

      /* Damage rects beyond the stack array's capacity fall back to
       * full-drawable updates (region stays 0).
       */
      if (n_rects > 0 && n_rects <= ARRAY_SIZE(xcb_rects)) {
         for (int i = 0; i < n_rects; i++) {
            const int *rect = &rects[i * 4];
            xcb_rects[i].x = rect[0];
            /* Flip from GL's bottom-up to X's top-down coordinates. */
            xcb_rects[i].y = draw->height - rect[1] - rect[3];
            xcb_rects[i].width = rect[2];
            xcb_rects[i].height = rect[3];
         }

         region = draw->region;
         xcb_xfixes_set_region(draw->conn, region, n_rects, xcb_rects);
      }

      xcb_present_pixmap(draw->conn,
                         draw->drawable,
                         back->pixmap,
                         (uint32_t) draw->send_sbc,
                         0,                                    /* valid */
                         region,                               /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         None,                                 /* target_crtc */
                         None,
                         back->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   } else {
      /* This can only be reached by double buffered GLXPbuffer. */
      assert(draw->type == LOADER_DRI3_DRAWABLE_PBUFFER);
      /* GLX does not have damage regions. */
      assert(n_rects == 0);

      /* For wait and buffer age usage. */
      draw->send_sbc++;
      draw->recv_sbc = back->last_swap = draw->send_sbc;

      /* Pixmap is imported as front buffer image when same GPU case, so just
       * locally blit back buffer image to it is enough. Otherwise front buffer
       * is a fake one which needs to be synced with pixmap by xserver remotely.
       */
      if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu ||
          !loader_dri3_blit_image(draw,
                                  dri3_front_buffer(draw)->image,
                                  back->image,
                                  0, 0, draw->width, draw->height,
                                  0, 0, __BLIT_FLAG_FLUSH)) {
         dri3_copy_area(draw->conn, back->pixmap,
                        draw->drawable,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0, draw->width, draw->height);
      }
   }

   ret = (int64_t) draw->send_sbc;

   /* Schedule a server-side back-preserving blit if necessary.
    * This happens iff all conditions below are satisfied:
    * a) We have a fake front,
    * b) We need to preserve the back buffer,
    * c) We don't have local blit capabilities.
    */
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 &&
       draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
      struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
      struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];

      dri3_fence_reset(draw->conn, new_back);
      dri3_copy_area(draw->conn, src->pixmap,
                     new_back->pixmap,
                     dri3_drawable_gc(draw),
                     0, 0, 0, 0, draw->width, draw->height);
      dri3_fence_trigger(draw->conn, new_back);
      new_back->last_swap = src->last_swap;
   }

   xcb_flush(draw->conn);
   if (draw->stamp)
      ++(*draw->stamp);

   /* Waiting on a buffer is only sensible if all buffers are in use and the
    * client doesn't use the buffer age extension. In this case a client is
    * relying on it receiving back control immediately.
    *
    * As waiting on a buffer can at worst make us miss a frame the option has
    * to be enabled explicitly with the block_on_depleted_buffers DRI option.
    */
   wait_for_next_buffer = draw->cur_num_back == draw->max_num_back &&
      !draw->queries_buffer_age && draw->block_on_depleted_buffers;

   mtx_unlock(&draw->mtx);

   draw->ext->flush->invalidate(draw->dri_drawable);

   /* Clients that use up all available buffers usually regulate their drawing
    * through swapchain contention backpressure. In such a scenario the client
    * draws whenever control returns to it. Its event loop is slowed down only
    * by us waiting on buffers becoming available again.
    *
    * By waiting here on a new buffer and only then returning back to the client
    * we ensure the client begins drawing only when the next buffer is available
    * and not draw first and then wait a refresh cycle on the next available
    * buffer to show it. This way we can reduce the latency between what is
    * being drawn by the client and what is shown on the screen by one frame.
    */
   if (wait_for_next_buffer)
      dri3_find_back(draw, draw->prefer_back_buffer_reuse);

   return ret;
}
1263
1264 int
loader_dri3_query_buffer_age(struct loader_dri3_drawable * draw)1265 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1266 {
1267 struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1268 int ret = 0;
1269
1270 mtx_lock(&draw->mtx);
1271 draw->queries_buffer_age = true;
1272 if (back && back->last_swap != 0)
1273 ret = draw->send_sbc - back->last_swap + 1;
1274 mtx_unlock(&draw->mtx);
1275
1276 return ret;
1277 }
1278
1279 /** loader_dri3_open
1280 *
1281 * Wrapper around xcb_dri3_open
1282 */
1283 int
loader_dri3_open(xcb_connection_t * conn,xcb_window_t root,uint32_t provider)1284 loader_dri3_open(xcb_connection_t *conn,
1285 xcb_window_t root,
1286 uint32_t provider)
1287 {
1288 xcb_dri3_open_cookie_t cookie;
1289 xcb_dri3_open_reply_t *reply;
1290 xcb_xfixes_query_version_cookie_t fixes_cookie;
1291 xcb_xfixes_query_version_reply_t *fixes_reply;
1292 int fd;
1293
1294 cookie = xcb_dri3_open(conn,
1295 root,
1296 provider);
1297
1298 reply = xcb_dri3_open_reply(conn, cookie, NULL);
1299
1300 if (!reply || reply->nfd != 1) {
1301 free(reply);
1302 return -1;
1303 }
1304
1305 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
1306 free(reply);
1307 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
1308
1309 /* let the server know our xfixes level */
1310 fixes_cookie = xcb_xfixes_query_version(conn,
1311 XCB_XFIXES_MAJOR_VERSION,
1312 XCB_XFIXES_MINOR_VERSION);
1313 fixes_reply = xcb_xfixes_query_version_reply(conn, fixes_cookie, NULL);
1314 free(fixes_reply);
1315
1316 return fd;
1317 }
1318
/* Return the bytes-per-pixel of a __DRI_IMAGE_FORMAT_* code,
 * or 0 for __DRI_IMAGE_FORMAT_NONE and unrecognized formats.
 */
static uint32_t
dri3_cpp_for_format(uint32_t format) {
   switch (format) {
   case  __DRI_IMAGE_FORMAT_R8:
      return 1;
   case  __DRI_IMAGE_FORMAT_RGB565:
   case  __DRI_IMAGE_FORMAT_GR88:
      return 2;
   case  __DRI_IMAGE_FORMAT_XRGB8888:
   case  __DRI_IMAGE_FORMAT_ARGB8888:
   case  __DRI_IMAGE_FORMAT_ABGR8888:
   case  __DRI_IMAGE_FORMAT_XBGR8888:
   case  __DRI_IMAGE_FORMAT_XRGB2101010:
   case  __DRI_IMAGE_FORMAT_ARGB2101010:
   case  __DRI_IMAGE_FORMAT_XBGR2101010:
   case  __DRI_IMAGE_FORMAT_ABGR2101010:
   case  __DRI_IMAGE_FORMAT_SARGB8:
   case  __DRI_IMAGE_FORMAT_SABGR8:
   case  __DRI_IMAGE_FORMAT_SXRGB8:
      return 4;
   case __DRI_IMAGE_FORMAT_ABGR16161616:
   case __DRI_IMAGE_FORMAT_XBGR16161616:
   case __DRI_IMAGE_FORMAT_XBGR16161616F:
   case __DRI_IMAGE_FORMAT_ABGR16161616F:
      return 8;
   case  __DRI_IMAGE_FORMAT_NONE:
   default:
      return 0;
   }
}
1349
1350 /* Map format of render buffer to corresponding format for the linear_buffer
1351 * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1352 * Usually linear_format == format, except for depth >= 30 formats, where
1353 * different gpu vendors have different preferences wrt. color channel ordering.
1354 */
1355 static uint32_t
dri3_linear_format_for_format(struct loader_dri3_drawable * draw,uint32_t format)1356 dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1357 {
1358 switch (format) {
1359 case __DRI_IMAGE_FORMAT_XRGB2101010:
1360 case __DRI_IMAGE_FORMAT_XBGR2101010:
1361 /* Different preferred formats for different hw */
1362 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1363 return __DRI_IMAGE_FORMAT_XBGR2101010;
1364 else
1365 return __DRI_IMAGE_FORMAT_XRGB2101010;
1366
1367 case __DRI_IMAGE_FORMAT_ARGB2101010:
1368 case __DRI_IMAGE_FORMAT_ABGR2101010:
1369 /* Different preferred formats for different hw */
1370 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1371 return __DRI_IMAGE_FORMAT_ABGR2101010;
1372 else
1373 return __DRI_IMAGE_FORMAT_ARGB2101010;
1374
1375 default:
1376 return format;
1377 }
1378 }
1379
#ifdef HAVE_DRI3_MODIFIERS
/* Return true when the render GPU supports at least one of the modifiers
 * in the caller-provided list for the given fourcc format.
 */
static bool
has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
                       uint64_t *modifiers, uint32_t count)
{
   uint64_t *supported;
   int32_t supported_count;
   bool found = false;

   /* First pass: ask only for the number of driver-supported modifiers. */
   if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen_render_gpu,
                                               format, 0, NULL, NULL,
                                               &supported_count) ||
       supported_count == 0)
      return false;

   supported = malloc(supported_count * sizeof(uint64_t));
   if (!supported)
      return false;

   /* Second pass: fetch the actual modifier list. */
   draw->ext->image->queryDmaBufModifiers(draw->dri_screen_render_gpu, format,
                                          supported_count, supported, NULL,
                                          &supported_count);

   /* Check for any overlap between the two lists. */
   for (int i = 0; i < supported_count && !found; i++) {
      for (int j = 0; j < count && !found; j++) {
         if (supported[i] == modifiers[j])
            found = true;
      }
   }

   free(supported);
   return found;
}
#endif
1416
/** loader_dri3_alloc_render_buffer
 *
 * Use the driver createImage function to construct a __DRIimage, then
 * get a file descriptor for that and create an X pixmap from that
 *
 * Allocate an xshmfence for synchronization
 *
 * \param draw    drawable the buffer is allocated for
 * \param format  a __DRI_IMAGE_FORMAT_* code
 * \param width   buffer width in pixels
 * \param height  buffer height in pixels
 * \param depth   X depth used when creating the pixmap
 * \return the new buffer, or NULL on failure
 */
static struct loader_dri3_buffer *
dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
                         int width, int height, int depth)
{
   struct loader_dri3_buffer *buffer;
   __DRIimage *pixmap_buffer = NULL, *linear_buffer_display_gpu = NULL;
   xcb_pixmap_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int buffer_fds[4], fence_fd;
   int num_planes = 0;
   uint64_t *modifiers = NULL;
   uint32_t count = 0;
   int i, mod;
   int ret;

   /* Create an xshmfence object and
    * prepare to send that to the X server
    */

   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      return NULL;

   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL)
      goto no_shm_fence;

   /* Allocate the image from the driver
    */
   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   buffer->cpp = dri3_cpp_for_format(format);
   if (!buffer->cpp)
      goto no_image;

   if (draw->dri_screen_render_gpu == draw->dri_screen_display_gpu) {
#ifdef HAVE_DRI3_MODIFIERS
      if (draw->multiplanes_available &&
          draw->ext->image->base.version >= 15 &&
          draw->ext->image->queryDmaBufModifiers &&
          draw->ext->image->createImageWithModifiers) {
         xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
         xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
         xcb_generic_error_t *error = NULL;

         /* Ask the server for the modifier lists of this window/screen.
          * NOTE(review): if the reply fails, 'error' may have been
          * allocated and is not freed on the no_image path — confirm xcb
          * ownership semantics.
          */
         mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
                                                       draw->window,
                                                       depth, buffer->cpp * 8);
         mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
                                                            mod_cookie,
                                                            &error);
         if (!mod_reply)
            goto no_image;

         /* Prefer the window-specific modifier list, but only use it when
          * the render GPU supports at least one of its entries.
          */
         if (mod_reply->num_window_modifiers) {
            count = mod_reply->num_window_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
                   count * sizeof(uint64_t));

            if (!has_supported_modifier(draw, loader_image_format_to_fourcc(format),
                                        modifiers, count)) {
               free(modifiers);
               count = 0;
               modifiers = NULL;
            }
         }

         /* Fall back to the screen-wide modifier list. */
         if (mod_reply->num_screen_modifiers && modifiers == NULL) {
            count = mod_reply->num_screen_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
                   count * sizeof(uint64_t));
         }

         free(mod_reply);
      }
#endif
      buffer->image = loader_dri_create_image(draw->dri_screen_render_gpu, draw->ext->image,
                                              width, height, format,
                                              __DRI_IMAGE_USE_SHARE |
                                              __DRI_IMAGE_USE_SCANOUT |
                                              __DRI_IMAGE_USE_BACKBUFFER |
                                              (draw->is_protected_content ?
                                                 __DRI_IMAGE_USE_PROTECTED : 0),
                                              modifiers, count, buffer);
      free(modifiers);

      pixmap_buffer = buffer->image;

      if (!buffer->image)
         goto no_image;
   } else {
      /* Prime case: allocate the rendering image on the render GPU with
       * driver-chosen (non-linear) layout.
       */
      buffer->image = draw->ext->image->createImage(draw->dri_screen_render_gpu,
                                                    width, height,
                                                    format,
                                                    0,
                                                    buffer);

      if (!buffer->image)
         goto no_image;

      /* if driver name is same only then dri_screen_display_gpu is set.
       * This check is needed because for simplicity render gpu image extension
       * is also used for display gpu.
       */
      if (draw->dri_screen_display_gpu) {
         linear_buffer_display_gpu =
            draw->ext->image->createImage(draw->dri_screen_display_gpu,
                                          width, height,
                                          dri3_linear_format_for_format(draw, format),
                                          __DRI_IMAGE_USE_SHARE |
                                          __DRI_IMAGE_USE_LINEAR |
                                          __DRI_IMAGE_USE_BACKBUFFER |
                                          __DRI_IMAGE_USE_SCANOUT,
                                          buffer);
         pixmap_buffer = linear_buffer_display_gpu;
      }

      /* No display-GPU screen (or its allocation failed): allocate the
       * shared linear buffer on the render GPU instead.
       */
      if (!pixmap_buffer) {
         buffer->linear_buffer =
            draw->ext->image->createImage(draw->dri_screen_render_gpu,
                                          width, height,
                                          dri3_linear_format_for_format(draw, format),
                                          __DRI_IMAGE_USE_SHARE |
                                          __DRI_IMAGE_USE_LINEAR |
                                          __DRI_IMAGE_USE_BACKBUFFER |
                                          __DRI_IMAGE_USE_SCANOUT |
                                          __DRI_IMAGE_USE_PRIME_BUFFER,
                                          buffer);

         pixmap_buffer = buffer->linear_buffer;
         if (!buffer->linear_buffer) {
            goto no_linear_buffer;
         }
      }
   }

   /* X want some information about the planes, so ask the image for it
    */
   if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                     &num_planes))
      num_planes = 1;

   /* Export per-plane fd, stride and offset for the X server. */
   for (i = 0; i < num_planes; i++) {
      __DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL);

      if (!image) {
         assert(i == 0);
         image = pixmap_buffer;
      }

      buffer_fds[i] = -1;

      ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
                                         &buffer_fds[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE,
                                          &buffer->strides[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET,
                                          &buffer->offsets[i]);
      if (image != pixmap_buffer)
         draw->ext->image->destroyImage(image);

      if (!ret)
         goto no_buffer_attrib;
   }

   /* Reassemble the 64-bit format modifier from its two 32-bit query
    * halves; fall back to INVALID when the driver can't report it.
    */
   ret = draw->ext->image->queryImage(pixmap_buffer,
                                      __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
   buffer->modifier = (uint64_t) mod << 32;
   ret &= draw->ext->image->queryImage(pixmap_buffer,
                                       __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
   buffer->modifier |= (uint64_t)(mod & 0xffffffff);

   if (!ret)
      buffer->modifier = DRM_FORMAT_MOD_INVALID;

   if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu &&
       draw->dri_screen_display_gpu && linear_buffer_display_gpu) {
      /* The linear buffer was created in the display GPU's vram, so we
       * need to make it visible to render GPU
       */
      if (draw->ext->image->base.version >= 20)
         buffer->linear_buffer =
            draw->ext->image->createImageFromFds2(draw->dri_screen_render_gpu,
                                                  width,
                                                  height,
                                                  loader_image_format_to_fourcc(format),
                                                  &buffer_fds[0], num_planes,
                                                  __DRI_IMAGE_PRIME_LINEAR_BUFFER,
                                                  &buffer->strides[0],
                                                  &buffer->offsets[0],
                                                  buffer);
      else
         buffer->linear_buffer =
            draw->ext->image->createImageFromFds(draw->dri_screen_render_gpu,
                                                 width,
                                                 height,
                                                 loader_image_format_to_fourcc(format),
                                                 &buffer_fds[0], num_planes,
                                                 &buffer->strides[0],
                                                 &buffer->offsets[0],
                                                 buffer);
      if (!buffer->linear_buffer)
         goto no_buffer_attrib;

      draw->ext->image->destroyImage(linear_buffer_display_gpu);
   }

   /* Hand the exported plane fds to the X server to wrap in a pixmap. */
   pixmap = xcb_generate_id(draw->conn);
#ifdef HAVE_DRI3_MODIFIERS
   if (draw->multiplanes_available &&
       buffer->modifier != DRM_FORMAT_MOD_INVALID) {
      xcb_dri3_pixmap_from_buffers(draw->conn,
                                   pixmap,
                                   draw->window,
                                   num_planes,
                                   width, height,
                                   buffer->strides[0], buffer->offsets[0],
                                   buffer->strides[1], buffer->offsets[1],
                                   buffer->strides[2], buffer->offsets[2],
                                   buffer->strides[3], buffer->offsets[3],
                                   depth, buffer->cpp * 8,
                                   buffer->modifier,
                                   buffer_fds);
   } else
#endif
   {
      xcb_dri3_pixmap_from_buffer(draw->conn,
                                  pixmap,
                                  draw->drawable,
                                  buffer->size,
                                  width, height, buffer->strides[0],
                                  depth, buffer->cpp * 8,
                                  buffer_fds[0]);
   }

   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   buffer->pixmap = pixmap;
   buffer->own_pixmap = true;
   buffer->sync_fence = sync_fence;
   buffer->shm_fence = shm_fence;
   buffer->width = width;
   buffer->height = height;

   /* Mark the buffer as idle
    */
   dri3_fence_set(buffer);

   return buffer;

no_buffer_attrib:
   /* Close the plane fds exported so far; 'i' indexes the failing plane
    * and counts down through the already-exported ones.
    */
   do {
      if (buffer_fds[i] != -1)
         close(buffer_fds[i]);
   } while (--i >= 0);
   draw->ext->image->destroyImage(pixmap_buffer);
no_linear_buffer:
   if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
      draw->ext->image->destroyImage(buffer->image);
no_image:
   free(buffer);
no_buffer:
   xshmfence_unmap_shm(shm_fence);
no_shm_fence:
   close(fence_fd);
   return NULL;
}
1712
1713 static bool
dri3_detect_drawable_is_window(struct loader_dri3_drawable * draw)1714 dri3_detect_drawable_is_window(struct loader_dri3_drawable *draw)
1715 {
1716 /* Try to select for input on the window.
1717 *
1718 * If the drawable is a window, this will get our events
1719 * delivered.
1720 *
1721 * Otherwise, we'll get a BadWindow error back from this request which
1722 * will let us know that the drawable is a pixmap instead.
1723 */
1724
1725 xcb_void_cookie_t cookie =
1726 xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
1727 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1728 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1729 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1730
1731 /* Check to see if our select input call failed. If it failed with a
1732 * BadWindow error, then assume the drawable is a pixmap.
1733 */
1734 xcb_generic_error_t *error = xcb_request_check(draw->conn, cookie);
1735
1736 if (error) {
1737 if (error->error_code != BadWindow) {
1738 free(error);
1739 return false;
1740 }
1741 free(error);
1742
1743 /* pixmap can't get here, see driFetchDrawable(). */
1744 draw->type = LOADER_DRI3_DRAWABLE_PBUFFER;
1745 return true;
1746 }
1747
1748 draw->type = LOADER_DRI3_DRAWABLE_WINDOW;
1749 return true;
1750 }
1751
1752 static bool
dri3_setup_present_event(struct loader_dri3_drawable * draw)1753 dri3_setup_present_event(struct loader_dri3_drawable *draw)
1754 {
1755 /* No need to setup for pixmap drawable. */
1756 if (draw->type == LOADER_DRI3_DRAWABLE_PIXMAP ||
1757 draw->type == LOADER_DRI3_DRAWABLE_PBUFFER)
1758 return true;
1759
1760 draw->eid = xcb_generate_id(draw->conn);
1761
1762 if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
1763 xcb_present_select_input(draw->conn, draw->eid, draw->drawable,
1764 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1765 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1766 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1767 } else {
1768 assert(draw->type == LOADER_DRI3_DRAWABLE_UNKNOWN);
1769
1770 if (!dri3_detect_drawable_is_window(draw))
1771 return false;
1772
1773 if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1774 return true;
1775 }
1776
1777 /* Create an XCB event queue to hold present events outside of the usual
1778 * application event queue
1779 */
1780 draw->special_event = xcb_register_for_special_xge(draw->conn,
1781 &xcb_present_id,
1782 draw->eid,
1783 draw->stamp);
1784 return true;
1785 }
1786
1787 /** loader_dri3_update_drawable
1788 *
1789 * Called the first time we use the drawable and then
1790 * after we receive present configure notify events to
1791 * track the geometry of the drawable
1792 */
1793 static int
dri3_update_drawable(struct loader_dri3_drawable * draw)1794 dri3_update_drawable(struct loader_dri3_drawable *draw)
1795 {
1796 mtx_lock(&draw->mtx);
1797 if (draw->first_init) {
1798 xcb_get_geometry_cookie_t geom_cookie;
1799 xcb_get_geometry_reply_t *geom_reply;
1800 xcb_window_t root_win;
1801
1802 draw->first_init = false;
1803
1804 if (!dri3_setup_present_event(draw)) {
1805 mtx_unlock(&draw->mtx);
1806 return false;
1807 }
1808
1809 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1810
1811 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1812
1813 if (!geom_reply) {
1814 mtx_unlock(&draw->mtx);
1815 return false;
1816 }
1817 draw->width = geom_reply->width;
1818 draw->height = geom_reply->height;
1819 draw->depth = geom_reply->depth;
1820 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1821 root_win = geom_reply->root;
1822
1823 free(geom_reply);
1824
1825 if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1826 draw->window = root_win;
1827 else
1828 draw->window = draw->drawable;
1829 }
1830 dri3_flush_present_events(draw);
1831 mtx_unlock(&draw->mtx);
1832 return true;
1833 }
1834
1835 __DRIimage *
loader_dri3_create_image(xcb_connection_t * c,xcb_dri3_buffer_from_pixmap_reply_t * bp_reply,unsigned int format,__DRIscreen * dri_screen,const __DRIimageExtension * image,void * loaderPrivate)1836 loader_dri3_create_image(xcb_connection_t *c,
1837 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1838 unsigned int format,
1839 __DRIscreen *dri_screen,
1840 const __DRIimageExtension *image,
1841 void *loaderPrivate)
1842 {
1843 int *fds;
1844 __DRIimage *image_planar, *ret;
1845 int stride, offset;
1846
1847 /* Get an FD for the pixmap object
1848 */
1849 fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1850
1851 stride = bp_reply->stride;
1852 offset = 0;
1853
1854 /* createImageFromFds creates a wrapper __DRIimage structure which
1855 * can deal with multiple planes for things like Yuv images. So, once
1856 * we've gotten the planar wrapper, pull the single plane out of it and
1857 * discard the wrapper.
1858 */
1859 image_planar = image->createImageFromFds(dri_screen,
1860 bp_reply->width,
1861 bp_reply->height,
1862 loader_image_format_to_fourcc(format),
1863 fds, 1,
1864 &stride, &offset, loaderPrivate);
1865 close(fds[0]);
1866 if (!image_planar)
1867 return NULL;
1868
1869 ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1870
1871 if (!ret)
1872 ret = image_planar;
1873 else
1874 image->destroyImage(image_planar);
1875
1876 return ret;
1877 }
1878
#ifdef HAVE_DRI3_MODIFIERS
/* Wrap a DRI3 buffers_from_pixmap reply (multi-plane, with an explicit
 * format modifier) in a __DRIimage via createImageFromDmaBufs2.
 * All reply fds are closed before returning.
 *
 * \return the image, or NULL on failure or if the reply has more than
 *         4 planes
 */
__DRIimage *
loader_dri3_create_image_from_buffers(xcb_connection_t *c,
                                      xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
                                      unsigned int format,
                                      __DRIscreen *dri_screen,
                                      const __DRIimageExtension *image,
                                      void *loaderPrivate)
{
   int strides[4], offsets[4];
   unsigned error;
   __DRIimage *ret;

   if (bp_reply->nfd > 4)
      return NULL;

   int *fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
   uint32_t *strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
   uint32_t *offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);

   /* The driver interface wants int arrays while the protocol delivers
    * uint32_t; convert per plane.
    */
   for (int i = 0; i < bp_reply->nfd; i++) {
      strides[i] = strides_in[i];
      offsets[i] = offsets_in[i];
   }

   ret = image->createImageFromDmaBufs2(dri_screen,
                                        bp_reply->width,
                                        bp_reply->height,
                                        loader_image_format_to_fourcc(format),
                                        bp_reply->modifier,
                                        fds, bp_reply->nfd,
                                        strides, offsets,
                                        0, 0, 0, 0, /* UNDEFINED */
                                        &error, loaderPrivate);

   /* Close our copies of the protocol-provided fds. */
   for (int i = 0; i < bp_reply->nfd; i++)
      close(fds[i]);

   return ret;
}
#endif
1922
/** dri3_get_pixmap_buffer
 *
 * Get the DRM object for a pixmap from the X server and
 * wrap that with a __DRIimage structure using createImageFromFds
 *
 * \param driDrawable  Unused here; kept for the shared get-buffer signature.
 * \param format       DRI image format for the wrapped pixmap.
 * \param buffer_type  Front or back; selects the cache slot via
 *                     loader_dri3_pixmap_buf_id().
 * \param draw         The drawable whose pixmap is wrapped.
 * \return The cached or newly created buffer, or NULL on failure.
 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
   xcb_drawable_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int width;
   int height;
   int fence_fd;
   __DRIscreen *cur_screen;

   /* Return the cached wrapper if we already built one for this slot. */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Create a shared-memory fence and map it locally; the fd is handed to
    * the server below via DRI3FenceFromFD.
    */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Get the currently-bound screen or revert to using the drawable's screen if
    * no contexts are currently bound. The latter case is at least necessary for
    * obs-studio, when using Window Capture (Xcomposite) as a Source.
    */
   cur_screen = draw->vtable->get_dri_screen();
   if (!cur_screen) {
      cur_screen = draw->dri_screen_render_gpu;
   }

   /* Import the fence into the server before requesting the pixmap's
    * buffer(s), so the sync object exists for later triggers/awaits.
    */
   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);
#ifdef HAVE_DRI3_MODIFIERS
   /* Multi-planar / modifier-aware path: needs DRI3 1.2 on the server side
    * plus a new-enough image extension providing createImageFromDmaBufs2.
    */
   if (draw->multiplanes_available &&
       draw->ext->image->base.version >= 15 &&
       draw->ext->image->createImageFromDmaBufs2) {
      xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
      xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;

      bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap);
      bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie,
                                                     NULL);
      if (!bps_reply)
         goto no_image;
      buffer->image =
         loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bps_reply->width;
      height = bps_reply->height;
      free(bps_reply);
   } else
#endif
   {
      /* Legacy single-fd path via DRI3BufferFromPixmap. */
      xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
      xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;

      bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
      bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
      if (!bp_reply)
         goto no_image;

      buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bp_reply->width;
      height = bp_reply->height;
      free(bp_reply);
   }

   if (!buffer->image)
      goto no_image;

   buffer->pixmap = pixmap;
   /* own_pixmap == false: the pixmap is the drawable itself, so it must not
    * be destroyed along with this buffer.
    */
   buffer->own_pixmap = false;
   buffer->width = width;
   buffer->height = height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   dri3_set_render_buffer(draw, buf_id, buffer);

   return buffer;

no_image:
   /* The server-side fence was already created; tear it down too. */
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}
2035
/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary
 *
 * \param driDrawable  Unused here; kept for the shared get-buffer signature.
 * \param format       DRI image format for any newly allocated buffer.
 * \param buffer_type  loader_dri3_buffer_front or loader_dri3_buffer_back.
 * \param draw         The drawable to find or allocate the buffer for.
 * \return The buffer for the requested slot, or NULL on failure.
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   /* Back buffers are awaited by default; front buffers only when a copy
    * below makes it necessary.
    */
   bool fence_await = buffer_type == loader_dri3_buffer_back;
   int buf_id;

   if (buffer_type == loader_dri3_buffer_back) {
      draw->back_format = format;

      buf_id = dri3_find_back(draw, !draw->prefer_back_buffer_reuse);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, if that
    * old one is the wrong size, or if it's suboptimal
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height ||
       buffer->reallocate) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      if ((buffer_type == loader_dri3_buffer_back ||
           (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
          && buffer) {

         /* Fill the new buffer with data from an old buffer */
         if (!loader_dri3_blit_image(draw,
                                     new_buffer->image,
                                     buffer->image,
                                     0, 0,
                                     MIN2(buffer->width, new_buffer->width),
                                     MIN2(buffer->height, new_buffer->height),
                                     0, 0, 0) &&
             !buffer->linear_buffer) {
            /* Local blit failed and there is no linear staging buffer:
             * fall back to a server-side CopyArea, fenced so we can wait
             * for its completion.
             */
            dri3_fence_reset(draw->conn, new_buffer);
            dri3_copy_area(draw->conn,
                           buffer->pixmap,
                           new_buffer->pixmap,
                           dri3_drawable_gc(draw),
                           0, 0, 0, 0,
                           draw->width, draw->height);
            dri3_fence_trigger(draw->conn, new_buffer);
            fence_await = true;
         }
         dri3_free_render_buffer(draw, buf_id);
      } else if (buffer_type == loader_dri3_buffer_front) {
         /* Fill the new fake front with data from a real front */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer) {
            /* Prime setup: wait for the copy, then blit from the linear
             * staging image into the GPU-local image.
             */
            dri3_fence_await(draw->conn, draw, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         } else
            fence_await = true;
      }
      buffer = new_buffer;
      dri3_set_render_buffer(draw, buf_id, buffer);
   }

   if (fence_await)
      dri3_fence_await(draw->conn, draw, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
    *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}
2165
2166 /** dri3_free_buffers
2167 *
2168 * Free the front bufffer or all of the back buffers. Used
2169 * when the application changes which buffers it needs
2170 */
2171 static void
dri3_free_buffers(__DRIdrawable * driDrawable,enum loader_dri3_buffer_type buffer_type,struct loader_dri3_drawable * draw)2172 dri3_free_buffers(__DRIdrawable *driDrawable,
2173 enum loader_dri3_buffer_type buffer_type,
2174 struct loader_dri3_drawable *draw)
2175 {
2176 int first_id;
2177 int n_id;
2178 int buf_id;
2179
2180 switch (buffer_type) {
2181 case loader_dri3_buffer_back:
2182 first_id = LOADER_DRI3_BACK_ID(0);
2183 n_id = LOADER_DRI3_MAX_BACK;
2184 draw->cur_blit_source = -1;
2185 break;
2186 case loader_dri3_buffer_front:
2187 first_id = LOADER_DRI3_FRONT_ID;
2188 /* Don't free a fake front holding new backbuffer content. */
2189 n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
2190 break;
2191 default:
2192 unreachable("unhandled buffer_type");
2193 }
2194
2195 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++)
2196 dri3_free_render_buffer(draw, buf_id);
2197 }
2198
/** loader_dri3_get_buffers
 *
 * The published buffer allocation API.
 * Returns all of the necessary buffers, allocating
 * as needed.
 *
 * \param driDrawable  Passed through to the per-buffer helpers.
 * \param format       DRI image format for allocated buffers.
 * \param stamp        Invalidation stamp pointer, stored on the drawable.
 * \param loaderPrivate  Actually the struct loader_dri3_drawable pointer.
 * \param buffer_mask  __DRI_IMAGE_BUFFER_FRONT / _BACK request bits.
 * \param buffers      Out: image list filled with the requested images.
 * \return true on success, false on failure (bool returned through int).
 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer *front, *back;
   int buf_id;

   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   if (!dri3_update_drawable(draw))
      return false;

   dri3_update_max_num_back(draw);

   /* Free no longer needed back buffers */
   for (buf_id = 0; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
      int buffer_age;

      /* Keep unswapped buffers and the current blit source alive. */
      back = draw->buffers[buf_id];
      if (!back || !back->last_swap || draw->cur_blit_source == buf_id)
         continue;

      /* Heuristic: a buffer not swapped for over 200 frames is stale. */
      buffer_age = draw->send_sbc - back->last_swap + 1;
      if (buffer_age > 200)
         dri3_free_render_buffer(draw, buf_id);
   }

   /* pixmaps always have front buffers.
    */
   if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as buffer since it is potentially tiled a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW &&
          draw->dri_screen_render_gpu == draw->dri_screen_display_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      /* Front not requested: drop any cached fake front. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
      front = NULL;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      /* Back not requested: release all back buffers. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
      back = NULL;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      /* The front is "fake" (a separate buffer, not the real pixmap) for
       * prime setups and for window drawables.
       */
      draw->have_fake_front =
         draw->dri_screen_render_gpu != draw->dri_screen_display_gpu ||
         draw->type == LOADER_DRI3_DRAWABLE_WINDOW;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   draw->stamp = stamp;

   return true;
}
2304
2305 /** loader_dri3_update_drawable_geometry
2306 *
2307 * Get the current drawable geometry.
2308 */
2309 void
loader_dri3_update_drawable_geometry(struct loader_dri3_drawable * draw)2310 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2311 {
2312 xcb_get_geometry_cookie_t geom_cookie;
2313 xcb_get_geometry_reply_t *geom_reply;
2314
2315 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2316
2317 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2318
2319 if (geom_reply) {
2320 bool changed = draw->width != geom_reply->width || draw->height != geom_reply->height;
2321 draw->width = geom_reply->width;
2322 draw->height = geom_reply->height;
2323 if (changed) {
2324 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2325 draw->ext->flush->invalidate(draw->dri_drawable);
2326 }
2327
2328 free(geom_reply);
2329 }
2330 }
2331
2332 /**
2333 * Make sure the server has flushed all pending swap buffers to hardware
2334 * for this drawable. Ideally we'd want to send an X protocol request to
2335 * have the server block our connection until the swaps are complete. That
2336 * would avoid the potential round-trip here.
2337 */
2338 void
loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable * draw)2339 loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
2340 {
2341 int64_t ust, msc, sbc;
2342
2343 (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
2344 }
2345
2346 /**
2347 * Perform any cleanup associated with a close screen operation.
2348 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
2349 *
2350 * This function destroys the screen's cached swap context if any.
2351 */
2352 void
loader_dri3_close_screen(__DRIscreen * dri_screen)2353 loader_dri3_close_screen(__DRIscreen *dri_screen)
2354 {
2355 simple_mtx_lock(&blit_context.mtx);
2356 if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2357 blit_context.core->destroyContext(blit_context.ctx);
2358 blit_context.ctx = NULL;
2359 }
2360 simple_mtx_unlock(&blit_context.mtx);
2361 }
2362
/**
 * Find a backbuffer slot - potentially allocating a back buffer
 *
 * \param draw[in,out] Pointer to the drawable for which to find back.
 * \return Pointer to a new back buffer or NULL if allocation failed or was
 * not mandated.
 *
 * Find a potentially new back buffer, and if it's not been allocated yet and
 * in addition needs initializing, then try to allocate and initialize it.
 */
static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *back;
   int id;

   id = dri3_find_back(draw, false);
   if (id < 0)
      return NULL;

   back = draw->buffers[id];
   /* Allocate a new back if we haven't got one */
   if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE &&
       dri3_update_drawable(draw))
      back = dri3_alloc_render_buffer(draw, draw->back_format,
                                      draw->width, draw->height, draw->depth);

   if (!back)
      return NULL;

   dri3_set_render_buffer(draw, id, back);

   /* If necessary, prefill the back with data. */
   if (draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       back != draw->buffers[draw->cur_blit_source]) {
      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Wait on both buffers' fences before blitting the previous
       * contents into the new back, then carry over its swap count.
       */
      dri3_fence_await(draw->conn, draw, source);
      dri3_fence_await(draw->conn, draw, back);
      (void) loader_dri3_blit_image(draw,
                                    back->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      back->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }

   return back;
}
2414