1 /*
2  * Copyright © 2013 Keith Packard
3  * Copyright © 2015 Boyan Ding
4  *
5  * Permission to use, copy, modify, distribute, and sell this software and its
6  * documentation for any purpose is hereby granted without fee, provided that
7  * the above copyright notice appear in all copies and that both that copyright
8  * notice and this permission notice appear in supporting documentation, and
9  * that the name of the copyright holders not be used in advertising or
10  * publicity pertaining to distribution of the software without specific,
11  * written prior permission.  The copyright holders make no representations
12  * about the suitability of this software for any purpose.  It is provided "as
13  * is" without express or implied warranty.
14  *
15  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21  * OF THIS SOFTWARE.
22  */
23 
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27 #include <string.h>
28 
29 #include <X11/xshmfence.h>
30 #include <xcb/xcb.h>
31 #include <xcb/dri3.h>
32 #include <xcb/present.h>
33 #include <xcb/xfixes.h>
34 
35 #include <X11/Xlib-xcb.h>
36 
37 #include "loader_dri_helper.h"
38 #include "loader_dri3_helper.h"
39 #include "pipe/p_screen.h"
40 #include "util/macros.h"
41 #include "util/simple_mtx.h"
42 #include "drm-uapi/drm_fourcc.h"
43 #include "dri_screen.h"
44 #include "dri_util.h"
45 
46 /**
47  * A cached blit context.
48  */
49 struct loader_dri3_blit_context {
50    simple_mtx_t mtx;
51    struct dri_context *ctx;
52    struct dri_screen *cur_screen;
53    const __DRIcoreExtension *core;
54 };
55 
56 /* For simplicity we maintain the cache only for a single screen at a time */
57 static struct loader_dri3_blit_context blit_context = {
58    SIMPLE_MTX_INITIALIZER, NULL
59 };
60 
61 static void
62 dri3_flush_present_events(struct loader_dri3_drawable *draw);
63 
64 static struct loader_dri3_buffer *
65 dri3_find_back_alloc(struct loader_dri3_drawable *draw);
66 
67 static xcb_screen_t *
68 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
69 {
70    xcb_screen_iterator_t screen_iter =
71    xcb_setup_roots_iterator(xcb_get_setup(conn));
72 
73    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
74       if (screen_iter.data->root == root)
75          return screen_iter.data;
76    }
77 
78    return NULL;
79 }
80 
81 static xcb_visualtype_t *
82 get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
83 {
84    xcb_visualtype_iterator_t visual_iter;
85    xcb_screen_t *screen = draw->screen;
86    xcb_depth_iterator_t depth_iter;
87 
88    if (!screen)
89       return NULL;
90 
91    depth_iter = xcb_screen_allowed_depths_iterator(screen);
92    for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
93       if (depth_iter.data->depth != depth)
94          continue;
95 
96       visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
97       if (visual_iter.rem)
98          return visual_iter.data;
99    }
100 
101    return NULL;
102 }
103 
104 /* Sets the adaptive sync window property state. */
105 static void
106 set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
107                            uint32_t state)
108 {
109    static char const name[] = "_VARIABLE_REFRESH";
110    xcb_intern_atom_cookie_t cookie;
111    xcb_intern_atom_reply_t* reply;
112    xcb_void_cookie_t check;
113 
114    cookie = xcb_intern_atom(conn, 0, strlen(name), name);
115    reply = xcb_intern_atom_reply(conn, cookie, NULL);
116    if (reply == NULL)
117       return;
118 
119    if (state)
120       check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
121                                           drawable, reply->atom,
122                                           XCB_ATOM_CARDINAL, 32, 1, &state);
123    else
124       check = xcb_delete_property_checked(conn, drawable, reply->atom);
125 
126    xcb_discard_reply(conn, check.sequence);
127    free(reply);
128 }
129 
130 /* Get red channel mask for given drawable at given depth. */
131 static unsigned int
132 dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
133 {
134    xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
135 
136    if (visual)
137       return visual->red_mask;
138 
139    return 0;
140 }
141 
142 /**
143  * Get and lock (for use with the current thread) a dri context associated
144  * with the drawable's dri screen. The context is intended to be used with
145  * the dri image extension's blitImage method.
146  *
147  * \param draw[in]  Pointer to the drawable whose dri screen we want a
148  * dri context for.
149  * \return A dri context or NULL if context creation failed.
150  *
151  * When the caller is done with the context (even if the context returned was
152  * NULL), the caller must call loader_dri3_blit_context_put.
153  */
154 static struct dri_context *
155 loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
156 {
157    simple_mtx_lock(&blit_context.mtx);
158 
159    if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen_render_gpu) {
160       driDestroyContext(blit_context.ctx);
161       blit_context.ctx = NULL;
162    }
163 
164    if (!blit_context.ctx) {
165       blit_context.ctx = driCreateNewContext(draw->dri_screen_render_gpu,
166                                                            NULL, NULL, NULL);
167       blit_context.cur_screen = draw->dri_screen_render_gpu;
168    }
169 
170    return blit_context.ctx;
171 }
172 
173 /**
174  * Release (for use with other threads) a dri context previously obtained using
175  * loader_dri3_blit_context_get.
176  */
177 static void
178 loader_dri3_blit_context_put(void)
179 {
180    simple_mtx_unlock(&blit_context.mtx);
181 }
182 
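/* Illustrative sketch of the intended get/put pairing (dst, src, w and h are
 * placeholders for an already-created pair of dri_images and their common
 * size); loader_dri3_blit_image() below follows the same pattern:
 *
 *    struct dri_context *ctx = loader_dri3_blit_context_get(draw);
 *    if (ctx)
 *       dri2_blit_image(ctx, dst, src, 0, 0, w, h, 0, 0, w, h,
 *                       __BLIT_FLAG_FLUSH);
 *    loader_dri3_blit_context_put();   (must be called even if ctx is NULL)
 */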
183 /**
184  * Blit (parts of) the contents of a DRI image to another dri image
185  *
186  * \param draw[in]  The drawable which owns the images.
187  * \param dst[in]  The destination image.
188  * \param src[in]  The source image.
189  * \param dstx0[in]  Start destination coordinate.
190  * \param dsty0[in]  Start destination coordinate.
191  * \param width[in]  Blit width.
192  * \param height[in] Blit height.
193  * \param srcx0[in]  Start source coordinate.
194  * \param srcy0[in]  Start source coordinate.
195  * \param flush_flag[in]  Image blit flush flag.
196  * \return true iff successful.
197  */
198 static bool
199 loader_dri3_blit_image(struct loader_dri3_drawable *draw,
200                        struct dri_image *dst, struct dri_image *src,
201                        int dstx0, int dsty0, int width, int height,
202                        int srcx0, int srcy0, int flush_flag)
203 {
204    struct dri_context *dri_context;
205    bool use_blit_context = false;
206 
207    dri_context = draw->vtable->get_dri_context(draw);
208 
209    if (!dri_context || !draw->vtable->in_current_context(draw)) {
210       dri_context = loader_dri3_blit_context_get(draw);
211       use_blit_context = true;
212       flush_flag |= __BLIT_FLAG_FLUSH;
213    }
214 
215    if (dri_context)
216       dri2_blit_image(dri_context, dst, src, dstx0, dsty0,
217                      width, height, srcx0, srcy0,
218                      width, height, flush_flag);
219 
220    if (use_blit_context)
221       loader_dri3_blit_context_put();
222 
223    return dri_context != NULL;
224 }
225 
226 static inline void
227 dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
228 {
229    xshmfence_reset(buffer->shm_fence);
230 }
231 
232 static inline void
233 dri3_fence_set(struct loader_dri3_buffer *buffer)
234 {
235    xshmfence_trigger(buffer->shm_fence);
236 }
237 
238 static inline void
239 dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
240 {
241    xcb_sync_trigger_fence(c, buffer->sync_fence);
242 }
243 
244 static inline void
245 dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
246                  struct loader_dri3_buffer *buffer)
247 {
248    xcb_flush(c);
249    xshmfence_await(buffer->shm_fence);
250    if (draw) {
251       mtx_lock(&draw->mtx);
252       dri3_flush_present_events(draw);
253       mtx_unlock(&draw->mtx);
254    }
255 }
256 
257 static void
258 dri3_update_max_num_back(struct loader_dri3_drawable *draw)
259 {
260    switch (draw->last_present_mode) {
261    case XCB_PRESENT_COMPLETE_MODE_FLIP: {
262       if (draw->swap_interval == 0)
263          draw->max_num_back = 4;
264       else
265          draw->max_num_back = 3;
266 
267       assert(draw->max_num_back <= LOADER_DRI3_MAX_BACK);
268       break;
269    }
270 
271    case XCB_PRESENT_COMPLETE_MODE_SKIP:
272       break;
273 
274    default:
275       draw->max_num_back = 2;
276    }
277 }
278 
279 void
280 loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
281 {
282    /* Wait for all previous swaps to complete before changing the swap interval.
283     *
284     * This prevents out-of-order swaps in the following cases:
285     *   1. Changing from sync swap mode (>0) to async mode (=0), where an async
286     *      swap could occur before a previously pending sync swap.
287     *   2. Changing from value A to B with A > B, where the target_msc of the
288     *      previously pending swap may be bigger than that of the newer swap.
289     *
290     * Note that changing from value A to B with A < B won't cause out-of-order
291     * swaps, but may still yield a wrong target_msc value at the beginning.
292     */
293    if (draw->swap_interval != interval)
294       loader_dri3_swapbuffer_barrier(draw);
295 
296    draw->swap_interval = interval;
297 }
298 
299 static void
300 dri3_set_render_buffer(struct loader_dri3_drawable *draw, int buf_id,
301                        struct loader_dri3_buffer *buffer)
302 {
303    if (buf_id != LOADER_DRI3_FRONT_ID && !draw->buffers[buf_id])
304       draw->cur_num_back++;
305 
306    draw->buffers[buf_id] = buffer;
307 }
308 
309 /** dri3_free_render_buffer
310  *
311  * Free everything associated with one render buffer including pixmap, fence
312  * stuff and the driver image
313  */
314 static void
315 dri3_free_render_buffer(struct loader_dri3_drawable *draw,
316                         int buf_id)
317 {
318    struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
319 
320    if (!buffer)
321       return;
322 
323    if (buffer->own_pixmap)
324       xcb_free_pixmap(draw->conn, buffer->pixmap);
325    xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
326    xshmfence_unmap_shm(buffer->shm_fence);
327    dri2_destroy_image(buffer->image);
328    if (buffer->linear_buffer)
329       dri2_destroy_image(buffer->linear_buffer);
330    free(buffer);
331 
332    draw->buffers[buf_id] = NULL;
333 
334    if (buf_id != LOADER_DRI3_FRONT_ID)
335       draw->cur_num_back--;
336 }
337 
338 void
339 loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
340 {
341    int i;
342 
343    driDestroyDrawable(draw->dri_drawable);
344 
345    for (i = 0; i < ARRAY_SIZE(draw->buffers); i++)
346       dri3_free_render_buffer(draw, i);
347 
348    if (draw->special_event) {
349       xcb_void_cookie_t cookie =
350          xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
351                                           XCB_PRESENT_EVENT_MASK_NO_EVENT);
352 
353       xcb_discard_reply(draw->conn, cookie.sequence);
354       xcb_unregister_for_special_event(draw->conn, draw->special_event);
355    }
356 
357    if (draw->region)
358       xcb_xfixes_destroy_region(draw->conn, draw->region);
359 
360    cnd_destroy(&draw->event_cnd);
361    mtx_destroy(&draw->mtx);
362 }
363 
364 int
365 loader_dri3_drawable_init(xcb_connection_t *conn,
366                           xcb_drawable_t drawable,
367                           enum loader_dri3_drawable_type type,
368                           struct dri_screen *dri_screen_render_gpu,
369                           struct dri_screen *dri_screen_display_gpu,
370                           bool multiplanes_available,
371                           bool prefer_back_buffer_reuse,
372                           const struct dri_config *dri_config,
373                           const struct loader_dri3_vtable *vtable,
374                           struct loader_dri3_drawable *draw)
375 {
376    xcb_get_geometry_cookie_t cookie;
377    xcb_get_geometry_reply_t *reply;
378    xcb_generic_error_t *error;
379 
380    draw->conn = conn;
381    draw->vtable = vtable;
382    draw->drawable = drawable;
383    draw->type = type;
384    draw->region = 0;
385    draw->dri_screen_render_gpu = dri_screen_render_gpu;
386    draw->dri_screen_display_gpu = dri_screen_display_gpu;
387    draw->multiplanes_available = multiplanes_available;
388    draw->prefer_back_buffer_reuse = prefer_back_buffer_reuse;
389    draw->queries_buffer_age = false;
390 
391    draw->have_back = 0;
392    draw->have_fake_front = 0;
393    draw->first_init = true;
394    draw->adaptive_sync = false;
395    draw->adaptive_sync_active = false;
396    draw->block_on_depleted_buffers = false;
397 
398    draw->cur_blit_source = -1;
399    draw->back_format = DRM_FORMAT_INVALID;
400    mtx_init(&draw->mtx, mtx_plain);
401    cnd_init(&draw->event_cnd);
402 
403    {
404       unsigned char adaptive_sync = 0;
405       unsigned char block_on_depleted_buffers = 0;
406 
407       dri2GalliumConfigQueryb(draw->dri_screen_render_gpu,
408                                       "adaptive_sync",
409                                       &adaptive_sync);
410 
411       draw->adaptive_sync = adaptive_sync;
412 
413       dri2GalliumConfigQueryb(draw->dri_screen_render_gpu,
414                                       "block_on_depleted_buffers",
415                                       &block_on_depleted_buffers);
416 
417       draw->block_on_depleted_buffers = block_on_depleted_buffers;
418    }
419 
420    if (!draw->adaptive_sync)
421       set_adaptive_sync_property(conn, draw->drawable, false);
422 
423    draw->swap_interval = dri_get_initial_swap_interval(draw->dri_screen_render_gpu);
424 
425    dri3_update_max_num_back(draw);
426 
427    /* Create a new drawable */
428    draw->dri_drawable = dri_create_drawable(dri_screen_render_gpu, dri_config,
429                                             type == LOADER_DRI3_DRAWABLE_PIXMAP, draw);
430 
431    if (!draw->dri_drawable)
432       return 1;
433 
434    cookie = xcb_get_geometry(draw->conn, draw->drawable);
435    reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
436    if (reply == NULL || error != NULL) {
437       driDestroyDrawable(draw->dri_drawable);
438       return 1;
439    }
440 
441    draw->screen = get_screen_for_root(draw->conn, reply->root);
442    draw->width = reply->width;
443    draw->height = reply->height;
444    draw->depth = reply->depth;
445    draw->vtable->set_drawable_size(draw, draw->width, draw->height);
446    free(reply);
447 
448    /*
449     * Make sure server has the same swap interval we do for the new
450     * drawable.
451     */
452    loader_dri3_set_swap_interval(draw, draw->swap_interval);
453 
454    return 0;
455 }
456 
457 /* XXX this belongs in presentproto */
458 #ifndef PresentWindowDestroyed
459 #define PresentWindowDestroyed (1 << 0)
460 #endif
461 /*
462  * Process one Present event
463  */
464 static bool
465 dri3_handle_present_event(struct loader_dri3_drawable *draw,
466                           xcb_present_generic_event_t *ge)
467 {
468    switch (ge->evtype) {
469    case XCB_PRESENT_CONFIGURE_NOTIFY: {
470       xcb_present_configure_notify_event_t *ce = (void *) ge;
471       if (ce->pixmap_flags & PresentWindowDestroyed) {
472          free(ge);
473          return false;
474       }
475 
476       draw->width = ce->width;
477       draw->height = ce->height;
478       draw->vtable->set_drawable_size(draw, draw->width, draw->height);
479       dri_invalidate_drawable(draw->dri_drawable);
480       break;
481    }
482    case XCB_PRESENT_COMPLETE_NOTIFY: {
483       xcb_present_complete_notify_event_t *ce = (void *) ge;
484 
485       /* Compute the processed SBC number from the received 32-bit serial number
486        * merged with the upper 32-bits of the sent 64-bit serial number while
487        * checking for wrap.
488        */
489       if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
490          uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;
491 
492          /* Only assume wraparound if that results in exactly the previous
493           * SBC + 1, otherwise ignore received SBC > sent SBC (those are
494           * probably from a previous loader_dri3_drawable instance) to avoid
495           * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
496           */
497          if (recv_sbc <= draw->send_sbc)
498             draw->recv_sbc = recv_sbc;
499          else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
500             draw->recv_sbc = recv_sbc - 0x100000000ULL;
501 
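         /* Worked example of the wrap branch above (numbers are illustrative):
          * with send_sbc = 0x100000002, previous recv_sbc = 0xfffffffe and an
          * event serial of 0xffffffff, the merged value is 0x1ffffffff, which
          * exceeds send_sbc but equals recv_sbc + 0x100000001, so we store
          * 0x1ffffffff - 0x100000000 = 0xffffffff as the completed SBC.
          */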
502          /* When moving from flip to copy, we assume that we can allocate in
503           * a more optimal way if we don't need to cater for the display
504           * controller.
505           */
506          if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
507              draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
508             for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
509                if (draw->buffers[b])
510                   draw->buffers[b]->reallocate = true;
511             }
512          }
513 
514          /* If the server tells us that our allocation is suboptimal, we
515           * reallocate once.
516           */
517 #ifdef HAVE_X11_DRM
518          if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
519              draw->last_present_mode != ce->mode) {
520             for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
521                if (draw->buffers[b])
522                   draw->buffers[b]->reallocate = true;
523             }
524          }
525 #endif
526          draw->last_present_mode = ce->mode;
527 
528          draw->ust = ce->ust;
529          draw->msc = ce->msc;
530       } else if (ce->serial == draw->eid) {
531          draw->notify_ust = ce->ust;
532          draw->notify_msc = ce->msc;
533       }
534       break;
535    }
536    case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
537       xcb_present_idle_notify_event_t *ie = (void *) ge;
538       int b;
539 
540       for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
541          struct loader_dri3_buffer *buf = draw->buffers[b];
542 
543          if (buf && buf->pixmap == ie->pixmap)
544             buf->busy = 0;
545       }
546       break;
547    }
548    }
549    free(ge);
550    return true;
551 }
552 
553 static bool
554 dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
555                            unsigned *full_sequence)
556 {
557    xcb_generic_event_t *ev;
558    xcb_present_generic_event_t *ge;
559 
560    xcb_flush(draw->conn);
561 
562    /* Only have one thread waiting for events at a time */
563    if (draw->has_event_waiter) {
564       cnd_wait(&draw->event_cnd, &draw->mtx);
565       if (full_sequence)
566          *full_sequence = draw->last_special_event_sequence;
567       /* Another thread has updated the protected info, so retest. */
568       return true;
569    } else {
570       draw->has_event_waiter = true;
571       /* Allow other threads access to the drawable while we're waiting. */
572       mtx_unlock(&draw->mtx);
573       ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
574       mtx_lock(&draw->mtx);
575       draw->has_event_waiter = false;
576       cnd_broadcast(&draw->event_cnd);
577    }
578    if (!ev)
579       return false;
580    draw->last_special_event_sequence = ev->full_sequence;
581    if (full_sequence)
582       *full_sequence = ev->full_sequence;
583    ge = (void *) ev;
584    return dri3_handle_present_event(draw, ge);
585 }
586 
587 /** loader_dri3_wait_for_msc
588  *
589  * Get the X server to send an event when the target msc/divisor/remainder is
590  * reached.
591  */
592 bool
593 loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
594                          int64_t target_msc,
595                          int64_t divisor, int64_t remainder,
596                          int64_t *ust, int64_t *msc, int64_t *sbc)
597 {
598    xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
599                                                      draw->drawable,
600                                                      draw->eid,
601                                                      target_msc,
602                                                      divisor,
603                                                      remainder);
604    unsigned full_sequence;
605 
606    mtx_lock(&draw->mtx);
607 
608    /* Wait for the event */
609    do {
610       if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
611          mtx_unlock(&draw->mtx);
612          return false;
613       }
614    } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);
615 
616    *ust = draw->notify_ust;
617    *msc = draw->notify_msc;
618    *sbc = draw->recv_sbc;
619    mtx_unlock(&draw->mtx);
620 
621    return true;
622 }
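/* Illustrative sketch: a frontend implementing glXWaitForMscOML-style
 * semantics might block for the frame following the last completed MSC with
 *
 *    int64_t ust, msc, sbc;
 *    loader_dri3_wait_for_msc(draw, draw->msc + 1, 0, 0, &ust, &msc, &sbc);
 */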
623 
624 /** loader_dri3_wait_for_sbc
625  *
626  * Wait for the completed swap buffer count to reach the specified
627  * target. Presumably the application knows that this will be reached with
628  * outstanding complete events, or we're going to be here awhile.
629  */
630 int
631 loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
632                          int64_t target_sbc, int64_t *ust,
633                          int64_t *msc, int64_t *sbc)
634 {
635    /* From the GLX_OML_sync_control spec:
636     *
637     *     "If <target_sbc> = 0, the function will block until all previous
638     *      swaps requested with glXSwapBuffersMscOML for that window have
639     *      completed."
640     */
641    mtx_lock(&draw->mtx);
642    if (!target_sbc)
643       target_sbc = draw->send_sbc;
644 
645    while (draw->recv_sbc < target_sbc) {
646       if (!dri3_wait_for_event_locked(draw, NULL)) {
647          mtx_unlock(&draw->mtx);
648          return 0;
649       }
650    }
651 
652    *ust = draw->ust;
653    *msc = draw->msc;
654    *sbc = draw->recv_sbc;
655    mtx_unlock(&draw->mtx);
656    return 1;
657 }
658 
659 /** loader_dri3_find_back
660  *
661  * Find an idle back buffer. If there isn't one, then
662  * wait for a present idle notify event from the X server
663  */
664 static int
665 dri3_find_back(struct loader_dri3_drawable *draw, bool prefer_a_different)
666 {
667    struct loader_dri3_buffer *buffer;
668    int b;
669    int max_num;
670    int best_id = -1;
671    uint64_t best_swap = 0;
672 
673    mtx_lock(&draw->mtx);
674 
675    if (!prefer_a_different) {
676       /* Increase the likelihood of reusing the current buffer */
677       dri3_flush_present_events(draw);
678 
679       /* Reuse current back buffer if it's idle */
680       buffer = draw->buffers[draw->cur_back];
681       if (buffer && !buffer->busy) {
682          best_id = draw->cur_back;
683          goto unlock;
684       }
685    }
686 
687    /* Check whether we need to reuse the current back buffer as new back.
688     * In that case, wait until it's not busy anymore.
689     */
690    if (draw->cur_blit_source != -1) {
691       max_num = 1;
692       draw->cur_blit_source = -1;
693    } else {
694       max_num = LOADER_DRI3_MAX_BACK;
695    }
696 
697    /* In a DRI_PRIME situation, if prefer_a_different is true, we first try
698     * to find an idle buffer that is not the last used one.
699     * This is useful if we receive an XCB_PRESENT_EVENT_IDLE_NOTIFY event
700     * for a pixmap that is not actually idle (e.g. the DRI_PRIME blit is
701     * still in progress).
702     * Unigine Superposition hits this; using 2 back buffers instead of
703     * reusing the same one all the time avoids making the next frame wait
704     * for the copy to finish.
705     */
706    int current_back_id = draw->cur_back;
707    do {
708       /* Find idle buffer with lowest buffer age, or an unallocated slot */
709       for (b = 0; b < max_num; b++) {
710          int id = LOADER_DRI3_BACK_ID((b + current_back_id) % LOADER_DRI3_MAX_BACK);
711 
712          buffer = draw->buffers[id];
713          if (buffer) {
714             if (!buffer->busy &&
715                 (!prefer_a_different || id != current_back_id) &&
716                 (best_id == -1 || buffer->last_swap > best_swap)) {
717                best_id = id;
718                best_swap = buffer->last_swap;
719             }
720          } else if (best_id == -1 &&
721                     draw->cur_num_back < draw->max_num_back) {
722             best_id = id;
723          }
724       }
725 
726       /* Prefer re-using the same buffer over blocking */
727       if (prefer_a_different && best_id == -1 &&
728           !draw->buffers[LOADER_DRI3_BACK_ID(current_back_id)]->busy)
729          best_id = current_back_id;
730    } while (best_id == -1 && dri3_wait_for_event_locked(draw, NULL));
731 
732    if (best_id != -1)
733       draw->cur_back = best_id;
734 
735 unlock:
736    mtx_unlock(&draw->mtx);
737    return best_id;
738 }
739 
740 static xcb_gcontext_t
741 dri3_drawable_gc(struct loader_dri3_drawable *draw)
742 {
743    if (!draw->gc) {
744       uint32_t v = 0;
745       xcb_create_gc(draw->conn,
746                     (draw->gc = xcb_generate_id(draw->conn)),
747                     draw->drawable,
748                     XCB_GC_GRAPHICS_EXPOSURES,
749                     &v);
750    }
751    return draw->gc;
752 }
753 
754 
755 static struct loader_dri3_buffer *
756 dri3_back_buffer(struct loader_dri3_drawable *draw)
757 {
758    return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
759 }
760 
761 static struct loader_dri3_buffer *
762 dri3_front_buffer(struct loader_dri3_drawable *draw)
763 {
764    return draw->buffers[LOADER_DRI3_FRONT_ID];
765 }
766 
767 static void
768 dri3_copy_area(xcb_connection_t *c,
769                xcb_drawable_t    src_drawable,
770                xcb_drawable_t    dst_drawable,
771                xcb_gcontext_t    gc,
772                int16_t           src_x,
773                int16_t           src_y,
774                int16_t           dst_x,
775                int16_t           dst_y,
776                uint16_t          width,
777                uint16_t          height)
778 {
779    xcb_void_cookie_t cookie;
780 
781    cookie = xcb_copy_area_checked(c,
782                                   src_drawable,
783                                   dst_drawable,
784                                   gc,
785                                   src_x,
786                                   src_y,
787                                   dst_x,
788                                   dst_y,
789                                   width,
790                                   height);
791    xcb_discard_reply(c, cookie.sequence);
792 }
793 
794 /**
795  * Asks the driver to flush any queued work necessary for serializing with the
796  * X command stream, and optionally the slightly more strict requirement of
797  * glFlush() equivalence (which would require flushing even if nothing had
798  * been drawn to a window system framebuffer, for example).
799  */
800 void
801 loader_dri3_flush(struct loader_dri3_drawable *draw,
802                   unsigned flags,
803                   enum __DRI2throttleReason throttle_reason)
804 {
805    /* NEED TO CHECK WHETHER CONTEXT IS NULL */
806    struct dri_context *dri_context = draw->vtable->get_dri_context(draw);
807 
808    if (dri_context) {
809       dri_flush(dri_context, draw->dri_drawable, flags, throttle_reason);
810    }
811 }
812 
813 void
814 loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
815                             int x, int y,
816                             int width, int height,
817                             bool flush)
818 {
819    struct loader_dri3_buffer *back;
820    unsigned flags = __DRI2_FLUSH_DRAWABLE;
821 
822    /* Check we have the right attachments */
823    if (!draw->have_back || draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
824       return;
825 
826    if (flush)
827       flags |= __DRI2_FLUSH_CONTEXT;
828    loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);
829 
830    back = dri3_find_back_alloc(draw);
831    if (!back)
832       return;
833 
834    y = draw->height - y - height;
835 
836    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu) {
837       /* Update the linear buffer part of the back buffer
838        * for the dri3_copy_area operation
839        */
840       (void) loader_dri3_blit_image(draw,
841                                     back->linear_buffer,
842                                     back->image,
843                                     0, 0, back->width, back->height,
844                                     0, 0, __BLIT_FLAG_FLUSH);
845    }
846 
847    loader_dri3_swapbuffer_barrier(draw);
848    dri3_fence_reset(draw->conn, back);
849    dri3_copy_area(draw->conn,
850                   back->pixmap,
851                   draw->drawable,
852                   dri3_drawable_gc(draw),
853                   x, y, x, y, width, height);
854    dri3_fence_trigger(draw->conn, back);
855    /* Refresh the fake front (if present) after we just damaged the real
856     * front.
857     */
858    if (draw->have_fake_front &&
859        !loader_dri3_blit_image(draw,
860                                dri3_front_buffer(draw)->image,
861                                back->image,
862                                x, y, width, height,
863                                x, y, __BLIT_FLAG_FLUSH) &&
864        draw->dri_screen_render_gpu == draw->dri_screen_display_gpu) {
865       dri3_fence_reset(draw->conn, dri3_front_buffer(draw));
866       dri3_copy_area(draw->conn,
867                      back->pixmap,
868                      dri3_front_buffer(draw)->pixmap,
869                      dri3_drawable_gc(draw),
870                      x, y, x, y, width, height);
871       dri3_fence_trigger(draw->conn, dri3_front_buffer(draw));
872       dri3_fence_await(draw->conn, NULL, dri3_front_buffer(draw));
873    }
874    dri3_fence_await(draw->conn, draw, back);
875 }
876 
877 void
878 loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
879                           xcb_drawable_t dest,
880                           xcb_drawable_t src)
881 {
882    loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);
883 
884    struct loader_dri3_buffer *front = dri3_front_buffer(draw);
885    if (front)
886       dri3_fence_reset(draw->conn, front);
887 
888    dri3_copy_area(draw->conn,
889                   src, dest,
890                   dri3_drawable_gc(draw),
891                   0, 0, 0, 0, draw->width, draw->height);
892 
893    if (front) {
894       dri3_fence_trigger(draw->conn, front);
895       dri3_fence_await(draw->conn, draw, front);
896    }
897 }
898 
899 void
900 loader_dri3_wait_x(struct loader_dri3_drawable *draw)
901 {
902    struct loader_dri3_buffer *front;
903 
904    if (draw == NULL || !draw->have_fake_front)
905       return;
906 
907    front = dri3_front_buffer(draw);
908 
909    loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);
910 
911    /* In the psc->is_different_gpu case, the linear buffer has been updated,
912     * but not yet the tiled buffer.
913     * Copy back to the tiled buffer we use for rendering.
914     * Note that we don't need flushing.
915     */
916    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
917       (void) loader_dri3_blit_image(draw,
918                                     front->image,
919                                     front->linear_buffer,
920                                     0, 0, front->width, front->height,
921                                     0, 0, 0);
922 }
923 
924 void
925 loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
926 {
927    struct loader_dri3_buffer *front;
928 
929    if (draw == NULL || !draw->have_fake_front)
930       return;
931 
932    front = dri3_front_buffer(draw);
933    /* TODO: `front` is not supposed to be NULL here, fix the actual bug
934     * https://gitlab.freedesktop.org/mesa/mesa/-/issues/8982
935     */
936    if (!front)
937       return;
938 
939    /* In the psc->is_different_gpu case, we update the linear_buffer
940     * before updating the real front.
941     */
942    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
943       (void) loader_dri3_blit_image(draw,
944                                     front->linear_buffer,
945                                     front->image,
946                                     0, 0, front->width, front->height,
947                                     0, 0, __BLIT_FLAG_FLUSH);
948    loader_dri3_swapbuffer_barrier(draw);
949    loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
950 }
951 
952 /** dri3_flush_present_events
953  *
954  * Process any present events that have been received from the X server
955  */
956 static void
957 dri3_flush_present_events(struct loader_dri3_drawable *draw)
958 {
959    /* Check to see if any configuration changes have occurred
960     * since we were last invoked
961     */
962    if (draw->has_event_waiter)
963       return;
964 
965    if (draw->special_event) {
966       xcb_generic_event_t    *ev;
967 
968       while ((ev = xcb_poll_for_special_event(draw->conn,
969                                               draw->special_event)) != NULL) {
970          xcb_present_generic_event_t *ge = (void *) ev;
971          if (!dri3_handle_present_event(draw, ge))
972             break;
973       }
974    }
975 }
976 
977 /** loader_dri3_swap_buffers_msc
978  *
979  * Make the current back buffer visible using the present extension
980  */
981 int64_t
982 loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
983                              int64_t target_msc, int64_t divisor,
984                              int64_t remainder, unsigned flush_flags,
985                              const int *rects, int n_rects,
986                              bool force_copy)
987 {
988    struct loader_dri3_buffer *back;
989    int64_t ret = 0;
990    bool wait_for_next_buffer = false;
991 
992    /* GLX spec:
993     *   void glXSwapBuffers(Display *dpy, GLXDrawable draw);
994     *   This operation is a no-op if draw was created with a non-double-buffered
995     *   GLXFBConfig, or if draw is a GLXPixmap.
996     *   ...
997     *   GLX pixmaps may be created with a config that includes back buffers and
998     *   stereoscopic buffers. However, glXSwapBuffers is ignored for these pixmaps.
999     *   ...
1000     *   It is possible to create a pbuffer with back buffers and to swap the
1001     *   front and back buffers by calling glXSwapBuffers.
1002     *
1003     * EGL spec:
1004     *   EGLBoolean eglSwapBuffers(EGLDisplay dpy, EGLSurface surface);
1005     *   If surface is a back-buffered window surface, then the color buffer is
1006     *   copied to the native window associated with that surface. If surface is
1007     *   a single-buffered window, pixmap, or pbuffer surface, eglSwapBuffers has
1008     *   no effect.
1009     *
1010     * SwapBuffer effect:
1011     *       |           GLX             |           EGL            |
1012     *       | window | pixmap | pbuffer | window | pixmap | pbuffer|
1013     *-------+--------+--------+---------+--------+--------+--------+
1014     * single|  nop   |  nop   |   nop   |  nop   |  nop   |   nop  |
1015     * double|  swap  |  nop   |   swap  |  swap  |  NA    |   NA   |
1016     */
1017    if (!draw->have_back || draw->type == LOADER_DRI3_DRAWABLE_PIXMAP)
1018       return ret;
1019 
1020    draw->vtable->flush_drawable(draw, flush_flags);
1021 
1022    back = dri3_find_back_alloc(draw);
1023    /* This can only happen in an error case, e.g. the display is already closed. */
1024    if (!back)
1025       return ret;
1026 
1027    mtx_lock(&draw->mtx);
1028 
1029    if (draw->adaptive_sync && !draw->adaptive_sync_active) {
1030       set_adaptive_sync_property(draw->conn, draw->drawable, true);
1031       draw->adaptive_sync_active = true;
1032    }
1033 
1034    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu) {
1035       /* Update the linear buffer before presenting the pixmap */
1036       (void) loader_dri3_blit_image(draw,
1037                                     back->linear_buffer,
1038                                     back->image,
1039                                     0, 0, back->width, back->height,
1040                                     0, 0, __BLIT_FLAG_FLUSH);
1041    }
1042 
1043    /* If we need to preload the new back buffer, remember the source.
1044     * The force_copy parameter is used by EGL to attempt to preserve
1045     * the back buffer across a call to this function.
1046     */
1047    if (force_copy)
1048       draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);
1049 
1050    /* Exchange the back and fake front. Even though the server knows about these
1051     * buffers, it has no notion of back and fake front.
1052     */
1053    if (draw->have_fake_front) {
1054       struct loader_dri3_buffer *tmp;
1055 
1056       tmp = dri3_front_buffer(draw);
1057       draw->buffers[LOADER_DRI3_FRONT_ID] = back;
1058       draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;
1059 
1060       if (force_copy)
1061          draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
1062    }
1063 
1064    dri3_flush_present_events(draw);
1065 
1066    if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
1067       dri3_fence_reset(draw->conn, back);
1068 
1069       /* Compute when we want the frame shown by taking the last known
1070        * successful MSC and adding in a swap interval for each outstanding swap
1071        * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
1072        * semantic"
1073        */
1074       ++draw->send_sbc;
1075       if (target_msc == 0 && divisor == 0 && remainder == 0)
1076          target_msc = draw->msc + abs(draw->swap_interval) *
1077                       (draw->send_sbc - draw->recv_sbc);
1078       else if (divisor == 0 && remainder > 0) {
1079          /* From the GLX_OML_sync_control spec:
1080           *     "If <divisor> = 0, the swap will occur when MSC becomes
1081           *      greater than or equal to <target_msc>."
1082           *
1083           * Note that there's no mention of the remainder.  The Present
1084           * extension throws BadValue for remainder != 0 with divisor == 0, so
1085           * just drop the passed in value.
1086           */
1087          remainder = 0;
1088       }
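      /* Worked example of the target_msc computation above (numbers are
       * illustrative): with swap_interval = 1, draw->msc = 1000 and two swaps
       * still outstanding after the increment (send_sbc - recv_sbc = 2), the
       * frame is scheduled for target_msc = 1000 + 1 * 2 = 1002.
       */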
1089 
1090       /* From the GLX_EXT_swap_control spec
1091        * and the EGL 1.4 spec (page 53):
1092        *
1093        *     "If <interval> is set to a value of 0, buffer swaps are not
1094        *      synchronized to a video frame."
1095        *
1096        * From GLX_EXT_swap_control_tear:
1097        *
1098        *     "If <interval> is negative, the minimum number of video frames
1099        *      between buffer swaps is the absolute value of <interval>. In this
1100        *      case, if abs(<interval>) video frames have already passed from
1101        *      the previous swap when the swap is ready to be performed, the
1102        *      swap will occur without synchronization to a video frame."
1103        *
1104        * Implementation note: It is possible to enable triple buffering
1105        * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
1106        * the default.
1107        */
1108       uint32_t options = XCB_PRESENT_OPTION_NONE;
1109       if (draw->swap_interval <= 0)
1110          options |= XCB_PRESENT_OPTION_ASYNC;
1111 
1112       /* If we need to populate the new back, but have to reuse the back
1113        * buffer slot due to lack of local blit capabilities, make sure
1114        * the server doesn't flip, which would leave us deadlocked.
1115        */
1116       if (draw->cur_blit_source != -1)
1117          options |= XCB_PRESENT_OPTION_COPY;
1118 #ifdef HAVE_X11_DRM
1119       if (draw->multiplanes_available)
1120          options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1121 #endif
1122       back->busy = 1;
1123       back->last_swap = draw->send_sbc;
1124 
1125       if (!draw->region) {
1126          draw->region = xcb_generate_id(draw->conn);
1127          xcb_xfixes_create_region(draw->conn, draw->region, 0, NULL);
1128       }
1129 
1130       xcb_xfixes_region_t region = 0;
1131       xcb_rectangle_t xcb_rects[64];
1132 
1133       if (n_rects > 0 && n_rects <= ARRAY_SIZE(xcb_rects)) {
1134          for (int i = 0; i < n_rects; i++) {
1135             const int *rect = &rects[i * 4];
1136             xcb_rects[i].x = rect[0];
1137             xcb_rects[i].y = draw->height - rect[1] - rect[3];
1138             xcb_rects[i].width = rect[2];
1139             xcb_rects[i].height = rect[3];
1140          }
1141 
1142          region = draw->region;
1143          xcb_xfixes_set_region(draw->conn, region, n_rects, xcb_rects);
1144       }
1145 
1146       xcb_present_pixmap(draw->conn,
1147                          draw->drawable,
1148                          back->pixmap,
1149                          (uint32_t) draw->send_sbc,
1150                          0,                                    /* valid */
1151                          region,                               /* update */
1152                          0,                                    /* x_off */
1153                          0,                                    /* y_off */
1154                          None,                                 /* target_crtc */
1155                          None,
1156                          back->sync_fence,
1157                          options,
1158                          target_msc,
1159                          divisor,
1160                          remainder, 0, NULL);
1161    } else {
1162       /* This can only be reached by a double-buffered GLXPbuffer. */
1163       assert(draw->type == LOADER_DRI3_DRAWABLE_PBUFFER);
1164       /* GLX does not have damage regions. */
1165       assert(n_rects == 0);
1166 
1167       /* For wait and buffer age usage. */
1168       draw->send_sbc++;
1169       draw->recv_sbc = back->last_swap = draw->send_sbc;
1170 
1171       /* In the same-GPU case the pixmap is imported as the front buffer image,
1172        * so locally blitting the back buffer image to it is enough. Otherwise the
1173        * front buffer is a fake one the X server must sync with the pixmap remotely.
1174        */
1175       if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu ||
1176           !loader_dri3_blit_image(draw,
1177                                   dri3_front_buffer(draw)->image,
1178                                   back->image,
1179                                   0, 0, draw->width, draw->height,
1180                                   0, 0, __BLIT_FLAG_FLUSH)) {
1181          dri3_copy_area(draw->conn, back->pixmap,
1182                         draw->drawable,
1183                         dri3_drawable_gc(draw),
1184                         0, 0, 0, 0, draw->width, draw->height);
1185       }
1186    }
1187 
1188    ret = (int64_t) draw->send_sbc;
1189 
1190    /* Schedule a server-side back-preserving blit if necessary.
1191     * This happens iff all conditions below are satisfied:
1192     * a) We have a fake front,
1193     * b) We need to preserve the back buffer,
1194     * c) We don't have local blit capabilities.
1195     */
1196    if (draw->cur_blit_source != -1 &&
1197        draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
1198       struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
1199       struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];
1200 
1201       dri3_fence_reset(draw->conn, new_back);
1202       dri3_copy_area(draw->conn, src->pixmap,
1203                      new_back->pixmap,
1204                      dri3_drawable_gc(draw),
1205                      0, 0, 0, 0, draw->width, draw->height);
1206       dri3_fence_trigger(draw->conn, new_back);
1207       new_back->last_swap = src->last_swap;
1208    }
1209 
1210    xcb_flush(draw->conn);
1211    if (draw->stamp)
1212       ++(*draw->stamp);
1213 
1214    /* Waiting on a buffer is only sensible if all buffers are in use and the
1215     * client doesn't use the buffer age extension. In this case a client is
1216     * relying on getting control back immediately.
1217     *
1218     * As waiting on a buffer can at worst make us miss a frame the option has
1219     * to be enabled explicitly with the block_on_depleted_buffers DRI option.
1220     */
1221    wait_for_next_buffer = draw->cur_num_back == draw->max_num_back &&
1222       !draw->queries_buffer_age && draw->block_on_depleted_buffers;
1223 
1224    mtx_unlock(&draw->mtx);
1225 
1226    dri_invalidate_drawable(draw->dri_drawable);
1227 
1228    /* Clients that use up all available buffers usually regulate their drawing
1229     * through swapchain contention backpressure. In such a scenario the client
1230     * draws whenever control returns to it. Its event loop is slowed down only
1231     * by us waiting on buffers becoming available again.
1232     *
1233     * By waiting here on a new buffer and only then returning to the client,
1234     * we ensure the client begins drawing only when the next buffer is available,
1235     * rather than drawing first and then waiting a refresh cycle for the next
1236     * available buffer to show it. This way we reduce the latency between what
1237     * the client draws and what is shown on the screen by one frame.
1238     */
1239    if (wait_for_next_buffer)
1240       dri3_find_back(draw, draw->prefer_back_buffer_reuse);
1241 
1242    return ret;
1243 }
1244 
1245 int
1246 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1247 {
1248    struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1249    int ret = 0;
1250 
1251    mtx_lock(&draw->mtx);
1252    draw->queries_buffer_age = true;
1253    if (back && back->last_swap != 0)
1254       ret = draw->send_sbc - back->last_swap + 1;
1255    mtx_unlock(&draw->mtx);
1256 
1257    return ret;
1258 }
1259 
1260 static uint32_t
1261 dri3_cpp_for_fourcc(uint32_t format) {
1262    switch (format) {
1263    case DRM_FORMAT_R8:
1264       return 1;
1265    case DRM_FORMAT_RGB565:
1266    case DRM_FORMAT_GR88:
1267       return 2;
1268    case DRM_FORMAT_XRGB8888:
1269    case DRM_FORMAT_ARGB8888:
1270    case DRM_FORMAT_ABGR8888:
1271    case DRM_FORMAT_XBGR8888:
1272    case DRM_FORMAT_XRGB2101010:
1273    case DRM_FORMAT_ARGB2101010:
1274    case DRM_FORMAT_XBGR2101010:
1275    case DRM_FORMAT_ABGR2101010:
1276    case __DRI_IMAGE_FORMAT_SARGB8:
1277    case __DRI_IMAGE_FORMAT_SABGR8:
1278    case __DRI_IMAGE_FORMAT_SXRGB8:
1279       return 4;
1280    case DRM_FORMAT_ABGR16161616:
1281    case DRM_FORMAT_XBGR16161616:
1282    case DRM_FORMAT_XBGR16161616F:
1283    case DRM_FORMAT_ABGR16161616F:
1284       return 8;
1285    case DRM_FORMAT_INVALID:
1286    default:
1287       return 0;
1288    }
1289 }
1290 
1291 /* Map format of render buffer to corresponding format for the linear_buffer
1292  * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1293  * Usually linear_format == format, except for depth >= 30 formats, where
1294  * different gpu vendors have different preferences wrt. color channel ordering.
1295  */
1296 static uint32_t
1297 dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1298 {
1299    switch (format) {
1300       case  __DRI_IMAGE_FORMAT_XRGB2101010:
1301       case  __DRI_IMAGE_FORMAT_XBGR2101010:
1302          /* Different preferred formats for different hw */
1303          if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1304             return __DRI_IMAGE_FORMAT_XBGR2101010;
1305          else
1306             return __DRI_IMAGE_FORMAT_XRGB2101010;
1307 
1308       case  __DRI_IMAGE_FORMAT_ARGB2101010:
1309       case  __DRI_IMAGE_FORMAT_ABGR2101010:
1310          /* Different preferred formats for different hw */
1311          if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1312             return __DRI_IMAGE_FORMAT_ABGR2101010;
1313          else
1314             return __DRI_IMAGE_FORMAT_ARGB2101010;
1315 
1316       default:
1317          return format;
1318    }
1319 }
1320 
1321 #ifdef HAVE_X11_DRM
1322 static bool
1323 has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
1324                        uint64_t *modifiers, uint32_t count)
1325 {
1326    uint64_t *supported_modifiers;
1327    int32_t supported_modifiers_count;
1328    bool found = false;
1329    int i, j;
1330 
1331    if (!dri_query_dma_buf_modifiers(draw->dri_screen_render_gpu,
1332                                                format, 0, NULL, NULL,
1333                                                &supported_modifiers_count) ||
1334        supported_modifiers_count == 0)
1335       return false;
1336 
1337    supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
1338    if (!supported_modifiers)
1339       return false;
1340 
1341    dri_query_dma_buf_modifiers(draw->dri_screen_render_gpu, format,
1342                                           supported_modifiers_count,
1343                                           supported_modifiers, NULL,
1344                                           &supported_modifiers_count);
1345 
1346    for (i = 0; !found && i < supported_modifiers_count; i++) {
1347       for (j = 0; !found && j < count; j++) {
1348          if (supported_modifiers[i] == modifiers[j])
1349             found = true;
1350       }
1351    }
1352 
1353    free(supported_modifiers);
1354    return found;
1355 }
1356 #endif
1357 
1358 /** loader_dri3_alloc_render_buffer
1359  *
1360  * Use the driver createImage function to construct a struct dri_image, then
1361  * get a file descriptor for that and create an X pixmap from that
1362  *
1363  * Allocate an xshmfence for synchronization
1364  */
1365 static struct loader_dri3_buffer *
1366 dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int fourcc,
1367                          int width, int height, int depth)
1368 {
1369    struct loader_dri3_buffer *buffer;
1370    struct dri_image *pixmap_buffer = NULL, *linear_buffer_display_gpu = NULL;
1371    int format = loader_fourcc_to_image_format(fourcc);
1372    xcb_pixmap_t pixmap;
1373    xcb_sync_fence_t sync_fence;
1374    struct xshmfence *shm_fence;
1375    int buffer_fds[4], fence_fd;
1376    int num_planes = 0;
1377    uint64_t *modifiers = NULL;
1378    uint32_t count = 0;
1379    int i, mod;
1380    int ret;
1381 
1382    /* Create an xshmfence object and
1383     * prepare to send that to the X server
1384     */
1385 
1386    fence_fd = xshmfence_alloc_shm();
1387    if (fence_fd < 0)
1388       return NULL;
1389 
1390    shm_fence = xshmfence_map_shm(fence_fd);
1391    if (shm_fence == NULL)
1392       goto no_shm_fence;
1393 
1394    /* Allocate the image from the driver
1395     */
1396    buffer = calloc(1, sizeof *buffer);
1397    if (!buffer)
1398       goto no_buffer;
1399 
1400    buffer->cpp = dri3_cpp_for_fourcc(fourcc);
1401    if (!buffer->cpp)
1402       goto no_image;
1403 
1404    if (draw->dri_screen_render_gpu == draw->dri_screen_display_gpu) {
1405 #ifdef HAVE_X11_DRM
1406       if (draw->multiplanes_available && draw->dri_screen_render_gpu->base.screen->resource_create_with_modifiers) {
1407          xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
1408          xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
1409          xcb_generic_error_t *error = NULL;
1410 
1411          mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
1412                                                        draw->window,
1413                                                        depth, buffer->cpp * 8);
1414          mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
1415                                                             mod_cookie,
1416                                                             &error);
1417          if (!mod_reply)
1418             goto no_image;
1419 
1420          if (mod_reply->num_window_modifiers) {
1421             count = mod_reply->num_window_modifiers;
1422             modifiers = malloc(count * sizeof(uint64_t));
1423             if (!modifiers) {
1424                free(mod_reply);
1425                goto no_image;
1426             }
1427 
1428             memcpy(modifiers,
1429                    xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
1430                    count * sizeof(uint64_t));
1431 
1432             if (!has_supported_modifier(draw, fourcc, modifiers, count)) {
1433                free(modifiers);
1434                count = 0;
1435                modifiers = NULL;
1436             }
1437          }
1438 
1439          if (mod_reply->num_screen_modifiers && modifiers == NULL) {
1440             count = mod_reply->num_screen_modifiers;
1441             modifiers = malloc(count * sizeof(uint64_t));
1442             if (!modifiers) {
1443                free(mod_reply);
1444                goto no_image;
1445             }
1446 
1447             memcpy(modifiers,
1448                    xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
1449                    count * sizeof(uint64_t));
1450          }
1451 
1452          free(mod_reply);
1453       }
1454 #endif
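      /* Allocate the image on the render GPU, passing the negotiated modifier
       * list (if any) and flagging it as shareable, scanout-capable and a
       * back buffer.
       */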
1455       buffer->image = dri_create_image_with_modifiers(draw->dri_screen_render_gpu,
1456                                               width, height, format,
1457                                               __DRI_IMAGE_USE_SHARE |
1458                                               __DRI_IMAGE_USE_SCANOUT |
1459                                               __DRI_IMAGE_USE_BACKBUFFER |
1460                                               (draw->is_protected_content ?
1461                                                __DRI_IMAGE_USE_PROTECTED : 0),
1462                                               modifiers, count, buffer);
1463       free(modifiers);
1464 
1465       pixmap_buffer = buffer->image;
1466 
1467       if (!buffer->image)
1468          goto no_image;
1469    } else {
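      /* Render and display GPUs differ: allocate the main image on the
       * render GPU and, below, a linear copy that the display GPU can
       * access.
       */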
1470       buffer->image =
1471          dri_create_image(draw->dri_screen_render_gpu,
1472                                        width, height, format,
1473                                        NULL, 0, 0, buffer);
1474 
1475       if (!buffer->image)
1476          goto no_image;
1477 
1478       /* dri_screen_display_gpu is set only when both GPUs use the same
1479        * driver. This check is needed because, for simplicity, the render
1480        * GPU's image extension is also used for the display GPU.
1481        */
1482       if (draw->dri_screen_display_gpu) {
1483          linear_buffer_display_gpu =
1484            dri_create_image(draw->dri_screen_display_gpu,
1485                                          width, height,
1486                                          dri3_linear_format_for_format(draw, format),
1487                                          NULL, 0,
1488                                          __DRI_IMAGE_USE_SHARE |
1489                                          __DRI_IMAGE_USE_LINEAR |
1490                                          __DRI_IMAGE_USE_BACKBUFFER |
1491                                          __DRI_IMAGE_USE_SCANOUT,
1492                                          buffer);
1493          pixmap_buffer = linear_buffer_display_gpu;
1494       }
1495 
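      /* No display-GPU screen to allocate from (or that allocation failed),
       * so create the linear copy on the render GPU as a PRIME buffer
       * instead.
       */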
1496       if (!pixmap_buffer) {
1497          buffer->linear_buffer =
1498            dri_create_image(draw->dri_screen_render_gpu,
1499                                         width, height,
1500                                         dri3_linear_format_for_format(draw, format),
1501                                         NULL, 0,
1502                                         __DRI_IMAGE_USE_SHARE |
1503                                         __DRI_IMAGE_USE_LINEAR |
1504                                         __DRI_IMAGE_USE_BACKBUFFER |
1505                                         __DRI_IMAGE_USE_SCANOUT |
1506                                         __DRI_IMAGE_USE_PRIME_BUFFER,
1507                                         buffer);
1508 
1509          pixmap_buffer = buffer->linear_buffer;
1510          if (!buffer->linear_buffer) {
1511             goto no_linear_buffer;
1512          }
1513       }
1514    }
1515 
1516    /* The X server wants some information about the planes, so ask the image for it.
1517     */
1518    if (!dri2_query_image(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
1519                                      &num_planes))
1520       num_planes = 1;
1521 
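   /* Export a dma-buf fd plus stride and offset for every plane of the image
    * that will back the pixmap.
    */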
1522    for (i = 0; i < num_planes; i++) {
1523       struct dri_image *image = dri2_from_planar(pixmap_buffer, i, NULL);
1524 
1525       if (!image) {
1526          assert(i == 0);
1527          image = pixmap_buffer;
1528       }
1529 
1530       buffer_fds[i] = -1;
1531 
1532       ret = dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD,
1533                                          &buffer_fds[i]);
1534       ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE,
1535                                           &buffer->strides[i]);
1536       ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_OFFSET,
1537                                           &buffer->offsets[i]);
1538       if (image != pixmap_buffer)
1539          dri2_destroy_image(image);
1540 
1541       if (!ret)
1542          goto no_buffer_attrib;
1543    }
1544 
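   /* The DRM format modifier is reported as two 32-bit attributes; rebuild
    * the 64-bit value as ((uint64_t)upper << 32) | lower.
    */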
1545    ret = dri2_query_image(pixmap_buffer,
1546                                      __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
1547    buffer->modifier = (uint64_t) mod << 32;
1548    ret &= dri2_query_image(pixmap_buffer,
1549                                        __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
1550    buffer->modifier |= (uint64_t)(mod & 0xffffffff);
1551 
1552    if (!ret)
1553       buffer->modifier = DRM_FORMAT_MOD_INVALID;
1554 
1555    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu &&
1556        draw->dri_screen_display_gpu && linear_buffer_display_gpu) {
1557       /* The linear buffer was created in the display GPU's VRAM, so we
1558        * need to make it visible to the render GPU.
1559        */
1560       buffer->linear_buffer =
1561          dri2_from_dma_bufs(draw->dri_screen_render_gpu,
1562                                                   width,
1563                                                   height,
1564                                                   fourcc,
1565                                                   DRM_FORMAT_MOD_INVALID,
1566                                                   &buffer_fds[0], num_planes,
1567                                                   &buffer->strides[0],
1568                                                   &buffer->offsets[0],
1569                                                   0, 0, 0, 0, __DRI_IMAGE_PRIME_LINEAR_BUFFER,
1570                                                   NULL, buffer);
1571       if (!buffer->linear_buffer)
1572          goto no_buffer_attrib;
1573 
1574       dri2_destroy_image(linear_buffer_display_gpu);
1575    }
1576 
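   /* Hand the exported buffer(s) to the X server and create a pixmap from
    * them: with modifiers available, xcb_dri3_pixmap_from_buffers passes all
    * planes; otherwise use the single-plane xcb_dri3_pixmap_from_buffer.
    */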
1577    pixmap = xcb_generate_id(draw->conn);
1578 #ifdef HAVE_X11_DRM
1579    if (draw->multiplanes_available &&
1580        buffer->modifier != DRM_FORMAT_MOD_INVALID) {
1581       xcb_dri3_pixmap_from_buffers(draw->conn,
1582                                    pixmap,
1583                                    draw->window,
1584                                    num_planes,
1585                                    width, height,
1586                                    buffer->strides[0], buffer->offsets[0],
1587                                    buffer->strides[1], buffer->offsets[1],
1588                                    buffer->strides[2], buffer->offsets[2],
1589                                    buffer->strides[3], buffer->offsets[3],
1590                                    depth, buffer->cpp * 8,
1591                                    buffer->modifier,
1592                                    buffer_fds);
1593    } else
1594 #endif
1595    {
1596       xcb_dri3_pixmap_from_buffer(draw->conn,
1597                                   pixmap,
1598                                   draw->drawable,
1599                                   buffer->size,
1600                                   width, height, buffer->strides[0],
1601                                   depth, buffer->cpp * 8,
1602                                   buffer_fds[0]);
1603    }
1604 
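   /* Register the xshmfence with the X server as a sync fence on the pixmap,
    * so buffer idleness can be signalled and waited on by both sides.
    */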
1605    xcb_dri3_fence_from_fd(draw->conn,
1606                           pixmap,
1607                           (sync_fence = xcb_generate_id(draw->conn)),
1608                           false,
1609                           fence_fd);
1610 
1611    buffer->pixmap = pixmap;
1612    buffer->own_pixmap = true;
1613    buffer->sync_fence = sync_fence;
1614    buffer->shm_fence = shm_fence;
1615    buffer->width = width;
1616    buffer->height = height;
1617 
1618    /* Mark the buffer as idle
1619     */
1620    dri3_fence_set(buffer);
1621 
1622    return buffer;
1623 
1624 no_buffer_attrib:
1625    do {
1626       if (buffer_fds[i] != -1)
1627          close(buffer_fds[i]);
1628    } while (--i >= 0);
1629    dri2_destroy_image(pixmap_buffer);
1630 no_linear_buffer:
1631    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
1632       dri2_destroy_image(buffer->image);
1633 no_image:
1634    free(buffer);
1635 no_buffer:
1636    xshmfence_unmap_shm(shm_fence);
1637 no_shm_fence:
1638    close(fence_fd);
1639    return NULL;
1640 }
1641 
1642 static bool
1643 dri3_detect_drawable_is_window(struct loader_dri3_drawable *draw)
1644 {
1645    /* Try to select for input on the window.
1646     *
1647     * If the drawable is a window, this will get our events
1648     * delivered.
1649     *
1650     * Otherwise, we'll get a BadWindow error back from this request, which
1651     * will let us know that the drawable is not a window.
1652     */
1653 
1654    xcb_void_cookie_t cookie =
1655       xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
1656                                        XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1657                                        XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1658                                        XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1659 
1660    /* Check to see if our select input call failed. If it failed with a
1661     * BadWindow error, then the drawable is not a window.
1662     */
1663    xcb_generic_error_t *error = xcb_request_check(draw->conn, cookie);
1664 
1665    if (error) {
1666       if (error->error_code != BadWindow) {
1667          free(error);
1668          return false;
1669       }
1670       free(error);
1671 
1672       /* A pixmap can't get here (see driFetchDrawable()), so this must be a pbuffer. */
1673       draw->type = LOADER_DRI3_DRAWABLE_PBUFFER;
1674       return true;
1675    }
1676 
1677    draw->type = LOADER_DRI3_DRAWABLE_WINDOW;
1678    return true;
1679 }
1680 
1681 static bool
1682 dri3_setup_present_event(struct loader_dri3_drawable *draw)
1683 {
1684    /* No need to set up present events for a pixmap or pbuffer drawable. */
1685    if (draw->type == LOADER_DRI3_DRAWABLE_PIXMAP ||
1686        draw->type == LOADER_DRI3_DRAWABLE_PBUFFER)
1687       return true;
1688 
1689    draw->eid = xcb_generate_id(draw->conn);
1690 
1691    if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
1692       xcb_present_select_input(draw->conn, draw->eid, draw->drawable,
1693                                XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1694                                XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1695                                XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1696    } else {
1697       assert(draw->type == LOADER_DRI3_DRAWABLE_UNKNOWN);
1698 
1699       if (!dri3_detect_drawable_is_window(draw))
1700          return false;
1701 
1702       if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1703          return true;
1704    }
1705 
1706    /* Create an XCB event queue to hold present events outside of the usual
1707     * application event queue
1708     */
1709    draw->special_event = xcb_register_for_special_xge(draw->conn,
1710                                                       &xcb_present_id,
1711                                                       draw->eid,
1712                                                       draw->stamp);
1713    return true;
1714 }
1715 
1716 /** dri3_update_drawable
1717  *
1718  * Called the first time we use the drawable, and then again after we
1719  * receive present configure notify events, to track the geometry of
1720  * the drawable.
1721  */
1722 static int
1723 dri3_update_drawable(struct loader_dri3_drawable *draw)
1724 {
1725    mtx_lock(&draw->mtx);
1726    if (draw->first_init) {
1727       xcb_get_geometry_cookie_t                 geom_cookie;
1728       xcb_get_geometry_reply_t                  *geom_reply;
1729       xcb_window_t                               root_win;
1730 
1731       draw->first_init = false;
1732 
1733       if (!dri3_setup_present_event(draw)) {
1734          mtx_unlock(&draw->mtx);
1735          return false;
1736       }
1737 
1738       geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1739 
1740       geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1741 
1742       if (!geom_reply) {
1743          mtx_unlock(&draw->mtx);
1744          return false;
1745       }
1746       draw->width = geom_reply->width;
1747       draw->height = geom_reply->height;
1748       draw->depth = geom_reply->depth;
1749       draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1750       root_win = geom_reply->root;
1751 
1752       free(geom_reply);
1753 
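      /* DRI3 requests that take a window (e.g. querying supported modifiers)
       * use draw->window; for pixmaps and pbuffers fall back to the root
       * window.
       */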
1754       if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1755          draw->window = root_win;
1756       else
1757          draw->window = draw->drawable;
1758    }
1759    dri3_flush_present_events(draw);
1760    mtx_unlock(&draw->mtx);
1761    return true;
1762 }
1763 
1764 struct dri_image *
1765 loader_dri3_create_image(xcb_connection_t *c,
1766                          xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1767                          unsigned int fourcc,
1768                          struct dri_screen *dri_screen,
1769                          void *loaderPrivate)
1770 {
1771    int                                  *fds;
1772    struct dri_image                           *image_planar, *ret;
1773    int                                  stride, offset;
1774 
1775    /* Get an FD for the pixmap object
1776     */
1777    fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1778 
1779    stride = bp_reply->stride;
1780    offset = 0;
1781 
1782    /* dri2_from_dma_bufs creates a wrapper struct dri_image which can deal
1783     * with multiple planes for things like YUV images. So, once we've gotten
1784     * the planar wrapper, pull the single plane out of it and discard the
1785     * wrapper.
1786     */
1787    image_planar = dri2_from_dma_bufs(dri_screen,
1788                                        bp_reply->width,
1789                                        bp_reply->height,
1790                                        fourcc,
1791                                        DRM_FORMAT_MOD_INVALID,
1792                                        fds, 1,
1793                                        &stride, &offset,
1794                                        0, 0, 0, 0, 0,
1795                                        NULL, loaderPrivate);
1796    close(fds[0]);
1797    if (!image_planar)
1798       return NULL;
1799 
1800    ret = dri2_from_planar(image_planar, 0, loaderPrivate);
1801 
1802    if (!ret)
1803       ret = image_planar;
1804    else
1805       dri2_destroy_image(image_planar);
1806 
1807    return ret;
1808 }
1809 
1810 #ifdef HAVE_X11_DRM
1811 struct dri_image *
1812 loader_dri3_create_image_from_buffers(xcb_connection_t *c,
1813                                       xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
1814                                       unsigned int fourcc,
1815                                       struct dri_screen *dri_screen,
1816                                       void *loaderPrivate)
1817 {
1818    struct dri_image                           *ret;
1819    int                                  *fds;
1820    uint32_t                             *strides_in, *offsets_in;
1821    int                                   strides[4], offsets[4];
1822    unsigned                              error;
1823    int                                   i;
1824 
1825    if (bp_reply->nfd > 4)
1826       return NULL;
1827 
1828    fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
1829    strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
1830    offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);
1831    for (i = 0; i < bp_reply->nfd; i++) {
1832       strides[i] = strides_in[i];
1833       offsets[i] = offsets_in[i];
1834    }
1835 
1836    ret = dri2_from_dma_bufs(dri_screen,
1837                                        bp_reply->width,
1838                                        bp_reply->height,
1839                                        fourcc,
1840                                        bp_reply->modifier,
1841                                        fds, bp_reply->nfd,
1842                                        strides, offsets,
1843                                        0, 0, 0, 0, /* UNDEFINED */
1844                                        0, &error, loaderPrivate);
1845 
1846    for (i = 0; i < bp_reply->nfd; i++)
1847       close(fds[i]);
1848 
1849    return ret;
1850 }
1851 #endif
1852 
1853 struct dri_image *
1854 loader_dri3_get_pixmap_buffer(xcb_connection_t *conn, xcb_drawable_t pixmap, struct dri_screen *screen,
1855                               unsigned fourcc, bool multiplanes_available,
1856                               int *width, int *height, void *loader_data)
1857 {
1858    struct dri_image *image;
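   /* If the server supports multi-planar buffers, import every plane and the
    * modifier; otherwise use the legacy single-buffer reply.
    */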
1859 #ifdef HAVE_X11_DRM
1860    if (multiplanes_available) {
1861       xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
1862       xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;
1863 
1864       bps_cookie = xcb_dri3_buffers_from_pixmap(conn, pixmap);
1865       bps_reply = xcb_dri3_buffers_from_pixmap_reply(conn, bps_cookie,
1866                                                      NULL);
1867       if (!bps_reply)
1868          return NULL;
1869       image = loader_dri3_create_image_from_buffers(conn, bps_reply, fourcc,
1870                                                     screen, loader_data);
1871       *width = bps_reply->width;
1872       *height = bps_reply->height;
1873       free(bps_reply);
1874    } else
1875 #endif
1876    {
1877       xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
1878       xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
1879 
1880       bp_cookie = xcb_dri3_buffer_from_pixmap(conn, pixmap);
1881       bp_reply = xcb_dri3_buffer_from_pixmap_reply(conn, bp_cookie, NULL);
1882       if (!bp_reply)
1883          return NULL;
1884 
1885       image = loader_dri3_create_image(conn, bp_reply, fourcc, screen,
1886                                                loader_data);
1887       *width = bp_reply->width;
1888       *height = bp_reply->height;
1889       free(bp_reply);
1890    }
1891    return image;
1892 }
1893 
1894 /** dri3_get_pixmap_buffer
1895  *
1896  * Get the DRM object for a pixmap from the X server and
1897  * wrap it in a struct dri_image using dri2_from_dma_bufs.
1898  */
1899 static struct loader_dri3_buffer *
1900 dri3_get_pixmap_buffer(struct dri_drawable *driDrawable, unsigned int fourcc,
1901                        enum loader_dri3_buffer_type buffer_type,
1902                        struct loader_dri3_drawable *draw)
1903 {
1904    int                                  buf_id = loader_dri3_pixmap_buf_id(buffer_type);
1905    struct loader_dri3_buffer            *buffer = draw->buffers[buf_id];
1906    xcb_drawable_t                       pixmap;
1907    xcb_sync_fence_t                     sync_fence;
1908    struct xshmfence                     *shm_fence;
1909    int                                  width;
1910    int                                  height;
1911    int                                  fence_fd;
1912    struct dri_screen                          *cur_screen;
1913 
1914    if (buffer)
1915       return buffer;
1916 
1917    pixmap = draw->drawable;
1918 
1919    buffer = calloc(1, sizeof *buffer);
1920    if (!buffer)
1921       goto no_buffer;
1922 
1923    fence_fd = xshmfence_alloc_shm();
1924    if (fence_fd < 0)
1925       goto no_fence;
1926    shm_fence = xshmfence_map_shm(fence_fd);
1927    if (shm_fence == NULL) {
1928       close (fence_fd);
1929       goto no_fence;
1930    }
1931 
1932    /* Get the currently-bound screen or revert to using the drawable's screen if
1933     * no contexts are currently bound. The latter case is at least necessary for
1934     * obs-studio, when using Window Capture (Xcomposite) as a Source.
1935     */
1936    cur_screen = draw->vtable->get_dri_screen();
1937    if (!cur_screen) {
1938        cur_screen = draw->dri_screen_render_gpu;
1939    }
1940 
1941    xcb_dri3_fence_from_fd(draw->conn,
1942                           pixmap,
1943                           (sync_fence = xcb_generate_id(draw->conn)),
1944                           false,
1945                           fence_fd);
1946    buffer->image = loader_dri3_get_pixmap_buffer(draw->conn, pixmap, cur_screen, fourcc,
1947                                                  draw->multiplanes_available, &width, &height, buffer);
1948 
1949    if (!buffer->image)
1950       goto no_image;
1951 
1952    buffer->pixmap = pixmap;
1953    buffer->own_pixmap = false;
1954    buffer->width = width;
1955    buffer->height = height;
1956    buffer->shm_fence = shm_fence;
1957    buffer->sync_fence = sync_fence;
1958 
1959    dri3_set_render_buffer(draw, buf_id, buffer);
1960 
1961    return buffer;
1962 
1963 no_image:
1964    xcb_sync_destroy_fence(draw->conn, sync_fence);
1965    xshmfence_unmap_shm(shm_fence);
1966 no_fence:
1967    free(buffer);
1968 no_buffer:
1969    return NULL;
1970 }
1971 
1972 /** dri3_get_buffer
1973  *
1974  * Find a front or back buffer, allocating new ones as necessary
1975  */
1976 static struct loader_dri3_buffer *
1977 dri3_get_buffer(struct dri_drawable *driDrawable,
1978                 unsigned int fourcc,
1979                 enum loader_dri3_buffer_type buffer_type,
1980                 struct loader_dri3_drawable *draw)
1981 {
1982    struct loader_dri3_buffer *buffer;
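   /* Back buffers must be idle before the client renders to them again, so
    * by default we wait on their fence below.
    */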
1983    bool fence_await = buffer_type == loader_dri3_buffer_back;
1984    int buf_id;
1985 
1986    if (buffer_type == loader_dri3_buffer_back) {
1987       draw->back_format = fourcc;
1988 
1989       buf_id = dri3_find_back(draw, !draw->prefer_back_buffer_reuse);
1990 
1991       if (buf_id < 0)
1992          return NULL;
1993    } else {
1994       buf_id = LOADER_DRI3_FRONT_ID;
1995    }
1996 
1997    buffer = draw->buffers[buf_id];
1998 
1999    /* Allocate a new buffer if there isn't an old one, if that
2000     * old one is the wrong size, or if it's suboptimal
2001     */
2002    if (!buffer || buffer->width != draw->width ||
2003        buffer->height != draw->height ||
2004        buffer->reallocate) {
2005       struct loader_dri3_buffer *new_buffer;
2006 
2007       /* Allocate the new buffers
2008        */
2009       new_buffer = dri3_alloc_render_buffer(draw,
2010                                             fourcc,
2011                                             draw->width,
2012                                             draw->height,
2013                                             draw->depth);
2014       if (!new_buffer)
2015          return NULL;
2016 
2017       /* When resizing, copy the contents of the old buffer, waiting for that
2018        * copy to complete using our fences before proceeding
2019        */
2020       if ((buffer_type == loader_dri3_buffer_back ||
2021            (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
2022           && buffer) {
2023 
2024          /* Fill the new buffer with data from an old buffer */
2025          if (!loader_dri3_blit_image(draw,
2026                                      new_buffer->image,
2027                                      buffer->image,
2028                                      0, 0,
2029                                      MIN2(buffer->width, new_buffer->width),
2030                                      MIN2(buffer->height, new_buffer->height),
2031                                      0, 0, 0) &&
2032              !buffer->linear_buffer) {
2033             dri3_fence_reset(draw->conn, new_buffer);
2034             dri3_copy_area(draw->conn,
2035                            buffer->pixmap,
2036                            new_buffer->pixmap,
2037                            dri3_drawable_gc(draw),
2038                            0, 0, 0, 0,
2039                            draw->width, draw->height);
2040             dri3_fence_trigger(draw->conn, new_buffer);
2041             fence_await = true;
2042          }
2043          dri3_free_render_buffer(draw, buf_id);
2044       } else if (buffer_type == loader_dri3_buffer_front) {
2045          /* Fill the new fake front with data from a real front */
2046          loader_dri3_swapbuffer_barrier(draw);
2047          dri3_fence_reset(draw->conn, new_buffer);
2048          dri3_copy_area(draw->conn,
2049                         draw->drawable,
2050                         new_buffer->pixmap,
2051                         dri3_drawable_gc(draw),
2052                         0, 0, 0, 0,
2053                         draw->width, draw->height);
2054          dri3_fence_trigger(draw->conn, new_buffer);
2055 
2056          if (new_buffer->linear_buffer) {
2057             dri3_fence_await(draw->conn, draw, new_buffer);
2058             (void) loader_dri3_blit_image(draw,
2059                                           new_buffer->image,
2060                                           new_buffer->linear_buffer,
2061                                           0, 0, draw->width, draw->height,
2062                                           0, 0, 0);
2063          } else
2064             fence_await = true;
2065       }
2066       buffer = new_buffer;
2067       dri3_set_render_buffer(draw, buf_id, buffer);
2068    }
2069 
2070    if (fence_await)
2071       dri3_fence_await(draw->conn, draw, buffer);
2072 
2073    /*
2074     * Do we need to preserve the content of a previous buffer?
2075     *
2076     * Note that this blit is needed only to avoid a wait for a buffer that
2077     * is currently in the flip chain or being scanned out from. That's really
2078     * a tradeoff. If we're ok with the wait we can reduce the number of back
2079     * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
2080     * but in the latter case we must disallow page-flipping.
2081     */
2082    if (buffer_type == loader_dri3_buffer_back &&
2083        draw->cur_blit_source != -1 &&
2084        draw->buffers[draw->cur_blit_source] &&
2085        buffer != draw->buffers[draw->cur_blit_source]) {
2086 
2087       struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2088 
2089       /* Avoid flushing here. That will probably benefit tiling hardware. */
2090       (void) loader_dri3_blit_image(draw,
2091                                     buffer->image,
2092                                     source->image,
2093                                     0, 0, draw->width, draw->height,
2094                                     0, 0, 0);
2095       buffer->last_swap = source->last_swap;
2096       draw->cur_blit_source = -1;
2097    }
2098    /* Return the requested buffer */
2099    return buffer;
2100 }
2101 
2102 /** dri3_free_buffers
2103  *
2104  * Free the front buffer or all of the back buffers. Used
2105  * when the application changes which buffers it needs.
2106  */
2107 static void
2108 dri3_free_buffers(struct dri_drawable *driDrawable,
2109                   enum loader_dri3_buffer_type buffer_type,
2110                   struct loader_dri3_drawable *draw)
2111 {
2112    int first_id;
2113    int n_id;
2114    int buf_id;
2115 
2116    switch (buffer_type) {
2117    case loader_dri3_buffer_back:
2118       first_id = LOADER_DRI3_BACK_ID(0);
2119       n_id = LOADER_DRI3_MAX_BACK;
2120       draw->cur_blit_source = -1;
2121       break;
2122    case loader_dri3_buffer_front:
2123       first_id = LOADER_DRI3_FRONT_ID;
2124       /* Don't free a fake front holding new backbuffer content. */
2125       n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
2126       break;
2127    default:
2128       unreachable("unhandled buffer_type");
2129    }
2130 
2131    for (buf_id = first_id; buf_id < first_id + n_id; buf_id++)
2132       dri3_free_render_buffer(draw, buf_id);
2133 }
2134 
2135 /** loader_dri3_get_buffers
2136  *
2137  * The published buffer allocation API.
2138  * Returns all of the necessary buffers, allocating
2139  * as needed.
2140  */
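/* Illustrative sketch of a typical call; the surrounding variables here
 * (driDrawable, draw, format, stamp) are placeholders, not taken from any
 * particular caller:
 *
 *    struct __DRIimageList images;
 *    if (loader_dri3_get_buffers(driDrawable, format, &stamp, draw,
 *                                __DRI_IMAGE_BUFFER_FRONT |
 *                                __DRI_IMAGE_BUFFER_BACK, &images)) {
 *       // images.front / images.back are struct dri_image pointers
 *    }
 */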
2141 int
2142 loader_dri3_get_buffers(struct dri_drawable *driDrawable,
2143                         unsigned int format,
2144                         uint32_t *stamp,
2145                         void *loaderPrivate,
2146                         uint32_t buffer_mask,
2147                         struct __DRIimageList *buffers)
2148 {
2149    struct loader_dri3_drawable *draw = loaderPrivate;
2150    struct loader_dri3_buffer   *front, *back;
2151    int fourcc = loader_image_format_to_fourcc(format);
2152    int buf_id;
2153 
2154    buffers->image_mask = 0;
2155    buffers->front = NULL;
2156    buffers->back = NULL;
2157 
2158    if (!dri3_update_drawable(draw))
2159       return false;
2160 
2161    dri3_update_max_num_back(draw);
2162 
2163    /* Free no longer needed back buffers */
2164    for (buf_id = 0; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
2165       int buffer_age;
2166 
2167       back = draw->buffers[buf_id];
2168       if (!back || !back->last_swap || draw->cur_blit_source == buf_id)
2169          continue;
2170 
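      /* Drop back buffers that haven't been presented for a long time
       * (an arbitrary threshold of 200 swaps) so their memory can be
       * reclaimed.
       */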
2171       buffer_age = draw->send_sbc - back->last_swap + 1;
2172       if (buffer_age > 200)
2173          dri3_free_render_buffer(draw, buf_id);
2174    }
2175 
2176    /* Pixmaps always have front buffers.
2177     */
2178    if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
2179       buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
2180 
2181    if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
2182       /* All pixmaps are owned by the server GPU.
2183        * When we use a different GPU, we can't use the pixmap
2184        * as a buffer since it is potentially tiled in a way
2185        * our device can't understand. In this case, use
2186        * a fake front buffer. Hopefully the pixmap
2187        * content will get synced with the fake front
2188        * buffer.
2189        */
2190       if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW &&
2191           draw->dri_screen_render_gpu == draw->dri_screen_display_gpu)
2192          front = dri3_get_pixmap_buffer(driDrawable,
2193                                         fourcc,
2194                                         loader_dri3_buffer_front,
2195                                         draw);
2196       else
2197          front = dri3_get_buffer(driDrawable,
2198                                  fourcc,
2199                                  loader_dri3_buffer_front,
2200                                  draw);
2201 
2202       if (!front)
2203          return false;
2204    } else {
2205       dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
2206       draw->have_fake_front = 0;
2207       front = NULL;
2208    }
2209 
2210    if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
2211       back = dri3_get_buffer(driDrawable,
2212                              fourcc,
2213                              loader_dri3_buffer_back,
2214                              draw);
2215       if (!back)
2216          return false;
2217       draw->have_back = 1;
2218    } else {
2219       dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
2220       draw->have_back = 0;
2221       back = NULL;
2222    }
2223 
2224    if (front) {
2225       buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
2226       buffers->front = front->image;
2227       draw->have_fake_front =
2228          draw->dri_screen_render_gpu != draw->dri_screen_display_gpu ||
2229          draw->type == LOADER_DRI3_DRAWABLE_WINDOW;
2230    }
2231 
2232    if (back) {
2233       buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
2234       buffers->back = back->image;
2235    }
2236 
2237    draw->stamp = stamp;
2238 
2239    return true;
2240 }
2241 
2242 /** loader_dri3_update_drawable_geometry
2243  *
2244  * Get the current drawable geometry.
2245  */
2246 void
2247 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2248 {
2249    xcb_get_geometry_cookie_t geom_cookie;
2250    xcb_get_geometry_reply_t *geom_reply;
2251 
2252    geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2253 
2254    geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2255 
2256    if (geom_reply) {
2257       bool changed = draw->width != geom_reply->width || draw->height != geom_reply->height;
2258       draw->width = geom_reply->width;
2259       draw->height = geom_reply->height;
2260       if (changed) {
2261          draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2262          dri_invalidate_drawable(draw->dri_drawable);
2263       }
2264 
2265       free(geom_reply);
2266    }
2267 }
2268 
2269 /**
2270  * Make sure the server has flushed all pending swap buffers to hardware
2271  * for this drawable. Ideally we'd want to send an X protocol request to
2272  * have the server block our connection until the swaps are complete. That
2273  * would avoid the potential round-trip here.
2274  */
2275 void
2276 loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
2277 {
2278    int64_t ust, msc, sbc;
2279 
2280    (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
2281 }
2282 
2283 /**
2284  * Perform any cleanup associated with a close screen operation.
2285  * \param dri_screen[in,out] Pointer to struct dri_screen about to be closed.
2286  *
2287  * This function destroys the screen's cached swap context if any.
2288  */
2289 void
2290 loader_dri3_close_screen(struct dri_screen *dri_screen)
2291 {
2292    simple_mtx_lock(&blit_context.mtx);
2293    if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2294       driDestroyContext(blit_context.ctx);
2295       blit_context.ctx = NULL;
2296    }
2297    simple_mtx_unlock(&blit_context.mtx);
2298 }
2299 
2300 /**
2301  * Find a backbuffer slot - potentially allocating a back buffer
2302  *
2303  * \param draw[in,out]  Pointer to the drawable for which to find a back buffer.
2304  * \return Pointer to a back buffer or NULL if allocation failed or was
2305  * not mandated.
2306  *
2307  * Find a potentially new back buffer; if it hasn't been allocated yet and
2308  * needs initializing, try to allocate and initialize it.
2309  */
2310 static struct loader_dri3_buffer *
2311 dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2312 {
2313    struct loader_dri3_buffer *back;
2314    int id;
2315 
2316    id = dri3_find_back(draw, false);
2317    if (id < 0)
2318       return NULL;
2319 
2320    back = draw->buffers[id];
2321    /* Allocate a new back if we haven't got one */
2322    if (!back && draw->back_format != DRM_FORMAT_INVALID &&
2323        dri3_update_drawable(draw))
2324       back = dri3_alloc_render_buffer(draw, draw->back_format,
2325                                       draw->width, draw->height, draw->depth);
2326 
2327    if (!back)
2328       return NULL;
2329 
2330    dri3_set_render_buffer(draw, id, back);
2331 
2332    /* If necessary, prefill the back with data. */
2333    if (draw->cur_blit_source != -1 &&
2334        draw->buffers[draw->cur_blit_source] &&
2335        back != draw->buffers[draw->cur_blit_source]) {
2336       struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2337 
2338       dri3_fence_await(draw->conn, draw, source);
2339       dri3_fence_await(draw->conn, draw, back);
2340       (void) loader_dri3_blit_image(draw,
2341                                     back->image,
2342                                     source->image,
2343                                     0, 0, draw->width, draw->height,
2344                                     0, 0, 0);
2345       back->last_swap = source->last_swap;
2346       draw->cur_blit_source = -1;
2347    }
2348 
2349    return back;
2350 }
2351