1 /*
2  * Copyright © 2013 Keith Packard
3  * Copyright © 2015 Boyan Ding
4  *
5  * Permission to use, copy, modify, distribute, and sell this software and its
6  * documentation for any purpose is hereby granted without fee, provided that
7  * the above copyright notice appear in all copies and that both that copyright
8  * notice and this permission notice appear in supporting documentation, and
9  * that the name of the copyright holders not be used in advertising or
10  * publicity pertaining to distribution of the software without specific,
11  * written prior permission.  The copyright holders make no representations
12  * about the suitability of this software for any purpose.  It is provided "as
13  * is" without express or implied warranty.
14  *
15  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
16  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
17  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
18  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
19  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
20  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
21  * OF THIS SOFTWARE.
22  */
23 
24 #include <fcntl.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27 #include <string.h>
28 
29 #include <X11/xshmfence.h>
30 #include <xcb/xcb.h>
31 #include <xcb/dri3.h>
32 #include <xcb/present.h>
33 #include <xcb/xfixes.h>
34 
35 #include <X11/Xlib-xcb.h>
36 
37 #include "loader_dri_helper.h"
38 #include "loader_dri3_helper.h"
39 #include "pipe/p_screen.h"
40 #include "util/log.h"
41 #include "util/macros.h"
42 #include "util/simple_mtx.h"
43 #include "drm-uapi/drm_fourcc.h"
44 #include "dri_screen.h"
45 #include "dri_util.h"
46 
47 /**
48  * A cached blit context.
49  */
50 struct loader_dri3_blit_context {
51    simple_mtx_t mtx;
52    struct dri_context *ctx;
53    struct dri_screen *cur_screen;
54    const __DRIcoreExtension *core;
55 };
56 
57 /* For simplicity we maintain the cache only for a single screen at a time */
58 static struct loader_dri3_blit_context blit_context = {
59    SIMPLE_MTX_INITIALIZER, NULL
60 };
61 
62 static void
63 dri3_flush_present_events(struct loader_dri3_drawable *draw);
64 
65 static struct loader_dri3_buffer *
66 dri3_find_back_alloc(struct loader_dri3_drawable *draw);
67 
68 static xcb_screen_t *
69 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
70 {
71    xcb_screen_iterator_t screen_iter =
72    xcb_setup_roots_iterator(xcb_get_setup(conn));
73 
74    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
75       if (screen_iter.data->root == root)
76          return screen_iter.data;
77    }
78 
79    return NULL;
80 }
81 
82 /* Error-checking helper for xcb_ functions. Use it to avoid late
83  * error handling.
84  */
85 __attribute__((format(printf, 3, 4)))
86 static bool _check_xcb_error(xcb_connection_t *conn, xcb_void_cookie_t cookie, const char *fmt, ...) {
87    xcb_generic_error_t *error;
88 
89    if ((error = xcb_request_check(conn, cookie))) {
90       va_list args;
91       va_start(args, fmt);
92       mesa_loge_v(fmt, args);
93       mesa_loge("X error: %d\n", error->error_code);
94       va_end(args);
95       free(error);
96       return false;
97    }
98    return true;
99 }
100 #define check_xcb_error(cookie, name) _check_xcb_error(draw->conn, cookie, "%s:%d %s failed", __func__, __LINE__, name)
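/* Illustrative usage (editor's sketch, not part of the upstream file): pair any
 * xcb "_checked" request with the macro above. This assumes a
 * struct loader_dri3_drawable *draw is in scope, since the macro expands to
 * draw->conn:
 *
 *    xcb_void_cookie_t cookie =
 *       xcb_free_pixmap_checked(draw->conn, buffer->pixmap);
 *    check_xcb_error(cookie, "xcb_free_pixmap");
 */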
101 
102 static xcb_visualtype_t *
103 get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
104 {
105    xcb_visualtype_iterator_t visual_iter;
106    xcb_screen_t *screen = draw->screen;
107    xcb_depth_iterator_t depth_iter;
108 
109    if (!screen)
110       return NULL;
111 
112    depth_iter = xcb_screen_allowed_depths_iterator(screen);
113    for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
114       if (depth_iter.data->depth != depth)
115          continue;
116 
117       visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
118       if (visual_iter.rem)
119          return visual_iter.data;
120    }
121 
122    return NULL;
123 }
124 
125 /* Sets the adaptive sync window property state. */
126 static void
127 set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
128                            uint32_t state)
129 {
130    static char const name[] = "_VARIABLE_REFRESH";
131    xcb_intern_atom_cookie_t cookie;
132    xcb_intern_atom_reply_t* reply;
133    xcb_void_cookie_t check;
134 
135    cookie = xcb_intern_atom(conn, 0, strlen(name), name);
136    reply = xcb_intern_atom_reply(conn, cookie, NULL);
137    if (reply == NULL)
138       return;
139 
140    if (state)
141       check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
142                                           drawable, reply->atom,
143                                           XCB_ATOM_CARDINAL, 32, 1, &state);
144    else
145       check = xcb_delete_property_checked(conn, drawable, reply->atom);
146 
147    xcb_discard_reply(conn, check.sequence);
148    free(reply);
149 }
150 
151 /* Get red channel mask for given drawable at given depth. */
152 static unsigned int
153 dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
154 {
155    xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
156 
157    if (visual)
158       return visual->red_mask;
159 
160    return 0;
161 }
162 
163 /**
164  * Get and lock (for use with the current thread) a dri context associated
165  * with the drawable's dri screen. The context is intended to be used with
166  * the dri image extension's blitImage method.
167  *
168  * \param draw[in]  Pointer to the drawable whose dri screen we want a
169  * dri context for.
170  * \return A dri context or NULL if context creation failed.
171  *
172  * When the caller is done with the context (even if the context returned was
173  * NULL), the caller must call loader_dri3_blit_context_put.
174  */
175 static struct dri_context *
176 loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
177 {
178    simple_mtx_lock(&blit_context.mtx);
179 
180    if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen_render_gpu) {
181       driDestroyContext(blit_context.ctx);
182       blit_context.ctx = NULL;
183    }
184 
185    if (!blit_context.ctx) {
186       blit_context.ctx = driCreateNewContext(draw->dri_screen_render_gpu,
187                                                            NULL, NULL, NULL);
188       blit_context.cur_screen = draw->dri_screen_render_gpu;
189    }
190 
191    return blit_context.ctx;
192 }
193 
194 /**
195  * Release (for use with other threads) a dri context previously obtained using
196  * loader_dri3_blit_context_get.
197  */
198 static void
199 loader_dri3_blit_context_put(void)
200 {
201    simple_mtx_unlock(&blit_context.mtx);
202 }
203 
204 /**
205  * Blit (parts of) the contents of a DRI image to another dri image
206  *
207  * \param draw[in]  The drawable which owns the images.
208  * \param dst[in]  The destination image.
209  * \param src[in]  The source image.
210  * \param dstx0[in]  Start destination coordinate.
211  * \param dsty0[in]  Start destination coordinate.
212  * \param width[in]  Blit width.
213  * \param height[in] Blit height.
214  * \param srcx0[in]  Start source coordinate.
215  * \param srcy0[in]  Start source coordinate.
216  * \param flush_flag[in]  Image blit flush flag.
217  * \return true iff successful.
218  */
219 static bool
220 loader_dri3_blit_image(struct loader_dri3_drawable *draw,
221                        struct dri_image *dst, struct dri_image *src,
222                        int dstx0, int dsty0, int width, int height,
223                        int srcx0, int srcy0, int flush_flag)
224 {
225    struct dri_context *dri_context;
226    bool use_blit_context = false;
227 
228    dri_context = draw->vtable->get_dri_context(draw);
229 
230    if (!dri_context || !draw->vtable->in_current_context(draw)) {
231       dri_context = loader_dri3_blit_context_get(draw);
232       use_blit_context = true;
233       flush_flag |= __BLIT_FLAG_FLUSH;
234    }
235 
236    if (dri_context)
237       dri2_blit_image(dri_context, dst, src, dstx0, dsty0,
238                      width, height, srcx0, srcy0,
239                      width, height, flush_flag);
240 
241    if (use_blit_context)
242       loader_dri3_blit_context_put();
243 
244    return dri_context != NULL;
245 }
246 
247 static inline void
248 dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
249 {
250    xshmfence_reset(buffer->shm_fence);
251 }
252 
253 static inline void
254 dri3_fence_set(struct loader_dri3_buffer *buffer)
255 {
256    xshmfence_trigger(buffer->shm_fence);
257 }
258 
259 static inline void
260 dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
261 {
262    xcb_sync_trigger_fence(c, buffer->sync_fence);
263 }
264 
265 static inline void
266 dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
267                  struct loader_dri3_buffer *buffer)
268 {
269    xcb_flush(c);
270    xshmfence_await(buffer->shm_fence);
271    if (draw) {
272       mtx_lock(&draw->mtx);
273       dri3_flush_present_events(draw);
274       mtx_unlock(&draw->mtx);
275    }
276 }
277 
278 static void
279 dri3_update_max_num_back(struct loader_dri3_drawable *draw)
280 {
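   /* Editor's note (not in the upstream source): when the server flips, one
    * buffer is typically held for scanout, so flip mode keeps more back
    * buffers than copy mode; an unsynchronized (swap interval 0) flip gets one
    * extra so rendering is less likely to stall waiting for an idle back.
    */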
281    switch (draw->last_present_mode) {
282    case XCB_PRESENT_COMPLETE_MODE_FLIP: {
283       if (draw->swap_interval == 0)
284          draw->max_num_back = 4;
285       else
286          draw->max_num_back = 3;
287 
288       assert(draw->max_num_back <= LOADER_DRI3_MAX_BACK);
289       break;
290    }
291 
292    case XCB_PRESENT_COMPLETE_MODE_SKIP:
293       break;
294 
295    default:
296       draw->max_num_back = 2;
297    }
298 }
299 
300 void
301 loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
302 {
303    /* Wait for all previous swaps to complete before changing the swap interval.
304     *
305     * This prevents out-of-order swaps in the following cases:
306     *   1. Changing from a synchronized swap mode (>0) to async mode (=0), where an
307     *      async swap could occur before a previously pending synchronized swap.
308     *   2. Changing from value A to B with A > B, where the target_msc of the
309     *      previously pending swap may be larger than that of the newer swap.
310     *
311     * Note: changing from A to B with A < B won't cause out-of-order swaps, but
312     * may still produce a wrong target_msc value initially.
313     */
314    if (draw->swap_interval != interval)
315       loader_dri3_swapbuffer_barrier(draw);
316 
317    draw->swap_interval = interval;
318 }
319 
320 static void
321 dri3_set_render_buffer(struct loader_dri3_drawable *draw, int buf_id,
322                        struct loader_dri3_buffer *buffer)
323 {
324    if (buf_id != LOADER_DRI3_FRONT_ID && !draw->buffers[buf_id])
325       draw->cur_num_back++;
326 
327    draw->buffers[buf_id] = buffer;
328 }
329 
330 /** dri3_free_render_buffer
331  *
332  * Free everything associated with one render buffer including pixmap, fence
333  * stuff and the driver image
334  */
335 static void
336 dri3_free_render_buffer(struct loader_dri3_drawable *draw,
337                         int buf_id)
338 {
339    struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
340 
341    if (!buffer)
342       return;
343 
344    if (buffer->own_pixmap)
345       xcb_free_pixmap(draw->conn, buffer->pixmap);
346    xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
347    xshmfence_unmap_shm(buffer->shm_fence);
348    dri2_destroy_image(buffer->image);
349    if (buffer->linear_buffer)
350       dri2_destroy_image(buffer->linear_buffer);
351    free(buffer);
352 
353    draw->buffers[buf_id] = NULL;
354 
355    if (buf_id != LOADER_DRI3_FRONT_ID)
356       draw->cur_num_back--;
357 }
358 
359 void
360 loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
361 {
362    int i;
363 
364    driDestroyDrawable(draw->dri_drawable);
365 
366    for (i = 0; i < ARRAY_SIZE(draw->buffers); i++)
367       dri3_free_render_buffer(draw, i);
368 
369    if (draw->special_event) {
370       xcb_void_cookie_t cookie =
371          xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
372                                           XCB_PRESENT_EVENT_MASK_NO_EVENT);
373 
374       xcb_discard_reply(draw->conn, cookie.sequence);
375       xcb_unregister_for_special_event(draw->conn, draw->special_event);
376    }
377 
378    if (draw->region)
379       xcb_xfixes_destroy_region(draw->conn, draw->region);
380 
381    cnd_destroy(&draw->event_cnd);
382    mtx_destroy(&draw->mtx);
383 }
384 
385 int
386 loader_dri3_drawable_init(xcb_connection_t *conn,
387                           xcb_drawable_t drawable,
388                           enum loader_dri3_drawable_type type,
389                           struct dri_screen *dri_screen_render_gpu,
390                           struct dri_screen *dri_screen_display_gpu,
391                           bool multiplanes_available,
392                           bool prefer_back_buffer_reuse,
393                           const struct dri_config *dri_config,
394                           const struct loader_dri3_vtable *vtable,
395                           struct loader_dri3_drawable *draw)
396 {
397    xcb_get_geometry_cookie_t cookie;
398    xcb_get_geometry_reply_t *reply;
399    xcb_generic_error_t *error;
400 
401    draw->conn = conn;
402    draw->vtable = vtable;
403    draw->drawable = drawable;
404    draw->type = type;
405    draw->region = 0;
406    draw->dri_screen_render_gpu = dri_screen_render_gpu;
407    draw->dri_screen_display_gpu = dri_screen_display_gpu;
408    draw->multiplanes_available = multiplanes_available;
409    draw->prefer_back_buffer_reuse = prefer_back_buffer_reuse;
410    draw->queries_buffer_age = false;
411 
412    draw->have_back = 0;
413    draw->have_fake_front = 0;
414    draw->first_init = true;
415    draw->adaptive_sync = false;
416    draw->adaptive_sync_active = false;
417    draw->block_on_depleted_buffers = false;
418 
419    draw->cur_blit_source = -1;
420    draw->back_format = DRM_FORMAT_INVALID;
421    mtx_init(&draw->mtx, mtx_plain);
422    cnd_init(&draw->event_cnd);
423 
424    {
425       unsigned char adaptive_sync = 0;
426       unsigned char block_on_depleted_buffers = 0;
427 
428       dri2GalliumConfigQueryb(draw->dri_screen_render_gpu,
429                                       "adaptive_sync",
430                                       &adaptive_sync);
431 
432       draw->adaptive_sync = adaptive_sync;
433 
434       dri2GalliumConfigQueryb(draw->dri_screen_render_gpu,
435                                       "block_on_depleted_buffers",
436                                       &block_on_depleted_buffers);
437 
438       draw->block_on_depleted_buffers = block_on_depleted_buffers;
439    }
440 
441    if (!draw->adaptive_sync)
442       set_adaptive_sync_property(conn, draw->drawable, false);
443 
444    draw->swap_interval = dri_get_initial_swap_interval(draw->dri_screen_render_gpu);
445 
446    dri3_update_max_num_back(draw);
447 
448    /* Create a new drawable */
449    draw->dri_drawable = dri_create_drawable(dri_screen_render_gpu, dri_config,
450                                             type == LOADER_DRI3_DRAWABLE_PIXMAP, draw);
451 
452    if (!draw->dri_drawable)
453       return 1;
454 
455    cookie = xcb_get_geometry(draw->conn, draw->drawable);
456    reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
457    if (reply == NULL || error != NULL) {
458       driDestroyDrawable(draw->dri_drawable);
459       return 1;
460    }
461 
462    draw->screen = get_screen_for_root(draw->conn, reply->root);
463    draw->width = reply->width;
464    draw->height = reply->height;
465    draw->depth = reply->depth;
466    draw->vtable->set_drawable_size(draw, draw->width, draw->height);
467    free(reply);
468 
469    /*
470     * Make sure server has the same swap interval we do for the new
471     * drawable.
472     */
473    loader_dri3_set_swap_interval(draw, draw->swap_interval);
474 
475    return 0;
476 }
477 
478 /* XXX this belongs in presentproto */
479 #ifndef PresentWindowDestroyed
480 #define PresentWindowDestroyed (1 << 0)
481 #endif
482 /*
483  * Process one Present event
484  */
485 static bool
486 dri3_handle_present_event(struct loader_dri3_drawable *draw,
487                           xcb_present_generic_event_t *ge)
488 {
489    switch (ge->evtype) {
490    case XCB_PRESENT_CONFIGURE_NOTIFY: {
491       xcb_present_configure_notify_event_t *ce = (void *) ge;
492       if (ce->pixmap_flags & PresentWindowDestroyed) {
493          free(ge);
494          return false;
495       }
496 
497       draw->width = ce->width;
498       draw->height = ce->height;
499       draw->vtable->set_drawable_size(draw, draw->width, draw->height);
500       dri_invalidate_drawable(draw->dri_drawable);
501       break;
502    }
503    case XCB_PRESENT_COMPLETE_NOTIFY: {
504       xcb_present_complete_notify_event_t *ce = (void *) ge;
505 
506       /* Compute the processed SBC number from the received 32-bit serial number
507        * merged with the upper 32-bits of the sent 64-bit serial number while
508        * checking for wrap.
509        */
510       if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
511          uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;
512 
513          /* Only assume wraparound if that results in exactly the previous
514           * SBC + 1, otherwise ignore received SBC > sent SBC (those are
515           * probably from a previous loader_dri3_drawable instance) to avoid
516           * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
517           */
518          if (recv_sbc <= draw->send_sbc)
519             draw->recv_sbc = recv_sbc;
520          else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
521             draw->recv_sbc = recv_sbc - 0x100000000ULL;
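          /* Worked example (editorial): with send_sbc = 0x100000000, the merge
           * above can yield recv_sbc = 0x1ffffffff for a serial sent before the
           * 32-bit wrap; if the previous recv_sbc was 0xfffffffe, then
           * 0x1ffffffff == 0xfffffffe + 0x100000001, so we store
           * 0x1ffffffff - 0x100000000 = 0xffffffff, i.e. the previous SBC + 1.
           */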
522 
523          /* When moving from flip to copy, we assume that we can allocate in
524           * a more optimal way if we don't need to cater for the display
525           * controller.
526           */
527          if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
528              draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
529             for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
530                if (draw->buffers[b])
531                   draw->buffers[b]->reallocate = true;
532             }
533          }
534 
535          /* If the server tells us that our allocation is suboptimal, we
536           * reallocate once.
537           */
538 #ifdef HAVE_X11_DRM
539          if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
540              draw->last_present_mode != ce->mode) {
541             for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
542                if (draw->buffers[b])
543                   draw->buffers[b]->reallocate = true;
544             }
545          }
546 #endif
547          draw->last_present_mode = ce->mode;
548 
549          draw->ust = ce->ust;
550          draw->msc = ce->msc;
551       } else if (ce->serial == draw->eid) {
552          draw->notify_ust = ce->ust;
553          draw->notify_msc = ce->msc;
554       }
555       break;
556    }
557    case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
558       xcb_present_idle_notify_event_t *ie = (void *) ge;
559       int b;
560 
561       for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
562          struct loader_dri3_buffer *buf = draw->buffers[b];
563 
564          if (buf && buf->pixmap == ie->pixmap)
565             buf->busy = 0;
566       }
567       break;
568    }
569    }
570    free(ge);
571    return true;
572 }
573 
574 static bool
575 dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
576                            unsigned *full_sequence)
577 {
578    xcb_generic_event_t *ev;
579    xcb_present_generic_event_t *ge;
580 
581    xcb_flush(draw->conn);
582 
583    /* Only have one thread waiting for events at a time */
584    if (draw->has_event_waiter) {
585       cnd_wait(&draw->event_cnd, &draw->mtx);
586       if (full_sequence)
587          *full_sequence = draw->last_special_event_sequence;
588       /* Another thread has updated the protected info, so retest. */
589       return true;
590    } else {
591       draw->has_event_waiter = true;
592       /* Allow other threads access to the drawable while we're waiting. */
593       mtx_unlock(&draw->mtx);
594       ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
595       mtx_lock(&draw->mtx);
596       draw->has_event_waiter = false;
597       cnd_broadcast(&draw->event_cnd);
598    }
599    if (!ev)
600       return false;
601    draw->last_special_event_sequence = ev->full_sequence;
602    if (full_sequence)
603       *full_sequence = ev->full_sequence;
604    ge = (void *) ev;
605    return dri3_handle_present_event(draw, ge);
606 }
607 
608 /** loader_dri3_wait_for_msc
609  *
610  * Get the X server to send an event when the target msc/divisor/remainder is
611  * reached.
612  */
613 bool
614 loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
615                          int64_t target_msc,
616                          int64_t divisor, int64_t remainder,
617                          int64_t *ust, int64_t *msc, int64_t *sbc)
618 {
619    xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
620                                                      draw->drawable,
621                                                      draw->eid,
622                                                      target_msc,
623                                                      divisor,
624                                                      remainder);
625    unsigned full_sequence;
626 
627    mtx_lock(&draw->mtx);
628 
629    /* Wait for the event */
630    do {
631       if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
632          mtx_unlock(&draw->mtx);
633          return false;
634       }
635    } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);
636 
637    *ust = draw->notify_ust;
638    *msc = draw->notify_msc;
639    *sbc = draw->recv_sbc;
640    mtx_unlock(&draw->mtx);
641 
642    return true;
643 }
644 
645 /** loader_dri3_wait_for_sbc
646  *
647  * Wait for the completed swap buffer count to reach the specified
648  * target. Presumably the application knows that this will be reached with
649  * outstanding complete events, or we're going to be here awhile.
650  */
651 int
652 loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
653                          int64_t target_sbc, int64_t *ust,
654                          int64_t *msc, int64_t *sbc)
655 {
656    /* From the GLX_OML_sync_control spec:
657     *
658     *     "If <target_sbc> = 0, the function will block until all previous
659     *      swaps requested with glXSwapBuffersMscOML for that window have
660     *      completed."
661     */
662    mtx_lock(&draw->mtx);
663    if (!target_sbc)
664       target_sbc = draw->send_sbc;
665 
666    while (draw->recv_sbc < target_sbc) {
667       if (!dri3_wait_for_event_locked(draw, NULL)) {
668          mtx_unlock(&draw->mtx);
669          return 0;
670       }
671    }
672 
673    *ust = draw->ust;
674    *msc = draw->msc;
675    *sbc = draw->recv_sbc;
676    mtx_unlock(&draw->mtx);
677    return 1;
678 }
679 
680 /** loader_dri3_find_back
681  *
682  * Find an idle back buffer. If there isn't one, then
683  * wait for a present idle notify event from the X server
684  */
685 static int
686 dri3_find_back(struct loader_dri3_drawable *draw, bool prefer_a_different)
687 {
688    struct loader_dri3_buffer *buffer;
689    int b;
690    int max_num;
691    int best_id = -1;
692    uint64_t best_swap = 0;
693 
694    mtx_lock(&draw->mtx);
695 
696    if (!prefer_a_different) {
697       /* Increase the likelihood of reusing the current buffer */
698       dri3_flush_present_events(draw);
699 
700       /* Reuse current back buffer if it's idle */
701       buffer = draw->buffers[draw->cur_back];
702       if (buffer && !buffer->busy) {
703          best_id = draw->cur_back;
704          goto unlock;
705       }
706    }
707 
708    /* Check whether we need to reuse the current back buffer as new back.
709     * In that case, wait until it's not busy anymore.
710     */
711    if (draw->cur_blit_source != -1) {
712       max_num = 1;
713       draw->cur_blit_source = -1;
714    } else {
715       max_num = LOADER_DRI3_MAX_BACK;
716    }
717 
718    /* In a DRI_PRIME situation, if prefer_a_different is true, we first try
719     * to find an idle buffer that is not the last used one.
720     * This is useful if we receive a XCB_PRESENT_EVENT_IDLE_NOTIFY event
721     * for a pixmap but it's not actually idle (eg: the DRI_PRIME blit is
722     * still in progress).
723     * Unigine Superposition hits this and this allows to use 2 back buffers
724     * instead of reusing the same one all the time, causing the next frame
725     * to wait for the copy to finish.
726     */
727    int current_back_id = draw->cur_back;
728    do {
729       /* Find idle buffer with lowest buffer age, or an unallocated slot */
730       for (b = 0; b < max_num; b++) {
731          int id = LOADER_DRI3_BACK_ID((b + current_back_id) % LOADER_DRI3_MAX_BACK);
732 
733          buffer = draw->buffers[id];
734          if (buffer) {
735             if (!buffer->busy &&
736                 (!prefer_a_different || id != current_back_id) &&
737                 (best_id == -1 || buffer->last_swap > best_swap)) {
738                best_id = id;
739                best_swap = buffer->last_swap;
740             }
741          } else if (best_id == -1 &&
742                     draw->cur_num_back < draw->max_num_back) {
743             best_id = id;
744          }
745       }
746 
747       /* Prefer re-using the same buffer over blocking */
748       if (prefer_a_different && best_id == -1 &&
749           !draw->buffers[LOADER_DRI3_BACK_ID(current_back_id)]->busy)
750          best_id = current_back_id;
751    } while (best_id == -1 && dri3_wait_for_event_locked(draw, NULL));
752 
753    if (best_id != -1)
754       draw->cur_back = best_id;
755 
756 unlock:
757    mtx_unlock(&draw->mtx);
758    return best_id;
759 }
760 
761 static xcb_gcontext_t
762 dri3_drawable_gc(struct loader_dri3_drawable *draw)
763 {
764    if (!draw->gc) {
765       uint32_t v = 0;
766       xcb_create_gc(draw->conn,
767                     (draw->gc = xcb_generate_id(draw->conn)),
768                     draw->drawable,
769                     XCB_GC_GRAPHICS_EXPOSURES,
770                     &v);
771    }
772    return draw->gc;
773 }
774 
775 
776 static struct loader_dri3_buffer *
777 dri3_back_buffer(struct loader_dri3_drawable *draw)
778 {
779    return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
780 }
781 
782 static struct loader_dri3_buffer *
783 dri3_front_buffer(struct loader_dri3_drawable *draw)
784 {
785    return draw->buffers[LOADER_DRI3_FRONT_ID];
786 }
787 
788 static void
789 dri3_copy_area(xcb_connection_t *c,
790                xcb_drawable_t    src_drawable,
791                xcb_drawable_t    dst_drawable,
792                xcb_gcontext_t    gc,
793                int16_t           src_x,
794                int16_t           src_y,
795                int16_t           dst_x,
796                int16_t           dst_y,
797                uint16_t          width,
798                uint16_t          height)
799 {
800    xcb_void_cookie_t cookie;
801 
802    cookie = xcb_copy_area_checked(c,
803                                   src_drawable,
804                                   dst_drawable,
805                                   gc,
806                                   src_x,
807                                   src_y,
808                                   dst_x,
809                                   dst_y,
810                                   width,
811                                   height);
812    xcb_discard_reply(c, cookie.sequence);
813 }
814 
815 /**
816  * Asks the driver to flush any queued work necessary for serializing with the
817  * X command stream, and optionally the slightly more strict requirement of
818  * glFlush() equivalence (which would require flushing even if nothing had
819  * been drawn to a window system framebuffer, for example).
820  */
821 void
822 loader_dri3_flush(struct loader_dri3_drawable *draw,
823                   unsigned flags,
824                   enum __DRI2throttleReason throttle_reason)
825 {
826    /* NEED TO CHECK WHETHER CONTEXT IS NULL */
827    struct dri_context *dri_context = draw->vtable->get_dri_context(draw);
828 
829    if (dri_context) {
830       dri_flush(dri_context, draw->dri_drawable, flags, throttle_reason);
831    }
832 }
833 
834 void
835 loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
836                             int x, int y,
837                             int width, int height,
838                             bool flush)
839 {
840    struct loader_dri3_buffer *back;
841    unsigned flags = __DRI2_FLUSH_DRAWABLE;
842 
843    /* Check we have the right attachments */
844    if (!draw->have_back || draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
845       return;
846 
847    if (flush)
848       flags |= __DRI2_FLUSH_CONTEXT;
849    loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);
850 
851    back = dri3_find_back_alloc(draw);
852    if (!back)
853       return;
854 
855    y = draw->height - y - height;
856 
857    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu) {
858       /* Update the linear buffer part of the back buffer
859        * for the dri3_copy_area operation
860        */
861       (void) loader_dri3_blit_image(draw,
862                                     back->linear_buffer,
863                                     back->image,
864                                     0, 0, back->width, back->height,
865                                     0, 0, __BLIT_FLAG_FLUSH);
866    }
867 
868    loader_dri3_swapbuffer_barrier(draw);
869    dri3_fence_reset(draw->conn, back);
870    dri3_copy_area(draw->conn,
871                   back->pixmap,
872                   draw->drawable,
873                   dri3_drawable_gc(draw),
874                   x, y, x, y, width, height);
875    dri3_fence_trigger(draw->conn, back);
876    /* Refresh the fake front (if present) after we just damaged the real
877     * front.
878     */
879    if (draw->have_fake_front &&
880        !loader_dri3_blit_image(draw,
881                                dri3_front_buffer(draw)->image,
882                                back->image,
883                                x, y, width, height,
884                                x, y, __BLIT_FLAG_FLUSH) &&
885        draw->dri_screen_render_gpu == draw->dri_screen_display_gpu) {
886       dri3_fence_reset(draw->conn, dri3_front_buffer(draw));
887       dri3_copy_area(draw->conn,
888                      back->pixmap,
889                      dri3_front_buffer(draw)->pixmap,
890                      dri3_drawable_gc(draw),
891                      x, y, x, y, width, height);
892       dri3_fence_trigger(draw->conn, dri3_front_buffer(draw));
893       dri3_fence_await(draw->conn, NULL, dri3_front_buffer(draw));
894    }
895    dri3_fence_await(draw->conn, draw, back);
896 }
897 
898 void
899 loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
900                           xcb_drawable_t dest,
901                           xcb_drawable_t src)
902 {
903    loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);
904 
905    struct loader_dri3_buffer *front = dri3_front_buffer(draw);
906    if (front)
907       dri3_fence_reset(draw->conn, front);
908 
909    dri3_copy_area(draw->conn,
910                   src, dest,
911                   dri3_drawable_gc(draw),
912                   0, 0, 0, 0, draw->width, draw->height);
913 
914    if (front) {
915       dri3_fence_trigger(draw->conn, front);
916       dri3_fence_await(draw->conn, draw, front);
917    }
918 }
919 
920 void
921 loader_dri3_wait_x(struct loader_dri3_drawable *draw)
922 {
923    struct loader_dri3_buffer *front;
924 
925    if (draw == NULL || !draw->have_fake_front)
926       return;
927 
928    front = dri3_front_buffer(draw);
929 
930    loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);
931 
932    /* In the psc->is_different_gpu case, the linear buffer has been updated,
933     * but not yet the tiled buffer.
934     * Copy back to the tiled buffer we use for rendering.
935     * Note that we don't need flushing.
936     */
937    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
938       (void) loader_dri3_blit_image(draw,
939                                     front->image,
940                                     front->linear_buffer,
941                                     0, 0, front->width, front->height,
942                                     0, 0, 0);
943 }
944 
945 void
946 loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
947 {
948    struct loader_dri3_buffer *front;
949 
950    if (draw == NULL || !draw->have_fake_front)
951       return;
952 
953    front = dri3_front_buffer(draw);
954    /* TODO: `front` is not supposed to be NULL here, fix the actual bug
955     * https://gitlab.freedesktop.org/mesa/mesa/-/issues/8982
956     */
957    if (!front)
958       return;
959 
960    /* In the psc->is_different_gpu case, we update the linear_buffer
961     * before updating the real front.
962     */
963    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
964       (void) loader_dri3_blit_image(draw,
965                                     front->linear_buffer,
966                                     front->image,
967                                     0, 0, front->width, front->height,
968                                     0, 0, __BLIT_FLAG_FLUSH);
969    loader_dri3_swapbuffer_barrier(draw);
970    loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
971 }
972 
973 /** dri3_flush_present_events
974  *
975  * Process any present events that have been received from the X server
976  */
977 static void
978 dri3_flush_present_events(struct loader_dri3_drawable *draw)
979 {
980    /* Check to see if any configuration changes have occurred
981     * since we were last invoked
982     */
983    if (draw->has_event_waiter)
984       return;
985 
986    if (draw->special_event) {
987       xcb_generic_event_t    *ev;
988 
989       while ((ev = xcb_poll_for_special_event(draw->conn,
990                                               draw->special_event)) != NULL) {
991          xcb_present_generic_event_t *ge = (void *) ev;
992          if (!dri3_handle_present_event(draw, ge))
993             break;
994       }
995    }
996 }
997 
998 /** loader_dri3_swap_buffers_msc
999  *
1000  * Make the current back buffer visible using the present extension
1001  */
1002 int64_t
1003 loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
1004                              int64_t target_msc, int64_t divisor,
1005                              int64_t remainder, unsigned flush_flags,
1006                              const int *rects, int n_rects,
1007                              bool force_copy)
1008 {
1009    struct loader_dri3_buffer *back;
1010    int64_t ret = 0;
1011    bool wait_for_next_buffer = false;
1012 
1013    /* GLX spec:
1014     *   void glXSwapBuffers(Display *dpy, GLXDrawable draw);
1015     *   This operation is a no-op if draw was created with a non-double-buffered
1016     *   GLXFBConfig, or if draw is a GLXPixmap.
1017     *   ...
1018     *   GLX pixmaps may be created with a config that includes back buffers and
1019     *   stereoscopic buffers. However, glXSwapBuffers is ignored for these pixmaps.
1020     *   ...
1021     *   It is possible to create a pbuffer with back buffers and to swap the
1022     *   front and back buffers by calling glXSwapBuffers.
1023     *
1024     * EGL spec:
1025     *   EGLBoolean eglSwapBuffers(EGLDisplay dpy, EGLSurface surface);
1026     *   If surface is a back-buffered window surface, then the color buffer is
1027     *   copied to the native window associated with that surface. If surface is
1028     *   a single-buffered window, pixmap, or pbuffer surface, eglSwapBuffers has
1029     *   no effect.
1030     *
1031     * SwapBuffer effect:
1032     *       |           GLX             |           EGL            |
1033     *       | window | pixmap | pbuffer | window | pixmap | pbuffer|
1034     *-------+--------+--------+---------+--------+--------+--------+
1035     * single|  nop   |  nop   |   nop   |  nop   |  nop   |   nop  |
1036     * double|  swap  |  nop   |   swap  |  swap  |  NA    |   NA   |
1037     */
1038    if (!draw->have_back || draw->type == LOADER_DRI3_DRAWABLE_PIXMAP)
1039       return ret;
1040 
1041    draw->vtable->flush_drawable(draw, flush_flags);
1042 
1043    back = dri3_find_back_alloc(draw);
1044    /* This can only happen in an error case, e.g. the display is already closed. */
1045    if (!back)
1046       return ret;
1047 
1048    mtx_lock(&draw->mtx);
1049 
1050    if (draw->adaptive_sync && !draw->adaptive_sync_active) {
1051       set_adaptive_sync_property(draw->conn, draw->drawable, true);
1052       draw->adaptive_sync_active = true;
1053    }
1054 
1055    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu) {
1056       /* Update the linear buffer before presenting the pixmap */
1057       (void) loader_dri3_blit_image(draw,
1058                                     back->linear_buffer,
1059                                     back->image,
1060                                     0, 0, back->width, back->height,
1061                                     0, 0, __BLIT_FLAG_FLUSH);
1062    }
1063 
1064    /* If we need to preload the new back buffer, remember the source.
1065     * The force_copy parameter is used by EGL to attempt to preserve
1066     * the back buffer across a call to this function.
1067     */
1068    if (force_copy)
1069       draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);
1070 
1071    /* Exchange the back and fake front. Even though the server knows about these
1072     * buffers, it has no notion of back and fake front.
1073     */
1074    if (draw->have_fake_front) {
1075       struct loader_dri3_buffer *tmp;
1076 
1077       tmp = dri3_front_buffer(draw);
1078       draw->buffers[LOADER_DRI3_FRONT_ID] = back;
1079       draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;
1080 
1081       if (force_copy)
1082          draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
1083    }
1084 
1085    dri3_flush_present_events(draw);
1086 
1087    if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
1088       dri3_fence_reset(draw->conn, back);
1089 
1090       /* Compute when we want the frame shown by taking the last known
1091        * successful MSC and adding in a swap interval for each outstanding swap
1092        * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
1093        * semantic"
1094        */
1095       ++draw->send_sbc;
1096       if (target_msc == 0 && divisor == 0 && remainder == 0)
1097          target_msc = draw->msc + abs(draw->swap_interval) *
1098                       (draw->send_sbc - draw->recv_sbc);
1099       else if (divisor == 0 && remainder > 0) {
1100          /* From the GLX_OML_sync_control spec:
1101           *     "If <divisor> = 0, the swap will occur when MSC becomes
1102           *      greater than or equal to <target_msc>."
1103           *
1104           * Note that there's no mention of the remainder.  The Present
1105           * extension throws BadValue for remainder != 0 with divisor == 0, so
1106           * just drop the passed in value.
1107           */
1108          remainder = 0;
1109       }
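      /* Worked example (editorial): for the default 0/0/0 path above, if the
       * last completed MSC was 1000, the swap interval is 1 and two swaps are
       * still outstanding (send_sbc - recv_sbc == 2), the new frame targets
       * MSC 1002.
       */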
1110 
1111       /* From the GLX_EXT_swap_control spec
1112        * and the EGL 1.4 spec (page 53):
1113        *
1114        *     "If <interval> is set to a value of 0, buffer swaps are not
1115        *      synchronized to a video frame."
1116        *
1117        * From GLX_EXT_swap_control_tear:
1118        *
1119        *     "If <interval> is negative, the minimum number of video frames
1120        *      between buffer swaps is the absolute value of <interval>. In this
1121        *      case, if abs(<interval>) video frames have already passed from
1122        *      the previous swap when the swap is ready to be performed, the
1123        *      swap will occur without synchronization to a video frame."
1124        *
1125        * Implementation note: It is possible to enable triple buffering
1126        * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
1127        * the default.
1128        */
1129       uint32_t options = XCB_PRESENT_OPTION_NONE;
1130       if (draw->swap_interval <= 0)
1131          options |= XCB_PRESENT_OPTION_ASYNC;
1132 
1133       /* If we need to populate the new back, but need to reuse the back
1134        * buffer slot due to lack of local blit capabilities, make sure
1135        * the server doesn't flip and we deadlock.
1136        */
1137       if (draw->cur_blit_source != -1)
1138          options |= XCB_PRESENT_OPTION_COPY;
1139 #ifdef HAVE_X11_DRM
1140       if (draw->multiplanes_available)
1141          options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1142 #endif
1143       back->busy = 1;
1144       back->last_swap = draw->send_sbc;
1145 
1146       if (!draw->region) {
1147          draw->region = xcb_generate_id(draw->conn);
1148          xcb_xfixes_create_region(draw->conn, draw->region, 0, NULL);
1149       }
1150 
1151       xcb_xfixes_region_t region = 0;
1152       xcb_rectangle_t xcb_rects[64];
1153 
1154       if (n_rects > 0 && n_rects <= ARRAY_SIZE(xcb_rects)) {
1155          for (int i = 0; i < n_rects; i++) {
1156             const int *rect = &rects[i * 4];
1157             xcb_rects[i].x = rect[0];
1158             xcb_rects[i].y = draw->height - rect[1] - rect[3];
1159             xcb_rects[i].width = rect[2];
1160             xcb_rects[i].height = rect[3];
1161          }
1162 
1163          region = draw->region;
1164          xcb_xfixes_set_region(draw->conn, region, n_rects, xcb_rects);
1165       }
1166 
1167       xcb_present_pixmap(draw->conn,
1168                          draw->drawable,
1169                          back->pixmap,
1170                          (uint32_t) draw->send_sbc,
1171                          0,                                    /* valid */
1172                          region,                               /* update */
1173                          0,                                    /* x_off */
1174                          0,                                    /* y_off */
1175                          None,                                 /* target_crtc */
1176                          None,
1177                          back->sync_fence,
1178                          options,
1179                          target_msc,
1180                          divisor,
1181                          remainder, 0, NULL);
1182    } else {
1183       /* This can only be reached by double buffered GLXPbuffer. */
1184       assert(draw->type == LOADER_DRI3_DRAWABLE_PBUFFER);
1185       /* GLX does not have damage regions. */
1186       assert(n_rects == 0);
1187 
1188       /* For wait and buffer age usage. */
1189       draw->send_sbc++;
1190       draw->recv_sbc = back->last_swap = draw->send_sbc;
1191 
1192       /* In the same-GPU case the pixmap is imported as the front buffer image,
1193        * so a local blit of the back buffer image to it is enough. Otherwise the
1194        * front buffer is a fake one which must be synced with the pixmap remotely by the X server.
1195        */
1196       if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu ||
1197           !loader_dri3_blit_image(draw,
1198                                   dri3_front_buffer(draw)->image,
1199                                   back->image,
1200                                   0, 0, draw->width, draw->height,
1201                                   0, 0, __BLIT_FLAG_FLUSH)) {
1202          dri3_copy_area(draw->conn, back->pixmap,
1203                         draw->drawable,
1204                         dri3_drawable_gc(draw),
1205                         0, 0, 0, 0, draw->width, draw->height);
1206       }
1207    }
1208 
1209    ret = (int64_t) draw->send_sbc;
1210 
1211    /* Schedule a server-side back-preserving blit if necessary.
1212     * This happens iff all conditions below are satisfied:
1213     * a) We have a fake front,
1214     * b) We need to preserve the back buffer,
1215     * c) We don't have local blit capabilities.
1216     */
1217    if (draw->cur_blit_source != -1 &&
1218        draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
1219       struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
1220       struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];
1221 
1222       dri3_fence_reset(draw->conn, new_back);
1223       dri3_copy_area(draw->conn, src->pixmap,
1224                      new_back->pixmap,
1225                      dri3_drawable_gc(draw),
1226                      0, 0, 0, 0, draw->width, draw->height);
1227       dri3_fence_trigger(draw->conn, new_back);
1228       new_back->last_swap = src->last_swap;
1229    }
1230 
1231    xcb_flush(draw->conn);
1232    if (draw->stamp)
1233       ++(*draw->stamp);
1234 
1235    /* Waiting on a buffer is only sensible if all buffers are in use and the
1236     * client doesn't use the buffer age extension. In this case a client is
1237     * relying on receiving control back immediately.
1238     *
1239     * As waiting on a buffer can at worst make us miss a frame the option has
1240     * to be enabled explicitly with the block_on_depleted_buffers DRI option.
1241     */
1242    wait_for_next_buffer = draw->cur_num_back == draw->max_num_back &&
1243       !draw->queries_buffer_age && draw->block_on_depleted_buffers;
1244 
1245    mtx_unlock(&draw->mtx);
1246 
1247    dri_invalidate_drawable(draw->dri_drawable);
1248 
1249    /* Clients that use up all available buffers usually regulate their drawing
1250     * through swapchain contention backpressure. In such a scenario the client
1251     * draws whenever control returns to it. Its event loop is slowed down only
1252     * by us waiting on buffers becoming available again.
1253     *
1254     * By waiting here on a new buffer and only then returning to the client,
1255     * we ensure the client begins drawing only when the next buffer is available,
1256     * rather than drawing first and then waiting a refresh cycle for the next
1257     * available buffer to show it. This way we can reduce the latency between what is
1258     * being drawn by the client and what is shown on the screen by one frame.
1259     */
1260    if (wait_for_next_buffer)
1261       dri3_find_back(draw, draw->prefer_back_buffer_reuse);
1262 
1263    return ret;
1264 }
1265 
1266 int
1267 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1268 {
1269    struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1270    int ret = 0;
1271 
1272    mtx_lock(&draw->mtx);
1273    draw->queries_buffer_age = true;
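   /* Illustrative (editorial): if the chosen back buffer was last presented at
    * SBC 10 and we have since sent SBC 12, its age below is 12 - 10 + 1 = 3
    * frames, matching GLX_EXT_buffer_age / EGL_EXT_buffer_age semantics.
    */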
1274    if (back && back->last_swap != 0)
1275       ret = draw->send_sbc - back->last_swap + 1;
1276    mtx_unlock(&draw->mtx);
1277 
1278    return ret;
1279 }
1280 
1281 static uint32_t
1282 dri3_cpp_for_fourcc(uint32_t format) {
1283    switch (format) {
1284    case DRM_FORMAT_R8:
1285       return 1;
1286    case DRM_FORMAT_ARGB1555:
1287    case DRM_FORMAT_RGB565:
1288    case DRM_FORMAT_GR88:
1289       return 2;
1290    case DRM_FORMAT_XRGB8888:
1291    case DRM_FORMAT_ARGB8888:
1292    case DRM_FORMAT_ABGR8888:
1293    case DRM_FORMAT_XBGR8888:
1294    case DRM_FORMAT_XRGB2101010:
1295    case DRM_FORMAT_ARGB2101010:
1296    case DRM_FORMAT_XBGR2101010:
1297    case DRM_FORMAT_ABGR2101010:
1298    case __DRI_IMAGE_FORMAT_SARGB8:
1299    case __DRI_IMAGE_FORMAT_SABGR8:
1300    case __DRI_IMAGE_FORMAT_SXRGB8:
1301       return 4;
1302    case DRM_FORMAT_ABGR16161616:
1303    case DRM_FORMAT_XBGR16161616:
1304    case DRM_FORMAT_XBGR16161616F:
1305    case DRM_FORMAT_ABGR16161616F:
1306       return 8;
1307    case DRM_FORMAT_INVALID:
1308    default:
1309       return 0;
1310    }
1311 }
1312 
1313 /* Map format of render buffer to corresponding format for the linear_buffer
1314  * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1315  * Usually linear_format == format, except for depth >= 30 formats, where
1316  * different gpu vendors have different preferences wrt. color channel ordering.
1317  */
1318 static uint32_t
1319 dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1320 {
1321    switch (format) {
1322       case  __DRI_IMAGE_FORMAT_XRGB2101010:
1323       case  __DRI_IMAGE_FORMAT_XBGR2101010:
1324          /* Different preferred formats for different hw */
1325          if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1326             return __DRI_IMAGE_FORMAT_XBGR2101010;
1327          else
1328             return __DRI_IMAGE_FORMAT_XRGB2101010;
1329 
1330       case  __DRI_IMAGE_FORMAT_ARGB2101010:
1331       case  __DRI_IMAGE_FORMAT_ABGR2101010:
1332          /* Different preferred formats for different hw */
1333          if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1334             return __DRI_IMAGE_FORMAT_ABGR2101010;
1335          else
1336             return __DRI_IMAGE_FORMAT_ARGB2101010;
1337 
1338       default:
1339          return format;
1340    }
1341 }
1342 
1343 #ifdef HAVE_X11_DRM
1344 static bool
1345 has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
1346                        uint64_t *modifiers, uint32_t count)
1347 {
1348    uint64_t *supported_modifiers;
1349    int32_t supported_modifiers_count;
1350    bool found = false;
1351    int i, j;
1352 
1353    if (!dri_query_dma_buf_modifiers(draw->dri_screen_render_gpu,
1354                                                format, 0, NULL, NULL,
1355                                                &supported_modifiers_count) ||
1356        supported_modifiers_count == 0)
1357       return false;
1358 
1359    supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
1360    if (!supported_modifiers)
1361       return false;
1362 
1363    dri_query_dma_buf_modifiers(draw->dri_screen_render_gpu, format,
1364                                           supported_modifiers_count,
1365                                           supported_modifiers, NULL,
1366                                           &supported_modifiers_count);
1367 
1368    for (i = 0; !found && i < supported_modifiers_count; i++) {
1369       for (j = 0; !found && j < count; j++) {
1370          if (supported_modifiers[i] == modifiers[j])
1371             found = true;
1372       }
1373    }
1374 
1375    free(supported_modifiers);
1376    return found;
1377 }
1378 #endif
1379 
1380 /** loader_dri3_alloc_render_buffer
1381  *
1382  * Use the driver createImage function to construct a struct dri_image, then
1383  * get a file descriptor for that and create an X pixmap from that
1384  *
1385  * Allocate an xshmfence for synchronization
1386  */
1387 static struct loader_dri3_buffer *
1388 dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int fourcc,
1389                          int width, int height, int depth)
1390 {
1391    struct loader_dri3_buffer *buffer;
1392    struct dri_image *pixmap_buffer = NULL, *linear_buffer_display_gpu = NULL;
1393    int format = loader_fourcc_to_image_format(fourcc);
1394    xcb_pixmap_t pixmap;
1395    xcb_sync_fence_t sync_fence;
1396    struct xshmfence *shm_fence;
1397    int buffer_fds[4], fence_fd;
1398    int num_planes = 0;
1399    uint64_t *modifiers = NULL;
1400    uint32_t count = 0;
1401    int i, mod;
1402    int ret;
1403 
1404    /* Create an xshmfence object and
1405     * prepare to send that to the X server
1406     */
1407 
1408    fence_fd = xshmfence_alloc_shm();
1409    if (fence_fd < 0)
1410       return NULL;
1411 
1412    shm_fence = xshmfence_map_shm(fence_fd);
1413    if (shm_fence == NULL)
1414       goto no_shm_fence;
1415 
1416    /* Allocate the image from the driver
1417     */
1418    buffer = calloc(1, sizeof *buffer);
1419    if (!buffer)
1420       goto no_buffer;
1421 
1422    buffer->cpp = dri3_cpp_for_fourcc(fourcc);
1423    if (!buffer->cpp)
1424       goto no_image;
1425 
1426    if (draw->dri_screen_render_gpu == draw->dri_screen_display_gpu) {
1427 #ifdef HAVE_X11_DRM
1428       if (draw->multiplanes_available && draw->dri_screen_render_gpu->base.screen->resource_create_with_modifiers) {
1429          xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
1430          xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
1431          xcb_generic_error_t *error = NULL;
1432 
1433          mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
1434                                                        draw->window,
1435                                                        depth, buffer->cpp * 8);
1436          mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
1437                                                             mod_cookie,
1438                                                             &error);
1439          if (!mod_reply)
1440             goto no_image;
1441 
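         /* Prefer the window's modifier list over the screen-wide one; it is
          * presumably more specific to what can actually be displayed for
          * this window. Fall back to the screen modifiers when the render
          * driver supports none of the window modifiers.
          */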
1442          if (mod_reply->num_window_modifiers) {
1443             count = mod_reply->num_window_modifiers;
1444             modifiers = malloc(count * sizeof(uint64_t));
1445             if (!modifiers) {
1446                free(mod_reply);
1447                goto no_image;
1448             }
1449 
1450             memcpy(modifiers,
1451                    xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
1452                    count * sizeof(uint64_t));
1453 
1454             if (!has_supported_modifier(draw, fourcc, modifiers, count)) {
1455                free(modifiers);
1456                count = 0;
1457                modifiers = NULL;
1458             }
1459          }
1460 
1461          if (mod_reply->num_screen_modifiers && modifiers == NULL) {
1462             count = mod_reply->num_screen_modifiers;
1463             modifiers = malloc(count * sizeof(uint64_t));
1464             if (!modifiers) {
1465                free(mod_reply);
1466                goto no_image;
1467             }
1468 
1469             memcpy(modifiers,
1470                    xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
1471                    count * sizeof(uint64_t));
1472          }
1473 
1474          free(mod_reply);
1475       }
1476 #endif
1477       buffer->image = dri_create_image_with_modifiers(draw->dri_screen_render_gpu,
1478                                               width, height, format,
1479                                               __DRI_IMAGE_USE_SHARE |
1480                                               __DRI_IMAGE_USE_SCANOUT |
1481                                               __DRI_IMAGE_USE_BACKBUFFER |
1482                                               (draw->is_protected_content ?
1483                                                __DRI_IMAGE_USE_PROTECTED : 0),
1484                                               modifiers, count, buffer);
1485       free(modifiers);
1486 
1487       pixmap_buffer = buffer->image;
1488 
1489       if (!buffer->image)
1490          goto no_image;
1491    } else {
1492       buffer->image =
1493          dri_create_image(draw->dri_screen_render_gpu,
1494                                        width, height, format,
1495                                        NULL, 0, 0, buffer);
1496 
1497       if (!buffer->image)
1498          goto no_image;
1499 
1500       /* dri_screen_display_gpu is set only when the render and display GPUs
1501        * use the same driver name. This check is needed because, for simplicity,
1502        * the render GPU's image extension is also used for the display GPU.
1503        */
1504       if (draw->dri_screen_display_gpu) {
1505          linear_buffer_display_gpu =
1506            dri_create_image(draw->dri_screen_display_gpu,
1507                                          width, height,
1508                                          dri3_linear_format_for_format(draw, format),
1509                                          NULL, 0,
1510                                          __DRI_IMAGE_USE_SHARE |
1511                                          __DRI_IMAGE_USE_LINEAR |
1512                                          __DRI_IMAGE_USE_BACKBUFFER |
1513                                          __DRI_IMAGE_USE_SCANOUT,
1514                                          buffer);
1515          pixmap_buffer = linear_buffer_display_gpu;
1516       }
1517 
1518       if (!pixmap_buffer) {
1519          buffer->linear_buffer =
1520            dri_create_image(draw->dri_screen_render_gpu,
1521                                         width, height,
1522                                         dri3_linear_format_for_format(draw, format),
1523                                         NULL, 0,
1524                                         __DRI_IMAGE_USE_SHARE |
1525                                         __DRI_IMAGE_USE_LINEAR |
1526                                         __DRI_IMAGE_USE_BACKBUFFER |
1527                                         __DRI_IMAGE_USE_SCANOUT |
1528                                         __DRI_IMAGE_USE_PRIME_BUFFER,
1529                                         buffer);
1530 
1531          pixmap_buffer = buffer->linear_buffer;
1532          if (!buffer->linear_buffer) {
1533             goto no_linear_buffer;
1534          }
1535       }
1536    }
1537 
1538    /* X wants some information about the planes, so ask the image for it
1539     */
1540    if (!dri2_query_image(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
1541                                      &num_planes))
1542       num_planes = 1;
1543 
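   /* Export every plane of the image as a dma-buf fd plus its stride and
    * offset; these are what the X server needs to wrap the buffer in a
    * pixmap. Single-plane images may not support dri2_from_planar(), in
    * which case we query the image itself.
    */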
1544    for (i = 0; i < num_planes; i++) {
1545       struct dri_image *image = dri2_from_planar(pixmap_buffer, i, NULL);
1546 
1547       if (!image) {
1548          assert(i == 0);
1549          image = pixmap_buffer;
1550       }
1551 
1552       buffer_fds[i] = -1;
1553 
1554       ret = dri2_query_image(image, __DRI_IMAGE_ATTRIB_FD,
1555                                          &buffer_fds[i]);
1556       ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_STRIDE,
1557                                           &buffer->strides[i]);
1558       ret &= dri2_query_image(image, __DRI_IMAGE_ATTRIB_OFFSET,
1559                                           &buffer->offsets[i]);
1560       if (image != pixmap_buffer)
1561          dri2_destroy_image(image);
1562 
1563       if (!ret)
1564          goto no_buffer_attrib;
1565    }
1566 
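   /* The DRM format modifier is reported as two 32-bit attribs; stitch them
    * back together into the 64-bit value. If the driver can't report it we
    * fall back to DRM_FORMAT_MOD_INVALID below.
    */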
1567    ret = dri2_query_image(pixmap_buffer,
1568                                      __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
1569    buffer->modifier = (uint64_t) mod << 32;
1570    ret &= dri2_query_image(pixmap_buffer,
1571                                        __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
1572    buffer->modifier |= (uint64_t)(mod & 0xffffffff);
1573 
1574    if (!ret)
1575       buffer->modifier = DRM_FORMAT_MOD_INVALID;
1576 
1577    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu &&
1578        draw->dri_screen_display_gpu && linear_buffer_display_gpu) {
1579       /* The linear buffer was created in the display GPU's VRAM, so we
1580        * need to make it visible to the render GPU
1581        */
1582       buffer->linear_buffer =
1583          dri2_from_dma_bufs(draw->dri_screen_render_gpu,
1584                                                   width,
1585                                                   height,
1586                                                   fourcc,
1587                                                   DRM_FORMAT_MOD_INVALID,
1588                                                   &buffer_fds[0], num_planes,
1589                                                   &buffer->strides[0],
1590                                                   &buffer->offsets[0],
1591                                                   0, 0, 0, 0, __DRI_IMAGE_PRIME_LINEAR_BUFFER,
1592                                                   NULL, buffer);
1593       if (!buffer->linear_buffer)
1594          goto no_buffer_attrib;
1595 
1596       dri2_destroy_image(linear_buffer_display_gpu);
1597    }
1598 
1599    pixmap = xcb_generate_id(draw->conn);
1600 
1601    xcb_void_cookie_t cookie_pix, cookie_fence;
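   /* Turn the dma-buf(s) into an X pixmap. With DRI3 1.2 and a valid
    * modifier we can pass all planes via PixmapFromBuffers; otherwise use
    * the original single-fd PixmapFromBuffer request.
    */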
1602 #ifdef HAVE_X11_DRM
1603    if (draw->multiplanes_available &&
1604        buffer->modifier != DRM_FORMAT_MOD_INVALID) {
1605       cookie_pix = xcb_dri3_pixmap_from_buffers_checked(draw->conn,
1606                                                         pixmap,
1607                                                         draw->window,
1608                                                         num_planes,
1609                                                         width, height,
1610                                                         buffer->strides[0], buffer->offsets[0],
1611                                                         buffer->strides[1], buffer->offsets[1],
1612                                                         buffer->strides[2], buffer->offsets[2],
1613                                                         buffer->strides[3], buffer->offsets[3],
1614                                                         depth, buffer->cpp * 8,
1615                                                         buffer->modifier,
1616                                                         buffer_fds);
1617    } else
1618 #endif
1619    {
1620       cookie_pix = xcb_dri3_pixmap_from_buffer_checked(draw->conn,
1621                                                        pixmap,
1622                                                        draw->drawable,
1623                                                        buffer->size,
1624                                                        width, height, buffer->strides[0],
1625                                                        depth, buffer->cpp * 8,
1626                                                        buffer_fds[0]);
1627    }
1628    cookie_fence = xcb_dri3_fence_from_fd_checked(draw->conn,
1629                                                  pixmap,
1630                                                  (sync_fence = xcb_generate_id(draw->conn)),
1631                                                  false,
1632                                                  fence_fd);
1633    /* Group error checking to limit round-trips. */
1634    if (!check_xcb_error(cookie_pix, "xcb_dri3_pixmap_from_buffer[s]"))
1635       goto no_buffer_attrib;
1636    if (!check_xcb_error(cookie_fence, "xcb_dri3_fence_from_fd"))
1637       goto no_buffer_attrib;
1638 
1639    buffer->pixmap = pixmap;
1640    buffer->own_pixmap = true;
1641    buffer->sync_fence = sync_fence;
1642    buffer->shm_fence = shm_fence;
1643    buffer->width = width;
1644    buffer->height = height;
1645 
1646    /* Mark the buffer as idle
1647     */
1648    dri3_fence_set(buffer);
1649 
1650    return buffer;
1651 
1652 no_buffer_attrib:
1653    do {
1654       if (buffer_fds[i] != -1)
1655          close(buffer_fds[i]);
1656    } while (--i >= 0);
1657    dri2_destroy_image(pixmap_buffer);
1658 no_linear_buffer:
1659    if (draw->dri_screen_render_gpu != draw->dri_screen_display_gpu)
1660       dri2_destroy_image(buffer->image);
1661 no_image:
1662    free(buffer);
1663 no_buffer:
1664    xshmfence_unmap_shm(shm_fence);
1665 no_shm_fence:
1666    close(fence_fd);
1667    return NULL;
1668 }
1669 
1670 static bool
1671 dri3_detect_drawable_is_window(struct loader_dri3_drawable *draw)
1672 {
1673    /* Try to select for input on the window.
1674     *
1675     * If the drawable is a window, this will get our events
1676     * delivered.
1677     *
1678     * Otherwise, we'll get a BadWindow error back from this request which
1679     * will let us know that the drawable is a pixmap instead.
1680     */
1681 
1682    xcb_void_cookie_t cookie =
1683       xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
1684                                        XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1685                                        XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1686                                        XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1687 
1688    /* Check to see if our select input call failed. If it failed with a
1689     * BadWindow error, then assume the drawable is a pixmap.
1690     */
1691    xcb_generic_error_t *error = xcb_request_check(draw->conn, cookie);
1692 
1693    if (error) {
1694       if (error->error_code != BadWindow) {
1695          free(error);
1696          return false;
1697       }
1698       free(error);
1699 
1700       /* Pixmaps can't get here, see driFetchDrawable(). */
1701       draw->type = LOADER_DRI3_DRAWABLE_PBUFFER;
1702       return true;
1703    }
1704 
1705    draw->type = LOADER_DRI3_DRAWABLE_WINDOW;
1706    return true;
1707 }
1708 
1709 static bool
1710 dri3_setup_present_event(struct loader_dri3_drawable *draw)
1711 {
1712    /* No need to setup for pixmap drawable. */
1713    if (draw->type == LOADER_DRI3_DRAWABLE_PIXMAP ||
1714        draw->type == LOADER_DRI3_DRAWABLE_PBUFFER)
1715       return true;
1716 
1717    draw->eid = xcb_generate_id(draw->conn);
1718 
1719    if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
1720       xcb_present_select_input(draw->conn, draw->eid, draw->drawable,
1721                                XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1722                                XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1723                                XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1724    } else {
1725       assert(draw->type == LOADER_DRI3_DRAWABLE_UNKNOWN);
1726 
1727       if (!dri3_detect_drawable_is_window(draw))
1728          return false;
1729 
1730       if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1731          return true;
1732    }
1733 
1734    /* Create an XCB event queue to hold present events outside of the usual
1735     * application event queue
1736     */
1737    draw->special_event = xcb_register_for_special_xge(draw->conn,
1738                                                       &xcb_present_id,
1739                                                       draw->eid,
1740                                                       draw->stamp);
1741    return true;
1742 }
1743 
1744 /** loader_dri3_update_drawable
1745  *
1746  * Called the first time we use the drawable and then
1747  * after we receive present configure notify events to
1748  * track the geometry of the drawable
1749  */
1750 static int
1751 dri3_update_drawable(struct loader_dri3_drawable *draw)
1752 {
1753    mtx_lock(&draw->mtx);
1754    if (draw->first_init) {
1755       xcb_get_geometry_cookie_t                 geom_cookie;
1756       xcb_get_geometry_reply_t                  *geom_reply;
1757       xcb_window_t                               root_win;
1758 
1759       draw->first_init = false;
1760 
1761       if (!dri3_setup_present_event(draw)) {
1762          mtx_unlock(&draw->mtx);
1763          return false;
1764       }
1765 
1766       geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1767 
1768       geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1769 
1770       if (!geom_reply) {
1771          mtx_unlock(&draw->mtx);
1772          return false;
1773       }
1774       draw->width = geom_reply->width;
1775       draw->height = geom_reply->height;
1776       draw->depth = geom_reply->depth;
1777       draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1778       root_win = geom_reply->root;
1779 
1780       free(geom_reply);
1781 
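      /* Non-window drawables have no window of their own, so remember the
       * root window instead; later DRI3 requests (modifier queries, pixmap
       * creation) use draw->window as the reference drawable.
       */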
1782       if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1783          draw->window = root_win;
1784       else
1785          draw->window = draw->drawable;
1786    }
1787    dri3_flush_present_events(draw);
1788    mtx_unlock(&draw->mtx);
1789    return true;
1790 }
1791 
1792 struct dri_image *
1793 loader_dri3_create_image(xcb_connection_t *c,
1794                          xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1795                          unsigned int fourcc,
1796                          struct dri_screen *dri_screen,
1797                          void *loaderPrivate)
1798 {
1799    int                                  *fds;
1800    struct dri_image                           *image_planar, *ret;
1801    int                                  stride, offset;
1802 
1803    /* Get an FD for the pixmap object
1804     */
1805    fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1806 
1807    stride = bp_reply->stride;
1808    offset = 0;
1809 
1810    /* createImageFromDmaBufs creates a wrapper struct dri_image structure which
1811     * can deal with multiple planes for things like YUV images. So, once
1812     * we've gotten the planar wrapper, pull the single plane out of it and
1813     * discard the wrapper.
1814     */
1815    image_planar = dri2_from_dma_bufs(dri_screen,
1816                                        bp_reply->width,
1817                                        bp_reply->height,
1818                                        fourcc,
1819                                        DRM_FORMAT_MOD_INVALID,
1820                                        fds, 1,
1821                                        &stride, &offset,
1822                                        0, 0, 0, 0, 0,
1823                                        NULL, loaderPrivate);
1824    close(fds[0]);
1825    if (!image_planar)
1826       return NULL;
1827 
1828    ret = dri2_from_planar(image_planar, 0, loaderPrivate);
1829 
1830    if (!ret)
1831       ret = image_planar;
1832    else
1833       dri2_destroy_image(image_planar);
1834 
1835    return ret;
1836 }
1837 
1838 #ifdef HAVE_X11_DRM
1839 struct dri_image *
1840 loader_dri3_create_image_from_buffers(xcb_connection_t *c,
1841                                       xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
1842                                       unsigned int fourcc,
1843                                       struct dri_screen *dri_screen,
1844                                       void *loaderPrivate)
1845 {
1846    struct dri_image                           *ret;
1847    int                                  *fds;
1848    uint32_t                             *strides_in, *offsets_in;
1849    int                                   strides[4], offsets[4];
1850    unsigned                              error;
1851    int                                   i;
1852 
1853    if (bp_reply->nfd > 4)
1854       return NULL;
1855 
1856    fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
1857    strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
1858    offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);
1859    for (i = 0; i < bp_reply->nfd; i++) {
1860       strides[i] = strides_in[i];
1861       offsets[i] = offsets_in[i];
1862    }
1863 
1864    ret = dri2_from_dma_bufs(dri_screen,
1865                                        bp_reply->width,
1866                                        bp_reply->height,
1867                                        fourcc,
1868                                        bp_reply->modifier,
1869                                        fds, bp_reply->nfd,
1870                                        strides, offsets,
1871                                        0, 0, 0, 0, /* UNDEFINED */
1872                                        0, &error, loaderPrivate);
1873 
1874    for (i = 0; i < bp_reply->nfd; i++)
1875       close(fds[i]);
1876 
1877    return ret;
1878 }
1879 #endif
1880 
1881 struct dri_image *
1882 loader_dri3_get_pixmap_buffer(xcb_connection_t *conn, xcb_drawable_t pixmap, struct dri_screen *screen,
1883                               unsigned fourcc, bool multiplanes_available,
1884                               int *width, int *height, void *loader_data)
1885 {
1886    struct dri_image *image;
1887 #ifdef HAVE_X11_DRM
1888    if (multiplanes_available) {
1889       xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
1890       xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;
1891 
1892       bps_cookie = xcb_dri3_buffers_from_pixmap(conn, pixmap);
1893       bps_reply = xcb_dri3_buffers_from_pixmap_reply(conn, bps_cookie,
1894                                                      NULL);
1895       if (!bps_reply)
1896          return NULL;
1897       image = loader_dri3_create_image_from_buffers(conn, bps_reply, fourcc,
1898                                                     screen, loader_data);
1899       *width = bps_reply->width;
1900       *height = bps_reply->height;
1901       free(bps_reply);
1902    } else
1903 #endif
1904    {
1905       xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
1906       xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
1907 
1908       bp_cookie = xcb_dri3_buffer_from_pixmap(conn, pixmap);
1909       bp_reply = xcb_dri3_buffer_from_pixmap_reply(conn, bp_cookie, NULL);
1910       if (!bp_reply)
1911          return NULL;
1912 
1913       image = loader_dri3_create_image(conn, bp_reply, fourcc, screen,
1914                                                loader_data);
1915       *width = bp_reply->width;
1916       *height = bp_reply->height;
1917       free(bp_reply);
1918    }
1919    return image;
1920 }
1921 
1922 /** dri3_get_pixmap_buffer
1923  *
1924  * Get the DRM object for a pixmap from the X server and
1925  * wrap that with a struct dri_image structure using createImageFromDmaBufs
1926  */
1927 static struct loader_dri3_buffer *
1928 dri3_get_pixmap_buffer(struct dri_drawable *driDrawable, unsigned int fourcc,
1929                        enum loader_dri3_buffer_type buffer_type,
1930                        struct loader_dri3_drawable *draw)
1931 {
1932    int                                  buf_id = loader_dri3_pixmap_buf_id(buffer_type);
1933    struct loader_dri3_buffer            *buffer = draw->buffers[buf_id];
1934    xcb_drawable_t                       pixmap;
1935    xcb_void_cookie_t                    cookie;
1936    xcb_sync_fence_t                     sync_fence;
1937    struct xshmfence                     *shm_fence;
1938    int                                  width;
1939    int                                  height;
1940    int                                  fence_fd;
1941    struct dri_screen                          *cur_screen;
1942 
1943    if (buffer)
1944       return buffer;
1945 
1946    pixmap = draw->drawable;
1947 
1948    buffer = calloc(1, sizeof *buffer);
1949    if (!buffer)
1950       goto no_buffer;
1951 
1952    fence_fd = xshmfence_alloc_shm();
1953    if (fence_fd < 0)
1954       goto no_fence;
1955    shm_fence = xshmfence_map_shm(fence_fd);
1956    if (shm_fence == NULL) {
1957       close (fence_fd);
1958       goto no_fence;
1959    }
1960 
1961    /* Get the currently-bound screen or revert to using the drawable's screen if
1962     * no contexts are currently bound. The latter case is at least necessary for
1963     * obs-studio, when using Window Capture (Xcomposite) as a Source.
1964     */
1965    cur_screen = draw->vtable->get_dri_screen();
1966    if (!cur_screen) {
1967        cur_screen = draw->dri_screen_render_gpu;
1968    }
1969 
1970    cookie = xcb_dri3_fence_from_fd_checked(draw->conn,
1971                                            pixmap,
1972                                            (sync_fence = xcb_generate_id(draw->conn)),
1973                                            false,
1974                                            fence_fd);
1975    if (!check_xcb_error(cookie, "xcb_dri3_fence_from_fd"))
1976       goto no_image;
1977 
1978    buffer->image = loader_dri3_get_pixmap_buffer(draw->conn, pixmap, cur_screen, fourcc,
1979                                                  draw->multiplanes_available, &width, &height, buffer);
1980 
1981    if (!buffer->image)
1982       goto no_image;
1983 
1984    buffer->pixmap = pixmap;
1985    buffer->own_pixmap = false;
1986    buffer->width = width;
1987    buffer->height = height;
1988    buffer->shm_fence = shm_fence;
1989    buffer->sync_fence = sync_fence;
1990 
1991    dri3_set_render_buffer(draw, buf_id, buffer);
1992 
1993    return buffer;
1994 
1995 no_image:
1996    xcb_sync_destroy_fence(draw->conn, sync_fence);
1997    xshmfence_unmap_shm(shm_fence);
1998 no_fence:
1999    free(buffer);
2000 no_buffer:
2001    return NULL;
2002 }
2003 
2004 /** dri3_get_buffer
2005  *
2006  * Find a front or back buffer, allocating new ones as necessary
2007  */
2008 static struct loader_dri3_buffer *
2009 dri3_get_buffer(struct dri_drawable *driDrawable,
2010                 unsigned int fourcc,
2011                 enum loader_dri3_buffer_type buffer_type,
2012                 struct loader_dri3_drawable *draw)
2013 {
2014    struct loader_dri3_buffer *buffer;
2015    bool fence_await = buffer_type == loader_dri3_buffer_back;
2016    int buf_id;
2017 
2018    if (buffer_type == loader_dri3_buffer_back) {
2019       draw->back_format = fourcc;
2020 
2021       buf_id = dri3_find_back(draw, !draw->prefer_back_buffer_reuse);
2022 
2023       if (buf_id < 0)
2024          return NULL;
2025    } else {
2026       buf_id = LOADER_DRI3_FRONT_ID;
2027    }
2028 
2029    buffer = draw->buffers[buf_id];
2030 
2031    /* Allocate a new buffer if there isn't an old one, if that
2032     * old one is the wrong size, or if it's suboptimal
2033     */
2034    if (!buffer || buffer->width != draw->width ||
2035        buffer->height != draw->height ||
2036        buffer->reallocate) {
2037       struct loader_dri3_buffer *new_buffer;
2038 
2039       /* Allocate the new buffers
2040        */
2041       new_buffer = dri3_alloc_render_buffer(draw,
2042                                             fourcc,
2043                                             draw->width,
2044                                             draw->height,
2045                                             draw->depth);
2046       if (!new_buffer)
2047          return NULL;
2048 
2049       /* When resizing, copy the contents of the old buffer, waiting for that
2050        * copy to complete using our fences before proceeding
2051        */
2052       if ((buffer_type == loader_dri3_buffer_back ||
2053            (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
2054           && buffer) {
2055 
2056          /* Fill the new buffer with data from an old buffer */
2057          if (!loader_dri3_blit_image(draw,
2058                                      new_buffer->image,
2059                                      buffer->image,
2060                                      0, 0,
2061                                      MIN2(buffer->width, new_buffer->width),
2062                                      MIN2(buffer->height, new_buffer->height),
2063                                      0, 0, 0) &&
2064              !buffer->linear_buffer) {
2065             dri3_fence_reset(draw->conn, new_buffer);
2066             dri3_copy_area(draw->conn,
2067                            buffer->pixmap,
2068                            new_buffer->pixmap,
2069                            dri3_drawable_gc(draw),
2070                            0, 0, 0, 0,
2071                            draw->width, draw->height);
2072             dri3_fence_trigger(draw->conn, new_buffer);
2073             fence_await = true;
2074          }
2075          dri3_free_render_buffer(draw, buf_id);
2076       } else if (buffer_type == loader_dri3_buffer_front) {
2077          /* Fill the new fake front with data from a real front */
2078          loader_dri3_swapbuffer_barrier(draw);
2079          dri3_fence_reset(draw->conn, new_buffer);
2080          dri3_copy_area(draw->conn,
2081                         draw->drawable,
2082                         new_buffer->pixmap,
2083                         dri3_drawable_gc(draw),
2084                         0, 0, 0, 0,
2085                         draw->width, draw->height);
2086          dri3_fence_trigger(draw->conn, new_buffer);
2087 
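         /* In the prime case the server's copy lands in the linear buffer;
          * wait for it and then blit into the tiled image we actually render
          * from. Otherwise just wait on the fence before using the buffer.
          */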
2088          if (new_buffer->linear_buffer) {
2089             dri3_fence_await(draw->conn, draw, new_buffer);
2090             (void) loader_dri3_blit_image(draw,
2091                                           new_buffer->image,
2092                                           new_buffer->linear_buffer,
2093                                           0, 0, draw->width, draw->height,
2094                                           0, 0, 0);
2095          } else
2096             fence_await = true;
2097       }
2098       buffer = new_buffer;
2099       dri3_set_render_buffer(draw, buf_id, buffer);
2100    }
2101 
2102    if (fence_await)
2103       dri3_fence_await(draw->conn, draw, buffer);
2104 
2105    /*
2106     * Do we need to preserve the content of a previous buffer?
2107     *
2108     * Note that this blit is needed only to avoid a wait for a buffer that
2109     * is currently in the flip chain or being scanned out. That's really
2110     * a tradeoff. If we're ok with the wait we can reduce the number of back
2111     * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
2112     * but in the latter case we must disallow page-flipping.
2113     */
2114    if (buffer_type == loader_dri3_buffer_back &&
2115        draw->cur_blit_source != -1 &&
2116        draw->buffers[draw->cur_blit_source] &&
2117        buffer != draw->buffers[draw->cur_blit_source]) {
2118 
2119       struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2120 
2121       /* Avoid flushing here. That will probably benefit tiling hardware. */
2122       (void) loader_dri3_blit_image(draw,
2123                                     buffer->image,
2124                                     source->image,
2125                                     0, 0, draw->width, draw->height,
2126                                     0, 0, 0);
2127       buffer->last_swap = source->last_swap;
2128       draw->cur_blit_source = -1;
2129    }
2130    /* Return the requested buffer */
2131    return buffer;
2132 }
2133 
2134 /** dri3_free_buffers
2135  *
2136  * Free the front buffer or all of the back buffers. Used
2137  * when the application changes which buffers it needs
2138  */
2139 static void
2140 dri3_free_buffers(struct dri_drawable *driDrawable,
2141                   enum loader_dri3_buffer_type buffer_type,
2142                   struct loader_dri3_drawable *draw)
2143 {
2144    int first_id;
2145    int n_id;
2146    int buf_id;
2147 
2148    switch (buffer_type) {
2149    case loader_dri3_buffer_back:
2150       first_id = LOADER_DRI3_BACK_ID(0);
2151       n_id = LOADER_DRI3_MAX_BACK;
2152       draw->cur_blit_source = -1;
2153       break;
2154    case loader_dri3_buffer_front:
2155       first_id = LOADER_DRI3_FRONT_ID;
2156       /* Don't free a fake front holding new backbuffer content. */
2157       n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
2158       break;
2159    default:
2160       unreachable("unhandled buffer_type");
2161    }
2162 
2163    for (buf_id = first_id; buf_id < first_id + n_id; buf_id++)
2164       dri3_free_render_buffer(draw, buf_id);
2165 }
2166 
2167 /** loader_dri3_get_buffers
2168  *
2169  * The published buffer allocation API.
2170  * Returns all of the necessary buffers, allocating
2171  * as needed.
2172  */
2173 int
2174 loader_dri3_get_buffers(struct dri_drawable *driDrawable,
2175                         unsigned int format,
2176                         uint32_t *stamp,
2177                         void *loaderPrivate,
2178                         uint32_t buffer_mask,
2179                         struct __DRIimageList *buffers)
2180 {
2181    struct loader_dri3_drawable *draw = loaderPrivate;
2182    struct loader_dri3_buffer   *front, *back;
2183    int fourcc = loader_image_format_to_fourcc(format);
2184    int buf_id;
2185 
2186    buffers->image_mask = 0;
2187    buffers->front = NULL;
2188    buffers->back = NULL;
2189 
2190    if (!dri3_update_drawable(draw))
2191       return false;
2192 
2193    dri3_update_max_num_back(draw);
2194 
2195    /* Free no longer needed back buffers */
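   /* Back buffers that haven't been presented for a long time (over ~200
    * swaps below) are unlikely to be needed again, so free them to save
    * memory. The current blit source is kept since its contents may still
    * be copied into the next back buffer.
    */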
2196    for (buf_id = 0; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
2197       int buffer_age;
2198 
2199       back = draw->buffers[buf_id];
2200       if (!back || !back->last_swap || draw->cur_blit_source == buf_id)
2201          continue;
2202 
2203       buffer_age = draw->send_sbc - back->last_swap + 1;
2204       if (buffer_age > 200)
2205          dri3_free_render_buffer(draw, buf_id);
2206    }
2207 
2208    /* Pixmaps always have front buffers.
2209     */
2210    if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
2211       buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
2212 
2213    if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
2214       /* All pixmaps are owned by the server gpu.
2215        * When we use a different gpu, we can't use the pixmap
2216        * as a buffer since it is potentially tiled in a way
2217        * our device can't understand. In this case, use
2218        * a fake front buffer. Hopefully the pixmap
2219        * content will get synced with the fake front
2220        * buffer.
2221        */
2222       if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW &&
2223           draw->dri_screen_render_gpu == draw->dri_screen_display_gpu)
2224          front = dri3_get_pixmap_buffer(driDrawable,
2225                                         fourcc,
2226                                         loader_dri3_buffer_front,
2227                                         draw);
2228       else
2229          front = dri3_get_buffer(driDrawable,
2230                                  fourcc,
2231                                  loader_dri3_buffer_front,
2232                                  draw);
2233 
2234       if (!front)
2235          return false;
2236    } else {
2237       dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
2238       draw->have_fake_front = 0;
2239       front = NULL;
2240    }
2241 
2242    if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
2243       back = dri3_get_buffer(driDrawable,
2244                              fourcc,
2245                              loader_dri3_buffer_back,
2246                              draw);
2247       if (!back)
2248          return false;
2249       draw->have_back = 1;
2250    } else {
2251       dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
2252       draw->have_back = 0;
2253       back = NULL;
2254    }
2255 
2256    if (front) {
2257       buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
2258       buffers->front = front->image;
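      /* have_fake_front mirrors the conditions under which dri3_get_buffer()
       * (rather than the real pixmap) provided the front buffer: a prime
       * setup or a window drawable.
       */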
2259       draw->have_fake_front =
2260          draw->dri_screen_render_gpu != draw->dri_screen_display_gpu ||
2261          draw->type == LOADER_DRI3_DRAWABLE_WINDOW;
2262    }
2263 
2264    if (back) {
2265       buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
2266       buffers->back = back->image;
2267    }
2268 
2269    draw->stamp = stamp;
2270 
2271    return true;
2272 }
2273 
2274 /** loader_dri3_update_drawable_geometry
2275  *
2276  * Get the current drawable geometry.
2277  */
2278 void
2279 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2280 {
2281    xcb_get_geometry_cookie_t geom_cookie;
2282    xcb_get_geometry_reply_t *geom_reply;
2283 
2284    geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2285 
2286    geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2287 
2288    if (geom_reply) {
2289       bool changed = draw->width != geom_reply->width || draw->height != geom_reply->height;
2290       draw->width = geom_reply->width;
2291       draw->height = geom_reply->height;
2292       if (changed) {
2293          draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2294          dri_invalidate_drawable(draw->dri_drawable);
2295       }
2296 
2297       free(geom_reply);
2298    }
2299 }
2300 
2301 /**
2302  * Make sure the server has flushed all pending swap buffers to hardware
2303  * for this drawable. Ideally we'd want to send an X protocol request to
2304  * have the server block our connection until the swaps are complete. That
2305  * would avoid the potential round-trip here.
2306  */
2307 void
2308 loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
2309 {
2310    int64_t ust, msc, sbc;
2311 
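   /* A target SBC of 0 makes loader_dri3_wait_for_sbc() wait for the most
    * recently sent swap, which is the barrier we need here.
    */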
2312    (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
2313 }
2314 
2315 /**
2316  * Perform any cleanup associated with a close screen operation.
2317  * \param dri_screen[in,out] Pointer to struct dri_screen about to be closed.
2318  *
2319  * This function destroys the screen's cached swap context if any.
2320  */
2321 void
2322 loader_dri3_close_screen(struct dri_screen *dri_screen)
2323 {
2324    simple_mtx_lock(&blit_context.mtx);
2325    if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2326       driDestroyContext(blit_context.ctx);
2327       blit_context.ctx = NULL;
2328    }
2329    simple_mtx_unlock(&blit_context.mtx);
2330 }
2331 
2332 /**
2333  * Find a backbuffer slot - potentially allocating a back buffer
2334  *
2335  * \param draw[in,out]  Pointer to the drawable for which to find a back buffer.
2336  * \return Pointer to a new back buffer or NULL if allocation failed or was
2337  * not mandated.
2338  *
2339  * Find a potentially new back buffer. If it hasn't been allocated yet and
2340  * also needs initializing, try to allocate and initialize it.
2341  */
2342 static struct loader_dri3_buffer *
2343 dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2344 {
2345    struct loader_dri3_buffer *back;
2346    int id;
2347 
2348    id = dri3_find_back(draw, false);
2349    if (id < 0)
2350       return NULL;
2351 
2352    back = draw->buffers[id];
2353    /* Allocate a new back if we haven't got one */
2354    if (!back && draw->back_format != DRM_FORMAT_INVALID &&
2355        dri3_update_drawable(draw))
2356       back = dri3_alloc_render_buffer(draw, draw->back_format,
2357                                       draw->width, draw->height, draw->depth);
2358 
2359    if (!back)
2360       return NULL;
2361 
2362    dri3_set_render_buffer(draw, id, back);
2363 
2364    /* If necessary, prefill the back with data. */
2365    if (draw->cur_blit_source != -1 &&
2366        draw->buffers[draw->cur_blit_source] &&
2367        back != draw->buffers[draw->cur_blit_source]) {
2368       struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2369 
2370       dri3_fence_await(draw->conn, draw, source);
2371       dri3_fence_await(draw->conn, draw, back);
2372       (void) loader_dri3_blit_image(draw,
2373                                     back->image,
2374                                     source->image,
2375                                     0, 0, draw->width, draw->height,
2376                                     0, 0, 0);
2377       back->last_swap = source->last_swap;
2378       draw->cur_blit_source = -1;
2379    }
2380 
2381    return back;
2382 }
2383