/*
 * Copyright © 2013 Keith Packard
 * Copyright © 2015 Boyan Ding
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
23
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>

#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>
#include <xcb/xfixes.h>

#include <X11/Xlib-xcb.h>

#include "loader_dri_helper.h"
#include "loader_dri3_helper.h"
#include "util/macros.h"
#include "drm-uapi/drm_fourcc.h"
41
/**
 * A cached blit context.
 *
 * Used by loader_dri3_blit_image() when the caller has no current context.
 * All fields are protected by \c mtx, which is held between
 * loader_dri3_blit_context_get() and loader_dri3_blit_context_put().
 */
struct loader_dri3_blit_context {
   mtx_t mtx;                      /* Guards the fields below. */
   __DRIcontext *ctx;              /* Lazily-created context, or NULL. */
   __DRIscreen *cur_screen;        /* Screen \c ctx was created for. */
   const __DRIcoreExtension *core; /* Core extension used to destroy \c ctx. */
};

/* For simplicity we maintain the cache only for a single screen at a time */
static struct loader_dri3_blit_context blit_context = {
   _MTX_INITIALIZER_NP, NULL
};
56
static void
dri3_flush_present_events(struct loader_dri3_drawable *draw);

static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable *draw);
62
63 static xcb_screen_t *
get_screen_for_root(xcb_connection_t * conn,xcb_window_t root)64 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
65 {
66 xcb_screen_iterator_t screen_iter =
67 xcb_setup_roots_iterator(xcb_get_setup(conn));
68
69 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
70 if (screen_iter.data->root == root)
71 return screen_iter.data;
72 }
73
74 return NULL;
75 }
76
77 static xcb_visualtype_t *
get_xcb_visualtype_for_depth(struct loader_dri3_drawable * draw,int depth)78 get_xcb_visualtype_for_depth(struct loader_dri3_drawable *draw, int depth)
79 {
80 xcb_visualtype_iterator_t visual_iter;
81 xcb_screen_t *screen = draw->screen;
82 xcb_depth_iterator_t depth_iter;
83
84 if (!screen)
85 return NULL;
86
87 depth_iter = xcb_screen_allowed_depths_iterator(screen);
88 for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
89 if (depth_iter.data->depth != depth)
90 continue;
91
92 visual_iter = xcb_depth_visuals_iterator(depth_iter.data);
93 if (visual_iter.rem)
94 return visual_iter.data;
95 }
96
97 return NULL;
98 }
99
100 /* Sets the adaptive sync window property state. */
101 static void
set_adaptive_sync_property(xcb_connection_t * conn,xcb_drawable_t drawable,uint32_t state)102 set_adaptive_sync_property(xcb_connection_t *conn, xcb_drawable_t drawable,
103 uint32_t state)
104 {
105 static char const name[] = "_VARIABLE_REFRESH";
106 xcb_intern_atom_cookie_t cookie;
107 xcb_intern_atom_reply_t* reply;
108 xcb_void_cookie_t check;
109
110 cookie = xcb_intern_atom(conn, 0, strlen(name), name);
111 reply = xcb_intern_atom_reply(conn, cookie, NULL);
112 if (reply == NULL)
113 return;
114
115 if (state)
116 check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
117 drawable, reply->atom,
118 XCB_ATOM_CARDINAL, 32, 1, &state);
119 else
120 check = xcb_delete_property_checked(conn, drawable, reply->atom);
121
122 xcb_discard_reply(conn, check.sequence);
123 free(reply);
124 }
125
126 /* Get red channel mask for given drawable at given depth. */
127 static unsigned int
dri3_get_red_mask_for_depth(struct loader_dri3_drawable * draw,int depth)128 dri3_get_red_mask_for_depth(struct loader_dri3_drawable *draw, int depth)
129 {
130 xcb_visualtype_t *visual = get_xcb_visualtype_for_depth(draw, depth);
131
132 if (visual)
133 return visual->red_mask;
134
135 return 0;
136 }
137
138 /**
139 * Do we have blit functionality in the image blit extension?
140 *
141 * \param draw[in] The drawable intended to blit from / to.
142 * \return true if we have blit functionality. false otherwise.
143 */
loader_dri3_have_image_blit(const struct loader_dri3_drawable * draw)144 static bool loader_dri3_have_image_blit(const struct loader_dri3_drawable *draw)
145 {
146 return draw->ext->image->base.version >= 9 &&
147 draw->ext->image->blitImage != NULL;
148 }
149
/**
 * Get and lock (for use with the current thread) a dri context associated
 * with the drawable's dri screen. The context is intended to be used with
 * the dri image extension's blitImage method.
 *
 * \param draw[in] Pointer to the drawable whose dri screen we want a
 * dri context for.
 * \return A dri context or NULL if context creation failed.
 *
 * When the caller is done with the context (even if the context returned was
 * NULL), the caller must call loader_dri3_blit_context_put.
 */
static __DRIcontext *
loader_dri3_blit_context_get(struct loader_dri3_drawable *draw)
{
   /* Held until loader_dri3_blit_context_put() releases it. */
   mtx_lock(&blit_context.mtx);

   /* The cache holds a context for one screen at a time; discard a cached
    * context that was created for a different screen.
    */
   if (blit_context.ctx && blit_context.cur_screen != draw->dri_screen) {
      blit_context.core->destroyContext(blit_context.ctx);
      blit_context.ctx = NULL;
   }

   if (!blit_context.ctx) {
      /* No share context, no attribs: a bare context is enough for
       * blitImage.
       */
      blit_context.ctx = draw->ext->core->createNewContext(draw->dri_screen,
                                                           NULL, NULL, NULL);
      blit_context.cur_screen = draw->dri_screen;
      blit_context.core = draw->ext->core;
   }

   return blit_context.ctx;
}
181
/**
 * Release (for use with other threads) a dri context previously obtained using
 * loader_dri3_blit_context_get.
 */
static void
loader_dri3_blit_context_put(void)
{
   mtx_unlock(&blit_context.mtx);
}
191
192 /**
193 * Blit (parts of) the contents of a DRI image to another dri image
194 *
195 * \param draw[in] The drawable which owns the images.
196 * \param dst[in] The destination image.
197 * \param src[in] The source image.
198 * \param dstx0[in] Start destination coordinate.
199 * \param dsty0[in] Start destination coordinate.
200 * \param width[in] Blit width.
201 * \param height[in] Blit height.
202 * \param srcx0[in] Start source coordinate.
203 * \param srcy0[in] Start source coordinate.
204 * \param flush_flag[in] Image blit flush flag.
205 * \return true iff successful.
206 */
207 static bool
loader_dri3_blit_image(struct loader_dri3_drawable * draw,__DRIimage * dst,__DRIimage * src,int dstx0,int dsty0,int width,int height,int srcx0,int srcy0,int flush_flag)208 loader_dri3_blit_image(struct loader_dri3_drawable *draw,
209 __DRIimage *dst, __DRIimage *src,
210 int dstx0, int dsty0, int width, int height,
211 int srcx0, int srcy0, int flush_flag)
212 {
213 __DRIcontext *dri_context;
214 bool use_blit_context = false;
215
216 if (!loader_dri3_have_image_blit(draw))
217 return false;
218
219 dri_context = draw->vtable->get_dri_context(draw);
220
221 if (!dri_context || !draw->vtable->in_current_context(draw)) {
222 dri_context = loader_dri3_blit_context_get(draw);
223 use_blit_context = true;
224 flush_flag |= __BLIT_FLAG_FLUSH;
225 }
226
227 if (dri_context)
228 draw->ext->image->blitImage(dri_context, dst, src, dstx0, dsty0,
229 width, height, srcx0, srcy0,
230 width, height, flush_flag);
231
232 if (use_blit_context)
233 loader_dri3_blit_context_put();
234
235 return dri_context != NULL;
236 }
237
/* Reset the xshmfence to the untriggered state before reusing a buffer. */
static inline void
dri3_fence_reset(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xshmfence_reset(buffer->shm_fence);
}
243
/* Trigger the buffer's fence locally (client side). */
static inline void
dri3_fence_set(struct loader_dri3_buffer *buffer)
{
   xshmfence_trigger(buffer->shm_fence);
}
249
/* Ask the X server to trigger the buffer's sync fence. */
static inline void
dri3_fence_trigger(xcb_connection_t *c, struct loader_dri3_buffer *buffer)
{
   xcb_sync_trigger_fence(c, buffer->sync_fence);
}
255
/* Block until the buffer's fence is triggered; \c draw may be NULL.
 * The connection is flushed first so any pending trigger request actually
 * reaches the server before we wait.
 */
static inline void
dri3_fence_await(xcb_connection_t *c, struct loader_dri3_drawable *draw,
                 struct loader_dri3_buffer *buffer)
{
   xcb_flush(c);
   xshmfence_await(buffer->shm_fence);
   if (draw) {
      /* Drain Present events that may have arrived while waiting. */
      mtx_lock(&draw->mtx);
      dri3_flush_present_events(draw);
      mtx_unlock(&draw->mtx);
   }
}
268
269 static void
dri3_update_max_num_back(struct loader_dri3_drawable * draw)270 dri3_update_max_num_back(struct loader_dri3_drawable *draw)
271 {
272 switch (draw->last_present_mode) {
273 case XCB_PRESENT_COMPLETE_MODE_FLIP: {
274 int new_max;
275
276 if (draw->swap_interval == 0)
277 new_max = 4;
278 else
279 new_max = 3;
280
281 assert(new_max <= LOADER_DRI3_MAX_BACK);
282
283 if (new_max != draw->max_num_back) {
284 /* On transition from swap interval == 0 to != 0, start with two
285 * buffers again. Otherwise keep the current number of buffers. Either
286 * way, more will be allocated if needed.
287 */
288 if (new_max < draw->max_num_back)
289 draw->cur_num_back = 2;
290
291 draw->max_num_back = new_max;
292 }
293
294 break;
295 }
296
297 case XCB_PRESENT_COMPLETE_MODE_SKIP:
298 break;
299
300 default:
301 /* On transition from flips to copies, start with a single buffer again,
302 * a second one will be allocated if needed
303 */
304 if (draw->max_num_back != 2)
305 draw->cur_num_back = 1;
306
307 draw->max_num_back = 2;
308 }
309 }
310
311 void
loader_dri3_set_swap_interval(struct loader_dri3_drawable * draw,int interval)312 loader_dri3_set_swap_interval(struct loader_dri3_drawable *draw, int interval)
313 {
314 /* Wait all previous swap done before changing swap interval.
315 *
316 * This is for preventing swap out of order in the following cases:
317 * 1. Change from sync swap mode (>0) to async mode (=0), so async swap occurs
318 * before previous pending sync swap.
319 * 2. Change from value A to B and A > B, so the target_msc for the previous
320 * pending swap may be bigger than newer swap.
321 *
322 * PS. changing from value A to B and A < B won't cause swap out of order but
323 * may still gets wrong target_msc value at the beginning.
324 */
325 if (draw->swap_interval != interval)
326 loader_dri3_swapbuffer_barrier(draw);
327
328 draw->swap_interval = interval;
329 }
330
331 /** dri3_free_render_buffer
332 *
333 * Free everything associated with one render buffer including pixmap, fence
334 * stuff and the driver image
335 */
336 static void
dri3_free_render_buffer(struct loader_dri3_drawable * draw,struct loader_dri3_buffer * buffer)337 dri3_free_render_buffer(struct loader_dri3_drawable *draw,
338 struct loader_dri3_buffer *buffer)
339 {
340 if (buffer->own_pixmap)
341 xcb_free_pixmap(draw->conn, buffer->pixmap);
342 xcb_sync_destroy_fence(draw->conn, buffer->sync_fence);
343 xshmfence_unmap_shm(buffer->shm_fence);
344 draw->ext->image->destroyImage(buffer->image);
345 if (buffer->linear_buffer)
346 draw->ext->image->destroyImage(buffer->linear_buffer);
347 free(buffer);
348 }
349
350 void
loader_dri3_drawable_fini(struct loader_dri3_drawable * draw)351 loader_dri3_drawable_fini(struct loader_dri3_drawable *draw)
352 {
353 int i;
354
355 draw->ext->core->destroyDrawable(draw->dri_drawable);
356
357 for (i = 0; i < ARRAY_SIZE(draw->buffers); i++) {
358 if (draw->buffers[i])
359 dri3_free_render_buffer(draw, draw->buffers[i]);
360 }
361
362 if (draw->special_event) {
363 xcb_void_cookie_t cookie =
364 xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
365 XCB_PRESENT_EVENT_MASK_NO_EVENT);
366
367 xcb_discard_reply(draw->conn, cookie.sequence);
368 xcb_unregister_for_special_event(draw->conn, draw->special_event);
369 }
370
371 if (draw->region)
372 xcb_xfixes_destroy_region(draw->conn, draw->region);
373
374 cnd_destroy(&draw->event_cnd);
375 mtx_destroy(&draw->mtx);
376 }
377
378 int
loader_dri3_drawable_init(xcb_connection_t * conn,xcb_drawable_t drawable,enum loader_dri3_drawable_type type,__DRIscreen * dri_screen,bool is_different_gpu,bool multiplanes_available,bool prefer_back_buffer_reuse,const __DRIconfig * dri_config,struct loader_dri3_extensions * ext,const struct loader_dri3_vtable * vtable,struct loader_dri3_drawable * draw)379 loader_dri3_drawable_init(xcb_connection_t *conn,
380 xcb_drawable_t drawable,
381 enum loader_dri3_drawable_type type,
382 __DRIscreen *dri_screen,
383 bool is_different_gpu,
384 bool multiplanes_available,
385 bool prefer_back_buffer_reuse,
386 const __DRIconfig *dri_config,
387 struct loader_dri3_extensions *ext,
388 const struct loader_dri3_vtable *vtable,
389 struct loader_dri3_drawable *draw)
390 {
391 xcb_get_geometry_cookie_t cookie;
392 xcb_get_geometry_reply_t *reply;
393 xcb_generic_error_t *error;
394
395 draw->conn = conn;
396 draw->ext = ext;
397 draw->vtable = vtable;
398 draw->drawable = drawable;
399 draw->type = type;
400 draw->region = 0;
401 draw->dri_screen = dri_screen;
402 draw->is_different_gpu = is_different_gpu;
403 draw->multiplanes_available = multiplanes_available;
404 draw->prefer_back_buffer_reuse = prefer_back_buffer_reuse;
405
406 draw->have_back = 0;
407 draw->have_fake_front = 0;
408 draw->first_init = true;
409 draw->adaptive_sync = false;
410 draw->adaptive_sync_active = false;
411
412 draw->cur_blit_source = -1;
413 draw->back_format = __DRI_IMAGE_FORMAT_NONE;
414 mtx_init(&draw->mtx, mtx_plain);
415 cnd_init(&draw->event_cnd);
416
417 if (draw->ext->config) {
418 unsigned char adaptive_sync = 0;
419
420 draw->ext->config->configQueryb(draw->dri_screen,
421 "adaptive_sync",
422 &adaptive_sync);
423
424 draw->adaptive_sync = adaptive_sync;
425 }
426
427 if (!draw->adaptive_sync)
428 set_adaptive_sync_property(conn, draw->drawable, false);
429
430 draw->swap_interval = dri_get_initial_swap_interval(draw->dri_screen,
431 draw->ext->config);
432
433 dri3_update_max_num_back(draw);
434
435 /* Create a new drawable */
436 draw->dri_drawable =
437 draw->ext->image_driver->createNewDrawable(dri_screen,
438 dri_config,
439 draw);
440
441 if (!draw->dri_drawable)
442 return 1;
443
444 cookie = xcb_get_geometry(draw->conn, draw->drawable);
445 reply = xcb_get_geometry_reply(draw->conn, cookie, &error);
446 if (reply == NULL || error != NULL) {
447 draw->ext->core->destroyDrawable(draw->dri_drawable);
448 return 1;
449 }
450
451 draw->screen = get_screen_for_root(draw->conn, reply->root);
452 draw->width = reply->width;
453 draw->height = reply->height;
454 draw->depth = reply->depth;
455 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
456 free(reply);
457
458 draw->swap_method = __DRI_ATTRIB_SWAP_UNDEFINED;
459 if (draw->ext->core->base.version >= 2) {
460 (void )draw->ext->core->getConfigAttrib(dri_config,
461 __DRI_ATTRIB_SWAP_METHOD,
462 &draw->swap_method);
463 }
464
465 /*
466 * Make sure server has the same swap interval we do for the new
467 * drawable.
468 */
469 loader_dri3_set_swap_interval(draw, draw->swap_interval);
470
471 return 0;
472 }
473
/*
 * Process one Present event and free it.
 */
static void
dri3_handle_present_event(struct loader_dri3_drawable *draw,
                          xcb_present_generic_event_t *ge)
{
   switch (ge->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *ce = (void *) ge;

      /* Window was resized: record the new size and invalidate the driver's
       * buffers so they get reallocated.
       */
      draw->width = ce->width;
      draw->height = ce->height;
      draw->vtable->set_drawable_size(draw, draw->width, draw->height);
      draw->ext->flush->invalidate(draw->dri_drawable);
      break;
   }
   case XCB_PRESENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *ce = (void *) ge;

      /* Compute the processed SBC number from the received 32-bit serial number
       * merged with the upper 32-bits of the sent 64-bit serial number while
       * checking for wrap.
       */
      if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         uint64_t recv_sbc = (draw->send_sbc & 0xffffffff00000000LL) | ce->serial;

         /* Only assume wraparound if that results in exactly the previous
          * SBC + 1, otherwise ignore received SBC > sent SBC (those are
          * probably from a previous loader_dri3_drawable instance) to avoid
          * calculating bogus target MSC values in loader_dri3_swap_buffers_msc
          */
         if (recv_sbc <= draw->send_sbc)
            draw->recv_sbc = recv_sbc;
         else if (recv_sbc == (draw->recv_sbc + 0x100000001ULL))
            draw->recv_sbc = recv_sbc - 0x100000000ULL;

         /* When moving from flip to copy, we assume that we can allocate in
          * a more optimal way if we don't need to cater for the display
          * controller.
          */
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
             draw->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }

         /* If the server tells us that our allocation is suboptimal, we
          * reallocate once.
          */
#ifdef HAVE_DRI3_MODIFIERS
         if (ce->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
             draw->last_present_mode != ce->mode) {
            for (int b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
               if (draw->buffers[b])
                  draw->buffers[b]->reallocate = true;
            }
         }
#endif
         draw->last_present_mode = ce->mode;

         if (draw->vtable->show_fps)
            draw->vtable->show_fps(draw, ce->ust);

         /* Record the UST/MSC of the completed swap. */
         draw->ust = ce->ust;
         draw->msc = ce->msc;
      } else if (ce->serial == draw->eid) {
         /* Completion of a NotifyMSC request (see loader_dri3_wait_for_msc). */
         draw->notify_ust = ce->ust;
         draw->notify_msc = ce->msc;
      }
      break;
   }
   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *ie = (void *) ge;
      int b;

      /* The server is done with this pixmap; mark the matching buffer
       * as no longer busy so it can be reused.
       */
      for (b = 0; b < ARRAY_SIZE(draw->buffers); b++) {
         struct loader_dri3_buffer *buf = draw->buffers[b];

         if (buf && buf->pixmap == ie->pixmap)
            buf->busy = 0;
      }
      break;
   }
   }
   free(ge);
}
563
/**
 * Wait for (and process) one Present special event.
 *
 * Must be called with draw->mtx held; the lock is dropped while blocking in
 * xcb_wait_for_special_event() and reacquired afterwards.
 *
 * \param full_sequence[out] If non-NULL, receives the X sequence number of
 *                           the processed event.
 * \return true if an event was processed (possibly by another thread — the
 *         caller must re-test its condition), false on connection error.
 */
static bool
dri3_wait_for_event_locked(struct loader_dri3_drawable *draw,
                           unsigned *full_sequence)
{
   xcb_generic_event_t *ev;
   xcb_present_generic_event_t *ge;

   xcb_flush(draw->conn);

   /* Only have one thread waiting for events at a time */
   if (draw->has_event_waiter) {
      cnd_wait(&draw->event_cnd, &draw->mtx);
      if (full_sequence)
         *full_sequence = draw->last_special_event_sequence;
      /* Another thread has updated the protected info, so retest. */
      return true;
   } else {
      draw->has_event_waiter = true;
      /* Allow other threads access to the drawable while we're waiting. */
      mtx_unlock(&draw->mtx);
      ev = xcb_wait_for_special_event(draw->conn, draw->special_event);
      mtx_lock(&draw->mtx);
      draw->has_event_waiter = false;
      cnd_broadcast(&draw->event_cnd);
   }
   if (!ev)
      return false;
   draw->last_special_event_sequence = ev->full_sequence;
   if (full_sequence)
      *full_sequence = ev->full_sequence;
   ge = (void *) ev;
   dri3_handle_present_event(draw, ge);
   return true;
}
598
/** loader_dri3_wait_for_msc
 *
 * Get the X server to send an event when the target msc/divisor/remainder is
 * reached.
 *
 * \return true on success; false if waiting on the event stream failed
 *         (e.g. connection error), in which case *ust/*msc/*sbc are not
 *         written.
 */
bool
loader_dri3_wait_for_msc(struct loader_dri3_drawable *draw,
                         int64_t target_msc,
                         int64_t divisor, int64_t remainder,
                         int64_t *ust, int64_t *msc, int64_t *sbc)
{
   /* Queue a PresentNotifyMSC request; its reply arrives as a
    * COMPLETE_NOTIFY event handled in dri3_handle_present_event().
    */
   xcb_void_cookie_t cookie = xcb_present_notify_msc(draw->conn,
                                                     draw->drawable,
                                                     draw->eid,
                                                     target_msc,
                                                     divisor,
                                                     remainder);
   unsigned full_sequence;

   mtx_lock(&draw->mtx);

   /* Wait for the event: keep processing events until we see one generated
    * at/after our request's sequence number and the notify MSC has reached
    * the target.
    */
   do {
      if (!dri3_wait_for_event_locked(draw, &full_sequence)) {
         mtx_unlock(&draw->mtx);
         return false;
      }
   } while (full_sequence != cookie.sequence || draw->notify_msc < target_msc);

   *ust = draw->notify_ust;
   *msc = draw->notify_msc;
   *sbc = draw->recv_sbc;
   mtx_unlock(&draw->mtx);

   return true;
}
635
636 /** loader_dri3_wait_for_sbc
637 *
638 * Wait for the completed swap buffer count to reach the specified
639 * target. Presumably the application knows that this will be reached with
640 * outstanding complete events, or we're going to be here awhile.
641 */
642 int
loader_dri3_wait_for_sbc(struct loader_dri3_drawable * draw,int64_t target_sbc,int64_t * ust,int64_t * msc,int64_t * sbc)643 loader_dri3_wait_for_sbc(struct loader_dri3_drawable *draw,
644 int64_t target_sbc, int64_t *ust,
645 int64_t *msc, int64_t *sbc)
646 {
647 /* From the GLX_OML_sync_control spec:
648 *
649 * "If <target_sbc> = 0, the function will block until all previous
650 * swaps requested with glXSwapBuffersMscOML for that window have
651 * completed."
652 */
653 mtx_lock(&draw->mtx);
654 if (!target_sbc)
655 target_sbc = draw->send_sbc;
656
657 while (draw->recv_sbc < target_sbc) {
658 if (!dri3_wait_for_event_locked(draw, NULL)) {
659 mtx_unlock(&draw->mtx);
660 return 0;
661 }
662 }
663
664 *ust = draw->ust;
665 *msc = draw->msc;
666 *sbc = draw->recv_sbc;
667 mtx_unlock(&draw->mtx);
668 return 1;
669 }
670
/** loader_dri3_find_back
 *
 * Find an idle back buffer. If there isn't one, then
 * wait for a present idle notify event from the X server
 *
 * \param prefer_a_different[in] Prefer an idle buffer other than the one
 *                               used last (DRI_PRIME; see comment below).
 * \return A LOADER_DRI3_BACK_ID slot index, or -1 on connection error.
 */
static int
dri3_find_back(struct loader_dri3_drawable *draw, bool prefer_a_different)
{
   int b;
   int num_to_consider;
   int max_num;

   mtx_lock(&draw->mtx);
   /* Increase the likelyhood of reusing current buffer */
   dri3_flush_present_events(draw);

   /* Check whether we need to reuse the current back buffer as new back.
    * In that case, wait until it's not busy anymore.
    */
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1) {
      /* No blit available: only the current blit source may be used. */
      num_to_consider = 1;
      max_num = 1;
      draw->cur_blit_source = -1;
   } else {
      num_to_consider = draw->cur_num_back;
      max_num = draw->max_num_back;
   }

   /* In a DRI_PRIME situation, if prefer_a_different is true, we first try
    * to find an idle buffer that is not the last used one.
    * This is useful if we receive a XCB_PRESENT_EVENT_IDLE_NOTIFY event
    * for a pixmap but it's not actually idle (eg: the DRI_PRIME blit is
    * still in progress).
    * Unigine Superposition hits this and this allows to use 2 back buffers
    * instead of reusing the same one all the time, causing the next frame
    * to wait for the copy to finish.
    */
   int current_back_id = draw->cur_back;
   for (;;) {
      /* Pass 1: look for an unallocated or idle slot. */
      for (b = 0; b < num_to_consider; b++) {
         int id = LOADER_DRI3_BACK_ID((b + draw->cur_back) % draw->cur_num_back);
         struct loader_dri3_buffer *buffer = draw->buffers[id];

         if (!buffer || (!buffer->busy &&
                         (!prefer_a_different || id != current_back_id))) {
            draw->cur_back = id;
            mtx_unlock(&draw->mtx);
            return id;
         }
      }

      /* Nothing idle: grow the buffer set if allowed, then relax the
       * "different buffer" preference, then block for an idle event.
       */
      if (num_to_consider < max_num) {
         num_to_consider = ++draw->cur_num_back;
      } else if (prefer_a_different) {
         prefer_a_different = false;
      } else if (!dri3_wait_for_event_locked(draw, NULL)) {
         mtx_unlock(&draw->mtx);
         return -1;
      }
   }
}
732
733 static xcb_gcontext_t
dri3_drawable_gc(struct loader_dri3_drawable * draw)734 dri3_drawable_gc(struct loader_dri3_drawable *draw)
735 {
736 if (!draw->gc) {
737 uint32_t v = 0;
738 xcb_create_gc(draw->conn,
739 (draw->gc = xcb_generate_id(draw->conn)),
740 draw->drawable,
741 XCB_GC_GRAPHICS_EXPOSURES,
742 &v);
743 }
744 return draw->gc;
745 }
746

/* Return the current back buffer slot (may be NULL if not yet allocated). */
static struct loader_dri3_buffer *
dri3_back_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)];
}
753
/* Return the (fake) front buffer slot (may be NULL if not yet allocated). */
static struct loader_dri3_buffer *
dri3_front_buffer(struct loader_dri3_drawable *draw)
{
   return draw->buffers[LOADER_DRI3_FRONT_ID];
}
759
760 static void
dri3_copy_area(xcb_connection_t * c,xcb_drawable_t src_drawable,xcb_drawable_t dst_drawable,xcb_gcontext_t gc,int16_t src_x,int16_t src_y,int16_t dst_x,int16_t dst_y,uint16_t width,uint16_t height)761 dri3_copy_area(xcb_connection_t *c,
762 xcb_drawable_t src_drawable,
763 xcb_drawable_t dst_drawable,
764 xcb_gcontext_t gc,
765 int16_t src_x,
766 int16_t src_y,
767 int16_t dst_x,
768 int16_t dst_y,
769 uint16_t width,
770 uint16_t height)
771 {
772 xcb_void_cookie_t cookie;
773
774 cookie = xcb_copy_area_checked(c,
775 src_drawable,
776 dst_drawable,
777 gc,
778 src_x,
779 src_y,
780 dst_x,
781 dst_y,
782 width,
783 height);
784 xcb_discard_reply(c, cookie.sequence);
785 }
786
/**
 * Asks the driver to flush any queued work necessary for serializing with the
 * X command stream, and optionally the slightly more strict requirement of
 * glFlush() equivalence (which would require flushing even if nothing had
 * been drawn to a window system framebuffer, for example).
 */
void
loader_dri3_flush(struct loader_dri3_drawable *draw,
                  unsigned flags,
                  enum __DRI2throttleReason throttle_reason)
{
   /* The drawable may not have a current context, in which case there is
    * nothing to flush.
    */
   __DRIcontext *dri_context = draw->vtable->get_dri_context(draw);

   if (dri_context) {
      draw->ext->flush->flush_with_flags(dri_context, draw->dri_drawable,
                                         flags, throttle_reason);
   }
}
806
/**
 * Copy a sub-rectangle of the back buffer to the window (GLX_MESA_copy_sub_buffer
 * style). \c y is given in GL coordinates (origin bottom-left) and converted
 * to X coordinates here.
 */
void
loader_dri3_copy_sub_buffer(struct loader_dri3_drawable *draw,
                            int x, int y,
                            int width, int height,
                            bool flush)
{
   struct loader_dri3_buffer *back;
   unsigned flags = __DRI2_FLUSH_DRAWABLE;

   /* Check we have the right attachments */
   if (!draw->have_back || draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
      return;

   if (flush)
      flags |= __DRI2_FLUSH_CONTEXT;
   loader_dri3_flush(draw, flags, __DRI2_THROTTLE_COPYSUBBUFFER);

   back = dri3_find_back_alloc(draw);
   if (!back)
      return;

   /* Convert from GL's bottom-left origin to X's top-left origin. */
   y = draw->height - y - height;

   if (draw->is_different_gpu) {
      /* Update the linear buffer part of the back buffer
       * for the dri3_copy_area operation
       */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   loader_dri3_swapbuffer_barrier(draw);
   /* Reset the fence, copy, then have the server trigger the fence when the
    * copy has completed; we await it at the end.
    */
   dri3_fence_reset(draw->conn, back);
   dri3_copy_area(draw->conn,
                  back->pixmap,
                  draw->drawable,
                  dri3_drawable_gc(draw),
                  x, y, x, y, width, height);
   dri3_fence_trigger(draw->conn, back);
   /* Refresh the fake front (if present) after we just damaged the real
    * front.
    */
   if (draw->have_fake_front &&
       !loader_dri3_blit_image(draw,
                               dri3_front_buffer(draw)->image,
                               back->image,
                               x, y, width, height,
                               x, y, __BLIT_FLAG_FLUSH) &&
       !draw->is_different_gpu) {
      /* No image blit available: fall back to a server-side copy,
       * synchronized with its own fence.
       */
      dri3_fence_reset(draw->conn, dri3_front_buffer(draw));
      dri3_copy_area(draw->conn,
                     back->pixmap,
                     dri3_front_buffer(draw)->pixmap,
                     dri3_drawable_gc(draw),
                     x, y, x, y, width, height);
      dri3_fence_trigger(draw->conn, dri3_front_buffer(draw));
      dri3_fence_await(draw->conn, NULL, dri3_front_buffer(draw));
   }
   dri3_fence_await(draw->conn, draw, back);
}
870
/**
 * Server-side copy of the whole drawable area from \c src to \c dest,
 * synchronized against the (fake) front buffer's fence if one exists.
 */
void
loader_dri3_copy_drawable(struct loader_dri3_drawable *draw,
                          xcb_drawable_t dest,
                          xcb_drawable_t src)
{
   loader_dri3_flush(draw, __DRI2_FLUSH_DRAWABLE, __DRI2_THROTTLE_COPYSUBBUFFER);

   struct loader_dri3_buffer *front = dri3_front_buffer(draw);
   if (front)
      dri3_fence_reset(draw->conn, front);

   dri3_copy_area(draw->conn,
                  src, dest,
                  dri3_drawable_gc(draw),
                  0, 0, 0, 0, draw->width, draw->height);

   if (front) {
      /* Wait for the server to finish the copy before returning. */
      dri3_fence_trigger(draw->conn, front);
      dri3_fence_await(draw->conn, draw, front);
   }
}
892
893 void
loader_dri3_wait_x(struct loader_dri3_drawable * draw)894 loader_dri3_wait_x(struct loader_dri3_drawable *draw)
895 {
896 struct loader_dri3_buffer *front;
897
898 if (draw == NULL || !draw->have_fake_front)
899 return;
900
901 front = dri3_front_buffer(draw);
902
903 loader_dri3_copy_drawable(draw, front->pixmap, draw->drawable);
904
905 /* In the psc->is_different_gpu case, the linear buffer has been updated,
906 * but not yet the tiled buffer.
907 * Copy back to the tiled buffer we use for rendering.
908 * Note that we don't need flushing.
909 */
910 if (draw->is_different_gpu)
911 (void) loader_dri3_blit_image(draw,
912 front->image,
913 front->linear_buffer,
914 0, 0, front->width, front->height,
915 0, 0, 0);
916 }
917
/**
 * Push the fake front buffer's contents out to the real (X server) front.
 * No-op when the drawable has no fake front.
 */
void
loader_dri3_wait_gl(struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *front;

   if (draw == NULL || !draw->have_fake_front)
      return;

   front = dri3_front_buffer(draw);

   /* In the psc->is_different_gpu case, we update the linear_buffer
    * before updating the real front.
    */
   if (draw->is_different_gpu)
      (void) loader_dri3_blit_image(draw,
                                    front->linear_buffer,
                                    front->image,
                                    0, 0, front->width, front->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   loader_dri3_swapbuffer_barrier(draw);
   /* Server-side copy of the fake front into the window. */
   loader_dri3_copy_drawable(draw, draw->drawable, front->pixmap);
}
940
941 /** dri3_flush_present_events
942 *
943 * Process any present events that have been received from the X server
944 */
945 static void
dri3_flush_present_events(struct loader_dri3_drawable * draw)946 dri3_flush_present_events(struct loader_dri3_drawable *draw)
947 {
948 /* Check to see if any configuration changes have occurred
949 * since we were last invoked
950 */
951 if (draw->has_event_waiter)
952 return;
953
954 if (draw->special_event) {
955 xcb_generic_event_t *ev;
956
957 while ((ev = xcb_poll_for_special_event(draw->conn,
958 draw->special_event)) != NULL) {
959 xcb_present_generic_event_t *ge = (void *) ev;
960 dri3_handle_present_event(draw, ge);
961 }
962 }
963 }
964
/** loader_dri3_swap_buffers_msc
 *
 * Make the current back buffer visible using the present extension.
 *
 * \param draw         the drawable to present
 * \param target_msc   MSC at which to present; 0 (with divisor and
 *                     remainder 0) means "glXSwapBuffers semantics"
 * \param divisor      OML_sync_control divisor
 * \param remainder    OML_sync_control remainder
 * \param flush_flags  passed to the vtable flush_drawable hook
 * \param rects        optional damage rectangles (x, y, w, h quads in
 *                     GL coordinates, y-up) or NULL
 * \param n_rects      number of entries in \p rects
 * \param force_copy   EGL uses this to try to preserve the back buffer
 * \return the swap buffer count (send_sbc) of this swap, or 0 if the
 *         swap was a no-op or could not be performed.
 */
int64_t
loader_dri3_swap_buffers_msc(struct loader_dri3_drawable *draw,
                             int64_t target_msc, int64_t divisor,
                             int64_t remainder, unsigned flush_flags,
                             const int *rects, int n_rects,
                             bool force_copy)
{
   struct loader_dri3_buffer *back;
   int64_t ret = 0;

   /* GLX spec:
    *   void glXSwapBuffers(Display *dpy, GLXDrawable draw);
    *   This operation is a no-op if draw was created with a non-double-buffered
    *   GLXFBConfig, or if draw is a GLXPixmap.
    *   ...
    *   GLX pixmaps may be created with a config that includes back buffers and
    *   stereoscopic buffers. However, glXSwapBuffers is ignored for these pixmaps.
    *   ...
    *   It is possible to create a pbuffer with back buffers and to swap the
    *   front and back buffers by calling glXSwapBuffers.
    *
    * EGL spec:
    *   EGLBoolean eglSwapBuffers(EGLDisplay dpy, EGLSurface surface);
    *   If surface is a back-buffered window surface, then the color buffer is
    *   copied to the native window associated with that surface. If surface is
    *   a single-buffered window, pixmap, or pbuffer surface, eglSwapBuffers has
    *   no effect.
    *
    * SwapBuffer effect:
    *       |           GLX             |           EGL            |
    *       | window | pixmap | pbuffer | window | pixmap | pbuffer|
    *-------+--------+--------+---------+--------+--------+--------+
    * single|  nop   |  nop   |   nop   |  nop   |  nop   |   nop  |
    * double|  swap  |  nop   |  swap   |  swap  |   NA   |   NA   |
    */
   if (!draw->have_back || draw->type == LOADER_DRI3_DRAWABLE_PIXMAP)
      return ret;

   draw->vtable->flush_drawable(draw, flush_flags);

   back = dri3_find_back_alloc(draw);
   /* Could only happen when error case, like display is already closed. */
   if (!back)
      return ret;

   mtx_lock(&draw->mtx);

   /* Turn on the display's adaptive-sync property the first time we swap. */
   if (draw->adaptive_sync && !draw->adaptive_sync_active) {
      set_adaptive_sync_property(draw->conn, draw->drawable, true);
      draw->adaptive_sync_active = true;
   }

   if (draw->is_different_gpu) {
      /* Update the linear buffer before presenting the pixmap */
      (void) loader_dri3_blit_image(draw,
                                    back->linear_buffer,
                                    back->image,
                                    0, 0, back->width, back->height,
                                    0, 0, __BLIT_FLAG_FLUSH);
   }

   /* If we need to preload the new back buffer, remember the source.
    * The force_copy parameter is used by EGL to attempt to preserve
    * the back buffer across a call to this function.
    */
   if (draw->swap_method != __DRI_ATTRIB_SWAP_UNDEFINED || force_copy)
      draw->cur_blit_source = LOADER_DRI3_BACK_ID(draw->cur_back);

   /* Exchange the back and fake front. Even though the server knows about these
    * buffers, it has no notion of back and fake front.
    */
   if (draw->have_fake_front) {
      struct loader_dri3_buffer *tmp;

      tmp = dri3_front_buffer(draw);
      draw->buffers[LOADER_DRI3_FRONT_ID] = back;
      draw->buffers[LOADER_DRI3_BACK_ID(draw->cur_back)] = tmp;

      if (draw->swap_method == __DRI_ATTRIB_SWAP_COPY || force_copy)
         draw->cur_blit_source = LOADER_DRI3_FRONT_ID;
   }

   dri3_flush_present_events(draw);

   if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
      dri3_fence_reset(draw->conn, back);

      /* Compute when we want the frame shown by taking the last known
       * successful MSC and adding in a swap interval for each outstanding swap
       * request. target_msc=divisor=remainder=0 means "Use glXSwapBuffers()
       * semantic"
       */
      ++draw->send_sbc;
      if (target_msc == 0 && divisor == 0 && remainder == 0)
         target_msc = draw->msc + abs(draw->swap_interval) *
                      (draw->send_sbc - draw->recv_sbc);
      else if (divisor == 0 && remainder > 0) {
         /* From the GLX_OML_sync_control spec:
          *     "If <divisor> = 0, the swap will occur when MSC becomes
          *      greater than or equal to <target_msc>."
          *
          * Note that there's no mention of the remainder.  The Present
          * extension throws BadValue for remainder != 0 with divisor == 0, so
          * just drop the passed in value.
          */
         remainder = 0;
      }

      /* From the GLX_EXT_swap_control spec
       * and the EGL 1.4 spec (page 53):
       *
       *     "If <interval> is set to a value of 0, buffer swaps are not
       *      synchronized to a video frame."
       *
       * From GLX_EXT_swap_control_tear:
       *
       *     "If <interval> is negative, the minimum number of video frames
       *      between buffer swaps is the absolute value of <interval>. In this
       *      case, if abs(<interval>) video frames have already passed from
       *      the previous swap when the swap is ready to be performed, the
       *      swap will occur without synchronization to a video frame."
       *
       * Implementation note: It is possible to enable triple buffering
       * behaviour by not using XCB_PRESENT_OPTION_ASYNC, but this should not be
       * the default.
       */
      uint32_t options = XCB_PRESENT_OPTION_NONE;
      if (draw->swap_interval <= 0)
         options |= XCB_PRESENT_OPTION_ASYNC;

      /* If we need to populate the new back, but need to reuse the back
       * buffer slot due to lack of local blit capabilities, make sure
       * the server doesn't flip and we deadlock.
       */
      if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1)
         options |= XCB_PRESENT_OPTION_COPY;
#ifdef HAVE_DRI3_MODIFIERS
      if (draw->multiplanes_available)
         options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
      back->busy = 1;
      back->last_swap = draw->send_sbc;

      /* Lazily create a reusable XFixes region for damage rectangles. */
      if (!draw->region) {
         draw->region = xcb_generate_id(draw->conn);
         xcb_xfixes_create_region(draw->conn, draw->region, 0, NULL);
      }

      xcb_xfixes_region_t region = 0;
      xcb_rectangle_t xcb_rects[64];

      /* Rects beyond 64 entries fall back to full-drawable presents
       * (region stays 0).  Y is flipped from GL to X coordinates.
       */
      if (n_rects > 0 && n_rects <= ARRAY_SIZE(xcb_rects)) {
         for (int i = 0; i < n_rects; i++) {
            const int *rect = &rects[i * 4];
            xcb_rects[i].x = rect[0];
            xcb_rects[i].y = draw->height - rect[1] - rect[3];
            xcb_rects[i].width = rect[2];
            xcb_rects[i].height = rect[3];
         }

         region = draw->region;
         xcb_xfixes_set_region(draw->conn, region, n_rects, xcb_rects);
      }

      xcb_present_pixmap(draw->conn,
                         draw->drawable,
                         back->pixmap,
                         (uint32_t) draw->send_sbc,
                         0,                                    /* valid */
                         region,                               /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         None,                                 /* target_crtc */
                         None,
                         back->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   } else {
      /* This can only be reached by double buffered GLXPbuffer. */
      assert(draw->type == LOADER_DRI3_DRAWABLE_PBUFFER);
      /* GLX does not have damage regions. */
      assert(n_rects == 0);

      /* For wait and buffer age usage. */
      draw->send_sbc++;
      draw->recv_sbc = back->last_swap = draw->send_sbc;

      /* Pixmap is imported as front buffer image when same GPU case, so just
       * locally blit back buffer image to it is enough. Otherwise front buffer
       * is a fake one which needs to be synced with pixmap by xserver remotely.
       */
      if (draw->is_different_gpu ||
          !loader_dri3_blit_image(draw,
                                  dri3_front_buffer(draw)->image,
                                  back->image,
                                  0, 0, draw->width, draw->height,
                                  0, 0, __BLIT_FLAG_FLUSH)) {
         dri3_copy_area(draw->conn, back->pixmap,
                        draw->drawable,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0, draw->width, draw->height);
      }
   }

   ret = (int64_t) draw->send_sbc;

   /* Schedule a server-side back-preserving blit if necessary.
    * This happens iff all conditions below are satisfied:
    * a) We have a fake front,
    * b) We need to preserve the back buffer,
    * c) We don't have local blit capabilities.
    */
   if (!loader_dri3_have_image_blit(draw) && draw->cur_blit_source != -1 &&
       draw->cur_blit_source != LOADER_DRI3_BACK_ID(draw->cur_back)) {
      struct loader_dri3_buffer *new_back = dri3_back_buffer(draw);
      struct loader_dri3_buffer *src = draw->buffers[draw->cur_blit_source];

      dri3_fence_reset(draw->conn, new_back);
      dri3_copy_area(draw->conn, src->pixmap,
                     new_back->pixmap,
                     dri3_drawable_gc(draw),
                     0, 0, 0, 0, draw->width, draw->height);
      dri3_fence_trigger(draw->conn, new_back);
      new_back->last_swap = src->last_swap;
   }

   xcb_flush(draw->conn);
   if (draw->stamp)
      ++(*draw->stamp);

   mtx_unlock(&draw->mtx);

   /* Tell the driver its cached buffer info is stale. */
   draw->ext->flush->invalidate(draw->dri_drawable);

   return ret;
}
1207
1208 int
loader_dri3_query_buffer_age(struct loader_dri3_drawable * draw)1209 loader_dri3_query_buffer_age(struct loader_dri3_drawable *draw)
1210 {
1211 struct loader_dri3_buffer *back = dri3_find_back_alloc(draw);
1212 int ret;
1213
1214 mtx_lock(&draw->mtx);
1215 ret = (!back || back->last_swap == 0) ? 0 :
1216 draw->send_sbc - back->last_swap + 1;
1217 mtx_unlock(&draw->mtx);
1218
1219 return ret;
1220 }
1221
1222 /** loader_dri3_open
1223 *
1224 * Wrapper around xcb_dri3_open
1225 */
1226 int
loader_dri3_open(xcb_connection_t * conn,xcb_window_t root,uint32_t provider)1227 loader_dri3_open(xcb_connection_t *conn,
1228 xcb_window_t root,
1229 uint32_t provider)
1230 {
1231 xcb_dri3_open_cookie_t cookie;
1232 xcb_dri3_open_reply_t *reply;
1233 xcb_xfixes_query_version_cookie_t fixes_cookie;
1234 xcb_xfixes_query_version_reply_t *fixes_reply;
1235 int fd;
1236
1237 cookie = xcb_dri3_open(conn,
1238 root,
1239 provider);
1240
1241 reply = xcb_dri3_open_reply(conn, cookie, NULL);
1242 if (!reply)
1243 return -1;
1244
1245 if (reply->nfd != 1) {
1246 free(reply);
1247 return -1;
1248 }
1249
1250 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
1251 free(reply);
1252 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
1253
1254 /* let the server know our xfixes level */
1255 fixes_cookie = xcb_xfixes_query_version(conn,
1256 XCB_XFIXES_MAJOR_VERSION,
1257 XCB_XFIXES_MINOR_VERSION);
1258 fixes_reply = xcb_xfixes_query_version_reply(conn, fixes_cookie, NULL);
1259 free(fixes_reply);
1260
1261 return fd;
1262 }
1263
1264 static uint32_t
dri3_cpp_for_format(uint32_t format)1265 dri3_cpp_for_format(uint32_t format) {
1266 switch (format) {
1267 case __DRI_IMAGE_FORMAT_R8:
1268 return 1;
1269 case __DRI_IMAGE_FORMAT_RGB565:
1270 case __DRI_IMAGE_FORMAT_GR88:
1271 return 2;
1272 case __DRI_IMAGE_FORMAT_XRGB8888:
1273 case __DRI_IMAGE_FORMAT_ARGB8888:
1274 case __DRI_IMAGE_FORMAT_ABGR8888:
1275 case __DRI_IMAGE_FORMAT_XBGR8888:
1276 case __DRI_IMAGE_FORMAT_XRGB2101010:
1277 case __DRI_IMAGE_FORMAT_ARGB2101010:
1278 case __DRI_IMAGE_FORMAT_XBGR2101010:
1279 case __DRI_IMAGE_FORMAT_ABGR2101010:
1280 case __DRI_IMAGE_FORMAT_SARGB8:
1281 case __DRI_IMAGE_FORMAT_SABGR8:
1282 case __DRI_IMAGE_FORMAT_SXRGB8:
1283 return 4;
1284 case __DRI_IMAGE_FORMAT_ABGR16161616:
1285 case __DRI_IMAGE_FORMAT_XBGR16161616:
1286 case __DRI_IMAGE_FORMAT_XBGR16161616F:
1287 case __DRI_IMAGE_FORMAT_ABGR16161616F:
1288 return 8;
1289 case __DRI_IMAGE_FORMAT_NONE:
1290 default:
1291 return 0;
1292 }
1293 }
1294
1295 /* Map format of render buffer to corresponding format for the linear_buffer
1296 * used for sharing with the display gpu of a Prime setup (== is_different_gpu).
1297 * Usually linear_format == format, except for depth >= 30 formats, where
1298 * different gpu vendors have different preferences wrt. color channel ordering.
1299 */
1300 static uint32_t
dri3_linear_format_for_format(struct loader_dri3_drawable * draw,uint32_t format)1301 dri3_linear_format_for_format(struct loader_dri3_drawable *draw, uint32_t format)
1302 {
1303 switch (format) {
1304 case __DRI_IMAGE_FORMAT_XRGB2101010:
1305 case __DRI_IMAGE_FORMAT_XBGR2101010:
1306 /* Different preferred formats for different hw */
1307 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1308 return __DRI_IMAGE_FORMAT_XBGR2101010;
1309 else
1310 return __DRI_IMAGE_FORMAT_XRGB2101010;
1311
1312 case __DRI_IMAGE_FORMAT_ARGB2101010:
1313 case __DRI_IMAGE_FORMAT_ABGR2101010:
1314 /* Different preferred formats for different hw */
1315 if (dri3_get_red_mask_for_depth(draw, 30) == 0x3ff)
1316 return __DRI_IMAGE_FORMAT_ABGR2101010;
1317 else
1318 return __DRI_IMAGE_FORMAT_ARGB2101010;
1319
1320 default:
1321 return format;
1322 }
1323 }
1324
1325 /* the DRIimage createImage function takes __DRI_IMAGE_FORMAT codes, while
1326 * the createImageFromFds call takes DRM_FORMAT codes. To avoid
1327 * complete confusion, just deal in __DRI_IMAGE_FORMAT codes for now and
1328 * translate to DRM_FORMAT codes in the call to createImageFromFds
1329 */
1330 static int
image_format_to_fourcc(int format)1331 image_format_to_fourcc(int format)
1332 {
1333
1334 /* Convert from __DRI_IMAGE_FORMAT to DRM_FORMAT (sigh) */
1335 switch (format) {
1336 case __DRI_IMAGE_FORMAT_SARGB8: return __DRI_IMAGE_FOURCC_SARGB8888;
1337 case __DRI_IMAGE_FORMAT_SABGR8: return __DRI_IMAGE_FOURCC_SABGR8888;
1338 case __DRI_IMAGE_FORMAT_SXRGB8: return __DRI_IMAGE_FOURCC_SXRGB8888;
1339 case __DRI_IMAGE_FORMAT_RGB565: return DRM_FORMAT_RGB565;
1340 case __DRI_IMAGE_FORMAT_XRGB8888: return DRM_FORMAT_XRGB8888;
1341 case __DRI_IMAGE_FORMAT_ARGB8888: return DRM_FORMAT_ARGB8888;
1342 case __DRI_IMAGE_FORMAT_ABGR8888: return DRM_FORMAT_ABGR8888;
1343 case __DRI_IMAGE_FORMAT_XBGR8888: return DRM_FORMAT_XBGR8888;
1344 case __DRI_IMAGE_FORMAT_XRGB2101010: return DRM_FORMAT_XRGB2101010;
1345 case __DRI_IMAGE_FORMAT_ARGB2101010: return DRM_FORMAT_ARGB2101010;
1346 case __DRI_IMAGE_FORMAT_XBGR2101010: return DRM_FORMAT_XBGR2101010;
1347 case __DRI_IMAGE_FORMAT_ABGR2101010: return DRM_FORMAT_ABGR2101010;
1348 case __DRI_IMAGE_FORMAT_ABGR16161616: return DRM_FORMAT_ABGR16161616;
1349 case __DRI_IMAGE_FORMAT_XBGR16161616: return DRM_FORMAT_XBGR16161616;
1350 case __DRI_IMAGE_FORMAT_XBGR16161616F: return DRM_FORMAT_XBGR16161616F;
1351 case __DRI_IMAGE_FORMAT_ABGR16161616F: return DRM_FORMAT_ABGR16161616F;
1352 }
1353 return 0;
1354 }
1355
#ifdef HAVE_DRI3_MODIFIERS
/**
 * Check whether the driver supports at least one of the given format
 * modifiers for \p format.
 *
 * \param draw       drawable providing the image extension and screen
 * \param format     DRM fourcc format code
 * \param modifiers  candidate modifier list (from the X server)
 * \param count      number of entries in \p modifiers
 * \return true iff the driver's supported-modifier set intersects
 *         \p modifiers; false on any query or allocation failure.
 */
static bool
has_supported_modifier(struct loader_dri3_drawable *draw, unsigned int format,
                       uint64_t *modifiers, uint32_t count)
{
   uint64_t *supported_modifiers;
   int32_t supported_modifiers_count;
   bool found = false;
   int32_t i;
   uint32_t j;

   /* First call only counts the driver's supported modifiers. */
   if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen,
                                               format, 0, NULL, NULL,
                                               &supported_modifiers_count) ||
       supported_modifiers_count == 0)
      return false;

   supported_modifiers = malloc(supported_modifiers_count * sizeof(uint64_t));
   if (!supported_modifiers)
      return false;

   /* Second call fills the array.  Bail out if it fails rather than
    * scanning indeterminate memory (the original ignored this result).
    */
   if (!draw->ext->image->queryDmaBufModifiers(draw->dri_screen, format,
                                               supported_modifiers_count,
                                               supported_modifiers, NULL,
                                               &supported_modifiers_count))
      goto out;

   for (i = 0; !found && i < supported_modifiers_count; i++) {
      for (j = 0; !found && j < count; j++) {
         if (supported_modifiers[i] == modifiers[j])
            found = true;
      }
   }

out:
   free(supported_modifiers);
   return found;
}
#endif
1392
/** loader_dri3_alloc_render_buffer
 *
 * Use the driver createImage function to construct a __DRIimage, then
 * get a file descriptor for that and create an X pixmap from that
 *
 * Allocate an xshmfence for synchronization
 *
 * \param draw    owning drawable (supplies screen, image extension, flags)
 * \param format  __DRI_IMAGE_FORMAT code for the buffer
 * \param width   buffer width in pixels
 * \param height  buffer height in pixels
 * \param depth   X visual depth used when creating the pixmap
 * \return a fully initialized buffer (image, pixmap, fences) marked
 *         idle, or NULL on any failure.
 */
static struct loader_dri3_buffer *
dri3_alloc_render_buffer(struct loader_dri3_drawable *draw, unsigned int format,
                         int width, int height, int depth)
{
   struct loader_dri3_buffer *buffer;
   __DRIimage *pixmap_buffer = NULL, *linear_buffer_display_gpu = NULL;
   xcb_pixmap_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int buffer_fds[4], fence_fd;
   int num_planes = 0;
   uint64_t *modifiers = NULL;
   uint32_t count = 0;
   int i, mod;
   int ret;

   /* Create an xshmfence object and
    * prepare to send that to the X server
    */

   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      return NULL;

   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL)
      goto no_shm_fence;

   /* Allocate the image from the driver
    */
   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   buffer->cpp = dri3_cpp_for_format(format);
   if (!buffer->cpp)
      goto no_image;

   if (!draw->is_different_gpu) {
#ifdef HAVE_DRI3_MODIFIERS
      /* Same-GPU path: ask the X server which modifiers it prefers for
       * this window/screen, keep them only if the driver supports one.
       */
      if (draw->multiplanes_available &&
          draw->ext->image->base.version >= 15 &&
          draw->ext->image->queryDmaBufModifiers &&
          draw->ext->image->createImageWithModifiers) {
         xcb_dri3_get_supported_modifiers_cookie_t mod_cookie;
         xcb_dri3_get_supported_modifiers_reply_t *mod_reply;
         xcb_generic_error_t *error = NULL;

         mod_cookie = xcb_dri3_get_supported_modifiers(draw->conn,
                                                       draw->window,
                                                       depth, buffer->cpp * 8);
         mod_reply = xcb_dri3_get_supported_modifiers_reply(draw->conn,
                                                            mod_cookie,
                                                            &error);
         if (!mod_reply)
            goto no_image;

         /* Window-specific modifiers take precedence when usable. */
         if (mod_reply->num_window_modifiers) {
            count = mod_reply->num_window_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
                   count * sizeof(uint64_t));

            /* Driver supports none of the window modifiers: fall back
             * to the screen modifier list below.
             */
            if (!has_supported_modifier(draw, image_format_to_fourcc(format),
                                        modifiers, count)) {
               free(modifiers);
               count = 0;
               modifiers = NULL;
            }
         }

         if (mod_reply->num_screen_modifiers && modifiers == NULL) {
            count = mod_reply->num_screen_modifiers;
            modifiers = malloc(count * sizeof(uint64_t));
            if (!modifiers) {
               /* NOTE(review): free(modifiers) here frees NULL and is a
                * no-op; kept as-is.
                */
               free(modifiers);
               free(mod_reply);
               goto no_image;
            }

            memcpy(modifiers,
                   xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
                   count * sizeof(uint64_t));
         }

         free(mod_reply);
      }
#endif
      buffer->image = loader_dri_create_image(draw->dri_screen, draw->ext->image,
                                              width, height, format,
                                              __DRI_IMAGE_USE_SHARE |
                                              __DRI_IMAGE_USE_SCANOUT |
                                              __DRI_IMAGE_USE_BACKBUFFER |
                                              (draw->is_protected_content ?
                                               __DRI_IMAGE_USE_PROTECTED : 0),
                                              modifiers, count, buffer);
      free(modifiers);

      pixmap_buffer = buffer->image;

      if (!buffer->image)
         goto no_image;
   } else {
      /* Prime (different GPU) path: render buffer on the render GPU plus
       * a linear buffer that the display GPU/server can read.
       */
      buffer->image = draw->ext->image->createImage(draw->dri_screen,
                                                    width, height,
                                                    format,
                                                    0,
                                                    buffer);

      if (!buffer->image)
         goto no_image;

      /* if driver name is same only then dri_screen_display_gpu is set.
       * This check is needed because for simplicity render gpu image extension
       * is also used for display gpu.
       */
      if (draw->dri_screen_display_gpu) {
         linear_buffer_display_gpu =
            draw->ext->image->createImage(draw->dri_screen_display_gpu,
                                          width, height,
                                          dri3_linear_format_for_format(draw, format),
                                          __DRI_IMAGE_USE_SHARE |
                                          __DRI_IMAGE_USE_LINEAR |
                                          __DRI_IMAGE_USE_BACKBUFFER |
                                          __DRI_IMAGE_USE_SCANOUT,
                                          buffer);
         pixmap_buffer = linear_buffer_display_gpu;
      }

      if (!pixmap_buffer) {
         /* Display-GPU allocation unavailable or failed: allocate the
          * linear buffer on the render GPU instead.
          */
         buffer->linear_buffer =
            draw->ext->image->createImage(draw->dri_screen,
                                          width, height,
                                          dri3_linear_format_for_format(draw, format),
                                          __DRI_IMAGE_USE_SHARE |
                                          __DRI_IMAGE_USE_LINEAR |
                                          __DRI_IMAGE_USE_BACKBUFFER |
                                          __DRI_IMAGE_USE_SCANOUT |
                                          __DRI_IMAGE_USE_PRIME_BUFFER,
                                          buffer);

         pixmap_buffer = buffer->linear_buffer;
         if (!buffer->linear_buffer) {
            goto no_linear_buffer;
         }
      }
   }

   /* X want some information about the planes, so ask the image for it
    */
   if (!draw->ext->image->queryImage(pixmap_buffer, __DRI_IMAGE_ATTRIB_NUM_PLANES,
                                     &num_planes))
      num_planes = 1;

   /* Export one fd + stride/offset per plane for the pixmap request. */
   for (i = 0; i < num_planes; i++) {
      __DRIimage *image = draw->ext->image->fromPlanar(pixmap_buffer, i, NULL);

      if (!image) {
         assert(i == 0);
         image = pixmap_buffer;
      }

      buffer_fds[i] = -1;

      ret = draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_FD,
                                         &buffer_fds[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE,
                                          &buffer->strides[i]);
      ret &= draw->ext->image->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET,
                                          &buffer->offsets[i]);
      if (image != pixmap_buffer)
         draw->ext->image->destroyImage(image);

      if (!ret)
         goto no_buffer_attrib;
   }

   /* Reassemble the 64-bit modifier from the two 32-bit query halves. */
   ret = draw->ext->image->queryImage(pixmap_buffer,
                                      __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, &mod);
   buffer->modifier = (uint64_t) mod << 32;
   ret &= draw->ext->image->queryImage(pixmap_buffer,
                                       __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, &mod);
   buffer->modifier |= (uint64_t)(mod & 0xffffffff);

   if (!ret)
      buffer->modifier = DRM_FORMAT_MOD_INVALID;

   if (draw->is_different_gpu && draw->dri_screen_display_gpu &&
       linear_buffer_display_gpu) {
      /* The linear buffer was created in the display GPU's vram, so we
       * need to make it visible to render GPU
       */
      if (draw->ext->image->base.version >= 20)
         buffer->linear_buffer =
            draw->ext->image->createImageFromFds2(draw->dri_screen,
                                                  width,
                                                  height,
                                                  image_format_to_fourcc(format),
                                                  &buffer_fds[0], num_planes,
                                                  __DRI_IMAGE_PRIME_LINEAR_BUFFER,
                                                  &buffer->strides[0],
                                                  &buffer->offsets[0],
                                                  buffer);
      else
         buffer->linear_buffer =
            draw->ext->image->createImageFromFds(draw->dri_screen,
                                                 width,
                                                 height,
                                                 image_format_to_fourcc(format),
                                                 &buffer_fds[0], num_planes,
                                                 &buffer->strides[0],
                                                 &buffer->offsets[0],
                                                 buffer);
      if (!buffer->linear_buffer)
         goto no_buffer_attrib;

      draw->ext->image->destroyImage(linear_buffer_display_gpu);
   }

   /* Hand the exported fds to the X server to create the pixmap. */
   pixmap = xcb_generate_id(draw->conn);
#ifdef HAVE_DRI3_MODIFIERS
   if (draw->multiplanes_available &&
       buffer->modifier != DRM_FORMAT_MOD_INVALID) {
      xcb_dri3_pixmap_from_buffers(draw->conn,
                                   pixmap,
                                   draw->window,
                                   num_planes,
                                   width, height,
                                   buffer->strides[0], buffer->offsets[0],
                                   buffer->strides[1], buffer->offsets[1],
                                   buffer->strides[2], buffer->offsets[2],
                                   buffer->strides[3], buffer->offsets[3],
                                   depth, buffer->cpp * 8,
                                   buffer->modifier,
                                   buffer_fds);
   } else
#endif
   {
      xcb_dri3_pixmap_from_buffer(draw->conn,
                                  pixmap,
                                  draw->drawable,
                                  buffer->size,
                                  width, height, buffer->strides[0],
                                  depth, buffer->cpp * 8,
                                  buffer_fds[0]);
   }

   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);

   buffer->pixmap = pixmap;
   buffer->own_pixmap = true;
   buffer->sync_fence = sync_fence;
   buffer->shm_fence = shm_fence;
   buffer->width = width;
   buffer->height = height;

   /* Mark the buffer as idle
    */
   dri3_fence_set(buffer);

   return buffer;

   /* Error unwinding: labels release, in reverse order, everything
    * acquired before the failing step.
    */
no_buffer_attrib:
   /* Close the plane fds exported so far; i is the failing plane index
    * (or num_planes if the failure came after the export loop).
    */
   do {
      if (buffer_fds[i] != -1)
         close(buffer_fds[i]);
   } while (--i >= 0);
   draw->ext->image->destroyImage(pixmap_buffer);
no_linear_buffer:
   if (draw->is_different_gpu)
      draw->ext->image->destroyImage(buffer->image);
no_image:
   free(buffer);
no_buffer:
   xshmfence_unmap_shm(shm_fence);
no_shm_fence:
   close(fence_fd);
   return NULL;
}
1689
1690 static bool
dri3_detect_drawable_is_window(struct loader_dri3_drawable * draw)1691 dri3_detect_drawable_is_window(struct loader_dri3_drawable *draw)
1692 {
1693 /* Try to select for input on the window.
1694 *
1695 * If the drawable is a window, this will get our events
1696 * delivered.
1697 *
1698 * Otherwise, we'll get a BadWindow error back from this request which
1699 * will let us know that the drawable is a pixmap instead.
1700 */
1701
1702 xcb_void_cookie_t cookie =
1703 xcb_present_select_input_checked(draw->conn, draw->eid, draw->drawable,
1704 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1705 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1706 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1707
1708 /* Check to see if our select input call failed. If it failed with a
1709 * BadWindow error, then assume the drawable is a pixmap.
1710 */
1711 xcb_generic_error_t *error = xcb_request_check(draw->conn, cookie);
1712
1713 if (error) {
1714 if (error->error_code != BadWindow) {
1715 free(error);
1716 return false;
1717 }
1718 free(error);
1719
1720 /* pixmap can't get here, see driFetchDrawable(). */
1721 draw->type = LOADER_DRI3_DRAWABLE_PBUFFER;
1722 return true;
1723 }
1724
1725 draw->type = LOADER_DRI3_DRAWABLE_WINDOW;
1726 return true;
1727 }
1728
1729 static bool
dri3_setup_present_event(struct loader_dri3_drawable * draw)1730 dri3_setup_present_event(struct loader_dri3_drawable *draw)
1731 {
1732 /* No need to setup for pixmap drawable. */
1733 if (draw->type == LOADER_DRI3_DRAWABLE_PIXMAP ||
1734 draw->type == LOADER_DRI3_DRAWABLE_PBUFFER)
1735 return true;
1736
1737 draw->eid = xcb_generate_id(draw->conn);
1738
1739 if (draw->type == LOADER_DRI3_DRAWABLE_WINDOW) {
1740 xcb_present_select_input(draw->conn, draw->eid, draw->drawable,
1741 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1742 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1743 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1744 } else {
1745 assert(draw->type == LOADER_DRI3_DRAWABLE_UNKNOWN);
1746
1747 if (!dri3_detect_drawable_is_window(draw))
1748 return false;
1749
1750 if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1751 return true;
1752 }
1753
1754 /* Create an XCB event queue to hold present events outside of the usual
1755 * application event queue
1756 */
1757 draw->special_event = xcb_register_for_special_xge(draw->conn,
1758 &xcb_present_id,
1759 draw->eid,
1760 draw->stamp);
1761 return true;
1762 }
1763
1764 /** loader_dri3_update_drawable
1765 *
1766 * Called the first time we use the drawable and then
1767 * after we receive present configure notify events to
1768 * track the geometry of the drawable
1769 */
1770 static int
dri3_update_drawable(struct loader_dri3_drawable * draw)1771 dri3_update_drawable(struct loader_dri3_drawable *draw)
1772 {
1773 mtx_lock(&draw->mtx);
1774 if (draw->first_init) {
1775 xcb_get_geometry_cookie_t geom_cookie;
1776 xcb_get_geometry_reply_t *geom_reply;
1777 xcb_window_t root_win;
1778
1779 draw->first_init = false;
1780
1781 if (!dri3_setup_present_event(draw)) {
1782 mtx_unlock(&draw->mtx);
1783 return false;
1784 }
1785
1786 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
1787
1788 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
1789
1790 if (!geom_reply) {
1791 mtx_unlock(&draw->mtx);
1792 return false;
1793 }
1794 draw->width = geom_reply->width;
1795 draw->height = geom_reply->height;
1796 draw->depth = geom_reply->depth;
1797 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
1798 root_win = geom_reply->root;
1799
1800 free(geom_reply);
1801
1802 if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW)
1803 draw->window = root_win;
1804 else
1805 draw->window = draw->drawable;
1806 }
1807 dri3_flush_present_events(draw);
1808 mtx_unlock(&draw->mtx);
1809 return true;
1810 }
1811
1812 __DRIimage *
loader_dri3_create_image(xcb_connection_t * c,xcb_dri3_buffer_from_pixmap_reply_t * bp_reply,unsigned int format,__DRIscreen * dri_screen,const __DRIimageExtension * image,void * loaderPrivate)1813 loader_dri3_create_image(xcb_connection_t *c,
1814 xcb_dri3_buffer_from_pixmap_reply_t *bp_reply,
1815 unsigned int format,
1816 __DRIscreen *dri_screen,
1817 const __DRIimageExtension *image,
1818 void *loaderPrivate)
1819 {
1820 int *fds;
1821 __DRIimage *image_planar, *ret;
1822 int stride, offset;
1823
1824 /* Get an FD for the pixmap object
1825 */
1826 fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
1827
1828 stride = bp_reply->stride;
1829 offset = 0;
1830
1831 /* createImageFromFds creates a wrapper __DRIimage structure which
1832 * can deal with multiple planes for things like Yuv images. So, once
1833 * we've gotten the planar wrapper, pull the single plane out of it and
1834 * discard the wrapper.
1835 */
1836 image_planar = image->createImageFromFds(dri_screen,
1837 bp_reply->width,
1838 bp_reply->height,
1839 image_format_to_fourcc(format),
1840 fds, 1,
1841 &stride, &offset, loaderPrivate);
1842 close(fds[0]);
1843 if (!image_planar)
1844 return NULL;
1845
1846 ret = image->fromPlanar(image_planar, 0, loaderPrivate);
1847
1848 if (!ret)
1849 ret = image_planar;
1850 else
1851 image->destroyImage(image_planar);
1852
1853 return ret;
1854 }
1855
#ifdef HAVE_DRI3_MODIFIERS
/** loader_dri3_create_image_from_buffers
 *
 * Build a multi-planar __DRIimage from a DRI3 buffers-from-pixmap
 * reply.  All fds in the reply are consumed (closed) here.  Returns
 * NULL if the reply carries more than 4 planes or image creation fails.
 */
__DRIimage *
loader_dri3_create_image_from_buffers(xcb_connection_t *c,
                                      xcb_dri3_buffers_from_pixmap_reply_t *bp_reply,
                                      unsigned int format,
                                      __DRIscreen *dri_screen,
                                      const __DRIimageExtension *image,
                                      void *loaderPrivate)
{
   int strides[4], offsets[4];
   unsigned error;
   __DRIimage *ret;
   int plane;

   if (bp_reply->nfd > 4)
      return NULL;

   int *fds = xcb_dri3_buffers_from_pixmap_reply_fds(c, bp_reply);
   uint32_t *strides_in = xcb_dri3_buffers_from_pixmap_strides(bp_reply);
   uint32_t *offsets_in = xcb_dri3_buffers_from_pixmap_offsets(bp_reply);

   /* The DRI interface wants int arrays; the wire format is uint32_t. */
   for (plane = 0; plane < bp_reply->nfd; plane++) {
      strides[plane] = strides_in[plane];
      offsets[plane] = offsets_in[plane];
   }

   ret = image->createImageFromDmaBufs2(dri_screen,
                                        bp_reply->width,
                                        bp_reply->height,
                                        image_format_to_fourcc(format),
                                        bp_reply->modifier,
                                        fds, bp_reply->nfd,
                                        strides, offsets,
                                        0, 0, 0, 0, /* UNDEFINED */
                                        &error, loaderPrivate);

   for (plane = 0; plane < bp_reply->nfd; plane++)
      close(fds[plane]);

   return ret;
}
#endif
1899
/** dri3_get_pixmap_buffer
 *
 * Get the DRM object for a pixmap from the X server and
 * wrap that with a __DRIimage structure using createImageFromFds
 *
 * The result is cached in draw->buffers[buf_id]; subsequent calls
 * return the cached buffer.  Returns NULL on any failure.
 */
static struct loader_dri3_buffer *
dri3_get_pixmap_buffer(__DRIdrawable *driDrawable, unsigned int format,
                       enum loader_dri3_buffer_type buffer_type,
                       struct loader_dri3_drawable *draw)
{
   int buf_id = loader_dri3_pixmap_buf_id(buffer_type);
   struct loader_dri3_buffer *buffer = draw->buffers[buf_id];
   xcb_drawable_t pixmap;
   xcb_sync_fence_t sync_fence;
   struct xshmfence *shm_fence;
   int width;
   int height;
   int fence_fd;
   __DRIscreen *cur_screen;

   /* Already imported this pixmap: reuse the cached buffer. */
   if (buffer)
      return buffer;

   pixmap = draw->drawable;

   buffer = calloc(1, sizeof *buffer);
   if (!buffer)
      goto no_buffer;

   /* Allocate and map an xshmfence used to synchronize with the server. */
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto no_fence;
   shm_fence = xshmfence_map_shm(fence_fd);
   if (shm_fence == NULL) {
      close (fence_fd);
      goto no_fence;
   }

   /* Get the currently-bound screen or revert to using the drawable's screen if
    * no contexts are currently bound. The latter case is at least necessary for
    * obs-studio, when using Window Capture (Xcomposite) as a Source.
    */
   cur_screen = draw->vtable->get_dri_screen();
   if (!cur_screen) {
      cur_screen = draw->dri_screen;
   }

   xcb_dri3_fence_from_fd(draw->conn,
                          pixmap,
                          (sync_fence = xcb_generate_id(draw->conn)),
                          false,
                          fence_fd);
#ifdef HAVE_DRI3_MODIFIERS
   /* Prefer the multi-planar (modifier-aware) import path when both the
    * server and the driver support it.
    */
   if (draw->multiplanes_available &&
       draw->ext->image->base.version >= 15 &&
       draw->ext->image->createImageFromDmaBufs2) {
      xcb_dri3_buffers_from_pixmap_cookie_t bps_cookie;
      xcb_dri3_buffers_from_pixmap_reply_t *bps_reply;

      bps_cookie = xcb_dri3_buffers_from_pixmap(draw->conn, pixmap);
      bps_reply = xcb_dri3_buffers_from_pixmap_reply(draw->conn, bps_cookie,
                                                     NULL);
      if (!bps_reply)
         goto no_image;
      buffer->image =
         loader_dri3_create_image_from_buffers(draw->conn, bps_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bps_reply->width;
      height = bps_reply->height;
      free(bps_reply);
   } else
#endif
   {
      /* Single-plane legacy import path. */
      xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
      xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;

      bp_cookie = xcb_dri3_buffer_from_pixmap(draw->conn, pixmap);
      bp_reply = xcb_dri3_buffer_from_pixmap_reply(draw->conn, bp_cookie, NULL);
      if (!bp_reply)
         goto no_image;

      buffer->image = loader_dri3_create_image(draw->conn, bp_reply, format,
                                               cur_screen, draw->ext->image,
                                               buffer);
      width = bp_reply->width;
      height = bp_reply->height;
      free(bp_reply);
   }

   if (!buffer->image)
      goto no_image;

   buffer->pixmap = pixmap;
   /* The pixmap belongs to the application; don't destroy it with the
    * buffer.
    */
   buffer->own_pixmap = false;
   buffer->width = width;
   buffer->height = height;
   buffer->shm_fence = shm_fence;
   buffer->sync_fence = sync_fence;

   draw->buffers[buf_id] = buffer;

   return buffer;

no_image:
   xcb_sync_destroy_fence(draw->conn, sync_fence);
   xshmfence_unmap_shm(shm_fence);
no_fence:
   free(buffer);
no_buffer:
   return NULL;
}
2012
/** dri3_get_buffer
 *
 * Find a front or back buffer, allocating new ones as necessary
 *
 * \param driDrawable  DRI drawable handle (unused in this body; kept for the
 *                     loader callback signature).
 * \param format       __DRI_IMAGE_FORMAT_* of the buffer to return.
 * \param buffer_type  loader_dri3_buffer_front or loader_dri3_buffer_back.
 * \param draw         Drawable owning the buffer slots.
 * \return The (possibly freshly allocated) buffer, or NULL on failure.
 */
static struct loader_dri3_buffer *
dri3_get_buffer(__DRIdrawable *driDrawable,
                unsigned int format,
                enum loader_dri3_buffer_type buffer_type,
                struct loader_dri3_drawable *draw)
{
   struct loader_dri3_buffer *buffer;
   /* Back buffers must be idle before we hand them out for rendering. */
   bool fence_await = buffer_type == loader_dri3_buffer_back;
   int buf_id;

   if (buffer_type == loader_dri3_buffer_back) {
      draw->back_format = format;

      buf_id = dri3_find_back(draw, !draw->prefer_back_buffer_reuse);

      if (buf_id < 0)
         return NULL;
   } else {
      buf_id = LOADER_DRI3_FRONT_ID;
   }

   buffer = draw->buffers[buf_id];

   /* Allocate a new buffer if there isn't an old one, if that
    * old one is the wrong size, or if it's suboptimal
    */
   if (!buffer || buffer->width != draw->width ||
       buffer->height != draw->height ||
       buffer->reallocate) {
      struct loader_dri3_buffer *new_buffer;

      /* Allocate the new buffers
       */
      new_buffer = dri3_alloc_render_buffer(draw,
                                            format,
                                            draw->width,
                                            draw->height,
                                            draw->depth);
      if (!new_buffer)
         return NULL;

      /* When resizing, copy the contents of the old buffer, waiting for that
       * copy to complete using our fences before proceeding
       */
      if ((buffer_type == loader_dri3_buffer_back ||
           (buffer_type == loader_dri3_buffer_front && draw->have_fake_front))
          && buffer) {

         /* Fill the new buffer with data from an old buffer */
         if (!loader_dri3_blit_image(draw,
                                     new_buffer->image,
                                     buffer->image,
                                     0, 0,
                                     MIN2(buffer->width, new_buffer->width),
                                     MIN2(buffer->height, new_buffer->height),
                                     0, 0, 0) &&
             !buffer->linear_buffer) {
            /* GPU-side blit unavailable: fall back to a server-side copy
             * between the pixmaps, bracketed by our fences.
             */
            dri3_fence_reset(draw->conn, new_buffer);
            dri3_copy_area(draw->conn,
                           buffer->pixmap,
                           new_buffer->pixmap,
                           dri3_drawable_gc(draw),
                           0, 0, 0, 0,
                           draw->width, draw->height);
            dri3_fence_trigger(draw->conn, new_buffer);
            fence_await = true;
         }
         dri3_free_render_buffer(draw, buffer);
      } else if (buffer_type == loader_dri3_buffer_front) {
         /* Fill the new fake front with data from a real front */
         loader_dri3_swapbuffer_barrier(draw);
         dri3_fence_reset(draw->conn, new_buffer);
         dri3_copy_area(draw->conn,
                        draw->drawable,
                        new_buffer->pixmap,
                        dri3_drawable_gc(draw),
                        0, 0, 0, 0,
                        draw->width, draw->height);
         dri3_fence_trigger(draw->conn, new_buffer);

         if (new_buffer->linear_buffer) {
            /* Wait for the server copy to land in the linear buffer, then
             * blit it into the driver-visible image.
             */
            dri3_fence_await(draw->conn, draw, new_buffer);
            (void) loader_dri3_blit_image(draw,
                                          new_buffer->image,
                                          new_buffer->linear_buffer,
                                          0, 0, draw->width, draw->height,
                                          0, 0, 0);
         } else
            fence_await = true;
      }
      buffer = new_buffer;
      draw->buffers[buf_id] = buffer;
   }

   if (fence_await)
      dri3_fence_await(draw->conn, draw, buffer);

   /*
    * Do we need to preserve the content of a previous buffer?
    *
    * Note that this blit is needed only to avoid a wait for a buffer that
    * is currently in the flip chain or being scanned out from. That's really
    * a tradeoff. If we're ok with the wait we can reduce the number of back
    * buffers to 1 for SWAP_EXCHANGE, and 1 for SWAP_COPY,
    * but in the latter case we must disallow page-flipping.
    */
   if (buffer_type == loader_dri3_buffer_back &&
       draw->cur_blit_source != -1 &&
       draw->buffers[draw->cur_blit_source] &&
       buffer != draw->buffers[draw->cur_blit_source]) {

      struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];

      /* Avoid flushing here. Will probably do good for tiling hardware. */
      (void) loader_dri3_blit_image(draw,
                                    buffer->image,
                                    source->image,
                                    0, 0, draw->width, draw->height,
                                    0, 0, 0);
      buffer->last_swap = source->last_swap;
      draw->cur_blit_source = -1;
   }
   /* Return the requested buffer */
   return buffer;
}
2142
2143 /** dri3_free_buffers
2144 *
2145 * Free the front bufffer or all of the back buffers. Used
2146 * when the application changes which buffers it needs
2147 */
2148 static void
dri3_free_buffers(__DRIdrawable * driDrawable,enum loader_dri3_buffer_type buffer_type,struct loader_dri3_drawable * draw)2149 dri3_free_buffers(__DRIdrawable *driDrawable,
2150 enum loader_dri3_buffer_type buffer_type,
2151 struct loader_dri3_drawable *draw)
2152 {
2153 struct loader_dri3_buffer *buffer;
2154 int first_id;
2155 int n_id;
2156 int buf_id;
2157
2158 switch (buffer_type) {
2159 case loader_dri3_buffer_back:
2160 first_id = LOADER_DRI3_BACK_ID(0);
2161 n_id = LOADER_DRI3_MAX_BACK;
2162 draw->cur_blit_source = -1;
2163 break;
2164 case loader_dri3_buffer_front:
2165 first_id = LOADER_DRI3_FRONT_ID;
2166 /* Don't free a fake front holding new backbuffer content. */
2167 n_id = (draw->cur_blit_source == LOADER_DRI3_FRONT_ID) ? 0 : 1;
2168 break;
2169 default:
2170 unreachable("unhandled buffer_type");
2171 }
2172
2173 for (buf_id = first_id; buf_id < first_id + n_id; buf_id++) {
2174 buffer = draw->buffers[buf_id];
2175 if (buffer) {
2176 dri3_free_render_buffer(draw, buffer);
2177 draw->buffers[buf_id] = NULL;
2178 }
2179 }
2180 }
2181
/** loader_dri3_get_buffers
 *
 * The published buffer allocation API.
 * Returns all of the necessary buffers, allocating
 * as needed.
 *
 * \param driDrawable  DRI drawable handle passed through to the buffer getters.
 * \param format       __DRI_IMAGE_FORMAT_* requested by the driver.
 * \param stamp        Driver-owned stamp pointer, remembered in the drawable.
 * \param loaderPrivate Actually a struct loader_dri3_drawable *.
 * \param buffer_mask  __DRI_IMAGE_BUFFER_* bits requested by the driver.
 * \param buffers      [out] Filled with the images and the resulting mask.
 * \return true on success, false on update/allocation failure.
 */
int
loader_dri3_get_buffers(__DRIdrawable *driDrawable,
                        unsigned int format,
                        uint32_t *stamp,
                        void *loaderPrivate,
                        uint32_t buffer_mask,
                        struct __DRIimageList *buffers)
{
   struct loader_dri3_drawable *draw = loaderPrivate;
   struct loader_dri3_buffer *front, *back;
   int buf_id;

   buffers->image_mask = 0;
   buffers->front = NULL;
   buffers->back = NULL;

   front = NULL;
   back = NULL;

   if (!dri3_update_drawable(draw))
      return false;

   dri3_update_max_num_back(draw);

   /* Free no longer needed back buffers */
   for (buf_id = draw->cur_num_back; buf_id < LOADER_DRI3_MAX_BACK; buf_id++) {
      /* Keep a buffer that is still the pending blit source. */
      if (draw->cur_blit_source != buf_id && draw->buffers[buf_id]) {
         dri3_free_render_buffer(draw, draw->buffers[buf_id]);
         draw->buffers[buf_id] = NULL;
      }
   }

   /* pixmaps always have front buffers.
    * Exchange swaps also mandate fake front buffers.
    */
   if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW ||
       draw->swap_method == __DRI_ATTRIB_SWAP_EXCHANGE)
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;

   if (buffer_mask & __DRI_IMAGE_BUFFER_FRONT) {
      /* All pixmaps are owned by the server gpu.
       * When we use a different gpu, we can't use the pixmap
       * as buffer since it is potentially tiled a way
       * our device can't understand. In this case, use
       * a fake front buffer. Hopefully the pixmap
       * content will get synced with the fake front
       * buffer.
       */
      if (draw->type != LOADER_DRI3_DRAWABLE_WINDOW && !draw->is_different_gpu)
         front = dri3_get_pixmap_buffer(driDrawable,
                                        format,
                                        loader_dri3_buffer_front,
                                        draw);
      else
         front = dri3_get_buffer(driDrawable,
                                 format,
                                 loader_dri3_buffer_front,
                                 draw);

      if (!front)
         return false;
   } else {
      /* Front not requested: drop any cached fake front. */
      dri3_free_buffers(driDrawable, loader_dri3_buffer_front, draw);
      draw->have_fake_front = 0;
   }

   if (buffer_mask & __DRI_IMAGE_BUFFER_BACK) {
      back = dri3_get_buffer(driDrawable,
                             format,
                             loader_dri3_buffer_back,
                             draw);
      if (!back)
         return false;
      draw->have_back = 1;
   } else {
      dri3_free_buffers(driDrawable, loader_dri3_buffer_back, draw);
      draw->have_back = 0;
   }

   if (front) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_FRONT;
      buffers->front = front->image;
      /* Windows and cross-gpu pixmaps got a fake front above, not the real
       * server-side front.
       */
      draw->have_fake_front =
         draw->is_different_gpu ||
         draw->type == LOADER_DRI3_DRAWABLE_WINDOW;
   }

   if (back) {
      buffers->image_mask |= __DRI_IMAGE_BUFFER_BACK;
      buffers->back = back->image;
   }

   draw->stamp = stamp;

   return true;
}
2284
2285 /** loader_dri3_update_drawable_geometry
2286 *
2287 * Get the current drawable geometry.
2288 */
2289 void
loader_dri3_update_drawable_geometry(struct loader_dri3_drawable * draw)2290 loader_dri3_update_drawable_geometry(struct loader_dri3_drawable *draw)
2291 {
2292 xcb_get_geometry_cookie_t geom_cookie;
2293 xcb_get_geometry_reply_t *geom_reply;
2294
2295 geom_cookie = xcb_get_geometry(draw->conn, draw->drawable);
2296
2297 geom_reply = xcb_get_geometry_reply(draw->conn, geom_cookie, NULL);
2298
2299 if (geom_reply) {
2300 draw->width = geom_reply->width;
2301 draw->height = geom_reply->height;
2302 draw->vtable->set_drawable_size(draw, draw->width, draw->height);
2303 draw->ext->flush->invalidate(draw->dri_drawable);
2304
2305 free(geom_reply);
2306 }
2307 }
2308
2309
/**
 * Make sure the server has flushed all pending swap buffers to hardware
 * for this drawable. Ideally we'd want to send an X protocol request to
 * have the server block our connection until the swaps are complete. That
 * would avoid the potential round-trip here.
 */
void
loader_dri3_swapbuffer_barrier(struct loader_dri3_drawable *draw)
{
   int64_t ust = 0, msc = 0, sbc = 0;

   /* Waiting for swap-buffer-count 0 blocks until outstanding swaps finish;
    * the returned counters are not needed here.
    */
   (void) loader_dri3_wait_for_sbc(draw, 0, &ust, &msc, &sbc);
}
2323
2324 /**
2325 * Perform any cleanup associated with a close screen operation.
2326 * \param dri_screen[in,out] Pointer to __DRIscreen about to be closed.
2327 *
2328 * This function destroys the screen's cached swap context if any.
2329 */
2330 void
loader_dri3_close_screen(__DRIscreen * dri_screen)2331 loader_dri3_close_screen(__DRIscreen *dri_screen)
2332 {
2333 mtx_lock(&blit_context.mtx);
2334 if (blit_context.ctx && blit_context.cur_screen == dri_screen) {
2335 blit_context.core->destroyContext(blit_context.ctx);
2336 blit_context.ctx = NULL;
2337 }
2338 mtx_unlock(&blit_context.mtx);
2339 }
2340
2341 /**
2342 * Find a backbuffer slot - potentially allocating a back buffer
2343 *
2344 * \param draw[in,out] Pointer to the drawable for which to find back.
2345 * \return Pointer to a new back buffer or NULL if allocation failed or was
2346 * not mandated.
2347 *
2348 * Find a potentially new back buffer, and if it's not been allocated yet and
2349 * in addition needs initializing, then try to allocate and initialize it.
2350 */
2351 #include <stdio.h>
2352 static struct loader_dri3_buffer *
dri3_find_back_alloc(struct loader_dri3_drawable * draw)2353 dri3_find_back_alloc(struct loader_dri3_drawable *draw)
2354 {
2355 struct loader_dri3_buffer *back;
2356 int id;
2357
2358 id = dri3_find_back(draw, false);
2359 if (id < 0)
2360 return NULL;
2361
2362 back = draw->buffers[id];
2363 /* Allocate a new back if we haven't got one */
2364 if (!back && draw->back_format != __DRI_IMAGE_FORMAT_NONE &&
2365 dri3_update_drawable(draw))
2366 back = dri3_alloc_render_buffer(draw, draw->back_format,
2367 draw->width, draw->height, draw->depth);
2368
2369 if (!back)
2370 return NULL;
2371
2372 draw->buffers[id] = back;
2373
2374 /* If necessary, prefill the back with data according to swap_method mode. */
2375 if (draw->cur_blit_source != -1 &&
2376 draw->buffers[draw->cur_blit_source] &&
2377 back != draw->buffers[draw->cur_blit_source]) {
2378 struct loader_dri3_buffer *source = draw->buffers[draw->cur_blit_source];
2379
2380 dri3_fence_await(draw->conn, draw, source);
2381 dri3_fence_await(draw->conn, draw, back);
2382 (void) loader_dri3_blit_image(draw,
2383 back->image,
2384 source->image,
2385 0, 0, draw->width, draw->height,
2386 0, 0, 0);
2387 back->last_swap = source->last_swap;
2388 draw->cur_blit_source = -1;
2389 }
2390
2391 return back;
2392 }
2393