1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright 2009, VMware, Inc.
5 * All Rights Reserved.
6 * Copyright (C) 2010 LunarG Inc.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included
16 * in all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors:
27 * Keith Whitwell <keithw@vmware.com>, Jakob Bornecrantz <wallbraker@gmail.com>,
28 * Chia-I Wu <olv@lunarg.com>
29 */
30
31 #include "util/libdrm.h"
32 #include "git_sha1.h"
33 #include "GL/mesa_glinterop.h"
34 #include "mesa_interface.h"
35 #include "util/disk_cache.h"
36 #include "util/u_memory.h"
37 #include "util/u_inlines.h"
38 #include "util/format/u_format.h"
39 #include "util/u_debug.h"
40 #include "util/libsync.h"
41 #include "util/os_file.h"
42 #include "util/log.h"
43 #include "frontend/drm_driver.h"
44 #include "state_tracker/st_format.h"
45 #include "state_tracker/st_cb_texture.h"
46 #include "state_tracker/st_texture.h"
47 #include "state_tracker/st_context.h"
48 #include "state_tracker/st_interop.h"
49 #include "pipe-loader/pipe_loader.h"
50 #include "main/bufferobj.h"
51 #include "main/texobj.h"
52
53 #include "dri_util.h"
54
55 #include "dri_helpers.h"
56 #include "dri_drawable.h"
57 #include "dri_query_renderer.h"
58 #include "loader_dri_helper.h"
59
60 #include "drm-uapi/drm_fourcc.h"
61
62 struct dri2_buffer
63 {
64 __DRIbuffer base;
65 struct pipe_resource *resource;
66 };
67
68 static inline struct dri2_buffer *
69 dri2_buffer(__DRIbuffer * driBufferPriv)
70 {
71 return (struct dri2_buffer *) driBufferPriv;
72 }
73
74 /**
75 * Invalidate the drawable.
76 *
77 * How we get here is listed below.
78 *
79 * 1. Called by these SwapBuffers implementations where the context is known:
80 * loader_dri3_swap_buffers_msc
81 * EGL: droid_swap_buffers
82 * EGL: dri2_drm_swap_buffers
83 * EGL: dri2_wl_swap_buffers_with_damage
84 * EGL: dri2_x11_swap_buffers_msc
85 *
86 * 2. Other callers where the context is known:
87 * st_manager_flush_frontbuffer -> dri2_flush_frontbuffer
88 * -> EGL droid_display_shared_buffer
89 *
90 * 3. Other callers where the context is unknown:
91 * loader: dri3_handle_present_event - XCB_PRESENT_CONFIGURE_NOTIFY
92 * eglQuerySurface -> dri3_query_surface
93 * -> loader_dri3_update_drawable_geometry
94 * EGL: wl_egl_window::resize_callback (called outside Mesa)
95 */
96 void
97 dri_invalidate_drawable(struct dri_drawable *drawable)
98 {
99 drawable->lastStamp++;
100 drawable->texture_mask = 0; /* mark all attachments as invalid */
101
102 p_atomic_inc(&drawable->base.stamp);
103 }
104
105 /**
106 * Retrieve __DRIbuffer from the DRI loader.
107 */
108 static __DRIbuffer *
109 dri2_drawable_get_buffers(struct dri_drawable *drawable,
110 const enum st_attachment_type *atts,
111 unsigned *count)
112 {
113 const __DRIdri2LoaderExtension *loader = drawable->screen->dri2.loader;
114 bool with_format;
115 __DRIbuffer *buffers;
116 int num_buffers;
117 unsigned attachments[__DRI_BUFFER_COUNT];
118 unsigned num_attachments, i;
119
120 assert(loader);
121 assert(*count <= __DRI_BUFFER_COUNT);
122 with_format = dri_with_format(drawable->screen);
123
124 num_attachments = 0;
125
126 /* for Xserver 1.6.0 (DRI2 version 1) we always need to ask for the front */
127 if (!with_format)
128 attachments[num_attachments++] = __DRI_BUFFER_FRONT_LEFT;
129
130 for (i = 0; i < *count; i++) {
131 enum pipe_format format;
132 unsigned bind;
133 int att, depth;
134
135 dri_drawable_get_format(drawable, atts[i], &format, &bind);
136 if (format == PIPE_FORMAT_NONE)
137 continue;
138
139 switch (atts[i]) {
140 case ST_ATTACHMENT_FRONT_LEFT:
141 /* already added */
142 if (!with_format)
143 continue;
144 att = __DRI_BUFFER_FRONT_LEFT;
145 break;
146 case ST_ATTACHMENT_BACK_LEFT:
147 att = __DRI_BUFFER_BACK_LEFT;
148 break;
149 case ST_ATTACHMENT_FRONT_RIGHT:
150 att = __DRI_BUFFER_FRONT_RIGHT;
151 break;
152 case ST_ATTACHMENT_BACK_RIGHT:
153 att = __DRI_BUFFER_BACK_RIGHT;
154 break;
155 default:
156 continue;
157 }
158
159 /*
160 * In this switch statement we must support all formats that
161 * may occur as the stvis->color_format.
162 */
163 switch(format) {
164 case PIPE_FORMAT_R16G16B16A16_FLOAT:
165 depth = 64;
166 break;
167 case PIPE_FORMAT_R16G16B16X16_FLOAT:
168 depth = 48;
169 break;
170 case PIPE_FORMAT_B10G10R10A2_UNORM:
171 case PIPE_FORMAT_R10G10B10A2_UNORM:
172 case PIPE_FORMAT_BGRA8888_UNORM:
173 case PIPE_FORMAT_RGBA8888_UNORM:
174 depth = 32;
175 break;
176 case PIPE_FORMAT_R10G10B10X2_UNORM:
177 case PIPE_FORMAT_B10G10R10X2_UNORM:
178 depth = 30;
179 break;
180 case PIPE_FORMAT_BGRX8888_UNORM:
181 case PIPE_FORMAT_RGBX8888_UNORM:
182 depth = 24;
183 break;
184 case PIPE_FORMAT_B5G6R5_UNORM:
185 depth = 16;
186 break;
187 default:
188 depth = util_format_get_blocksizebits(format);
189 assert(!"Unexpected format in dri2_drawable_get_buffers()");
190 }
191
192 attachments[num_attachments++] = att;
193 if (with_format) {
194 attachments[num_attachments++] = depth;
195 }
196 }
197
198 if (with_format) {
199 num_attachments /= 2;
200 buffers = loader->getBuffersWithFormat(drawable,
201 &drawable->w, &drawable->h,
202 attachments, num_attachments,
203 &num_buffers, drawable->loaderPrivate);
204 }
205 else {
206 buffers = loader->getBuffers(drawable,
207 &drawable->w, &drawable->h,
208 attachments, num_attachments,
209 &num_buffers, drawable->loaderPrivate);
210 }
211
212 if (buffers)
213 *count = num_buffers;
214
215 return buffers;
216 }
217
218 bool
219 dri_image_drawable_get_buffers(struct dri_drawable *drawable,
220 struct __DRIimageList *images,
221 const enum st_attachment_type *statts,
222 unsigned statts_count);
223 bool
224 dri_image_drawable_get_buffers(struct dri_drawable *drawable,
225 struct __DRIimageList *images,
226 const enum st_attachment_type *statts,
227 unsigned statts_count)
228 {
229 enum pipe_format color_format = PIPE_FORMAT_NONE;
230 uint32_t buffer_mask = 0;
231 unsigned i;
232
233 for (i = 0; i < statts_count; i++) {
234 enum pipe_format pf;
235 unsigned bind;
236
237 dri_drawable_get_format(drawable, statts[i], &pf, &bind);
238 if (pf == PIPE_FORMAT_NONE)
239 continue;
240
241 switch (statts[i]) {
242 case ST_ATTACHMENT_FRONT_LEFT:
243 buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
244 color_format = pf;
245 break;
246 case ST_ATTACHMENT_BACK_LEFT:
247 buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
248 color_format = pf;
249 break;
250 default:
251 break;
252 }
253 }
254
255 /* Stamp usage behavior in the getBuffers callback:
256 *
257 * 1. DRI3 (EGL and GLX):
258 * This calls loader_dri3_get_buffers, which saves the stamp pointer
259 * in loader_dri3_drawable::stamp, which is only changed (incremented)
260 * by loader_dri3_swap_buffers_msc.
261 *
262 * 2. EGL Android, Device, Surfaceless, Wayland:
263 * The stamp is unused.
264 *
265 * How we get here:
266 * dri_set_tex_buffer2 (GLX_EXT_texture_from_pixmap)
267 * st_api_make_current
268 * st_manager_validate_framebuffers (part of st_validate_state)
269 */
270 return drawable->screen->image.loader->getBuffers(
271 drawable,
272 color_format,
273 (uint32_t *)&drawable->base.stamp,
274 drawable->loaderPrivate, buffer_mask,
275 images);
276 }
277
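/**
 * Release a __DRIbuffer: drop the pipe_resource reference and free the
 * wrapper structure allocated by the driver.
 */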
278 static void
279 dri2_release_buffer(__DRIbuffer *bPriv)
280 {
281 struct dri2_buffer *buffer = dri2_buffer(bPriv);
282
283 pipe_resource_reference(&buffer->resource, NULL);
284 FREE(buffer);
285 }
286
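/**
 * Accumulate an incoming sync-file fd into the image's in-fence fd.
 */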
287 void
288 dri2_set_in_fence_fd(struct dri_image *img, int fd)
289 {
290 validate_fence_fd(fd);
291 validate_fence_fd(img->in_fence_fd);
292 sync_accumulate("dri", &img->in_fence_fd, fd);
293 }
294
295 /*
296 * Backend functions for pipe_frontend_drawable.
297 */
298
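/**
 * Allocate or update the drawable's textures: fetch the current buffers from
 * the loader (image or DRI2 path), release attachments that are no longer
 * needed, wrap the loader buffers in pipe_resources, and (re)create private
 * MSAA and depth-stencil buffers as required.
 */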
299 static void
300 dri2_allocate_textures(struct dri_context *ctx,
301 struct dri_drawable *drawable,
302 const enum st_attachment_type *statts,
303 unsigned statts_count)
304 {
305 struct dri_screen *screen = drawable->screen;
306 struct pipe_resource templ;
307 bool alloc_depthstencil = false;
308 unsigned i, j, bind;
309 const __DRIimageLoaderExtension *image = screen->image.loader;
310 /* Image-specific variables */
311 struct __DRIimageList images;
312 /* DRI2-specific variables */
313 __DRIbuffer *buffers = NULL;
314 struct winsys_handle whandle;
315 unsigned num_buffers = statts_count;
316
317 assert(num_buffers <= __DRI_BUFFER_COUNT);
318
319 /* Wait for glthread to finish because we can't use pipe_context from
320 * multiple threads.
321 */
322 _mesa_glthread_finish(ctx->st->ctx);
323
324 /* First get the buffers from the loader */
325 if (image) {
326 if (!dri_image_drawable_get_buffers(drawable, &images,
327 statts, statts_count))
328 return;
329 }
330 else {
331 buffers = dri2_drawable_get_buffers(drawable, statts, &num_buffers);
332 if (!buffers || (drawable->old_num == num_buffers &&
333 drawable->old_w == drawable->w &&
334 drawable->old_h == drawable->h &&
335 memcmp(drawable->old, buffers,
336 sizeof(__DRIbuffer) * num_buffers) == 0))
337 return;
338 }
339
340 /* Second, clean up the resources we no longer need */
341
342 /* See if we need a depth-stencil buffer. */
343 for (i = 0; i < statts_count; i++) {
344 if (statts[i] == ST_ATTACHMENT_DEPTH_STENCIL) {
345 alloc_depthstencil = true;
346 break;
347 }
348 }
349
350 /* Delete the resources we won't need. */
351 for (i = 0; i < ST_ATTACHMENT_COUNT; i++) {
352 /* Don't delete the depth-stencil buffer; we can reuse it. */
353 if (i == ST_ATTACHMENT_DEPTH_STENCIL && alloc_depthstencil)
354 continue;
355
356 /* Flush the texture before unreferencing, so that other clients can
357 * see what the driver has rendered.
358 */
359 if (i != ST_ATTACHMENT_DEPTH_STENCIL && drawable->textures[i]) {
360 struct pipe_context *pipe = ctx->st->pipe;
361 pipe->flush_resource(pipe, drawable->textures[i]);
362 }
363
364 pipe_resource_reference(&drawable->textures[i], NULL);
365 }
366
367 if (drawable->stvis.samples > 1) {
368 for (i = 0; i < ST_ATTACHMENT_COUNT; i++) {
369 bool del = true;
370
371 /* Don't delete MSAA resources for attachments that are still enabled;
372 * we can reuse them. */
373 for (j = 0; j < statts_count; j++) {
374 if (i == statts[j]) {
375 del = false;
376 break;
377 }
378 }
379
380 if (del) {
381 pipe_resource_reference(&drawable->msaa_textures[i], NULL);
382 }
383 }
384 }
385
386 /* Third, use the retrieved buffers to fill in the drawable info */
387
388 memset(&templ, 0, sizeof(templ));
389 templ.target = screen->target;
390 templ.last_level = 0;
391 templ.depth0 = 1;
392 templ.array_size = 1;
393
394 if (image) {
395 if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
396 struct pipe_resource **buf =
397 &drawable->textures[ST_ATTACHMENT_FRONT_LEFT];
398 struct pipe_resource *texture = images.front->texture;
399
400 drawable->w = texture->width0;
401 drawable->h = texture->height0;
402
403 pipe_resource_reference(buf, texture);
404 dri_image_fence_sync(ctx, images.front);
405 }
406
407 if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
408 struct pipe_resource **buf =
409 &drawable->textures[ST_ATTACHMENT_BACK_LEFT];
410 struct pipe_resource *texture = images.back->texture;
411
412 drawable->w = texture->width0;
413 drawable->h = texture->height0;
414
415 pipe_resource_reference(buf, texture);
416 dri_image_fence_sync(ctx, images.back);
417 }
418
419 if (images.image_mask & __DRI_IMAGE_BUFFER_SHARED) {
420 struct pipe_resource **buf =
421 &drawable->textures[ST_ATTACHMENT_BACK_LEFT];
422 struct pipe_resource *texture = images.back->texture;
423
424 drawable->w = texture->width0;
425 drawable->h = texture->height0;
426
427 pipe_resource_reference(buf, texture);
428 dri_image_fence_sync(ctx, images.back);
429
430 ctx->is_shared_buffer_bound = true;
431 } else {
432 ctx->is_shared_buffer_bound = false;
433 }
434
435 /* Note: if there is both a back and a front buffer,
436 * then they have the same size.
437 */
438 templ.width0 = drawable->w;
439 templ.height0 = drawable->h;
440 }
441 else {
442 memset(&whandle, 0, sizeof(whandle));
443
444 /* Process DRI-provided buffers and get pipe_resources. */
445 for (i = 0; i < num_buffers; i++) {
446 __DRIbuffer *buf = &buffers[i];
447 enum st_attachment_type statt;
448 enum pipe_format format;
449
450 switch (buf->attachment) {
451 case __DRI_BUFFER_FRONT_LEFT:
452 if (!screen->auto_fake_front) {
453 continue; /* invalid attachment */
454 }
455 FALLTHROUGH;
456 case __DRI_BUFFER_FAKE_FRONT_LEFT:
457 statt = ST_ATTACHMENT_FRONT_LEFT;
458 break;
459 case __DRI_BUFFER_BACK_LEFT:
460 statt = ST_ATTACHMENT_BACK_LEFT;
461 break;
462 default:
463 continue; /* invalid attachment */
464 }
465
466 dri_drawable_get_format(drawable, statt, &format, &bind);
467 if (format == PIPE_FORMAT_NONE)
468 continue;
469
470 /* dri2_drawable_get_buffers has already filled dri_drawable->w
471 * and dri_drawable->h */
472 templ.width0 = drawable->w;
473 templ.height0 = drawable->h;
474 templ.format = format;
475 templ.bind = bind;
476 whandle.handle = buf->name;
477 whandle.stride = buf->pitch;
478 whandle.offset = 0;
479 whandle.format = format;
480 whandle.modifier = DRM_FORMAT_MOD_INVALID;
481 if (screen->can_share_buffer)
482 whandle.type = WINSYS_HANDLE_TYPE_SHARED;
483 else
484 whandle.type = WINSYS_HANDLE_TYPE_KMS;
485 drawable->textures[statt] =
486 screen->base.screen->resource_from_handle(screen->base.screen,
487 &templ, &whandle,
488 PIPE_HANDLE_USAGE_EXPLICIT_FLUSH);
489 assert(drawable->textures[statt]);
490 }
491 }
492
493 /* Allocate private MSAA colorbuffers. */
494 if (drawable->stvis.samples > 1) {
495 for (i = 0; i < statts_count; i++) {
496 enum st_attachment_type statt = statts[i];
497
498 if (statt == ST_ATTACHMENT_DEPTH_STENCIL)
499 continue;
500
501 if (drawable->textures[statt]) {
502 templ.format = drawable->textures[statt]->format;
503 templ.bind = drawable->textures[statt]->bind &
504 ~(PIPE_BIND_SCANOUT | PIPE_BIND_SHARED);
505 templ.nr_samples = drawable->stvis.samples;
506 templ.nr_storage_samples = drawable->stvis.samples;
507
508 /* Try to reuse the resource.
509 * (the other resource parameters should be constant)
510 */
511 if (!drawable->msaa_textures[statt] ||
512 drawable->msaa_textures[statt]->width0 != templ.width0 ||
513 drawable->msaa_textures[statt]->height0 != templ.height0) {
514 /* Allocate a new one. */
515 pipe_resource_reference(&drawable->msaa_textures[statt], NULL);
516
517 drawable->msaa_textures[statt] =
518 screen->base.screen->resource_create(screen->base.screen,
519 &templ);
520 assert(drawable->msaa_textures[statt]);
521
522 /* If there are any MSAA resources, we should initialize them
523 * such that they contain the same data as the single-sample
524 * resources we just got from the X server.
525 *
526 * The reason for this is that the gallium frontend (and
527 * therefore the app) can access the MSAA resources only.
528 * The single-sample resources are not exposed
529 * to the gallium frontend.
530 *
531 */
532 dri_pipe_blit(ctx->st->pipe,
533 drawable->msaa_textures[statt],
534 drawable->textures[statt]);
535 }
536 }
537 else {
538 pipe_resource_reference(&drawable->msaa_textures[statt], NULL);
539 }
540 }
541 }
542
543 /* Allocate a private depth-stencil buffer. */
544 if (alloc_depthstencil) {
545 enum st_attachment_type statt = ST_ATTACHMENT_DEPTH_STENCIL;
546 struct pipe_resource **zsbuf;
547 enum pipe_format format;
548 unsigned bind;
549
550 dri_drawable_get_format(drawable, statt, &format, &bind);
551
552 if (format) {
553 templ.format = format;
554 templ.bind = bind & ~PIPE_BIND_SHARED;
555
556 if (drawable->stvis.samples > 1) {
557 templ.nr_samples = drawable->stvis.samples;
558 templ.nr_storage_samples = drawable->stvis.samples;
559 zsbuf = &drawable->msaa_textures[statt];
560 }
561 else {
562 templ.nr_samples = 0;
563 templ.nr_storage_samples = 0;
564 zsbuf = &drawable->textures[statt];
565 }
566
567 /* Try to reuse the resource.
568 * (the other resource parameters should be constant)
569 */
570 if (!*zsbuf ||
571 (*zsbuf)->width0 != templ.width0 ||
572 (*zsbuf)->height0 != templ.height0) {
573 /* Allocate a new one. */
574 pipe_resource_reference(zsbuf, NULL);
575 *zsbuf = screen->base.screen->resource_create(screen->base.screen,
576 &templ);
577 assert(*zsbuf);
578 }
579 }
580 else {
581 pipe_resource_reference(&drawable->msaa_textures[statt], NULL);
582 pipe_resource_reference(&drawable->textures[statt], NULL);
583 }
584 }
585
586 /* For DRI2, we may get the same buffers again from the server.
587 * To prevent useless imports of gem names, drawable->old* is used
588 * to bypass the import if we get the same buffers. This doesn't apply
589 * to DRI3/Wayland, users of image.loader, since the buffer is managed
590 * by the client (no import), and the back buffer is going to change
591 * at every redraw.
592 */
593 if (!image) {
594 drawable->old_num = num_buffers;
595 drawable->old_w = drawable->w;
596 drawable->old_h = drawable->h;
597 memcpy(drawable->old, buffers, sizeof(__DRIbuffer) * num_buffers);
598 }
599 }
600
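/**
 * Flush front-buffer rendering out to the loader: resolve MSAA if needed,
 * flush the resource and context, then notify the loader via flushFrontBuffer
 * (and displaySharedBuffer for EGL_KHR_mutable_render_buffer shared buffers).
 */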
601 static bool
602 dri2_flush_frontbuffer(struct dri_context *ctx,
603 struct dri_drawable *drawable,
604 enum st_attachment_type statt)
605 {
606 const __DRIimageLoaderExtension *image = drawable->screen->image.loader;
607 const __DRIdri2LoaderExtension *loader = drawable->screen->dri2.loader;
608 const __DRImutableRenderBufferLoaderExtension *shared_buffer_loader =
609 drawable->screen->mutableRenderBuffer.loader;
610 struct pipe_context *pipe = ctx->st->pipe;
611 struct pipe_fence_handle *fence = NULL;
612 int fence_fd = -1;
613
614 /* We need to flush for front buffer rendering when either we're using the
615 * front buffer at the GL API level, or when EGL_KHR_mutable_render_buffer
616 * has redirected GL_BACK to the front buffer.
617 */
618 if (statt != ST_ATTACHMENT_FRONT_LEFT &&
619 (!ctx->is_shared_buffer_bound || statt != ST_ATTACHMENT_BACK_LEFT))
620 return false;
621
622 /* Wait for glthread to finish because we can't use pipe_context from
623 * multiple threads.
624 */
625 _mesa_glthread_finish(ctx->st->ctx);
626
627 if (drawable->stvis.samples > 1) {
628 /* Resolve the buffer used for front rendering. */
629 dri_pipe_blit(ctx->st->pipe, drawable->textures[statt],
630 drawable->msaa_textures[statt]);
631 }
632
633 if (drawable->textures[statt]) {
634 pipe->flush_resource(pipe, drawable->textures[statt]);
635 }
636
637 if (ctx->is_shared_buffer_bound) {
638 /* is_shared_buffer_bound should only be true with the image extension: */
639 assert(image);
640 pipe->flush(pipe, &fence, PIPE_FLUSH_FENCE_FD);
641 } else {
642 pipe->flush(pipe, NULL, 0);
643 }
644
645 if (image) {
646 image->flushFrontBuffer(drawable, drawable->loaderPrivate);
647 if (ctx->is_shared_buffer_bound) {
648 if (fence)
649 fence_fd = pipe->screen->fence_get_fd(pipe->screen, fence);
650
651 shared_buffer_loader->displaySharedBuffer(drawable, fence_fd,
652 drawable->loaderPrivate);
653
654 pipe->screen->fence_reference(pipe->screen, &fence, NULL);
655 }
656 }
657 else if (loader->flushFrontBuffer) {
658 loader->flushFrontBuffer(drawable, drawable->loaderPrivate);
659 }
660
661 return true;
662 }
663
664 /**
665 * The struct dri_drawable flush_swapbuffers callback
666 */
667 static void
668 dri2_flush_swapbuffers(struct dri_context *ctx,
669 struct dri_drawable *drawable)
670 {
671 const __DRIimageLoaderExtension *image = drawable->screen->image.loader;
672
673 if (image && image->flushSwapBuffers) {
674 image->flushSwapBuffers(drawable, drawable->loaderPrivate);
675 }
676 }
677
678 static void
679 dri2_update_tex_buffer(struct dri_drawable *drawable,
680 struct dri_context *ctx,
681 struct pipe_resource *res)
682 {
683 /* no-op */
684 }
685
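/*
 * Format mappings used to import YUV buffers as one resource per plane (or as
 * subsampled RGB) when the driver cannot sample the fourcc's native pipe
 * format directly; see dri_create_image_from_winsys.
 */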
686 static const struct dri2_format_mapping r8_b8_g8_mapping = {
687 DRM_FORMAT_YVU420,
688 __DRI_IMAGE_FORMAT_NONE,
689 __DRI_IMAGE_COMPONENTS_Y_U_V,
690 PIPE_FORMAT_R8_B8_G8_420_UNORM,
691 3,
692 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
693 { 2, 1, 1, __DRI_IMAGE_FORMAT_R8 },
694 { 1, 1, 1, __DRI_IMAGE_FORMAT_R8 } }
695 };
696
697 static const struct dri2_format_mapping r8_g8_b8_mapping = {
698 DRM_FORMAT_YUV420,
699 __DRI_IMAGE_FORMAT_NONE,
700 __DRI_IMAGE_COMPONENTS_Y_U_V,
701 PIPE_FORMAT_R8_G8_B8_420_UNORM,
702 3,
703 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
704 { 1, 1, 1, __DRI_IMAGE_FORMAT_R8 },
705 { 2, 1, 1, __DRI_IMAGE_FORMAT_R8 } }
706 };
707
708 static const struct dri2_format_mapping r8_g8b8_mapping = {
709 DRM_FORMAT_NV12,
710 __DRI_IMAGE_FORMAT_NONE,
711 __DRI_IMAGE_COMPONENTS_Y_UV,
712 PIPE_FORMAT_R8_G8B8_420_UNORM,
713 2,
714 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
715 { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88 } }
716 };
717
718 static const struct dri2_format_mapping r8_g8b8_mapping_422 = {
719 DRM_FORMAT_NV16,
720 __DRI_IMAGE_FORMAT_NONE,
721 __DRI_IMAGE_COMPONENTS_Y_UV,
722 PIPE_FORMAT_R8_G8B8_422_UNORM,
723 2,
724 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
725 { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88 } }
726 };
727
728 static const struct dri2_format_mapping r8_b8g8_mapping = {
729 DRM_FORMAT_NV21,
730 __DRI_IMAGE_FORMAT_NONE,
731 __DRI_IMAGE_COMPONENTS_Y_UV,
732 PIPE_FORMAT_R8_B8G8_420_UNORM,
733 2,
734 { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
735 { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88 } }
736 };
737
738 static const struct dri2_format_mapping r8g8_r8b8_mapping = {
739 DRM_FORMAT_YUYV,
740 __DRI_IMAGE_FORMAT_NONE,
741 __DRI_IMAGE_COMPONENTS_Y_XUXV,
742 PIPE_FORMAT_R8G8_R8B8_UNORM, 2,
743 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
744 { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888 } }
745 };
746
747 static const struct dri2_format_mapping r8b8_r8g8_mapping = {
748 DRM_FORMAT_YVYU,
749 __DRI_IMAGE_FORMAT_NONE,
750 __DRI_IMAGE_COMPONENTS_Y_XUXV,
751 PIPE_FORMAT_R8B8_R8G8_UNORM, 2,
752 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
753 { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888 } }
754 };
755
756 static const struct dri2_format_mapping b8r8_g8r8_mapping = {
757 DRM_FORMAT_VYUY,
758 __DRI_IMAGE_FORMAT_NONE,
759 __DRI_IMAGE_COMPONENTS_Y_XUXV,
760 PIPE_FORMAT_B8R8_G8R8_UNORM, 2,
761 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
762 { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888 } }
763 };
764
765 static const struct dri2_format_mapping g8r8_b8r8_mapping = {
766 DRM_FORMAT_UYVY,
767 __DRI_IMAGE_FORMAT_NONE,
768 __DRI_IMAGE_COMPONENTS_Y_XUXV,
769 PIPE_FORMAT_G8R8_B8R8_UNORM, 2,
770 { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
771 { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888 } }
772 };
773
774 static const struct dri2_format_mapping r10_g10b10_mapping = {
775 DRM_FORMAT_NV15,
776 __DRI_IMAGE_FORMAT_NONE,
777 __DRI_IMAGE_COMPONENTS_Y_UV,
778 PIPE_FORMAT_R10_G10B10_420_UNORM,
779 2,
780 { { 0, 0, 0, __DRI_IMAGE_FORMAT_NONE },
781 { 1, 1, 1, __DRI_IMAGE_FORMAT_NONE } }
782 };
783
784 static const struct dri2_format_mapping r10_g10b10_mapping_422 = {
785 DRM_FORMAT_NV20,
786 __DRI_IMAGE_FORMAT_NONE,
787 __DRI_IMAGE_COMPONENTS_Y_UV,
788 PIPE_FORMAT_R10_G10B10_422_UNORM,
789 2,
790 { { 0, 0, 0, __DRI_IMAGE_FORMAT_NONE },
791 { 1, 1, 0, __DRI_IMAGE_FORMAT_NONE } }
792 };
793
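/* Translate between pipe fixed-rate compression values and the DRI enum. */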
794 static enum __DRIFixedRateCompression
795 to_dri_compression_rate(uint32_t rate)
796 {
797 switch (rate) {
798 case PIPE_COMPRESSION_FIXED_RATE_NONE:
799 return __DRI_FIXED_RATE_COMPRESSION_NONE;
800 case PIPE_COMPRESSION_FIXED_RATE_DEFAULT:
801 return __DRI_FIXED_RATE_COMPRESSION_DEFAULT;
802 case 1: return __DRI_FIXED_RATE_COMPRESSION_1BPC;
803 case 2: return __DRI_FIXED_RATE_COMPRESSION_2BPC;
804 case 3: return __DRI_FIXED_RATE_COMPRESSION_3BPC;
805 case 4: return __DRI_FIXED_RATE_COMPRESSION_4BPC;
806 case 5: return __DRI_FIXED_RATE_COMPRESSION_5BPC;
807 case 6: return __DRI_FIXED_RATE_COMPRESSION_6BPC;
808 case 7: return __DRI_FIXED_RATE_COMPRESSION_7BPC;
809 case 8: return __DRI_FIXED_RATE_COMPRESSION_8BPC;
810 case 9: return __DRI_FIXED_RATE_COMPRESSION_9BPC;
811 case 10: return __DRI_FIXED_RATE_COMPRESSION_10BPC;
812 case 11: return __DRI_FIXED_RATE_COMPRESSION_11BPC;
813 case 12: return __DRI_FIXED_RATE_COMPRESSION_12BPC;
814 default:
815 unreachable("invalid compression fixed-rate value");
816 }
817 }
818
819 static uint32_t
820 from_dri_compression_rate(enum __DRIFixedRateCompression rate)
821 {
822 switch (rate) {
823 case __DRI_FIXED_RATE_COMPRESSION_NONE:
824 return PIPE_COMPRESSION_FIXED_RATE_NONE;
825 case __DRI_FIXED_RATE_COMPRESSION_DEFAULT:
826 return PIPE_COMPRESSION_FIXED_RATE_DEFAULT;
827 case __DRI_FIXED_RATE_COMPRESSION_1BPC: return 1;
828 case __DRI_FIXED_RATE_COMPRESSION_2BPC: return 2;
829 case __DRI_FIXED_RATE_COMPRESSION_3BPC: return 3;
830 case __DRI_FIXED_RATE_COMPRESSION_4BPC: return 4;
831 case __DRI_FIXED_RATE_COMPRESSION_5BPC: return 5;
832 case __DRI_FIXED_RATE_COMPRESSION_6BPC: return 6;
833 case __DRI_FIXED_RATE_COMPRESSION_7BPC: return 7;
834 case __DRI_FIXED_RATE_COMPRESSION_8BPC: return 8;
835 case __DRI_FIXED_RATE_COMPRESSION_9BPC: return 9;
836 case __DRI_FIXED_RATE_COMPRESSION_10BPC: return 10;
837 case __DRI_FIXED_RATE_COMPRESSION_11BPC: return 11;
838 case __DRI_FIXED_RATE_COMPRESSION_12BPC: return 12;
839 default:
840 unreachable("invalid compression fixed-rate value");
841 }
842 }
843
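/**
 * Create a dri_image by importing winsys handles (one per plane), falling
 * back to the lowered per-plane YUV mappings above when the native format
 * cannot be sampled directly.
 */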
844 static struct dri_image *
845 dri_create_image_from_winsys(struct dri_screen *screen,
846 int width, int height, const struct dri2_format_mapping *map,
847 int num_handles, struct winsys_handle *whandle,
848 unsigned bind,
849 void *loaderPrivate)
850 {
851 struct pipe_screen *pscreen = screen->base.screen;
852 struct dri_image *img;
853 struct pipe_resource templ;
854 unsigned tex_usage = 0;
855 int i;
856 bool use_lowered = false;
857 const unsigned format_planes = util_format_get_num_planes(map->pipe_format);
858
859 if (pscreen->is_format_supported(pscreen, map->pipe_format, screen->target, 0, 0,
860 PIPE_BIND_RENDER_TARGET))
861 tex_usage |= PIPE_BIND_RENDER_TARGET;
862 if (pscreen->is_format_supported(pscreen, map->pipe_format, screen->target, 0, 0,
863 PIPE_BIND_SAMPLER_VIEW))
864 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
865
866 /* For NV12, see if we have support for sampling r8_g8b8 */
867 if (!tex_usage && map->pipe_format == PIPE_FORMAT_NV12 &&
868 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R8_G8B8_420_UNORM,
869 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
870 map = &r8_g8b8_mapping;
871 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
872 }
873
874 /* For NV21, see if we have support for sampling r8_b8g8 */
875 if (!tex_usage && map->pipe_format == PIPE_FORMAT_NV21 &&
876 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R8_B8G8_420_UNORM,
877 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
878 map = &r8_b8g8_mapping;
879 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
880 }
881
882 /* For NV16, see if we have support for sampling r8_g8b8 */
883 if (!tex_usage && map->pipe_format == PIPE_FORMAT_NV16 &&
884 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R8_G8B8_422_UNORM,
885 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
886 map = &r8_g8b8_mapping_422;
887 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
888 }
889
890 /* For NV15, see if we have support for sampling r10_g10b10 */
891 if (!tex_usage && map->pipe_format == PIPE_FORMAT_NV15 &&
892 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R10_G10B10_420_UNORM,
893 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
894 map = &r10_g10b10_mapping;
895 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
896 }
897
898 if (!tex_usage && map->pipe_format == PIPE_FORMAT_NV20 &&
899 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R10_G10B10_422_UNORM,
900 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
901 map = &r10_g10b10_mapping_422;
902 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
903 }
904
905 /* For YV12 and I420, see if we have support for sampling r8_b8_g8 or r8_g8_b8 */
906 if (!tex_usage && map->pipe_format == PIPE_FORMAT_IYUV) {
907 if (map->dri_fourcc == DRM_FORMAT_YUV420 &&
908 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R8_G8_B8_420_UNORM,
909 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
910 map = &r8_g8_b8_mapping;
911 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
912 } else if (map->dri_fourcc == DRM_FORMAT_YVU420 &&
913 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R8_B8_G8_420_UNORM,
914 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
915 map = &r8_b8_g8_mapping;
916 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
917 }
918 }
919
920 /* If the hardware supports R8G8_R8B8 style subsampled RGB formats, these
921 * can be used for YUYV and UYVY formats.
922 */
923 if (!tex_usage && map->pipe_format == PIPE_FORMAT_YUYV &&
924 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R8G8_R8B8_UNORM,
925 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
926 map = &r8g8_r8b8_mapping;
927 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
928 }
929
930 if (!tex_usage && map->pipe_format == PIPE_FORMAT_YVYU &&
931 pscreen->is_format_supported(pscreen, PIPE_FORMAT_R8B8_R8G8_UNORM,
932 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
933 map = &r8b8_r8g8_mapping;
934 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
935 }
936
937 if (!tex_usage && map->pipe_format == PIPE_FORMAT_UYVY &&
938 pscreen->is_format_supported(pscreen, PIPE_FORMAT_G8R8_B8R8_UNORM,
939 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
940 map = &g8r8_b8r8_mapping;
941 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
942 }
943
944 if (!tex_usage && map->pipe_format == PIPE_FORMAT_VYUY &&
945 pscreen->is_format_supported(pscreen, PIPE_FORMAT_B8R8_G8R8_UNORM,
946 screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW)) {
947 map = &b8r8_g8r8_mapping;
948 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
949 }
950
951 if (!tex_usage && util_format_is_yuv(map->pipe_format)) {
952 /* YUV format sampling can be emulated by the GL gallium frontend by
953 * using multiple samplers of varying formats.
954 * If no tex_usage is set and we detect a YUV format,
955 * test for support of all planes' sampler formats and
956 * add sampler view usage.
957 */
958 use_lowered = true;
959 if (dri2_yuv_dma_buf_supported(screen, map))
960 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
961 }
962
963 if (!tex_usage)
964 return NULL;
965
966 img = CALLOC_STRUCT(dri_image);
967 if (!img)
968 return NULL;
969
970 memset(&templ, 0, sizeof(templ));
971 templ.bind = tex_usage | bind;
972 templ.target = screen->target;
973 templ.last_level = 0;
974 templ.depth0 = 1;
975 templ.array_size = 1;
976 templ.width0 = width;
977 templ.height0 = height;
978
979 for (i = num_handles - 1; i >= format_planes; i--) {
980 struct pipe_resource *tex;
981
982 templ.next = img->texture;
983
984 tex = pscreen->resource_from_handle(pscreen, &templ, &whandle[i],
985 PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE);
986 if (!tex) {
987 pipe_resource_reference(&img->texture, NULL);
988 FREE(img);
989 return NULL;
990 }
991
992 img->texture = tex;
993 }
994
995 for (i = (use_lowered ? map->nplanes : format_planes) - 1; i >= 0; i--) {
996 struct pipe_resource *tex;
997
998 templ.next = img->texture;
999 templ.width0 = width >> map->planes[i].width_shift;
1000 templ.height0 = height >> map->planes[i].height_shift;
1001 if (use_lowered)
1002 templ.format = dri2_get_pipe_format_for_dri_format(map->planes[i].dri_format);
1003 else
1004 templ.format = map->pipe_format;
1005 assert(templ.format != PIPE_FORMAT_NONE);
1006
1007 tex = pscreen->resource_from_handle(pscreen,
1008 &templ, &whandle[use_lowered ? map->planes[i].buffer_index : i],
1009 PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE);
1010 if (!tex) {
1011 pipe_resource_reference(&img->texture, NULL);
1012 FREE(img);
1013 return NULL;
1014 }
1015
1016 /* Reject image creation if there's an inconsistency between
1017 * content protection status of tex and img.
1018 */
1019 const struct driOptionCache *optionCache = &screen->dev->option_cache;
1020 if (driQueryOptionb(optionCache, "force_protected_content_check") &&
1021 (tex->bind & PIPE_BIND_PROTECTED) != (bind & PIPE_BIND_PROTECTED)) {
1022 pipe_resource_reference(&img->texture, NULL);
1023 pipe_resource_reference(&tex, NULL);
1024 FREE(img);
1025 return NULL;
1026 }
1027
1028 img->texture = tex;
1029 }
1030
1031 img->level = 0;
1032 img->layer = 0;
1033 img->use = 0;
1034 img->in_fence_fd = -1;
1035 img->loader_private = loaderPrivate;
1036 img->screen = screen;
1037
1038 return img;
1039 }
1040
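/**
 * Return the number of planes implied by a fourcc/modifier pair, or 0 if the
 * modifier is not supported for that format.
 */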
1041 static unsigned
1042 dri2_get_modifier_num_planes(struct dri_screen *screen,
1043 uint64_t modifier, int fourcc)
1044 {
1045 struct pipe_screen *pscreen = screen->base.screen;
1046 const struct dri2_format_mapping *map = dri2_get_mapping_by_fourcc(fourcc);
1047
1048 if (!map)
1049 return 0;
1050
1051 switch (modifier) {
1052 case DRM_FORMAT_MOD_LINEAR:
1053 /* DRM_FORMAT_MOD_NONE is the same as LINEAR */
1054 case DRM_FORMAT_MOD_INVALID:
1055 return util_format_get_num_planes(map->pipe_format);
1056 default:
1057 if (!pscreen->is_dmabuf_modifier_supported ||
1058 !pscreen->is_dmabuf_modifier_supported(pscreen, modifier,
1059 map->pipe_format, NULL)) {
1060 return 0;
1061 }
1062
1063 if (pscreen->get_dmabuf_modifier_planes) {
1064 return pscreen->get_dmabuf_modifier_planes(pscreen, modifier,
1065 map->pipe_format);
1066 }
1067
1068 return map->nplanes;
1069 }
1070 }
1071
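/**
 * Allocate a new dri_image backed by a freshly created pipe_resource,
 * optionally constrained to the given list of modifiers.
 */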
1072 struct dri_image *
1073 dri_create_image(struct dri_screen *screen,
1074 int width, int height,
1075 int format,
1076 const uint64_t *modifiers,
1077 const unsigned _count,
1078 unsigned int use,
1079 void *loaderPrivate)
1080 {
1081 const struct dri2_format_mapping *map = dri2_get_mapping_by_format(format);
1082 struct pipe_screen *pscreen = screen->base.screen;
1083 struct dri_image *img;
1084 struct pipe_resource templ;
1085 unsigned tex_usage = 0;
1086 unsigned count = _count;
1087
1088 if (!map)
1089 return NULL;
1090
1091 if (!pscreen->resource_create_with_modifiers && count > 0)
1092 return NULL;
1093
1094 if (pscreen->is_format_supported(pscreen, map->pipe_format, screen->target,
1095 0, 0, PIPE_BIND_RENDER_TARGET))
1096 tex_usage |= PIPE_BIND_RENDER_TARGET;
1097 if (pscreen->is_format_supported(pscreen, map->pipe_format, screen->target,
1098 0, 0, PIPE_BIND_SAMPLER_VIEW))
1099 tex_usage |= PIPE_BIND_SAMPLER_VIEW;
1100
1101 if (!tex_usage)
1102 return NULL;
1103
1104 if (use & __DRI_IMAGE_USE_SCANOUT)
1105 tex_usage |= PIPE_BIND_SCANOUT;
1106 if (use & __DRI_IMAGE_USE_SHARE)
1107 tex_usage |= PIPE_BIND_SHARED;
1108 if (use & __DRI_IMAGE_USE_LINEAR)
1109 tex_usage |= PIPE_BIND_LINEAR;
1110 if (use & __DRI_IMAGE_USE_CURSOR) {
1111 if (width != 64 || height != 64)
1112 return NULL;
1113 tex_usage |= PIPE_BIND_CURSOR;
1114 }
1115 if (use & __DRI_IMAGE_USE_PROTECTED)
1116 tex_usage |= PIPE_BIND_PROTECTED;
1117 if (use & __DRI_IMAGE_USE_PRIME_BUFFER)
1118 tex_usage |= PIPE_BIND_PRIME_BLIT_DST;
1119 if (use & __DRI_IMAGE_USE_FRONT_RENDERING)
1120 tex_usage |= PIPE_BIND_USE_FRONT_RENDERING;
1121
1122 img = CALLOC_STRUCT(dri_image);
1123 if (!img)
1124 return NULL;
1125
1126 memset(&templ, 0, sizeof(templ));
1127 templ.bind = tex_usage;
1128 templ.format = map->pipe_format;
1129 templ.target = PIPE_TEXTURE_2D;
1130 templ.last_level = 0;
1131 templ.width0 = width;
1132 templ.height0 = height;
1133 templ.depth0 = 1;
1134 templ.array_size = 1;
1135
1136 if (modifiers)
1137 img->texture =
1138 screen->base.screen
1139 ->resource_create_with_modifiers(screen->base.screen,
1140 &templ,
1141 modifiers,
1142 count);
1143 else
1144 img->texture =
1145 screen->base.screen->resource_create(screen->base.screen, &templ);
1146 if (!img->texture) {
1147 FREE(img);
1148 return NULL;
1149 }
1150
1151 img->level = 0;
1152 img->layer = 0;
1153 img->dri_format = format;
1154 img->dri_fourcc = map->dri_fourcc;
1155 img->dri_components = 0;
1156 img->use = use;
1157 img->in_fence_fd = -1;
1158
1159 img->loader_private = loaderPrivate;
1160 img->screen = screen;
1161 return img;
1162 }
1163
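/* Image queries answered directly from the dri_image, without a winsys
 * round-trip. */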
1164 static bool
1165 dri2_query_image_common(struct dri_image *image, int attrib, int *value)
1166 {
1167 switch (attrib) {
1168 case __DRI_IMAGE_ATTRIB_WIDTH:
1169 *value = image->texture->width0;
1170 return true;
1171 case __DRI_IMAGE_ATTRIB_HEIGHT:
1172 *value = image->texture->height0;
1173 return true;
1174 case __DRI_IMAGE_ATTRIB_COMPONENTS:
1175 if (image->dri_components == 0)
1176 return false;
1177 *value = image->dri_components;
1178 return true;
1179 case __DRI_IMAGE_ATTRIB_FOURCC:
1180 if (image->dri_fourcc) {
1181 *value = image->dri_fourcc;
1182 } else {
1183 const struct dri2_format_mapping *map;
1184
1185 map = dri2_get_mapping_by_format(image->dri_format);
1186 if (!map)
1187 return false;
1188
1189 *value = map->dri_fourcc;
1190 }
1191 return true;
1192 case __DRI_IMAGE_ATTRIB_COMPRESSION_RATE:
1193 if (!image->texture)
1194 *value = __DRI_FIXED_RATE_COMPRESSION_NONE;
1195 else
1196 *value = to_dri_compression_rate(image->texture->compression_rate);
1197 return true;
1198
1199 default:
1200 return false;
1201 }
1202 }
1203
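/* Image queries answered by exporting a winsys handle for the resource. */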
1204 static bool
1205 dri2_query_image_by_resource_handle(struct dri_image *image, int attrib, int *value)
1206 {
1207 struct pipe_screen *pscreen = image->texture->screen;
1208 struct winsys_handle whandle;
1209 struct pipe_resource *tex;
1210 unsigned usage;
1211 memset(&whandle, 0, sizeof(whandle));
1212 whandle.plane = image->plane;
1213 int i;
1214
1215 switch (attrib) {
1216 case __DRI_IMAGE_ATTRIB_STRIDE:
1217 case __DRI_IMAGE_ATTRIB_OFFSET:
1218 case __DRI_IMAGE_ATTRIB_HANDLE:
1219 whandle.type = WINSYS_HANDLE_TYPE_KMS;
1220 break;
1221 case __DRI_IMAGE_ATTRIB_NAME:
1222 whandle.type = WINSYS_HANDLE_TYPE_SHARED;
1223 break;
1224 case __DRI_IMAGE_ATTRIB_FD:
1225 whandle.type = WINSYS_HANDLE_TYPE_FD;
1226 break;
1227 case __DRI_IMAGE_ATTRIB_NUM_PLANES:
1228 for (i = 0, tex = image->texture; tex; tex = tex->next)
1229 i++;
1230 *value = i;
1231 return true;
1232 case __DRI_IMAGE_ATTRIB_MODIFIER_UPPER:
1233 case __DRI_IMAGE_ATTRIB_MODIFIER_LOWER:
1234 whandle.type = WINSYS_HANDLE_TYPE_KMS;
1235 whandle.modifier = DRM_FORMAT_MOD_INVALID;
1236 break;
1237 default:
1238 return false;
1239 }
1240
1241 usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;
1242
1243 if (image->use & __DRI_IMAGE_USE_BACKBUFFER)
1244 usage |= PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
1245
1246 if (!pscreen->resource_get_handle(pscreen, NULL, image->texture,
1247 &whandle, usage))
1248 return false;
1249
1250 switch (attrib) {
1251 case __DRI_IMAGE_ATTRIB_STRIDE:
1252 *value = whandle.stride;
1253 return true;
1254 case __DRI_IMAGE_ATTRIB_OFFSET:
1255 *value = whandle.offset;
1256 return true;
1257 case __DRI_IMAGE_ATTRIB_HANDLE:
1258 case __DRI_IMAGE_ATTRIB_NAME:
1259 case __DRI_IMAGE_ATTRIB_FD:
1260 *value = whandle.handle;
1261 return true;
1262 case __DRI_IMAGE_ATTRIB_MODIFIER_UPPER:
1263 if (whandle.modifier == DRM_FORMAT_MOD_INVALID)
1264 return false;
1265 *value = (whandle.modifier >> 32) & 0xffffffff;
1266 return true;
1267 case __DRI_IMAGE_ATTRIB_MODIFIER_LOWER:
1268 if (whandle.modifier == DRM_FORMAT_MOD_INVALID)
1269 return false;
1270 *value = whandle.modifier & 0xffffffff;
1271 return true;
1272 default:
1273 return false;
1274 }
1275 }
1276
1277 static bool
1278 dri2_resource_get_param(struct dri_image *image, enum pipe_resource_param param,
1279 unsigned handle_usage, uint64_t *value)
1280 {
1281 struct pipe_screen *pscreen = image->texture->screen;
1282 if (!pscreen->resource_get_param)
1283 return false;
1284
1285 if (image->use & __DRI_IMAGE_USE_BACKBUFFER)
1286 handle_usage |= PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
1287
1288 return pscreen->resource_get_param(pscreen, NULL, image->texture,
1289 image->plane, 0, 0, param, handle_usage,
1290 value);
1291 }
1292
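/* Image queries answered through pipe_screen::resource_get_param, when the
 * driver implements it. */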
1293 static bool
1294 dri2_query_image_by_resource_param(struct dri_image *image, int attrib, int *value)
1295 {
1296 enum pipe_resource_param param;
1297 uint64_t res_param;
1298 unsigned handle_usage;
1299
1300 if (!image->texture->screen->resource_get_param)
1301 return false;
1302
1303 switch (attrib) {
1304 case __DRI_IMAGE_ATTRIB_STRIDE:
1305 param = PIPE_RESOURCE_PARAM_STRIDE;
1306 break;
1307 case __DRI_IMAGE_ATTRIB_OFFSET:
1308 param = PIPE_RESOURCE_PARAM_OFFSET;
1309 break;
1310 case __DRI_IMAGE_ATTRIB_NUM_PLANES:
1311 param = PIPE_RESOURCE_PARAM_NPLANES;
1312 break;
1313 case __DRI_IMAGE_ATTRIB_MODIFIER_UPPER:
1314 case __DRI_IMAGE_ATTRIB_MODIFIER_LOWER:
1315 param = PIPE_RESOURCE_PARAM_MODIFIER;
1316 break;
1317 case __DRI_IMAGE_ATTRIB_HANDLE:
1318 param = PIPE_RESOURCE_PARAM_HANDLE_TYPE_KMS;
1319 break;
1320 case __DRI_IMAGE_ATTRIB_NAME:
1321 param = PIPE_RESOURCE_PARAM_HANDLE_TYPE_SHARED;
1322 break;
1323 case __DRI_IMAGE_ATTRIB_FD:
1324 param = PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD;
1325 break;
1326 default:
1327 return false;
1328 }
1329
1330 handle_usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;
1331
1332 if (!dri2_resource_get_param(image, param, handle_usage, &res_param))
1333 return false;
1334
1335 switch (attrib) {
1336 case __DRI_IMAGE_ATTRIB_STRIDE:
1337 case __DRI_IMAGE_ATTRIB_OFFSET:
1338 case __DRI_IMAGE_ATTRIB_NUM_PLANES:
1339 if (res_param > INT_MAX)
1340 return false;
1341 *value = (int)res_param;
1342 return true;
1343 case __DRI_IMAGE_ATTRIB_HANDLE:
1344 case __DRI_IMAGE_ATTRIB_NAME:
1345 case __DRI_IMAGE_ATTRIB_FD:
1346 if (res_param > UINT_MAX)
1347 return false;
1348 *value = (int)res_param;
1349 return true;
1350 case __DRI_IMAGE_ATTRIB_MODIFIER_UPPER:
1351 if (res_param == DRM_FORMAT_MOD_INVALID)
1352 return false;
1353 *value = (res_param >> 32) & 0xffffffff;
1354 return true;
1355 case __DRI_IMAGE_ATTRIB_MODIFIER_LOWER:
1356 if (res_param == DRM_FORMAT_MOD_INVALID)
1357 return false;
1358 *value = res_param & 0xffffffff;
1359 return true;
1360 default:
1361 return false;
1362 }
1363 }
1364
1365 GLboolean
1366 dri2_query_image(struct dri_image *image, int attrib, int *value)
1367 {
1368 if (dri2_query_image_common(image, attrib, value))
1369 return GL_TRUE;
1370 else if (dri2_query_image_by_resource_param(image, attrib, value))
1371 return GL_TRUE;
1372 else if (dri2_query_image_by_resource_handle(image, attrib, value))
1373 return GL_TRUE;
1374 else
1375 return GL_FALSE;
1376 }
1377
1378 struct dri_image *
1379 dri2_dup_image(struct dri_image *image, void *loaderPrivate)
1380 {
1381 struct dri_image *img;
1382
1383 img = CALLOC_STRUCT(dri_image);
1384 if (!img)
1385 return NULL;
1386
1387 img->texture = NULL;
1388 pipe_resource_reference(&img->texture, image->texture);
1389 img->level = image->level;
1390 img->layer = image->layer;
1391 img->dri_format = image->dri_format;
1392 img->internal_format = image->internal_format;
1393 /* This should be 0 for sub-images, but dup is also used for base images. */
1394 img->dri_components = image->dri_components;
1395 img->use = image->use;
1396 img->in_fence_fd = (image->in_fence_fd > 0) ?
1397 os_dupfd_cloexec(image->in_fence_fd) : -1;
1398 img->loader_private = loaderPrivate;
1399 img->screen = image->screen;
1400
1401 return img;
1402 }
1403
1404 GLboolean
1405 dri2_validate_usage(struct dri_image *image, unsigned int use)
1406 {
1407 if (!image || !image->texture)
1408 return false;
1409
1410 struct pipe_screen *screen = image->texture->screen;
1411 if (!screen->check_resource_capability)
1412 return true;
1413
1414 /* We don't want to check these:
1415 * __DRI_IMAGE_USE_SHARE (all images are shareable)
1416 * __DRI_IMAGE_USE_BACKBUFFER (all images support this)
1417 */
1418 unsigned bind = 0;
1419 if (use & __DRI_IMAGE_USE_SCANOUT)
1420 bind |= PIPE_BIND_SCANOUT;
1421 if (use & __DRI_IMAGE_USE_LINEAR)
1422 bind |= PIPE_BIND_LINEAR;
1423 if (use & __DRI_IMAGE_USE_CURSOR)
1424 bind |= PIPE_BIND_CURSOR;
1425
1426 if (!bind)
1427 return true;
1428
1429 return screen->check_resource_capability(screen, image->texture, bind);
1430 }
1431
1432 struct dri_image *
1433 dri2_from_names(struct dri_screen *screen, int width, int height, int fourcc,
1434 int *names, int num_names, int *strides, int *offsets,
1435 void *loaderPrivate)
1436 {
1437 const struct dri2_format_mapping *map = dri2_get_mapping_by_fourcc(fourcc);
1438 struct dri_image *img;
1439 struct winsys_handle whandle;
1440
1441 if (!map)
1442 return NULL;
1443
1444 if (num_names != 1)
1445 return NULL;
1446
1447 memset(&whandle, 0, sizeof(whandle));
1448 whandle.type = WINSYS_HANDLE_TYPE_SHARED;
1449 whandle.handle = names[0];
1450 whandle.stride = strides[0];
1451 whandle.offset = offsets[0];
1452 whandle.format = map->pipe_format;
1453 whandle.modifier = DRM_FORMAT_MOD_INVALID;
1454
1455 img = dri_create_image_from_winsys(screen, width, height, map,
1456 1, &whandle, 0, loaderPrivate);
1457 if (img == NULL)
1458 return NULL;
1459
1460 img->dri_components = map->dri_components;
1461 img->dri_fourcc = map->dri_fourcc;
1462 img->dri_format = map->dri_format;
1463
1464 return img;
1465 }
1466
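/**
 * Create a dri_image referring to a single plane of a (possibly planar)
 * image, rejecting out-of-range planes.
 */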
1467 struct dri_image *
1468 dri2_from_planar(struct dri_image *image, int plane, void *loaderPrivate)
1469 {
1470 struct dri_image *img;
1471
1472 if (plane < 0) {
1473 return NULL;
1474 } else if (plane > 0) {
1475 uint64_t planes;
1476 if (!dri2_resource_get_param(image, PIPE_RESOURCE_PARAM_NPLANES, 0,
1477 &planes) ||
1478 plane >= planes) {
1479 return NULL;
1480 }
1481 }
1482
1483 if (image->dri_components == 0) {
1484 uint64_t modifier;
1485 if (!dri2_resource_get_param(image, PIPE_RESOURCE_PARAM_MODIFIER, 0,
1486 &modifier) ||
1487 modifier == DRM_FORMAT_MOD_INVALID) {
1488 return NULL;
1489 }
1490 }
1491
1492 img = dri2_dup_image(image, loaderPrivate);
1493 if (img == NULL)
1494 return NULL;
1495
1496 if (img->texture->screen->resource_changed)
1497 img->texture->screen->resource_changed(img->texture->screen,
1498 img->texture);
1499
1500 /* Set this to 0 for sub-images. */
1501 img->dri_components = 0;
1502 img->plane = plane;
1503 return img;
1504 }
1505
1506 bool
1507 dri_query_dma_buf_modifiers(struct dri_screen *screen, int fourcc, int max,
1508 uint64_t *modifiers, unsigned int *external_only,
1509 int *count)
1510 {
1511 struct pipe_screen *pscreen = screen->base.screen;
1512 const struct dri2_format_mapping *map = dri2_get_mapping_by_fourcc(fourcc);
1513 enum pipe_format format;
1514
1515 if (!map)
1516 return false;
1517
1518 format = map->pipe_format;
1519
1520 bool native_sampling = pscreen->is_format_supported(pscreen, format, screen->target, 0, 0,
1521 PIPE_BIND_SAMPLER_VIEW);
1522 if (pscreen->is_format_supported(pscreen, format, screen->target, 0, 0,
1523 PIPE_BIND_RENDER_TARGET) ||
1524 native_sampling ||
1525 dri2_yuv_dma_buf_supported(screen, map)) {
1526 if (pscreen->query_dmabuf_modifiers != NULL) {
1527 pscreen->query_dmabuf_modifiers(pscreen, format, max, modifiers,
1528 external_only, count);
1529 if (!native_sampling && external_only) {
1530 /* To support it using YUV lowering, we need it to be samplerExternalOES.
1531 */
1532 for (int i = 0; i < *count; i++)
1533 external_only[i] = true;
1534 }
1535 } else {
1536 *count = 0;
1537 }
1538 return true;
1539 }
1540 return false;
1541 }
1542
1543 bool
1544 dri2_query_dma_buf_format_modifier_attribs(struct dri_screen *screen,
1545 uint32_t fourcc, uint64_t modifier,
1546 int attrib, uint64_t *value)
1547 {
1548 struct pipe_screen *pscreen = screen->base.screen;
1549
1550 if (!pscreen->query_dmabuf_modifiers)
1551 return false;
1552
1553 switch (attrib) {
1554 case __DRI_IMAGE_FORMAT_MODIFIER_ATTRIB_PLANE_COUNT: {
1555 uint64_t mod_planes = dri2_get_modifier_num_planes(screen, modifier,
1556 fourcc);
1557 if (mod_planes > 0)
1558 *value = mod_planes;
1559 return mod_planes > 0;
1560 }
1561 default:
1562 return false;
1563 }
1564 }
1565
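/**
 * Import a dma-buf (one fd per plane) as a dri_image, validating the plane
 * count expected for the given fourcc/modifier combination.
 */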
1566 struct dri_image *
1567 dri2_from_dma_bufs(struct dri_screen *screen,
1568 int width, int height, int fourcc,
1569 uint64_t modifier, int *fds, int num_fds,
1570 int *strides, int *offsets,
1571 enum __DRIYUVColorSpace yuv_color_space,
1572 enum __DRISampleRange sample_range,
1573 enum __DRIChromaSiting horizontal_siting,
1574 enum __DRIChromaSiting vertical_siting,
1575 uint32_t dri_flags,
1576 unsigned *error,
1577 void *loaderPrivate)
1578 {
1579 struct dri_image *img;
1580 const struct dri2_format_mapping *map = dri2_get_mapping_by_fourcc(fourcc);
1581
1582 if (!screen->dmabuf_import) {
1583 if (error)
1584 *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
1585 return NULL;
1586 }
1587
1588 unsigned err = __DRI_IMAGE_ERROR_SUCCESS;
1589 /* Allow a NULL error arg since many callers don't care. */
1590 unsigned unused_error;
1591 if (!error)
1592 error = &unused_error;
1593
1594 uint32_t flags = 0;
1595 if (dri_flags & __DRI_IMAGE_PROTECTED_CONTENT_FLAG)
1596 flags |= PIPE_BIND_PROTECTED;
1597 if (dri_flags & __DRI_IMAGE_PRIME_LINEAR_BUFFER)
1598 flags |= PIPE_BIND_PRIME_BLIT_DST;
1599
1600 const int expected_num_fds = dri2_get_modifier_num_planes(screen, modifier, fourcc);
1601 if (!map || expected_num_fds == 0) {
1602 err = __DRI_IMAGE_ERROR_BAD_MATCH;
1603 goto exit;
1604 }
1605
1606 if (num_fds != expected_num_fds) {
1607 err = __DRI_IMAGE_ERROR_BAD_MATCH;
1608 goto exit;
1609 }
1610
1611 struct winsys_handle whandles[4];
1612 memset(whandles, 0, sizeof(whandles));
1613
1614 for (int i = 0; i < num_fds; i++) {
1615 if (fds[i] < 0) {
1616 err = __DRI_IMAGE_ERROR_BAD_ALLOC;
1617 goto exit;
1618 }
1619
1620 whandles[i].type = WINSYS_HANDLE_TYPE_FD;
1621 whandles[i].handle = (unsigned)fds[i];
1622 whandles[i].stride = (unsigned)strides[i];
1623 whandles[i].offset = (unsigned)offsets[i];
1624 whandles[i].format = map->pipe_format;
1625 whandles[i].modifier = modifier;
1626 whandles[i].plane = i;
1627 }
1628
1629 img = dri_create_image_from_winsys(screen, width, height, map,
1630 num_fds, whandles, flags,
1631 loaderPrivate);
1632 if (img == NULL) {
1633 err = __DRI_IMAGE_ERROR_BAD_ALLOC;
1634 goto exit;
1635 }
1636
1637 img->dri_components = map->dri_components;
1638 img->dri_fourcc = fourcc;
1639 img->dri_format = map->dri_format;
1640 img->imported_dmabuf = true;
1641 img->yuv_color_space = yuv_color_space;
1642 img->sample_range = sample_range;
1643 img->horizontal_siting = horizontal_siting;
1644 img->vertical_siting = vertical_siting;
1645
1646 *error = __DRI_IMAGE_ERROR_SUCCESS;
1647 return img;
1648
1649 exit:
1650 *error = err;
1651 return NULL;
1652 }
1653
1654 bool
1655 dri2_query_compression_rates(struct dri_screen *screen, const struct dri_config *config, int max,
1656 enum __DRIFixedRateCompression *rates, int *count)
1657 {
1658 struct pipe_screen *pscreen = screen->base.screen;
1659 struct gl_config *gl_config = (struct gl_config *) config;
1660 enum pipe_format format = gl_config->color_format;
1661 uint32_t pipe_rates[max];
1662
1663 if (!pscreen->is_format_supported(pscreen, format, screen->target, 0, 0,
1664 PIPE_BIND_RENDER_TARGET))
1665 return false;
1666
1667 if (pscreen->query_compression_rates != NULL) {
1668 pscreen->query_compression_rates(pscreen, format, max, pipe_rates, count);
1669 for (int i = 0; i < *count && i < max; ++i)
1670 rates[i] = to_dri_compression_rate(pipe_rates[i]);
1671 } else {
1672 *count = 0;
1673 }
1674
1675 return true;
1676 }
1677
1678 bool
1679 dri2_query_compression_modifiers(struct dri_screen *screen, uint32_t fourcc,
1680 enum __DRIFixedRateCompression rate, int max,
1681 uint64_t *modifiers, int *count)
1682 {
1683 struct pipe_screen *pscreen = screen->base.screen;
1684 const struct dri2_format_mapping *map = dri2_get_mapping_by_fourcc(fourcc);
1685 uint32_t pipe_rate = from_dri_compression_rate(rate);
1686
1687 if (!map)
1688 return false;
1689
1690 if (!pscreen->is_format_supported(pscreen, map->pipe_format, screen->target,
1691 0, 0, PIPE_BIND_RENDER_TARGET))
1692 return false;
1693
1694 if (pscreen->query_compression_modifiers != NULL) {
1695 pscreen->query_compression_modifiers(pscreen, map->pipe_format, pipe_rate,
1696 max, modifiers, count);
1697 } else {
1698 *count = 0;
1699 }
1700
1701 return true;
1702 }
1703
1704 void
1705 dri2_blit_image(struct dri_context *ctx, struct dri_image *dst, struct dri_image *src,
1706 int dstx0, int dsty0, int dstwidth, int dstheight,
1707 int srcx0, int srcy0, int srcwidth, int srcheight,
1708 int flush_flag)
1709 {
1710 struct pipe_context *pipe = ctx->st->pipe;
1711 struct pipe_screen *screen;
1712 struct pipe_fence_handle *fence;
1713 struct pipe_blit_info blit;
1714
1715 if (!dst || !src)
1716 return;
1717
1718 /* Wait for glthread to finish because we can't use pipe_context from
1719 * multiple threads.
1720 */
1721 _mesa_glthread_finish(ctx->st->ctx);
1722
1723 dri_image_fence_sync(ctx, dst);
1724
1725 memset(&blit, 0, sizeof(blit));
1726 blit.dst.resource = dst->texture;
1727 blit.dst.box.x = dstx0;
1728 blit.dst.box.y = dsty0;
1729 blit.dst.box.width = dstwidth;
1730 blit.dst.box.height = dstheight;
1731 blit.dst.box.depth = 1;
1732 blit.dst.format = dst->texture->format;
1733 blit.src.resource = src->texture;
1734 blit.src.box.x = srcx0;
1735 blit.src.box.y = srcy0;
1736 blit.src.box.width = srcwidth;
1737 blit.src.box.height = srcheight;
1738 blit.src.box.depth = 1;
1739 blit.src.format = src->texture->format;
1740 blit.mask = PIPE_MASK_RGBA;
1741 blit.filter = PIPE_TEX_FILTER_NEAREST;
1742
1743 pipe->blit(pipe, &blit);
1744
1745 if (flush_flag == __BLIT_FLAG_FLUSH) {
1746 pipe->flush_resource(pipe, dst->texture);
1747 st_context_flush(ctx->st, 0, NULL, NULL, NULL);
1748 } else if (flush_flag == __BLIT_FLAG_FINISH) {
1749 screen = ctx->screen->base.screen;
1750 pipe->flush_resource(pipe, dst->texture);
1751 st_context_flush(ctx->st, 0, &fence, NULL, NULL);
1752 (void) screen->fence_finish(screen, NULL, fence, OS_TIMEOUT_INFINITE);
1753 screen->fence_reference(screen, &fence, NULL);
1754 }
1755 }
1756
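/*
 * Illustrative sketch (not from the original file): a 1:1 copy of a whole
 * image through dri2_blit_image(), flushed but not waited on, might look
 * like:
 *
 *    dri2_blit_image(ctx, dst, src,
 *                    0, 0, dst->texture->width0, dst->texture->height0,
 *                    0, 0, src->texture->width0, src->texture->height0,
 *                    __BLIT_FLAG_FLUSH);
 *
 * The dst/src images are assumed to come from the caller; passing 0 as
 * flush_flag performs the blit without the explicit flush or fence wait.
 */
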
void *
dri2_map_image(struct dri_context *ctx, struct dri_image *image,
               int x0, int y0, int width, int height,
               unsigned int flags, int *stride, void **data)
{
   struct pipe_context *pipe = ctx->st->pipe;
   enum pipe_map_flags pipe_access = 0;
   struct pipe_transfer *trans;
   void *map;

   if (!image || !data || *data)
      return NULL;

   unsigned plane = image->plane;
   if (plane >= dri2_get_mapping_by_format(image->dri_format)->nplanes)
      return NULL;

   /* Wait for glthread to finish because we can't use pipe_context from
    * multiple threads.
    */
   _mesa_glthread_finish(ctx->st->ctx);

   dri_image_fence_sync(ctx, image);

   struct pipe_resource *resource = image->texture;
   while (plane--)
      resource = resource->next;

   if (flags & __DRI_IMAGE_TRANSFER_READ)
      pipe_access |= PIPE_MAP_READ;
   if (flags & __DRI_IMAGE_TRANSFER_WRITE)
      pipe_access |= PIPE_MAP_WRITE;

   map = pipe_texture_map(pipe, resource, 0, 0, pipe_access, x0, y0,
                          width, height, &trans);
   if (map) {
      *data = trans;
      *stride = trans->stride;
   }

   return map;
}

void
dri2_unmap_image(struct dri_context *ctx, struct dri_image *image, void *data)
{
   struct pipe_context *pipe = ctx->st->pipe;

   /* Wait for glthread to finish because we can't use pipe_context from
    * multiple threads.
    */
   _mesa_glthread_finish(ctx->st->ctx);

   pipe_texture_unmap(pipe, (struct pipe_transfer *)data);
}

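/*
 * Illustrative sketch (not from the original file): dri2_map_image() and
 * dri2_unmap_image() are used as a pair, e.g. to write pixels into an image:
 *
 *    void *handle = NULL;
 *    int stride;
 *    char *ptr = dri2_map_image(ctx, image, 0, 0, width, height,
 *                               __DRI_IMAGE_TRANSFER_WRITE, &stride, &handle);
 *    if (ptr) {
 *       // write height rows of pixels, advancing by stride bytes per row
 *       dri2_unmap_image(ctx, image, handle);
 *    }
 *
 * width/height are assumed to lie within the image bounds; the opaque
 * transfer handle returned through the last argument must be passed back to
 * dri2_unmap_image().
 */
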
int
dri2_get_capabilities(struct dri_screen *screen)
{
   return (screen->can_share_buffer ? __DRI_IMAGE_CAP_GLOBAL_NAMES : 0);
}

int
dri_interop_query_device_info(struct dri_context *ctx,
                              struct mesa_glinterop_device_info *out)
{
   return st_interop_query_device_info(ctx->st, out);
}

int
dri_interop_export_object(struct dri_context *ctx,
                          struct mesa_glinterop_export_in *in,
                          struct mesa_glinterop_export_out *out)
{
   return st_interop_export_object(ctx->st, in, out);
}

int
dri_interop_flush_objects(struct dri_context *ctx,
                          unsigned count, struct mesa_glinterop_export_in *objects,
                          struct mesa_glinterop_flush_out *out)
{
   return st_interop_flush_objects(ctx->st, count, objects, out);
}

/**
 * \brief the DRI2bufferDamageExtension set_damage_region method
 *
 * \p rects contains \p nrects rectangles packed as consecutive
 * (x, y, width, height) integer quadruples.
 */
void
dri_set_damage_region(struct dri_drawable *drawable, unsigned int nrects, int *rects)
{
   struct pipe_box *boxes = NULL;

   if (nrects) {
      boxes = CALLOC(nrects, sizeof(*boxes));
      assert(boxes);

      for (unsigned int i = 0; i < nrects; i++) {
         int *rect = &rects[i * 4];

         u_box_2d(rect[0], rect[1], rect[2], rect[3], &boxes[i]);
      }
   }

   FREE(drawable->damage_rects);
   drawable->damage_rects = boxes;
   drawable->num_damage_rects = nrects;

   /* Only apply the damage region if the BACK_LEFT texture is up-to-date. */
   if (drawable->texture_stamp == drawable->lastStamp &&
       (drawable->texture_mask & (1 << ST_ATTACHMENT_BACK_LEFT))) {
      struct pipe_screen *screen = drawable->screen->base.screen;
      struct pipe_resource *resource;

      if (drawable->stvis.samples > 1)
         resource = drawable->msaa_textures[ST_ATTACHMENT_BACK_LEFT];
      else
         resource = drawable->textures[ST_ATTACHMENT_BACK_LEFT];

      screen->set_damage_region(screen, resource,
                                drawable->num_damage_rects,
                                drawable->damage_rects);
   }
}

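/*
 * Illustrative sketch (not from the original file): two damage rectangles
 * would be passed to dri_set_damage_region() packed back to back, and the
 * stored rectangles are dropped again by passing nrects == 0:
 *
 *    int rects[] = { 0,  0, 32, 32,     // x, y, width, height
 *                    64, 64, 16, 16 };
 *    dri_set_damage_region(drawable, 2, rects);
 *    ...
 *    dri_set_damage_region(drawable, 0, NULL);
 *
 * The drawable and the coordinate values are assumptions for illustration
 * only.
 */
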
/**
 * \brief the DRI2blobExtension set_cache_funcs method
 */
void
dri_set_blob_cache_funcs(struct dri_screen *screen, __DRIblobCacheSet set,
                         __DRIblobCacheGet get)
{
   struct pipe_screen *pscreen = screen->base.screen;

   if (!pscreen->get_disk_shader_cache)
      return;

   struct disk_cache *cache = pscreen->get_disk_shader_cache(pscreen);

   if (!cache)
      return;

   disk_cache_set_callbacks(cache, set, get);
}

/*
 * Backend function init_screen.
 */

void
dri2_init_drawable(struct dri_drawable *drawable, bool isPixmap, int alphaBits)
{
   drawable->allocate_textures = dri2_allocate_textures;
   drawable->flush_frontbuffer = dri2_flush_frontbuffer;
   drawable->update_tex_buffer = dri2_update_tex_buffer;
   drawable->flush_swapbuffers = dri2_flush_swapbuffers;
}

/**
 * This is the driver-specific part of the createNewScreen entry point.
 *
 * Returns the pipe_screen created for this device, or NULL on failure.
 */
struct pipe_screen *
dri2_init_screen(struct dri_screen *screen, bool driver_name_is_inferred)
{
   struct pipe_screen *pscreen = NULL;

   screen->can_share_buffer = true;
   screen->auto_fake_front = dri_with_format(screen);

#ifdef HAVE_LIBDRM
   if (pipe_loader_drm_probe_fd(&screen->dev, screen->fd, false))
      pscreen = pipe_loader_create_screen(screen->dev, driver_name_is_inferred);
#endif

   return pscreen;
}

/**
 * This is the driver-specific part of the createNewScreen entry point,
 * for the software rasterizer on KMS (kms_swrast) path.
 *
 * Returns the pipe_screen created for this device, or NULL on failure.
 */
struct pipe_screen *
dri_swrast_kms_init_screen(struct dri_screen *screen, bool driver_name_is_inferred)
{
   struct pipe_screen *pscreen = NULL;
   screen->can_share_buffer = false;
   screen->auto_fake_front = dri_with_format(screen);

#if defined(HAVE_DRISW_KMS) && defined(HAVE_SWRAST)
   if (pipe_loader_sw_probe_kms(&screen->dev, screen->fd))
      pscreen = pipe_loader_create_screen(screen->dev, driver_name_is_inferred);
#endif

   return pscreen;
}

int
dri_query_compatible_render_only_device_fd(int kms_only_fd)
{
#ifdef HAVE_LIBDRM
   return pipe_loader_get_compatible_render_capable_device_fd(kms_only_fd);
#else
   return -1;
#endif
}

/* vim: set sw=3 ts=8 sts=3 expandtab: */