/*
 * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <dlfcn.h>
#include <unistd.h> /* close() */
#include "drm-uapi/drm_fourcc.h"
#include "util/u_memory.h"
#include "pipe/p_screen.h"
#include "state_tracker/st_texture.h"
#include "state_tracker/st_context.h"
#include "main/texobj.h"

#include "dri_helpers.h"
#include "loader_dri_helper.h"

static bool
dri2_is_opencl_interop_loaded_locked(struct dri_screen *screen)
{
   return screen->opencl_dri_event_add_ref &&
          screen->opencl_dri_event_release &&
          screen->opencl_dri_event_wait &&
          screen->opencl_dri_event_get_fence;
}

static bool
dri2_load_opencl_interop(struct dri_screen *screen)
{
#if defined(RTLD_DEFAULT)
   bool success;

   mtx_lock(&screen->opencl_func_mutex);

   if (dri2_is_opencl_interop_loaded_locked(screen)) {
      mtx_unlock(&screen->opencl_func_mutex);
      return true;
   }

   screen->opencl_dri_event_add_ref =
      dlsym(RTLD_DEFAULT, "opencl_dri_event_add_ref");
   screen->opencl_dri_event_release =
      dlsym(RTLD_DEFAULT, "opencl_dri_event_release");
   screen->opencl_dri_event_wait =
      dlsym(RTLD_DEFAULT, "opencl_dri_event_wait");
   screen->opencl_dri_event_get_fence =
      dlsym(RTLD_DEFAULT, "opencl_dri_event_get_fence");

   success = dri2_is_opencl_interop_loaded_locked(screen);
   mtx_unlock(&screen->opencl_func_mutex);
   return success;
#else
   return false;
#endif
}

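/* A dri2_fence wraps one of two backends: a gallium fence handle for
 * fences created from this context, or an OpenCL event for fences
 * imported via get_fence_from_cl_event. Exactly one of pipe_fence and
 * cl_event is set.
 */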
struct dri2_fence {
   struct dri_screen *driscreen;
   struct pipe_fence_handle *pipe_fence;
   void *cl_event;
};

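/* Advertise __DRI_FENCE_CAP_NATIVE_FD when the gallium driver can import
 * and export native sync file descriptors.
 */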
static unsigned dri2_fence_get_caps(__DRIscreen *_screen)
{
   struct dri_screen *driscreen = dri_screen(_screen);
   struct pipe_screen *screen = driscreen->base.screen;
   unsigned caps = 0;

   if (screen->get_param(screen, PIPE_CAP_NATIVE_FENCE_FD))
      caps |= __DRI_FENCE_CAP_NATIVE_FD;

   return caps;
}

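/* Create a fence by flushing the context and capturing the fence the
 * driver emits for that flush. Returns NULL if the driver does not
 * produce one.
 */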
static void *
dri2_create_fence(__DRIcontext *_ctx)
{
   struct dri_context *ctx = dri_context(_ctx);
   struct st_context *st = ctx->st;
   struct dri2_fence *fence = CALLOC_STRUCT(dri2_fence);

   if (!fence)
      return NULL;

   /* Wait for glthread to finish because we can't use pipe_context from
    * multiple threads.
    */
   _mesa_glthread_finish(st->ctx);

   st_context_flush(st, 0, &fence->pipe_fence, NULL, NULL);

   if (!fence->pipe_fence) {
      FREE(fence);
      return NULL;
   }

   fence->driscreen = ctx->screen;
   return fence;
}

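/* EGL_ANDROID_native_fence_sync support: with fd == -1, flush with
 * ST_FLUSH_FENCE_FD to create an exportable fence; otherwise wrap the
 * given native sync fd in a new fence.
 */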
static void *
dri2_create_fence_fd(__DRIcontext *_ctx, int fd)
{
   struct dri_context *dri_ctx = dri_context(_ctx);
   struct st_context *st = dri_ctx->st;
   struct pipe_context *ctx = st->pipe;
   struct dri2_fence *fence = CALLOC_STRUCT(dri2_fence);

   if (!fence)
      return NULL;

   /* Wait for glthread to finish because we can't use pipe_context from
    * multiple threads.
    */
   _mesa_glthread_finish(st->ctx);

   if (fd == -1) {
      /* exporting a driver-created fence, flush: */
      st_context_flush(st, ST_FLUSH_FENCE_FD, &fence->pipe_fence, NULL, NULL);
   } else {
      /* importing a foreign fence fd: */
      ctx->create_fence_fd(ctx, &fence->pipe_fence, fd, PIPE_FD_TYPE_NATIVE_SYNC);
   }
   if (!fence->pipe_fence) {
      FREE(fence);
      return NULL;
   }

   fence->driscreen = dri_ctx->screen;
   return fence;
}

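/* Export a fence as a native sync fd (see dri2_fence_get_caps). */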
static int
dri2_get_fence_fd(__DRIscreen *_screen, void *_fence)
{
   struct dri_screen *driscreen = dri_screen(_screen);
   struct pipe_screen *screen = driscreen->base.screen;
   struct dri2_fence *fence = (struct dri2_fence*)_fence;

   return screen->fence_get_fd(screen, fence->pipe_fence);
}

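/* Wrap an OpenCL event as a fence. The interop entrypoints are looked up
 * with dlsym() from the OpenCL implementation living in the same process,
 * so this fails unless that implementation exports them.
 */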
static void *
dri2_get_fence_from_cl_event(__DRIscreen *_screen, intptr_t cl_event)
{
   struct dri_screen *driscreen = dri_screen(_screen);
   struct dri2_fence *fence;

   if (!dri2_load_opencl_interop(driscreen))
      return NULL;

   fence = CALLOC_STRUCT(dri2_fence);
   if (!fence)
      return NULL;

   fence->cl_event = (void*)cl_event;

   if (!driscreen->opencl_dri_event_add_ref(fence->cl_event)) {
      FREE(fence);
      return NULL;
   }

   fence->driscreen = driscreen;
   return fence;
}

static void
dri2_destroy_fence(__DRIscreen *_screen, void *_fence)
{
   struct dri_screen *driscreen = dri_screen(_screen);
   struct pipe_screen *screen = driscreen->base.screen;
   struct dri2_fence *fence = (struct dri2_fence*)_fence;

   if (fence->pipe_fence)
      screen->fence_reference(screen, &fence->pipe_fence, NULL);
   else if (fence->cl_event)
      driscreen->opencl_dri_event_release(fence->cl_event);
   else
      assert(0);

   FREE(fence);
}

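/* Wait for a fence with a timeout in nanoseconds. For CL-event fences,
 * prefer the gallium fence backing the event if one exists; otherwise
 * fall back to waiting on the event itself.
 */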
static GLboolean
dri2_client_wait_sync(__DRIcontext *_ctx, void *_fence, unsigned flags,
                      uint64_t timeout)
{
   struct dri2_fence *fence = (struct dri2_fence*)_fence;
   struct dri_screen *driscreen = fence->driscreen;
   struct pipe_screen *screen = driscreen->base.screen;

   /* No need to flush. The context was flushed when the fence was created. */

   if (fence->pipe_fence)
      return screen->fence_finish(screen, NULL, fence->pipe_fence, timeout);
   else if (fence->cl_event) {
      struct pipe_fence_handle *pipe_fence =
         driscreen->opencl_dri_event_get_fence(fence->cl_event);

      if (pipe_fence)
         return screen->fence_finish(screen, NULL, pipe_fence, timeout);
      else
         return driscreen->opencl_dri_event_wait(fence->cl_event, timeout);
   }
   else {
      assert(0);
      return false;
   }
}

static void
dri2_server_wait_sync(__DRIcontext *_ctx, void *_fence, unsigned flags)
{
   struct st_context *st = dri_context(_ctx)->st;
   struct pipe_context *ctx = st->pipe;
   struct dri2_fence *fence = (struct dri2_fence*)_fence;

   /* We might be called here with a NULL fence as a result of WaitSyncKHR
    * on an EGL_KHR_reusable_sync fence. Nothing to do here in that case.
    */
   if (!fence)
      return;

   /* Wait for glthread to finish because we can't use pipe_context from
    * multiple threads.
    */
   _mesa_glthread_finish(st->ctx);

   if (ctx->fence_server_sync)
      ctx->fence_server_sync(ctx, fence->pipe_fence);
}

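/* Version 2 of __DRI2_FENCE added the native fence-fd entrypoints
 * (get_capabilities, create_fence_fd, get_fence_fd).
 */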
const __DRI2fenceExtension dri2FenceExtension = {
   .base = { __DRI2_FENCE, 2 },

   .create_fence = dri2_create_fence,
   .get_fence_from_cl_event = dri2_get_fence_from_cl_event,
   .destroy_fence = dri2_destroy_fence,
   .client_wait_sync = dri2_client_wait_sync,
   .server_wait_sync = dri2_server_wait_sync,
   .get_capabilities = dri2_fence_get_caps,
   .create_fence_fd = dri2_create_fence_fd,
   .get_fence_fd = dri2_get_fence_fd,
};

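/* Resolve an EGLImage handle through the loader's lookupEGLImage
 * callback. Returns NULL if the loader lacks the callback or the handle
 * does not name a valid EGLImage.
 */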
__DRIimage *
dri2_lookup_egl_image(struct dri_screen *screen, void *handle)
{
   const __DRIimageLookupExtension *loader = screen->dri2.image;
   __DRIimage *img;

   if (!loader->lookupEGLImage)
      return NULL;

   img = loader->lookupEGLImage(opaque_dri_screen(screen),
                                handle, screen->loaderPrivate);

   return img;
}

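/* Unlike dri2_lookup_egl_image, the two helpers below do not check for
 * missing callbacks; callers must only use them with a loader whose
 * image-lookup extension provides validateEGLImage and
 * lookupEGLImageValidated.
 */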
bool
dri2_validate_egl_image(struct dri_screen *screen, void *handle)
{
   const __DRIimageLookupExtension *loader = screen->dri2.image;

   return loader->validateEGLImage(handle, screen->loaderPrivate);
}

__DRIimage *
dri2_lookup_egl_image_validated(struct dri_screen *screen, void *handle)
{
   const __DRIimageLookupExtension *loader = screen->dri2.image;

   return loader->lookupEGLImageValidated(handle, screen->loaderPrivate);
}

__DRIimage *
dri2_create_image_from_renderbuffer2(__DRIcontext *context,
                                     int renderbuffer, void *loaderPrivate,
                                     unsigned *error)
{
   struct dri_context *dri_ctx = dri_context(context);
   struct st_context *st = dri_ctx->st;
   struct gl_context *ctx = st->ctx;
   struct pipe_context *p_ctx = st->pipe;
   struct gl_renderbuffer *rb;
   struct pipe_resource *tex;
   __DRIimage *img;

   /* Wait for glthread to finish to get up-to-date GL object lookups. */
   _mesa_glthread_finish(st->ctx);

   /* Section 3.9 (EGLImage Specification and Management) of the EGL 1.5
    * specification says:
    *
    *   "If target is EGL_GL_RENDERBUFFER and buffer is not the name of a
    *    renderbuffer object, or if buffer is the name of a multisampled
    *    renderbuffer object, the error EGL_BAD_PARAMETER is generated."
    *
    *   "If target is EGL_GL_TEXTURE_2D, EGL_GL_TEXTURE_CUBE_MAP_*,
    *    EGL_GL_RENDERBUFFER or EGL_GL_TEXTURE_3D and buffer refers to the
    *    default GL texture object (0) for the corresponding GL target, the
    *    error EGL_BAD_PARAMETER is generated."
    *
    * (We rely on _mesa_lookup_renderbuffer returning NULL in this case.)
    */
   rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
   if (!rb || rb->NumSamples > 0) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   tex = rb->texture;
   if (!tex) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   img = CALLOC_STRUCT(__DRIimageRec);
   if (!img) {
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
      return NULL;
   }

   img->dri_format = tex->format;
   img->internal_format = rb->InternalFormat;
   img->loader_private = loaderPrivate;
   img->screen = dri_ctx->screen;
   img->in_fence_fd = -1;

   pipe_resource_reference(&img->texture, tex);

   /* If the resource supports EGL_MESA_image_dma_buf_export, make sure that
    * it's in a shareable state. Do this now while we still have access to
    * the context.
    */
   if (dri2_get_mapping_by_format(img->dri_format))
      p_ctx->flush_resource(p_ctx, tex);

   ctx->Shared->HasExternallySharedImages = true;
   *error = __DRI_IMAGE_ERROR_SUCCESS;
   return img;
}

__DRIimage *
dri2_create_image_from_renderbuffer(__DRIcontext *context,
                                    int renderbuffer, void *loaderPrivate)
{
   unsigned error;
   return dri2_create_image_from_renderbuffer2(context, renderbuffer,
                                               loaderPrivate, &error);
}

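/* Free a __DRIimage. The loader is notified first (version-gated, since
 * destroyLoaderImageState is a later addition to both loader extensions)
 * so it can release whatever it attached to loader_private.
 */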
void
dri2_destroy_image(__DRIimage *img)
{
   const __DRIimageLoaderExtension *imgLoader = img->screen->image.loader;
   const __DRIdri2LoaderExtension *dri2Loader = img->screen->dri2.loader;

   if (imgLoader && imgLoader->base.version >= 4 &&
       imgLoader->destroyLoaderImageState) {
      imgLoader->destroyLoaderImageState(img->loader_private);
   } else if (dri2Loader && dri2Loader->base.version >= 5 &&
              dri2Loader->destroyLoaderImageState) {
      dri2Loader->destroyLoaderImageState(img->loader_private);
   }

   pipe_resource_reference(&img->texture, NULL);

   if (img->in_fence_fd != -1)
      close(img->in_fence_fd);

   FREE(img);
}

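/* Create a __DRIimage from a GL texture (EGL_KHR_gl_texture_2D_image and
 * friends). For cube maps, "depth" selects the face; for 3D textures it
 * selects the layer. The texture must be complete and level/depth must
 * be in range before the underlying gallium resource is wrapped.
 */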
__DRIimage *
dri2_create_from_texture(__DRIcontext *context, int target, unsigned texture,
                         int depth, int level, unsigned *error,
                         void *loaderPrivate)
{
   __DRIimage *img;
   struct dri_context *dri_ctx = dri_context(context);
   struct st_context *st = dri_ctx->st;
   struct gl_context *ctx = st->ctx;
   struct pipe_context *p_ctx = st->pipe;
   struct gl_texture_object *obj;
   struct gl_texture_image *glimg;
   GLuint face = 0;

   /* Wait for glthread to finish to get up-to-date GL object lookups. */
   _mesa_glthread_finish(st->ctx);

   obj = _mesa_lookup_texture(ctx, texture);
   if (!obj || obj->Target != target) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (target == GL_TEXTURE_CUBE_MAP)
      face = depth;

   _mesa_test_texobj_completeness(ctx, obj);
   if (!obj->_BaseComplete || (level > 0 && !obj->_MipmapComplete)) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (level < obj->Attrib.BaseLevel || level > obj->_MaxLevel) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   glimg = obj->Image[face][level];
   if (!glimg || !glimg->pt) {
      *error = __DRI_IMAGE_ERROR_BAD_PARAMETER;
      return NULL;
   }

   if (target == GL_TEXTURE_3D && glimg->Depth < depth) {
      *error = __DRI_IMAGE_ERROR_BAD_MATCH;
      return NULL;
   }

   img = CALLOC_STRUCT(__DRIimageRec);
   if (!img) {
      *error = __DRI_IMAGE_ERROR_BAD_ALLOC;
      return NULL;
   }

   img->level = level;
   img->layer = depth;
   img->in_fence_fd = -1;
   img->dri_format = glimg->pt->format;
   img->internal_format = glimg->InternalFormat;

   img->loader_private = loaderPrivate;
   img->screen = dri_ctx->screen;

   pipe_resource_reference(&img->texture, glimg->pt);

   /* If the resource supports EGL_MESA_image_dma_buf_export, make sure that
    * it's in a shareable state. Do this now while we still have access to
    * the context.
    */
   if (dri2_get_mapping_by_format(img->dri_format))
      p_ctx->flush_resource(p_ctx, glimg->pt);

   ctx->Shared->HasExternallySharedImages = true;
   *error = __DRI_IMAGE_ERROR_SUCCESS;
   return img;
}

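/* Mapping between DRM fourccs, DRI image formats and gallium pipe formats.
 * Each plane entry is { buffer_index, width_shift, height_shift,
 * dri_format } (see struct dri2_format_mapping in dri_helpers.h): the
 * shifts give the log2 subsampling of the plane relative to the full
 * image. For example, NV12's chroma plane { 1, 1, 1, GR88 } lives in the
 * second buffer at half width and half height.
 */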
static const struct dri2_format_mapping dri2_format_table[] = {
   { DRM_FORMAT_ABGR16161616F, __DRI_IMAGE_FORMAT_ABGR16161616F,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R16G16B16A16_FLOAT, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR16161616F } } },
   { DRM_FORMAT_XBGR16161616F, __DRI_IMAGE_FORMAT_XBGR16161616F,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_R16G16B16X16_FLOAT, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR16161616F } } },
   { DRM_FORMAT_ABGR16161616, __DRI_IMAGE_FORMAT_ABGR16161616,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R16G16B16A16_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
   { DRM_FORMAT_XBGR16161616, __DRI_IMAGE_FORMAT_XBGR16161616,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_R16G16B16X16_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR16161616 } } },
   { DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_B10G10R10A2_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB2101010 } } },
   { DRM_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XRGB2101010,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_B10G10R10X2_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB2101010 } } },
   { DRM_FORMAT_ABGR2101010, __DRI_IMAGE_FORMAT_ABGR2101010,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R10G10B10A2_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR2101010 } } },
   { DRM_FORMAT_XBGR2101010, __DRI_IMAGE_FORMAT_XBGR2101010,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_R10G10B10X2_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR2101010 } } },
   { DRM_FORMAT_ARGB8888, __DRI_IMAGE_FORMAT_ARGB8888,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_BGRA8888_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB8888 } } },
   { DRM_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_ABGR8888,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_RGBA8888_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888 } } },
   { __DRI_IMAGE_FOURCC_SARGB8888, __DRI_IMAGE_FORMAT_SARGB8,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_BGRA8888_SRGB, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_SARGB8 } } },
   { DRM_FORMAT_XRGB8888, __DRI_IMAGE_FORMAT_XRGB8888,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_BGRX8888_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XRGB8888 } } },
   { DRM_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_XBGR8888,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_RGBX8888_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888 } } },
   { DRM_FORMAT_ARGB1555, __DRI_IMAGE_FORMAT_ARGB1555,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_B5G5R5A1_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB1555 } } },
   { DRM_FORMAT_ABGR1555, __DRI_IMAGE_FORMAT_ABGR1555,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R5G5B5A1_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR1555 } } },
   { DRM_FORMAT_ARGB4444, __DRI_IMAGE_FORMAT_ARGB4444,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_B4G4R4A4_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ARGB4444 } } },
   { DRM_FORMAT_ABGR4444, __DRI_IMAGE_FORMAT_ABGR4444,
     __DRI_IMAGE_COMPONENTS_RGBA, PIPE_FORMAT_R4G4B4A4_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR4444 } } },
   { DRM_FORMAT_RGB565, __DRI_IMAGE_FORMAT_RGB565,
     __DRI_IMAGE_COMPONENTS_RGB, PIPE_FORMAT_B5G6R5_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_RGB565 } } },
   { DRM_FORMAT_R8, __DRI_IMAGE_FORMAT_R8,
     __DRI_IMAGE_COMPONENTS_R, PIPE_FORMAT_R8_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_R16, __DRI_IMAGE_FORMAT_R16,
     __DRI_IMAGE_COMPONENTS_R, PIPE_FORMAT_R16_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 } } },
   { DRM_FORMAT_GR88, __DRI_IMAGE_FORMAT_GR88,
     __DRI_IMAGE_COMPONENTS_RG, PIPE_FORMAT_RG88_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 } } },
   { DRM_FORMAT_GR1616, __DRI_IMAGE_FORMAT_GR1616,
     __DRI_IMAGE_COMPONENTS_RG, PIPE_FORMAT_RG1616_UNORM, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616 } } },

   { DRM_FORMAT_YUV410, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YUV411, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YUV420, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YUV422, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YUV444, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8 } } },

   { DRM_FORMAT_YVU410, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 2, 2, __DRI_IMAGE_FORMAT_R8 },
       { 1, 2, 2, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YVU411, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 2, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 2, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YVU420, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 1, 1, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YVU422, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 1, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_R8 } } },
   { DRM_FORMAT_YVU444, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_U_V, PIPE_FORMAT_IYUV, 3,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 2, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 0, 0, __DRI_IMAGE_FORMAT_R8 } } },

   { DRM_FORMAT_NV12, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_NV12, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88 } } },
   { DRM_FORMAT_NV21, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_NV21, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR88 } } },

   { DRM_FORMAT_P010, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_P010, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR1616 } } },
   { DRM_FORMAT_P012, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_P012, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR1616 } } },
   { DRM_FORMAT_P016, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_P016, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR1616 } } },
   { DRM_FORMAT_P030, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_P030, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R16 },
       { 1, 1, 1, __DRI_IMAGE_FORMAT_GR1616 } } },

   { DRM_FORMAT_NV16, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UV, PIPE_FORMAT_NV12, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_R8 },
       { 1, 1, 0, __DRI_IMAGE_FORMAT_GR88 } } },

   { DRM_FORMAT_AYUV, __DRI_IMAGE_FORMAT_ABGR8888,
     __DRI_IMAGE_COMPONENTS_AYUV, PIPE_FORMAT_AYUV, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR8888 } } },
   { DRM_FORMAT_XYUV8888, __DRI_IMAGE_FORMAT_XBGR8888,
     __DRI_IMAGE_COMPONENTS_XYUV, PIPE_FORMAT_XYUV, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_XBGR8888 } } },

   { DRM_FORMAT_Y410, __DRI_IMAGE_FORMAT_ABGR2101010,
     __DRI_IMAGE_COMPONENTS_AYUV, PIPE_FORMAT_Y410, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR2101010 } } },

   /* Y412 is an unusual format. It has the same layout as Y416 (i.e.,
    * 16 bits of physical storage per channel), but the low 4 bits of each
    * component are unused padding. The writer is supposed to write zeros
    * to these bits.
    */
   { DRM_FORMAT_Y412, __DRI_IMAGE_FORMAT_ABGR16161616,
     __DRI_IMAGE_COMPONENTS_AYUV, PIPE_FORMAT_Y412, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
   { DRM_FORMAT_Y416, __DRI_IMAGE_FORMAT_ABGR16161616,
     __DRI_IMAGE_COMPONENTS_AYUV, PIPE_FORMAT_Y416, 1,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },

   /* For YUYV and UYVY buffers, we set up two overlapping DRI images
    * and treat them as planar buffers in the compositors.
    * Plane 0 is GR88 and samples YU or YV pairs and places Y into
    * the R component, while plane 1 is ARGB/ABGR and samples YUYV/UYVY
    * clusters and places U into the G component and V into A. This lets
    * the texture sampler interpolate the Y components correctly when
    * sampling from plane 0, and interpolate U and V correctly when
    * sampling from plane 1. */
   { DRM_FORMAT_YUYV, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_YUYV, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888 } } },
   { DRM_FORMAT_YVYU, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_YVYU, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ARGB8888 } } },
   { DRM_FORMAT_UYVY, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UXVX, PIPE_FORMAT_UYVY, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888 } } },
   { DRM_FORMAT_VYUY, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_UXVX, PIPE_FORMAT_VYUY, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR88 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR8888 } } },

   /* The Y21x formats work in a similar fashion to the YUYV and UYVY
    * formats.
    */
   { DRM_FORMAT_Y210, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_Y210, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
   /* Y212 is an unusual format. It has the same layout as Y216 (i.e.,
    * 16 bits of physical storage per channel), but the low 4 bits of each
    * component are unused padding. The writer is supposed to write zeros
    * to these bits.
    */
   { DRM_FORMAT_Y212, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_Y212, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
   { DRM_FORMAT_Y216, __DRI_IMAGE_FORMAT_NONE,
     __DRI_IMAGE_COMPONENTS_Y_XUXV, PIPE_FORMAT_Y216, 2,
     { { 0, 0, 0, __DRI_IMAGE_FORMAT_GR1616 },
       { 0, 1, 0, __DRI_IMAGE_FORMAT_ABGR16161616 } } },
};

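/* Lookup helpers over dri2_format_table. The table is small, so linear
 * scans are sufficient here.
 */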
const struct dri2_format_mapping *
dri2_get_mapping_by_fourcc(int fourcc)
{
   for (unsigned i = 0; i < ARRAY_SIZE(dri2_format_table); i++) {
      if (dri2_format_table[i].dri_fourcc == fourcc)
         return &dri2_format_table[i];
   }

   return NULL;
}

const struct dri2_format_mapping *
dri2_get_mapping_by_format(int format)
{
   if (format == __DRI_IMAGE_FORMAT_NONE)
      return NULL;

   for (unsigned i = 0; i < ARRAY_SIZE(dri2_format_table); i++) {
      if (dri2_format_table[i].dri_format == format)
         return &dri2_format_table[i];
   }

   return NULL;
}

enum pipe_format
dri2_get_pipe_format_for_dri_format(int format)
{
   for (unsigned i = 0; i < ARRAY_SIZE(dri2_format_table); i++) {
      if (dri2_format_table[i].dri_format == format)
         return dri2_format_table[i].pipe_format;
   }

   return PIPE_FORMAT_NONE;
}

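/* A YUV dma-buf may be usable even when its combined pipe format is not
 * directly supported: it is enough that each plane's format can be
 * sampled, since the planes are then bound as separate sampler views.
 */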
bool
dri2_yuv_dma_buf_supported(struct dri_screen *screen,
                           const struct dri2_format_mapping *map)
{
   struct pipe_screen *pscreen = screen->base.screen;

   for (unsigned i = 0; i < map->nplanes; i++) {
      if (!pscreen->is_format_supported(pscreen,
            dri2_get_pipe_format_for_dri_format(map->planes[i].dri_format),
            screen->target, 0, 0, PIPE_BIND_SAMPLER_VIEW))
         return false;
   }
   return true;
}

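/* Report the dma-buf fourccs this screen can import. With max == 0,
 * nothing is written and *count returns the total number of supported
 * fourccs; otherwise up to "max" fourccs are written to "formats" and
 * *count tells how many.
 */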
bool
dri2_query_dma_buf_formats(__DRIscreen *_screen, int max, int *formats,
                           int *count)
{
   struct dri_screen *screen = dri_screen(_screen);
   struct pipe_screen *pscreen = screen->base.screen;
   int i, j;

   for (i = 0, j = 0; (i < ARRAY_SIZE(dri2_format_table)) &&
        (j < max || max == 0); i++) {
      const struct dri2_format_mapping *map = &dri2_format_table[i];

      /* The sRGB format is not a real FourCC as defined by drm_fourcc.h,
       * so we must not leak it out to clients. */
      if (dri2_format_table[i].dri_fourcc == __DRI_IMAGE_FOURCC_SARGB8888)
         continue;

      if (pscreen->is_format_supported(pscreen, map->pipe_format,
                                       screen->target, 0, 0,
                                       PIPE_BIND_RENDER_TARGET) ||
          pscreen->is_format_supported(pscreen, map->pipe_format,
                                       screen->target, 0, 0,
                                       PIPE_BIND_SAMPLER_VIEW) ||
          dri2_yuv_dma_buf_supported(screen, map)) {
         if (j < max)
            formats[j] = map->dri_fourcc;
         j++;
      }
   }
   *count = j;
   return true;
}

/* vim: set sw=3 ts=8 sts=3 expandtab: */