/*
 * Copyright 2014 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

struct planar_layout {
	size_t num_planes;
	int horizontal_subsampling[DRV_MAX_PLANES];
	int vertical_subsampling[DRV_MAX_PLANES];
	int bytes_per_pixel[DRV_MAX_PLANES];
};

// clang-format off

static const struct planar_layout packed_1bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 1 }
};

static const struct planar_layout packed_2bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 2 }
};

static const struct planar_layout packed_3bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 3 }
};

static const struct planar_layout packed_4bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 4 }
};

static const struct planar_layout packed_8bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 8 }
};

static const struct planar_layout biplanar_yuv_420_layout = {
	.num_planes = 2,
	.horizontal_subsampling = { 1, 2 },
	.vertical_subsampling = { 1, 2 },
	.bytes_per_pixel = { 1, 2 }
};

static const struct planar_layout triplanar_yuv_420_layout = {
	.num_planes = 3,
	.horizontal_subsampling = { 1, 2, 2 },
	.vertical_subsampling = { 1, 2, 2 },
	.bytes_per_pixel = { 1, 1, 1 }
};

static const struct planar_layout biplanar_yuv_p010_layout = {
	.num_planes = 2,
	.horizontal_subsampling = { 1, 2 },
	.vertical_subsampling = { 1, 2 },
	.bytes_per_pixel = { 2, 4 }
};

// clang-format on
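
/*
 * Worked example (illustrative numbers, not used by the code): with
 * biplanar_yuv_420_layout (NV12/NV21) and a nominal 640x480 buffer, plane 0
 * spans 640x480 at 1 byte per pixel and plane 1 spans 320x240 at 2 bytes per
 * pixel (interleaved chroma), so both plane strides are 640 bytes and the
 * plane sizes are 307200 and 153600 bytes before any driver-specific
 * alignment or padding.
 */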

static const struct planar_layout *layout_from_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_BGR233:
	case DRM_FORMAT_C8:
	case DRM_FORMAT_R8:
	case DRM_FORMAT_RGB332:
		return &packed_1bpp_layout;

	case DRM_FORMAT_R16:
		return &packed_2bpp_layout;

	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return &triplanar_yuv_420_layout;

	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		return &biplanar_yuv_420_layout;

	case DRM_FORMAT_P010:
		return &biplanar_yuv_p010_layout;

	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_ABGR4444:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_BGRA4444:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_BGRX4444:
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_GR88:
	case DRM_FORMAT_RG88:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_RGBA4444:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_RGBX4444:
	case DRM_FORMAT_RGBX5551:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_XBGR4444:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_MTISP_SXYZW10:
		return &packed_2bpp_layout;

	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_RGB888:
		return &packed_3bpp_layout;

	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_AYUV:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XRGB8888:
		return &packed_4bpp_layout;

	case DRM_FORMAT_ABGR16161616F:
		return &packed_8bpp_layout;

	default:
		drv_log("UNKNOWN FORMAT %u\n", format);
		return NULL;
	}
}

size_t drv_num_planes_from_format(uint32_t format)
{
	const struct planar_layout *layout = layout_from_format(format);

	/*
	 * drv_bo_new calls this function early to query the number of planes
	 * and considers 0 planes to mean an unknown format, so we have to
	 * support that. All other layout_from_format() queries can assume
	 * that the format is supported and that the return value is non-NULL.
	 */

	return layout ? layout->num_planes : 0;
}

size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier)
{
	size_t planes = drv_num_planes_from_format(format);

	/* Disallow unsupported formats. */
	if (!planes)
		return 0;

	if (drv->backend->num_planes_from_modifier && modifier != DRM_FORMAT_MOD_INVALID &&
	    modifier != DRM_FORMAT_MOD_LINEAR)
		return drv->backend->num_planes_from_modifier(drv, format, modifier);

	return planes;
}
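
/*
 * Illustrative note: the backend hook is only consulted for non-linear
 * modifiers. A hypothetical driver whose compressed layout carries an extra
 * metadata plane could, for example, return
 * drv_num_planes_from_format(format) + 1 from its num_planes_from_modifier()
 * callback; linear and invalid modifiers always fall back to the plain
 * per-format plane count.
 */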

uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);

	assert(plane < layout->num_planes);

	return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]);
}

uint32_t drv_vertical_subsampling_from_format(uint32_t format, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);

	assert(plane < layout->num_planes);

	return layout->vertical_subsampling[plane];
}

uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);

	assert(plane < layout->num_planes);

	return layout->bytes_per_pixel[plane];
}

/*
 * This function returns the stride for a given format, width and plane.
 */
uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);
	assert(plane < layout->num_planes);

	uint32_t plane_width = DIV_ROUND_UP(width, layout->horizontal_subsampling[plane]);
	uint32_t stride = plane_width * layout->bytes_per_pixel[plane];

	/*
	 * The stride of Android YV12 buffers is required to be aligned to 16 bytes
	 * (see <system/graphics.h>). Aligning the luma stride to 32 keeps the
	 * half-width chroma strides 16-byte aligned.
	 */
	if (format == DRM_FORMAT_YVU420_ANDROID)
		stride = (plane == 0) ? ALIGN(stride, 32) : ALIGN(stride, 16);

	return stride;
}
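
/*
 * Worked example (illustrative numbers): for DRM_FORMAT_NV12 at width 641,
 * plane 0 yields DIV_ROUND_UP(641, 1) * 1 = 641 bytes and plane 1 yields
 * DIV_ROUND_UP(641, 2) * 2 = 642 bytes. For DRM_FORMAT_YVU420_ANDROID at the
 * same width, plane 0 is rounded up to ALIGN(641, 32) = 672 bytes and planes
 * 1 and 2 to ALIGN(321, 16) = 336 bytes.
 */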

uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
{
	return stride * drv_height_from_format(format, height, plane);
}

static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
{
	if (plane != 0) {
		switch (format) {
		case DRM_FORMAT_YVU420:
		case DRM_FORMAT_YVU420_ANDROID:
			stride = DIV_ROUND_UP(stride, 2);
			break;
		}
	}

	return stride;
}

/*
 * This function fills in the buffer object given the driver-aligned stride of
 * the first plane, the height and the format. This function assumes there is
 * just one kernel buffer per buffer object.
 */
int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format)
{
	uint32_t padding[DRV_MAX_PLANES] = { 0 };
	return drv_bo_from_format_and_padding(bo, stride, aligned_height, format, padding);
}

int drv_bo_from_format_and_padding(struct bo *bo, uint32_t stride, uint32_t aligned_height,
				   uint32_t format, uint32_t padding[DRV_MAX_PLANES])
{
	size_t p, num_planes;
	uint32_t offset = 0;

	num_planes = drv_num_planes_from_format(format);
	assert(num_planes);

	/*
	 * HAL_PIXEL_FORMAT_YV12 requires that (see <system/graphics.h>):
	 * - the aligned height is the same as the buffer's height.
	 * - the chroma stride is 16-byte aligned, i.e., the luma stride
	 *   is 32-byte aligned.
	 */
	if (format == DRM_FORMAT_YVU420_ANDROID) {
		assert(aligned_height == bo->meta.height);
		assert(stride == ALIGN(stride, 32));
	}

	for (p = 0; p < num_planes; p++) {
		bo->meta.strides[p] = subsample_stride(stride, format, p);
		bo->meta.sizes[p] =
		    drv_size_from_format(format, bo->meta.strides[p], aligned_height, p) +
		    padding[p];
		bo->meta.offsets[p] = offset;
		offset += bo->meta.sizes[p];
	}

	bo->meta.total_size = offset;
	return 0;
}
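
/*
 * Worked example (illustrative numbers): for DRM_FORMAT_NV12 with stride 640,
 * aligned_height 480 and no padding, the loop above produces strides
 * { 640, 640 }, sizes { 307200, 153600 }, offsets { 0, 307200 } and a
 * total_size of 460800 bytes, all laid out in a single kernel buffer.
 */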

int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
			  uint64_t use_flags, uint64_t quirks)
{
	int ret;
	size_t plane;
	uint32_t aligned_width, aligned_height;
	struct drm_mode_create_dumb create_dumb = { 0 };

	aligned_width = width;
	aligned_height = height;
	switch (format) {
	case DRM_FORMAT_R16:
		/* HAL_PIXEL_FORMAT_Y16 requires that the buffer's width be 16 pixel
		 * aligned. See hardware/interfaces/graphics/common/1.0/types.hal. */
		aligned_width = ALIGN(width, 16);
		break;
	case DRM_FORMAT_YVU420_ANDROID:
		/* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
		 * be aligned. Update 'height' so that drv_bo_from_format below
		 * uses the non-aligned height. */
		height = bo->meta.height;

		/* Align width to 32 pixels, so chroma strides are 16 bytes as
		 * Android requires. */
		aligned_width = ALIGN(width, 32);

		/* Adjust the height to include room for chroma planes. */
		aligned_height = 3 * DIV_ROUND_UP(height, 2);
		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_P010:
		/* Adjust the height to include room for chroma planes. */
		aligned_height = 3 * DIV_ROUND_UP(height, 2);
		break;
	default:
		break;
	}

	if (quirks & BO_QUIRK_DUMB32BPP) {
		aligned_width =
		    DIV_ROUND_UP(aligned_width * layout_from_format(format)->bytes_per_pixel[0], 4);
		create_dumb.bpp = 32;
	} else {
		create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
	}
	create_dumb.width = aligned_width;
	create_dumb.height = aligned_height;
	create_dumb.flags = 0;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_CREATE_DUMB failed (%d, %d)\n", bo->drv->fd, errno);
		return -errno;
	}

	drv_bo_from_format(bo, create_dumb.pitch, height, format);

	for (plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = create_dumb.handle;

	bo->meta.total_size = create_dumb.size;
	return 0;
}
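
/*
 * Worked example of the BO_QUIRK_DUMB32BPP path (illustrative numbers): for
 * DRM_FORMAT_NV12 at 640x480, aligned_height becomes 3 * 240 = 720 rows and,
 * with the quirk set, the request is repacked as DIV_ROUND_UP(640 * 1, 4) =
 * 160 pixels at 32 bpp, so a kernel driver whose dumb-buffer ioctl only
 * accepts 32 bpp still ends up returning the expected 640-byte pitch.
 */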

int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
		       uint64_t use_flags)
{
	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_NONE);
}

int drv_dumb_bo_destroy(struct bo *bo)
{
	int ret;
	struct drm_mode_destroy_dumb destroy_dumb = { 0 };

	destroy_dumb.handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
		return -errno;
	}

	return 0;
}

int drv_gem_bo_destroy(struct bo *bo)
{
	struct drm_gem_close gem_close;
	int ret, error = 0;
	size_t plane, i;

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		for (i = 0; i < plane; i++)
			if (bo->handles[i].u32 == bo->handles[plane].u32)
				break;
		/* Make sure close hasn't already been called on this handle */
		if (i != plane)
			continue;

		memset(&gem_close, 0, sizeof(gem_close));
		gem_close.handle = bo->handles[plane].u32;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
		if (ret) {
			drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
				bo->handles[plane].u32, ret);
			error = -errno;
		}
	}

	return error;
}

int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct drm_prime_handle prime_handle;

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		memset(&prime_handle, 0, sizeof(prime_handle));
		prime_handle.fd = data->fds[plane];

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);

		if (ret) {
			drv_log("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd);

			/*
			 * Need to call GEM close on planes that were opened,
			 * if any. Adjust the num_planes variable to be the
			 * plane that failed, so GEM close will be called on
			 * planes before that plane.
			 */
			bo->meta.num_planes = plane;
			drv_gem_bo_destroy(bo);
			return -errno;
		}

		bo->handles[plane].u32 = prime_handle.handle;
	}
	bo->meta.tiling = data->tiling;

	return 0;
}

void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	size_t i;
	struct drm_mode_map_dumb map_dumb;

	memset(&map_dumb, 0, sizeof(map_dumb));
	map_dumb.handle = bo->handles[plane].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_MAP_DUMB failed\n");
		return MAP_FAILED;
	}

	for (i = 0; i < bo->meta.num_planes; i++)
		if (bo->handles[i].u32 == bo->handles[plane].u32)
			vma->length += bo->meta.sizes[i];

	return mmap(0, vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    map_dumb.offset);
}
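
/*
 * Illustrative note: a dumb buffer backs every plane with the same handle, so
 * the loop above accumulates the sizes of all planes sharing handles[plane]
 * into vma->length (e.g. 307200 + 153600 = 460800 bytes for a 640x480 NV12
 * buffer) and the whole buffer is mapped with a single mmap() call; the `+=`
 * assumes the caller starts with vma->length at zero.
 */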

int drv_bo_munmap(struct bo *bo, struct vma *vma)
{
	return munmap(vma->addr, vma->length);
}

int drv_mapping_destroy(struct bo *bo)
{
	int ret;
	size_t plane;
	struct mapping *mapping;
	uint32_t idx;

	/*
	 * This function is called right before the buffer is destroyed. It will free any mappings
	 * associated with the buffer.
	 */

	idx = 0;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		while (idx < drv_array_size(bo->drv->mappings)) {
			mapping = (struct mapping *)drv_array_at_idx(bo->drv->mappings, idx);
			if (mapping->vma->handle != bo->handles[plane].u32) {
				idx++;
				continue;
			}

			if (!--mapping->vma->refcount) {
				ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
				if (ret) {
					drv_log("munmap failed\n");
					return ret;
				}

				free(mapping->vma);
			}

			/* This shrinks and shifts the array, so don't increment idx. */
			drv_array_remove(bo->drv->mappings, idx);
		}
	}

	return 0;
}

int drv_get_prot(uint32_t map_flags)
{
	return (BO_MAP_WRITE & map_flags) ? PROT_WRITE | PROT_READ : PROT_READ;
}

uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	void *count;
	uintptr_t num = 0;

	if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, &count))
		num = (uintptr_t)(count);

	return num;
}

void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	uintptr_t num = drv_get_reference_count(drv, bo, plane);

	/* If a value isn't in the table, drmHashDelete is a no-op */
	drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
	drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
}

void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	uintptr_t num = drv_get_reference_count(drv, bo, plane);

	drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

	if (num > 0)
		drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num - 1));
}
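
/*
 * Usage sketch (hypothetical, not taken from a particular backend): a driver
 * that shares GEM handles across buffer objects can pair these helpers with
 * GEM close so a handle is only released by its last user:
 *
 *	if (drv_get_reference_count(drv, bo, plane) == 0)
 *		drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
 *	else
 *		drv_decrement_reference_count(drv, bo, plane);
 */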

void drv_add_combination(struct driver *drv, const uint32_t format,
			 struct format_metadata *metadata, uint64_t use_flags)
{
	struct combination combo = { .format = format,
				     .metadata = *metadata,
				     .use_flags = use_flags };

	drv_array_append(drv->combos, &combo);
}

void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
			  struct format_metadata *metadata, uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		struct combination combo = { .format = formats[i],
					     .metadata = *metadata,
					     .use_flags = use_flags };

		drv_array_append(drv->combos, &combo);
	}
}

void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
			    uint64_t use_flags)
{
	uint32_t i;
	struct combination *combo;
	/* Attempts to add the specified flags to an existing combination. */
	for (i = 0; i < drv_array_size(drv->combos); i++) {
		combo = (struct combination *)drv_array_at_idx(drv->combos, i);
		if (combo->format == format && combo->metadata.tiling == metadata->tiling &&
		    combo->metadata.modifier == metadata->modifier)
			combo->use_flags |= use_flags;
	}
}

int drv_modify_linear_combinations(struct driver *drv)
{
	/*
	 * All current drivers can scanout linear XRGB8888/ARGB8888 as a primary
	 * plane and as a cursor.
	 */
	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
			       BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
			       BO_USE_CURSOR | BO_USE_SCANOUT);
	return 0;
}

/*
 * Pick the best modifier from modifiers, according to the ordering
 * given by modifier_order.
 */
uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
			   const uint64_t *modifier_order, uint32_t order_count)
{
	uint32_t i, j;

	for (i = 0; i < order_count; i++) {
		for (j = 0; j < count; j++) {
			if (modifiers[j] == modifier_order[i]) {
				return modifiers[j];
			}
		}
	}

	return DRM_FORMAT_MOD_LINEAR;
}
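
/*
 * Example (hypothetical values): with modifier_order = { I915_FORMAT_MOD_Y_TILED,
 * I915_FORMAT_MOD_X_TILED, DRM_FORMAT_MOD_LINEAR } and a client-supplied list
 * { DRM_FORMAT_MOD_LINEAR, I915_FORMAT_MOD_X_TILED }, the earliest preference
 * present in the list, I915_FORMAT_MOD_X_TILED, is returned; if nothing
 * matches, the function falls back to DRM_FORMAT_MOD_LINEAR.
 */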

/*
 * Search a list of modifiers to see if a given modifier is present.
 */
bool drv_has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		if (list[i] == modifier)
			return true;

	return false;
}

/*
 * Map internal fourcc codes back to standard fourcc codes.
 */
uint32_t drv_get_standard_fourcc(uint32_t fourcc_internal)
{
	return (fourcc_internal == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : fourcc_internal;
}

uint32_t drv_resolve_format_helper(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Common camera implementation defined format. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* A common hack: see b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/* Common flexible video format. */
		return DRM_FORMAT_NV12;
	default:
		return format;
	}
}