/*
 * Copyright 2014 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

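/*
 * Describes how a format lays out its data in memory: the number of planes,
 * the horizontal/vertical subsampling of each plane relative to the full
 * resolution, and the bytes per pixel of each plane.
 */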
struct planar_layout {
	size_t num_planes;
	int horizontal_subsampling[DRV_MAX_PLANES];
	int vertical_subsampling[DRV_MAX_PLANES];
	int bytes_per_pixel[DRV_MAX_PLANES];
};

// clang-format off

static const struct planar_layout packed_1bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 1 }
};

static const struct planar_layout packed_2bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 2 }
};

static const struct planar_layout packed_3bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 3 }
};

static const struct planar_layout packed_4bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 4 }
};

static const struct planar_layout packed_8bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 8 }
};

static const struct planar_layout biplanar_yuv_420_layout = {
	.num_planes = 2,
	.horizontal_subsampling = { 1, 2 },
	.vertical_subsampling = { 1, 2 },
	.bytes_per_pixel = { 1, 2 }
};

static const struct planar_layout triplanar_yuv_420_layout = {
	.num_planes = 3,
	.horizontal_subsampling = { 1, 2, 2 },
	.vertical_subsampling = { 1, 2, 2 },
	.bytes_per_pixel = { 1, 1, 1 }
};

static const struct planar_layout biplanar_yuv_p010_layout = {
	.num_planes = 2,
	.horizontal_subsampling = { 1, 2 },
	.vertical_subsampling = { 1, 2 },
	.bytes_per_pixel = { 2, 4 }
};

// clang-format on

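/*
 * Maps a DRM fourcc format to its planar layout. Returns NULL (after logging)
 * for formats not handled below.
 */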
static const struct planar_layout *layout_from_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_BGR233:
	case DRM_FORMAT_C8:
	case DRM_FORMAT_R8:
	case DRM_FORMAT_RGB332:
		return &packed_1bpp_layout;

	case DRM_FORMAT_R16:
		return &packed_2bpp_layout;

	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return &triplanar_yuv_420_layout;

	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		return &biplanar_yuv_420_layout;

	case DRM_FORMAT_P010:
		return &biplanar_yuv_p010_layout;

	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_ABGR4444:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_BGRA4444:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_BGRX4444:
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_GR88:
	case DRM_FORMAT_RG88:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_RGBA4444:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_RGBX4444:
	case DRM_FORMAT_RGBX5551:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_XBGR4444:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_MTISP_SXYZW10:
		return &packed_2bpp_layout;

	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_RGB888:
		return &packed_3bpp_layout;

	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_AYUV:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XRGB8888:
		return &packed_4bpp_layout;

	case DRM_FORMAT_ABGR16161616F:
		return &packed_8bpp_layout;

	default:
		drv_log("UNKNOWN FORMAT %u\n", format);
		return NULL;
	}
}

size_t drv_num_planes_from_format(uint32_t format)
{
	const struct planar_layout *layout = layout_from_format(format);

	/*
	 * drv_bo_new calls this function early to query the number of planes
	 * and considers 0 planes to mean an unknown format, so we have to
	 * support that. All other layout_from_format() callers can assume
	 * that the format is supported and that the return value is non-NULL.
	 */

	return layout ? layout->num_planes : 0;
}

size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier)
{
	size_t planes = drv_num_planes_from_format(format);

	/* Disallow unsupported formats. */
	if (!planes)
		return 0;

	if (drv->backend->num_planes_from_modifier && modifier != DRM_FORMAT_MOD_INVALID)
		return drv->backend->num_planes_from_modifier(drv, format, modifier);

	return planes;
}

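/* Returns the height of the given plane: the buffer height divided by the
 * plane's vertical subsampling factor, rounded up. */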
uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);

	assert(plane < layout->num_planes);

	return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]);
}

uint32_t drv_vertical_subsampling_from_format(uint32_t format, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);

	assert(plane < layout->num_planes);

	return layout->vertical_subsampling[plane];
}

uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);

	assert(plane < layout->num_planes);

	return layout->bytes_per_pixel[plane];
}

/*
 * This function returns the stride for a given format, width and plane.
 */
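/*
 * For example, given the biplanar_yuv_420_layout above (NV12/NV21) and a
 * width of 1920, the chroma plane has plane_width = DIV_ROUND_UP(1920, 2) =
 * 960 and a stride of 960 * 2 = 1920 bytes.
 */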
uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);
	assert(plane < layout->num_planes);

	uint32_t plane_width = DIV_ROUND_UP(width, layout->horizontal_subsampling[plane]);
	uint32_t stride = plane_width * layout->bytes_per_pixel[plane];

	/*
	 * The stride of Android YV12 buffers is required to be aligned to 16 bytes
	 * (see <system/graphics.h>). Since the chroma stride is half the luma
	 * stride, the luma plane is aligned to 32 bytes.
	 */
	if (format == DRM_FORMAT_YVU420_ANDROID)
		stride = (plane == 0) ? ALIGN(stride, 32) : ALIGN(stride, 16);

	return stride;
}

uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
{
	return stride * drv_height_from_format(format, height, plane);
}

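/*
 * For the chroma planes of three-plane YUV 4:2:0 formats, the stride is half
 * the luma stride; otherwise the stride is returned unchanged.
 */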
static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
{
	if (plane != 0) {
		switch (format) {
		case DRM_FORMAT_YVU420:
		case DRM_FORMAT_YVU420_ANDROID:
			stride = DIV_ROUND_UP(stride, 2);
			break;
		}
	}

	return stride;
}

/*
 * This function fills in the buffer object given the driver-aligned stride of
 * the first plane, the aligned height and the format. This function assumes
 * there is just one kernel buffer per buffer object.
 */
int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format)
{
	uint32_t padding[DRV_MAX_PLANES] = { 0 };
	return drv_bo_from_format_and_padding(bo, stride, aligned_height, format, padding);
}

int drv_bo_from_format_and_padding(struct bo *bo, uint32_t stride, uint32_t aligned_height,
				   uint32_t format, uint32_t padding[DRV_MAX_PLANES])
{
	size_t p, num_planes;
	uint32_t offset = 0;

	num_planes = drv_num_planes_from_format(format);
	assert(num_planes);

	/*
	 * HAL_PIXEL_FORMAT_YV12 requires that (see <system/graphics.h>):
	 *  - the aligned height is the same as the buffer's height.
	 *  - the chroma stride is aligned to 16 bytes, i.e., the luma stride
	 *    is aligned to 32 bytes.
	 */
	if (format == DRM_FORMAT_YVU420_ANDROID) {
		assert(aligned_height == bo->meta.height);
		assert(stride == ALIGN(stride, 32));
	}

	for (p = 0; p < num_planes; p++) {
		bo->meta.strides[p] = subsample_stride(stride, format, p);
		bo->meta.sizes[p] =
		    drv_size_from_format(format, bo->meta.strides[p], aligned_height, p) +
		    padding[p];
		bo->meta.offsets[p] = offset;
		offset += bo->meta.sizes[p];
	}

	bo->meta.total_size = offset;
	return 0;
}

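/*
 * Allocates a dumb buffer large enough to hold all planes of the given
 * format, after applying the per-format width/height adjustments below
 * (e.g. 1.5x the height for 4:2:0 formats).
 */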
int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
			  uint64_t use_flags, uint64_t quirks)
{
	int ret;
	size_t plane;
	uint32_t aligned_width, aligned_height;
	struct drm_mode_create_dumb create_dumb;

	aligned_width = width;
	aligned_height = height;
	switch (format) {
	case DRM_FORMAT_R16:
		/* HAL_PIXEL_FORMAT_Y16 requires that the buffer's width be 16 pixel
		 * aligned. See hardware/interfaces/graphics/common/1.0/types.hal. */
		aligned_width = ALIGN(width, 16);
		break;
	case DRM_FORMAT_YVU420_ANDROID:
		/* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not
		 * be aligned. Update 'height' so that drv_bo_from_format below
		 * uses the non-aligned height. */
		height = bo->meta.height;

		/* Align width to 32 pixels, so chroma strides are 16 bytes as
		 * Android requires. */
		aligned_width = ALIGN(width, 32);

		/* Adjust the height to include room for chroma planes. */
		aligned_height = 3 * DIV_ROUND_UP(height, 2);
		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		/* Adjust the height to include room for chroma planes */
		aligned_height = 3 * DIV_ROUND_UP(height, 2);
		break;
	default:
		break;
	}

	memset(&create_dumb, 0, sizeof(create_dumb));
	if (quirks & BO_QUIRK_DUMB32BPP) {
		aligned_width =
		    DIV_ROUND_UP(aligned_width * layout_from_format(format)->bytes_per_pixel[0], 4);
		create_dumb.bpp = 32;
	} else {
		create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
	}
	create_dumb.width = aligned_width;
	create_dumb.height = aligned_height;
	create_dumb.flags = 0;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_CREATE_DUMB failed (%d, %d)\n", bo->drv->fd, errno);
		return -errno;
	}

	drv_bo_from_format(bo, create_dumb.pitch, height, format);

	for (plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = create_dumb.handle;

	bo->meta.total_size = create_dumb.size;
	return 0;
}

int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
		       uint64_t use_flags)
{
	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_NONE);
}

int drv_dumb_bo_destroy(struct bo *bo)
{
	struct drm_mode_destroy_dumb destroy_dumb;
	int ret;

	memset(&destroy_dumb, 0, sizeof(destroy_dumb));
	destroy_dumb.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
		return -errno;
	}

	return 0;
}

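/* Closes each plane's GEM handle, closing handles shared by several planes
 * only once. */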
int drv_gem_bo_destroy(struct bo *bo)
{
	struct drm_gem_close gem_close;
	int ret, error = 0;
	size_t plane, i;

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		for (i = 0; i < plane; i++)
			if (bo->handles[i].u32 == bo->handles[plane].u32)
				break;
		/* Make sure close hasn't already been called on this handle */
		if (i != plane)
			continue;

		memset(&gem_close, 0, sizeof(gem_close));
		gem_close.handle = bo->handles[plane].u32;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
		if (ret) {
			drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
				bo->handles[plane].u32, ret);
			error = -errno;
		}
	}

	return error;
}

int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct drm_prime_handle prime_handle;

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		memset(&prime_handle, 0, sizeof(prime_handle));
		prime_handle.fd = data->fds[plane];

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);

		if (ret) {
			drv_log("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd);

			/*
			 * Need to call GEM close on planes that were opened,
			 * if any. Set num_planes to the plane that failed so
			 * that drv_gem_bo_destroy() only closes the planes
			 * imported before it.
			 */
			bo->meta.num_planes = plane;
			drv_gem_bo_destroy(bo);
			return -errno;
		}

		bo->handles[plane].u32 = prime_handle.handle;
	}

	return 0;
}

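/*
 * Maps a dumb buffer. The mapping covers every plane that shares the
 * requested plane's handle, so their sizes are added to the vma length.
 */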
void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	size_t i;
	struct drm_mode_map_dumb map_dumb;

	memset(&map_dumb, 0, sizeof(map_dumb));
	map_dumb.handle = bo->handles[plane].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_MAP_DUMB failed\n");
		return MAP_FAILED;
	}

	for (i = 0; i < bo->meta.num_planes; i++)
		if (bo->handles[i].u32 == bo->handles[plane].u32)
			vma->length += bo->meta.sizes[i];

	return mmap(0, vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    map_dumb.offset);
}

int drv_bo_munmap(struct bo *bo, struct vma *vma)
{
	return munmap(vma->addr, vma->length);
}

int drv_mapping_destroy(struct bo *bo)
{
	int ret;
	size_t plane;
	struct mapping *mapping;
	uint32_t idx;

	/*
	 * This function is called right before the buffer is destroyed. It will free any mappings
	 * associated with the buffer.
	 */

	idx = 0;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		while (idx < drv_array_size(bo->drv->mappings)) {
			mapping = (struct mapping *)drv_array_at_idx(bo->drv->mappings, idx);
			if (mapping->vma->handle != bo->handles[plane].u32) {
				idx++;
				continue;
			}

			if (!--mapping->vma->refcount) {
				ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
				if (ret) {
					drv_log("munmap failed\n");
					return ret;
				}

				free(mapping->vma);
			}

			/* This shrinks and shifts the array, so don't increment idx. */
			drv_array_remove(bo->drv->mappings, idx);
		}
	}

	return 0;
}

int drv_get_prot(uint32_t map_flags)
{
	return (BO_MAP_WRITE & map_flags) ? PROT_WRITE | PROT_READ : PROT_READ;
}

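/*
 * The driver's buffer_table maps a GEM handle to its reference count; the
 * helpers below look up and update that count.
 */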
uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	void *count;
	uintptr_t num = 0;

	if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, &count))
		num = (uintptr_t)(count);

	return num;
}

void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	uintptr_t num = drv_get_reference_count(drv, bo, plane);

	/* If a value isn't in the table, drmHashDelete is a no-op */
	drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
	drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
}

void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	uintptr_t num = drv_get_reference_count(drv, bo, plane);

	drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

	if (num > 0)
		drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num - 1));
}

void drv_add_combination(struct driver *drv, const uint32_t format,
			 struct format_metadata *metadata, uint64_t use_flags)
{
	struct combination combo = { .format = format,
				     .metadata = *metadata,
				     .use_flags = use_flags };

	drv_array_append(drv->combos, &combo);
}

void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
			  struct format_metadata *metadata, uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		struct combination combo = { .format = formats[i],
					     .metadata = *metadata,
					     .use_flags = use_flags };

		drv_array_append(drv->combos, &combo);
	}
}

void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
			    uint64_t use_flags)
{
	uint32_t i;
	struct combination *combo;
	/* Attempts to add the specified flags to an existing combination. */
	for (i = 0; i < drv_array_size(drv->combos); i++) {
		combo = (struct combination *)drv_array_at_idx(drv->combos, i);
		if (combo->format == format && combo->metadata.tiling == metadata->tiling &&
		    combo->metadata.modifier == metadata->modifier)
			combo->use_flags |= use_flags;
	}
}

int drv_modify_linear_combinations(struct driver *drv)
{
	/*
	 * All current drivers can scanout linear XRGB8888/ARGB8888 as a primary
	 * plane and as a cursor.
	 */
	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
			       BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
			       BO_USE_CURSOR | BO_USE_SCANOUT);
	return 0;
}

/*
 * Pick the best modifier from modifiers, according to the ordering
 * given by modifier_order.
 */
uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
			   const uint64_t *modifier_order, uint32_t order_count)
{
	uint32_t i, j;

	for (i = 0; i < order_count; i++) {
		for (j = 0; j < count; j++) {
			if (modifiers[j] == modifier_order[i]) {
				return modifiers[j];
			}
		}
	}

	return DRM_FORMAT_MOD_LINEAR;
}

/*
 * Search a list of modifiers to see if a given modifier is present
 */
bool drv_has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		if (list[i] == modifier)
			return true;

	return false;
}