/*
 * Copyright 2014 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

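/*
 * Describes the memory layout of a pixel format: how many planes it has
 * and, per plane, the horizontal/vertical subsampling factors and the
 * bytes per (subsampled) pixel.  For example, biplanar_yuv_420_layout below
 * models NV12/NV21: a full-resolution 1-byte luma plane plus a
 * half-resolution plane of 2-byte interleaved chroma samples.
 */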
struct planar_layout {
	size_t num_planes;
	int horizontal_subsampling[DRV_MAX_PLANES];
	int vertical_subsampling[DRV_MAX_PLANES];
	int bytes_per_pixel[DRV_MAX_PLANES];
};

// clang-format off

static const struct planar_layout packed_1bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 1 }
};

static const struct planar_layout packed_2bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 2 }
};

static const struct planar_layout packed_3bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 3 }
};

static const struct planar_layout packed_4bpp_layout = {
	.num_planes = 1,
	.horizontal_subsampling = { 1 },
	.vertical_subsampling = { 1 },
	.bytes_per_pixel = { 4 }
};

static const struct planar_layout biplanar_yuv_420_layout = {
	.num_planes = 2,
	.horizontal_subsampling = { 1, 2 },
	.vertical_subsampling = { 1, 2 },
	.bytes_per_pixel = { 1, 2 }
};

static const struct planar_layout triplanar_yuv_420_layout = {
	.num_planes = 3,
	.horizontal_subsampling = { 1, 2, 2 },
	.vertical_subsampling = { 1, 2, 2 },
	.bytes_per_pixel = { 1, 1, 1 }
};

// clang-format on

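/*
 * Returns the planar layout describing the given DRM format, or NULL
 * (after logging) if the format is not handled by these helpers.
 */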
static const struct planar_layout *layout_from_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_BGR233:
	case DRM_FORMAT_C8:
	case DRM_FORMAT_R8:
	case DRM_FORMAT_RGB332:
		return &packed_1bpp_layout;

	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return &triplanar_yuv_420_layout;

	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		return &biplanar_yuv_420_layout;

	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_ABGR4444:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_BGRA4444:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_BGRX4444:
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_GR88:
	case DRM_FORMAT_RG88:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_RGBA4444:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_RGBX4444:
	case DRM_FORMAT_RGBX5551:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_XBGR4444:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
		return &packed_2bpp_layout;

	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_RGB888:
		return &packed_3bpp_layout;

	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_AYUV:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XRGB8888:
		return &packed_4bpp_layout;

	default:
		drv_log("UNKNOWN FORMAT %d\n", format);
		return NULL;
	}
}

size_t drv_num_planes_from_format(uint32_t format)
{
	const struct planar_layout *layout = layout_from_format(format);

	/*
	 * drv_bo_new calls this function early to query number of planes and
	 * considers 0 planes to mean unknown format, so we have to support
	 * that.  All other layout_from_format() queries can assume that the
	 * format is supported and that the return value is non-NULL.
	 */

	return layout ? layout->num_planes : 0;
}

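/*
 * Per-plane height for a format: the buffer height divided by the plane's
 * vertical subsampling, rounded up (e.g. the chroma planes of a 4:2:0
 * format are half the luma height).
 */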
uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);

	assert(plane < layout->num_planes);

	return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]);
}

uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);

	assert(plane < layout->num_planes);

	return layout->bytes_per_pixel[plane];
}

/*
 * This function returns the stride for a given format, width and plane.
 */
uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
{
	const struct planar_layout *layout = layout_from_format(format);
	assert(plane < layout->num_planes);

	uint32_t plane_width = DIV_ROUND_UP(width, layout->horizontal_subsampling[plane]);
	uint32_t stride = plane_width * layout->bytes_per_pixel[plane];

	/*
	 * Android requires the chroma stride of YV12 buffers to be 16-byte
	 * aligned, which in turn forces the luma stride to be 32-byte aligned
	 * (see <system/graphics.h>).
	 */
	if (format == DRM_FORMAT_YVU420_ANDROID)
		stride = (plane == 0) ? ALIGN(stride, 32) : ALIGN(stride, 16);

	return stride;
}

uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
{
	return stride * drv_height_from_format(format, height, plane);
}

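/*
 * The U and V planes of three-plane YUV 4:2:0 formats use half of the luma
 * stride (rounded up); all other formats keep the caller-provided stride.
 */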
static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
{
	if (plane != 0) {
		switch (format) {
		case DRM_FORMAT_YVU420:
		case DRM_FORMAT_YVU420_ANDROID:
			stride = DIV_ROUND_UP(stride, 2);
			break;
		}
	}

	return stride;
}

/*
 * This function fills in the buffer object given the driver aligned stride of
 * the first plane, height and a format. This function assumes there is just
 * one kernel buffer per buffer object.
 */
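/*
 * Worked example (illustrative only): for a 64x64 NV12 buffer with a
 * 64-byte stride, plane 0 keeps stride 64, size 64 * 64 = 4096, offset 0;
 * plane 1 also keeps stride 64 (interleaved CbCr) but spans 32 rows, so
 * size 64 * 32 = 2048, offset 4096; total_size becomes 6144 bytes.
 */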
int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format)
{
	size_t p, num_planes;
	uint32_t offset = 0;

	num_planes = drv_num_planes_from_format(format);
	assert(num_planes);

	/*
	 * HAL_PIXEL_FORMAT_YV12 requires that (see <system/graphics.h>):
	 *  - the aligned height is the same as the buffer's height.
	 *  - the chroma stride is 16-byte aligned, i.e., the luma stride
	 *    is 32-byte aligned.
	 */
	if (format == DRM_FORMAT_YVU420_ANDROID) {
		assert(aligned_height == bo->height);
		assert(stride == ALIGN(stride, 32));
	}

	for (p = 0; p < num_planes; p++) {
		bo->strides[p] = subsample_stride(stride, format, p);
		bo->sizes[p] = drv_size_from_format(format, bo->strides[p], aligned_height, p);
		bo->offsets[p] = offset;
		offset += bo->sizes[p];
	}

	bo->total_size = offset;
	return 0;
}

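/*
 * Generic dumb-buffer allocator: a single DRM dumb buffer backs every plane
 * of the bo.  The height is padded for YUV 4:2:0 formats because the dumb
 * buffer is sized from the first plane's bytes per pixel, so the extra rows
 * are what hold the chroma planes.
 */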
int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
		       uint64_t use_flags)
{
	int ret;
	size_t plane;
	uint32_t aligned_width, aligned_height;
	struct drm_mode_create_dumb create_dumb;

	aligned_width = width;
	aligned_height = height;
	if (format == DRM_FORMAT_YVU420_ANDROID) {
		/*
		 * Align the width to 32 pixels so that the chroma strides are
		 * 16-byte aligned, as Android requires.
		 */
		aligned_width = ALIGN(width, 32);
	}

	if (format == DRM_FORMAT_YVU420_ANDROID || format == DRM_FORMAT_YVU420) {
		aligned_height = 3 * DIV_ROUND_UP(height, 2);
	}

	memset(&create_dumb, 0, sizeof(create_dumb));
	create_dumb.height = aligned_height;
	create_dumb.width = aligned_width;
	create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
	create_dumb.flags = 0;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_CREATE_DUMB failed (%d, %d)\n", bo->drv->fd, errno);
		return ret;
	}

	drv_bo_from_format(bo, create_dumb.pitch, height, format);

	for (plane = 0; plane < bo->num_planes; plane++)
		bo->handles[plane].u32 = create_dumb.handle;

	bo->total_size = create_dumb.size;
	return 0;
}

int drv_dumb_bo_destroy(struct bo *bo)
{
	struct drm_mode_destroy_dumb destroy_dumb;
	int ret;

	memset(&destroy_dumb, 0, sizeof(destroy_dumb));
	destroy_dumb.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
		return ret;
	}

	return 0;
}

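/*
 * Closes each distinct GEM handle of the bo exactly once.  Planes may share
 * a handle (e.g. dumb buffers), so handles already seen on an earlier plane
 * are skipped.
 */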
int drv_gem_bo_destroy(struct bo *bo)
{
	struct drm_gem_close gem_close;
	int ret, error = 0;
	size_t plane, i;

	for (plane = 0; plane < bo->num_planes; plane++) {
		for (i = 0; i < plane; i++)
			if (bo->handles[i].u32 == bo->handles[plane].u32)
				break;
		/* Make sure close hasn't already been called on this handle */
		if (i != plane)
			continue;

		memset(&gem_close, 0, sizeof(gem_close));
		gem_close.handle = bo->handles[plane].u32;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
		if (ret) {
			drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
				bo->handles[plane].u32, ret);
			error = ret;
		}
	}

	return error;
}

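/*
 * Imports the dma-buf fd of each plane into a GEM handle and bumps the
 * per-handle reference count.  If an import fails part-way, the handles
 * imported so far are closed again via drv_gem_bo_destroy().
 */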
int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct drm_prime_handle prime_handle;

	for (plane = 0; plane < bo->num_planes; plane++) {
		memset(&prime_handle, 0, sizeof(prime_handle));
		prime_handle.fd = data->fds[plane];

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);

		if (ret) {
			drv_log("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd);

			/*
			 * Need to call GEM close on planes that were opened,
			 * if any. Adjust the num_planes variable to be the
			 * plane that failed, so GEM close will be called on
			 * planes before that plane.
			 */
			bo->num_planes = plane;
			drv_gem_bo_destroy(bo);
			return ret;
		}

		bo->handles[plane].u32 = prime_handle.handle;
	}

	for (plane = 0; plane < bo->num_planes; plane++) {
		pthread_mutex_lock(&bo->drv->driver_lock);
		drv_increment_reference_count(bo->drv, bo, plane);
		pthread_mutex_unlock(&bo->drv->driver_lock);
	}

	return 0;
}

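/*
 * Maps a dumb buffer.  All planes of a dumb buffer share a single GEM
 * handle, so the mapping length is the combined size of every plane that
 * uses the requested plane's handle.
 */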
void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	size_t i;
	struct drm_mode_map_dumb map_dumb;

	memset(&map_dumb, 0, sizeof(map_dumb));
	map_dumb.handle = bo->handles[plane].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
	if (ret) {
		drv_log("DRM_IOCTL_MODE_MAP_DUMB failed\n");
		return MAP_FAILED;
	}

	for (i = 0; i < bo->num_planes; i++)
		if (bo->handles[i].u32 == bo->handles[plane].u32)
			vma->length += bo->sizes[i];

	return mmap(0, vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    map_dumb.offset);
}

int drv_bo_munmap(struct bo *bo, struct vma *vma)
{
	return munmap(vma->addr, vma->length);
}

int drv_mapping_destroy(struct bo *bo)
{
	int ret;
	size_t plane;
	struct mapping *mapping;
	uint32_t idx;

	/*
	 * This function is called right before the buffer is destroyed. It will free any mappings
	 * associated with the buffer.
	 */

	idx = 0;
	for (plane = 0; plane < bo->num_planes; plane++) {
		while (idx < drv_array_size(bo->drv->mappings)) {
			mapping = (struct mapping *)drv_array_at_idx(bo->drv->mappings, idx);
			if (mapping->vma->handle != bo->handles[plane].u32) {
				idx++;
				continue;
			}

			if (!--mapping->vma->refcount) {
				ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
				if (ret) {
					drv_log("munmap failed\n");
					return ret;
				}

				free(mapping->vma);
			}

			/* This shrinks and shifts the array, so don't increment idx. */
			drv_array_remove(bo->drv->mappings, idx);
		}
	}

	return 0;
}

int drv_get_prot(uint32_t map_flags)
{
	return (BO_MAP_WRITE & map_flags) ? PROT_WRITE | PROT_READ : PROT_READ;
}

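/*
 * The driver keeps a hash table (buffer_table) keyed by GEM handle; the
 * value stored for each handle is its reference count, cast to a pointer.
 */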
uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	void *count;
	uintptr_t num = 0;

	if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, &count))
		num = (uintptr_t)(count);

	return num;
}

void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	uintptr_t num = drv_get_reference_count(drv, bo, plane);

	/* If a value isn't in the table, drmHashDelete is a no-op */
	drmHashDelete(drv->buffer_table, bo->handles[plane].u32);
	drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
}

void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
	uintptr_t num = drv_get_reference_count(drv, bo, plane);

	drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

	if (num > 0)
		drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num - 1));
}

uint32_t drv_log_base2(uint32_t value)
{
	int ret = 0;

	while (value >>= 1)
		++ret;

	return ret;
}

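/*
 * Appends one (format, metadata, use_flags) combination to the driver's
 * combination list for each format in the array.
 */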
void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
			  struct format_metadata *metadata, uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		struct combination combo = { .format = formats[i],
					     .metadata = *metadata,
					     .use_flags = use_flags };

		drv_array_append(drv->combos, &combo);
	}
}

void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
			    uint64_t use_flags)
{
	uint32_t i;
	struct combination *combo;
	/* Attempts to add the specified flags to an existing combination. */
	for (i = 0; i < drv_array_size(drv->combos); i++) {
		combo = (struct combination *)drv_array_at_idx(drv->combos, i);
		if (combo->format == format && combo->metadata.tiling == metadata->tiling &&
		    combo->metadata.modifier == metadata->modifier)
			combo->use_flags |= use_flags;
	}
}

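/*
 * Enumerates the KMS planes exposed by the device and returns an array of
 * kms_item entries, one per (format, modifier) pair, with use flags
 * (scanout and/or cursor) derived from the plane type.  Returns NULL if
 * nothing usable was found.
 */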
struct drv_array *drv_query_kms(struct driver *drv)
{
	struct drv_array *kms_items;
	uint64_t plane_type, use_flag;
	uint32_t i, j, k;

	drmModePlanePtr plane;
	drmModePropertyPtr prop;
	drmModePlaneResPtr resources;
	drmModeObjectPropertiesPtr props;

	kms_items = drv_array_init(sizeof(struct kms_item));
	if (!kms_items)
		goto out;

	/*
	 * The ability to return universal planes is only complete on
	 * ChromeOS kernel versions >= v3.18.  The SET_CLIENT_CAP ioctl
	 * therefore might return an error code, so don't check it.  If it
	 * fails, it'll just return the plane list as overlay planes, which is
	 * fine in our case (our drivers already have cursor bits set).
	 * modetest in libdrm does the same thing.
	 */
	drmSetClientCap(drv->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);

	resources = drmModeGetPlaneResources(drv->fd);
	if (!resources)
		goto out;

	for (i = 0; i < resources->count_planes; i++) {
		plane = drmModeGetPlane(drv->fd, resources->planes[i]);
		if (!plane)
			goto out;

		props = drmModeObjectGetProperties(drv->fd, plane->plane_id, DRM_MODE_OBJECT_PLANE);
		if (!props)
			goto out;

		for (j = 0; j < props->count_props; j++) {
			prop = drmModeGetProperty(drv->fd, props->props[j]);
			if (prop) {
				if (strcmp(prop->name, "type") == 0) {
					plane_type = props->prop_values[j];
				}

				drmModeFreeProperty(prop);
			}
		}

		switch (plane_type) {
		case DRM_PLANE_TYPE_OVERLAY:
		case DRM_PLANE_TYPE_PRIMARY:
			use_flag = BO_USE_SCANOUT;
			break;
		case DRM_PLANE_TYPE_CURSOR:
			use_flag = BO_USE_CURSOR;
			break;
		default:
			assert(0);
		}

		for (j = 0; j < plane->count_formats; j++) {
			bool found = false;
			for (k = 0; k < drv_array_size(kms_items); k++) {
				struct kms_item *item = drv_array_at_idx(kms_items, k);
				if (item->format == plane->formats[j] &&
				    item->modifier == DRM_FORMAT_MOD_LINEAR) {
					item->use_flags |= use_flag;
					found = true;
					break;
				}
			}

			if (!found) {
				struct kms_item item = { .format = plane->formats[j],
							 .modifier = DRM_FORMAT_MOD_LINEAR,
							 .use_flags = use_flag };

				drv_array_append(kms_items, &item);
			}
		}

		drmModeFreeObjectProperties(props);
		drmModeFreePlane(plane);
	}

	drmModeFreePlaneResources(resources);
out:
	if (kms_items && !drv_array_size(kms_items)) {
		drv_array_destroy(kms_items);
		return NULL;
	}

	return kms_items;
}

int drv_modify_linear_combinations(struct driver *drv)
{
	uint32_t i, j;
	struct kms_item *item;
	struct combination *combo;
	struct drv_array *kms_items;

	/*
	 * All current drivers can scan out linear XRGB8888/ARGB8888 as a
	 * primary plane and as a cursor. Some drivers don't support
	 * drmModeGetPlaneResources, so add the combination here. Note that the
	 * kernel disregards the alpha component of ARGB unless it's an overlay
	 * plane.
	 */
	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
			       BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
			       BO_USE_CURSOR | BO_USE_SCANOUT);

	kms_items = drv_query_kms(drv);
	if (!kms_items)
		return 0;

	for (i = 0; i < drv_array_size(kms_items); i++) {
		item = (struct kms_item *)drv_array_at_idx(kms_items, i);
		for (j = 0; j < drv_array_size(drv->combos); j++) {
			combo = drv_array_at_idx(drv->combos, j);
			if (item->format == combo->format)
				combo->use_flags |= BO_USE_SCANOUT;
		}
	}

	drv_array_destroy(kms_items);
	return 0;
}

/*
 * Pick the best modifier from modifiers, according to the ordering
 * given by modifier_order.
 */
uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
			   const uint64_t *modifier_order, uint32_t order_count)
{
	uint32_t i, j;

	for (i = 0; i < order_count; i++) {
		for (j = 0; j < count; j++) {
			if (modifiers[j] == modifier_order[i]) {
				return modifiers[j];
			}
		}
	}

	return DRM_FORMAT_MOD_LINEAR;
}