• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2016 The Chromium OS Authors. All rights reserved.
3  * Use of this source code is governed by a BSD-style license that can be
4  * found in the LICENSE file.
5  */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <pthread.h>
10 #include <stdint.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <sys/mman.h>
14 #include <sys/types.h>
15 #include <unistd.h>
16 #include <xf86drm.h>
17 
18 #ifdef __ANDROID__
19 #include <cutils/log.h>
20 #include <libgen.h>
21 #endif
22 
23 #include "drv_helpers.h"
24 #include "drv_priv.h"
25 #include "util.h"
26 
27 #ifdef DRV_AMDGPU
28 extern const struct backend backend_amdgpu;
29 #endif
30 #ifdef DRV_I915
31 extern const struct backend backend_i915;
32 #endif
33 #ifdef DRV_MEDIATEK
34 extern const struct backend backend_mediatek;
35 #endif
36 #ifdef DRV_MSM
37 extern const struct backend backend_msm;
38 #endif
39 #ifdef DRV_ROCKCHIP
40 extern const struct backend backend_rockchip;
41 #endif
42 #ifdef DRV_VC4
43 extern const struct backend backend_vc4;
44 #endif
45 
46 // Dumb / generic drivers
47 extern const struct backend backend_evdi;
48 extern const struct backend backend_marvell;
49 extern const struct backend backend_meson;
50 extern const struct backend backend_nouveau;
51 extern const struct backend backend_komeda;
52 extern const struct backend backend_radeon;
53 extern const struct backend backend_synaptics;
54 extern const struct backend backend_virtgpu;
55 extern const struct backend backend_udl;
56 extern const struct backend backend_vkms;
57 
/*
 * Look up the backend whose name matches the kernel DRM driver behind @fd.
 * Returns NULL when the driver version cannot be queried or when no backend
 * claims that driver name.
 */
static const struct backend *drv_get_backend(int fd)
{
	drmVersionPtr drm_version;
	unsigned int i;

	drm_version = drmGetVersion(fd);

	if (!drm_version)
		return NULL;

	const struct backend *backend_list[] = {
#ifdef DRV_AMDGPU
		&backend_amdgpu,
#endif
#ifdef DRV_I915
		&backend_i915,
#endif
#ifdef DRV_MEDIATEK
		&backend_mediatek,
#endif
#ifdef DRV_MSM
		&backend_msm,
#endif
#ifdef DRV_ROCKCHIP
		&backend_rockchip,
#endif
#ifdef DRV_VC4
		&backend_vc4,
#endif
		/* Dumb/generic drivers (each listed exactly once). */
		&backend_evdi,	 &backend_marvell, &backend_meson,     &backend_nouveau,
		&backend_komeda, &backend_radeon,  &backend_synaptics, &backend_virtgpu,
		&backend_udl,	 &backend_vkms
	};

	for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
		const struct backend *b = backend_list[i];
		if (!strcmp(drm_version->name, b->name)) {
			drmFreeVersion(drm_version);
			return b;
		}
	}

	drmFreeVersion(drm_version);
	return NULL;
}
103 
/*
 * Create a driver instance for the DRM device behind @fd. Picks the backend
 * matching the kernel driver, sets up buffer/mapping bookkeeping, and runs
 * the backend's init hook. Returns NULL on failure. The caller retains
 * ownership of @fd either way.
 */
struct driver *drv_create(int fd)
{
	struct driver *drv;
	int ret;

	drv = (struct driver *)calloc(1, sizeof(*drv));

	if (!drv)
		return NULL;

	/* MINIGBM_DEBUG=nocompression disables compressed layouts. */
	char *minigbm_debug;
	minigbm_debug = getenv("MINIGBM_DEBUG");
	drv->compression = (minigbm_debug == NULL) || (strcmp(minigbm_debug, "nocompression") != 0);

	drv->fd = fd;
	drv->backend = drv_get_backend(fd);

	if (!drv->backend)
		goto free_driver;

	if (pthread_mutex_init(&drv->buffer_table_lock, NULL))
		goto free_driver;

	/* Maps GEM handle -> reference count (see drv_bo_acquire/release). */
	drv->buffer_table = drmHashCreate();
	if (!drv->buffer_table)
		goto free_buffer_table_lock;

	if (pthread_mutex_init(&drv->mappings_lock, NULL))
		goto free_buffer_table;

	drv->mappings = drv_array_init(sizeof(struct mapping));
	if (!drv->mappings)
		goto free_mappings_lock;

	drv->combos = drv_array_init(sizeof(struct combination));
	if (!drv->combos)
		goto free_mappings;

	if (drv->backend->init) {
		ret = drv->backend->init(drv);
		if (ret) {
			drv_array_destroy(drv->combos);
			goto free_mappings;
		}
	}

	return drv;

	/* Error unwinding: release resources in reverse order of acquisition. */
free_mappings:
	drv_array_destroy(drv->mappings);
free_mappings_lock:
	pthread_mutex_destroy(&drv->mappings_lock);
free_buffer_table:
	drmHashDestroy(drv->buffer_table);
free_buffer_table_lock:
	pthread_mutex_destroy(&drv->buffer_table_lock);
free_driver:
	free(drv);
	return NULL;
}
164 
/*
 * Tear down a driver instance created by drv_create(). Resources are released
 * in reverse order of creation. The underlying DRM fd is not closed here.
 */
void drv_destroy(struct driver *drv)
{
	/* Give the backend a chance to release its private state first. */
	if (drv->backend->close)
		drv->backend->close(drv);

	drv_array_destroy(drv->combos);

	drv_array_destroy(drv->mappings);
	pthread_mutex_destroy(&drv->mappings_lock);

	drmHashDestroy(drv->buffer_table);
	pthread_mutex_destroy(&drv->buffer_table_lock);

	free(drv);
}
180 
/* Returns the DRM fd the driver instance was created with. */
int drv_get_fd(struct driver *drv)
{
	return drv->fd;
}
185 
/* Returns the name of the backend in use (matches the kernel driver name). */
const char *drv_get_name(struct driver *drv)
{
	return drv->backend->name;
}
190 
drv_get_combination(struct driver * drv,uint32_t format,uint64_t use_flags)191 struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
192 {
193 	struct combination *curr, *best;
194 
195 	if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
196 		return 0;
197 
198 	best = NULL;
199 	uint32_t i;
200 	for (i = 0; i < drv_array_size(drv->combos); i++) {
201 		curr = drv_array_at_idx(drv->combos, i);
202 		if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
203 			if (!best || best->metadata.priority < curr->metadata.priority)
204 				best = curr;
205 	}
206 
207 	return best;
208 }
209 
drv_bo_new(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,uint64_t use_flags,bool is_test_buffer)210 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
211 		      uint64_t use_flags, bool is_test_buffer)
212 {
213 
214 	struct bo *bo;
215 	bo = (struct bo *)calloc(1, sizeof(*bo));
216 
217 	if (!bo)
218 		return NULL;
219 
220 	bo->drv = drv;
221 	bo->meta.width = width;
222 	bo->meta.height = height;
223 	bo->meta.format = format;
224 	bo->meta.use_flags = use_flags;
225 	bo->meta.num_planes = drv_num_planes_from_format(format);
226 	bo->is_test_buffer = is_test_buffer;
227 
228 	if (!bo->meta.num_planes) {
229 		free(bo);
230 		errno = EINVAL;
231 		return NULL;
232 	}
233 
234 	return bo;
235 }
236 
drv_bo_mapping_destroy(struct bo * bo)237 static void drv_bo_mapping_destroy(struct bo *bo)
238 {
239 	struct driver *drv = bo->drv;
240 	uint32_t idx = 0;
241 
242 	/*
243 	 * This function is called right before the buffer is destroyed. It will free any mappings
244 	 * associated with the buffer.
245 	 */
246 	pthread_mutex_lock(&drv->mappings_lock);
247 	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
248 		while (idx < drv_array_size(drv->mappings)) {
249 			struct mapping *mapping =
250 			    (struct mapping *)drv_array_at_idx(drv->mappings, idx);
251 			if (mapping->vma->handle != bo->handles[plane].u32) {
252 				idx++;
253 				continue;
254 			}
255 
256 			if (!--mapping->vma->refcount) {
257 				int ret = drv->backend->bo_unmap(bo, mapping->vma);
258 				if (ret) {
259 					pthread_mutex_unlock(&drv->mappings_lock);
260 					assert(ret);
261 					drv_log("munmap failed\n");
262 					return;
263 				}
264 
265 				free(mapping->vma);
266 			}
267 
268 			/* This shrinks and shifts the array, so don't increment idx. */
269 			drv_array_remove(drv->mappings, idx);
270 		}
271 	}
272 	pthread_mutex_unlock(&drv->mappings_lock);
273 }
274 
/*
 * Acquire a reference on plane buffers of the bo.
 */
static void drv_bo_acquire(struct bo *bo)
{
	struct driver *drv = bo->drv;

	pthread_mutex_lock(&drv->buffer_table_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		uintptr_t num = 0;

		/*
		 * drmHashLookup() returns 0 on a hit. The refcount is stored in the
		 * hash value slot, so drop the old entry and re-insert count + 1.
		 */
		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num))
			drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

		drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);
}
293 
/*
 * Release a reference on plane buffers of the bo. Return true when the bo has lost all its
 * references. Otherwise, return false.
 */
static bool drv_bo_release(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uintptr_t num;

	/* Let the backend drop any per-bo state before the refcounts change. */
	if (drv->backend->bo_release)
		drv->backend->bo_release(bo);

	pthread_mutex_lock(&drv->buffer_table_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/* Decrement the handle's count; an entry reaching zero stays deleted. */
		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num)) {
			drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

			if (num > 1) {
				drmHashInsert(drv->buffer_table, bo->handles[plane].u32,
					      (void *)(num - 1));
			}
		}
	}

	/* The same buffer can back multiple planes with different offsets. */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num)) {
			/* num is positive if found in the hashmap. */
			pthread_mutex_unlock(&drv->buffer_table_lock);
			return false;
		}
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);

	return true;
}
330 
/*
 * Allocate a buffer object. A BO_USE_TEST_ALLOC request is a dry run: layout
 * metadata is computed (when the backend supports it) but no real buffer is
 * created. Returns NULL with errno set on failure.
 */
struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
			 uint64_t use_flags)
{
	int ret;
	struct bo *bo;
	bool is_test_alloc;

	is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
	use_flags &= ~BO_USE_TEST_ALLOC;

	bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
							0);
		/* Test allocations stop after metadata; no buffer is created. */
		if (!is_test_alloc && ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else if (!is_test_alloc) {
		ret = drv->backend->bo_create(bo, width, height, format, use_flags);
	}

	if (ret) {
		/* Backends report failures as negative errno codes. */
		errno = -ret;
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	return bo;
}
366 
drv_bo_create_with_modifiers(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,const uint64_t * modifiers,uint32_t count)367 struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
368 					uint32_t format, const uint64_t *modifiers, uint32_t count)
369 {
370 	int ret;
371 	struct bo *bo;
372 
373 	if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
374 		errno = ENOENT;
375 		return NULL;
376 	}
377 
378 	bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);
379 
380 	if (!bo)
381 		return NULL;
382 
383 	ret = -EINVAL;
384 	if (drv->backend->bo_compute_metadata) {
385 		ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
386 							modifiers, count);
387 		if (ret == 0)
388 			ret = drv->backend->bo_create_from_metadata(bo);
389 	} else {
390 		ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
391 							     count);
392 	}
393 
394 	if (ret) {
395 		free(bo);
396 		return NULL;
397 	}
398 
399 	drv_bo_acquire(bo);
400 
401 	return bo;
402 }
403 
drv_bo_destroy(struct bo * bo)404 void drv_bo_destroy(struct bo *bo)
405 {
406 	if (!bo->is_test_buffer && drv_bo_release(bo)) {
407 		drv_bo_mapping_destroy(bo);
408 		bo->drv->backend->bo_destroy(bo);
409 	}
410 
411 	free(bo);
412 }
413 
/*
 * Import dmabuf fds described by @data into a new bo. Plane sizes are derived
 * from the fds' sizes (via lseek) and the plane offsets, then validated
 * against the underlying buffer size. Returns NULL on failure.
 */
struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct bo *bo;
	off_t seek_end;

	bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);

	if (!bo)
		return NULL;

	ret = drv->backend->bo_import(bo, data);
	if (ret) {
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	bo->meta.format_modifier = data->format_modifier;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		bo->meta.strides[plane] = data->strides[plane];
		bo->meta.offsets[plane] = data->offsets[plane];

		/* The fd's size bounds the plane layout. */
		seek_end = lseek(data->fds[plane], 0, SEEK_END);
		if (seek_end == (off_t)(-1)) {
			drv_log("lseek() failed with %s\n", strerror(errno));
			goto destroy_bo;
		}

		lseek(data->fds[plane], 0, SEEK_SET);
		/* Last plane (or a zero next-offset) extends to the end of the buffer. */
		if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
			bo->meta.sizes[plane] = seek_end - data->offsets[plane];
		else
			bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];

		if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
			drv_log("buffer size is too large.\n");
			goto destroy_bo;
		}

		bo->meta.total_size += bo->meta.sizes[plane];
	}

	return bo;

destroy_bo:
	/* Also drops the references taken by drv_bo_acquire() above. */
	drv_bo_destroy(bo);
	return NULL;
}
465 
/*
 * Map @plane of @bo for CPU access and return the address of the plane's
 * first byte. Mappings are cached under mappings_lock: an identical
 * (handle, flags, rect) request reuses a prior mapping; a same-handle/flags
 * request reuses the prior vma. *map_data receives the record to pass to
 * drv_bo_unmap(). Returns MAP_FAILED on error or for test buffers.
 */
void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
		 struct mapping **map_data, size_t plane)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	uint8_t *addr;
	struct mapping mapping = { 0 };

	assert(rect->width >= 0);
	assert(rect->height >= 0);
	assert(rect->x + rect->width <= drv_bo_get_width(bo));
	assert(rect->y + rect->height <= drv_bo_get_height(bo));
	assert(BO_MAP_READ_WRITE & map_flags);
	/* No CPU access for protected buffers. */
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->is_test_buffer)
		return MAP_FAILED;

	mapping.rect = *rect;
	mapping.refcount = 1;

	pthread_mutex_lock(&drv->mappings_lock);

	/* First pass: an exact (handle, flags, rect) match reuses the whole mapping. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handles[plane].u32 ||
		    prior->vma->map_flags != map_flags)
			continue;

		if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
		    rect->width != prior->rect.width || rect->height != prior->rect.height)
			continue;

		prior->refcount++;
		*map_data = prior;
		goto exact_match;
	}

	/* Second pass: a same-handle/flags mapping shares its vma (new rect). */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handles[plane].u32 ||
		    prior->vma->map_flags != map_flags)
			continue;

		prior->vma->refcount++;
		mapping.vma = prior->vma;
		goto success;
	}

	/* Nothing reusable: create a fresh vma through the backend. */
	mapping.vma = calloc(1, sizeof(*mapping.vma));
	if (!mapping.vma) {
		*map_data = NULL;
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
	addr = drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
	if (addr == MAP_FAILED) {
		*map_data = NULL;
		free(mapping.vma);
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	mapping.vma->refcount = 1;
	mapping.vma->addr = addr;
	mapping.vma->handle = bo->handles[plane].u32;
	mapping.vma->map_flags = map_flags;

success:
	*map_data = drv_array_append(drv->mappings, &mapping);
exact_match:
	drv_bo_invalidate(bo, *map_data);
	/* The vma maps the whole buffer; offset to the requested plane. */
	addr = (uint8_t *)((*map_data)->vma->addr);
	addr += drv_bo_get_plane_offset(bo, plane);
	pthread_mutex_unlock(&drv->mappings_lock);
	return (void *)addr;
}
546 
/*
 * Drop one reference on @mapping. When both the mapping and its vma become
 * unreferenced, the memory is unmapped and the entry removed from the cache.
 * Returns the backend bo_unmap() result, or 0 when nothing was unmapped.
 */
int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	int ret = 0;

	pthread_mutex_lock(&drv->mappings_lock);

	if (--mapping->refcount)
		goto out;

	/* The vma may still be shared by other mappings of the same handle/flags. */
	if (!--mapping->vma->refcount) {
		ret = drv->backend->bo_unmap(bo, mapping->vma);
		free(mapping->vma);
	}

	/* Remove the now-dead mapping record from the cache array. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		if (mapping == (struct mapping *)drv_array_at_idx(drv->mappings, i)) {
			drv_array_remove(drv->mappings, i);
			break;
		}
	}

out:
	pthread_mutex_unlock(&drv->mappings_lock);
	return ret;
}
574 
drv_bo_invalidate(struct bo * bo,struct mapping * mapping)575 int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
576 {
577 	int ret = 0;
578 
579 	assert(mapping);
580 	assert(mapping->vma);
581 	assert(mapping->refcount > 0);
582 	assert(mapping->vma->refcount > 0);
583 
584 	if (bo->drv->backend->bo_invalidate)
585 		ret = bo->drv->backend->bo_invalidate(bo, mapping);
586 
587 	return ret;
588 }
589 
drv_bo_flush(struct bo * bo,struct mapping * mapping)590 int drv_bo_flush(struct bo *bo, struct mapping *mapping)
591 {
592 	int ret = 0;
593 
594 	assert(mapping);
595 	assert(mapping->vma);
596 	assert(mapping->refcount > 0);
597 	assert(mapping->vma->refcount > 0);
598 
599 	if (bo->drv->backend->bo_flush)
600 		ret = bo->drv->backend->bo_flush(bo, mapping);
601 
602 	return ret;
603 }
604 
drv_bo_flush_or_unmap(struct bo * bo,struct mapping * mapping)605 int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
606 {
607 	int ret = 0;
608 
609 	assert(mapping);
610 	assert(mapping->vma);
611 	assert(mapping->refcount > 0);
612 	assert(mapping->vma->refcount > 0);
613 	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
614 
615 	if (bo->drv->backend->bo_flush)
616 		ret = bo->drv->backend->bo_flush(bo, mapping);
617 	else
618 		ret = drv_bo_unmap(bo, mapping);
619 
620 	return ret;
621 }
622 
/* Returns the buffer's width, as passed at creation time. */
uint32_t drv_bo_get_width(struct bo *bo)
{
	return bo->meta.width;
}
627 
/* Returns the buffer's height, as passed at creation time. */
uint32_t drv_bo_get_height(struct bo *bo)
{
	return bo->meta.height;
}
632 
/* Returns the number of planes implied by the buffer's format. */
size_t drv_bo_get_num_planes(struct bo *bo)
{
	return bo->meta.num_planes;
}
637 
drv_bo_get_plane_handle(struct bo * bo,size_t plane)638 union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
639 {
640 	return bo->handles[plane];
641 }
642 
643 #ifndef DRM_RDWR
644 #define DRM_RDWR O_RDWR
645 #endif
646 
/*
 * Export @plane as a dmabuf fd, read/write when the kernel permits it.
 * Returns the new fd (owned by the caller) or a negative value on failure.
 * Test buffers cannot be exported.
 */
int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
{

	int ret, fd;
	assert(plane < bo->meta.num_planes);

	if (bo->is_test_buffer)
		return -EINVAL;

	ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);

	// Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
	if (ret)
		ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);

	if (ret)
		drv_log("Failed to get plane fd: %s\n", strerror(errno));

	return (ret) ? ret : fd;
}
667 
/* Returns @plane's byte offset within the buffer. */
uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.offsets[plane];
}
673 
/* Returns @plane's size in bytes. */
uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.sizes[plane];
}
679 
/* Returns @plane's row stride in bytes. */
uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.strides[plane];
}
685 
/* Returns the buffer's DRM format modifier. */
uint64_t drv_bo_get_format_modifier(struct bo *bo)
{
	return bo->meta.format_modifier;
}
690 
/* Returns the buffer's fourcc format code. */
uint32_t drv_bo_get_format(struct bo *bo)
{
	return bo->meta.format;
}
695 
/* Returns the buffer's tiling mode (backend-specific value). */
uint32_t drv_bo_get_tiling(struct bo *bo)
{
	return bo->meta.tiling;
}
700 
/* Returns the BO_USE_* flags the buffer was created with. */
uint64_t drv_bo_get_use_flags(struct bo *bo)
{
	return bo->meta.use_flags;
}
705 
/* Returns the total size in bytes across all planes. */
size_t drv_bo_get_total_size(struct bo *bo)
{
	return bo->meta.total_size;
}
710 
711 /*
712  * Map internal fourcc codes back to standard fourcc codes.
713  */
drv_get_standard_fourcc(uint32_t fourcc_internal)714 uint32_t drv_get_standard_fourcc(uint32_t fourcc_internal)
715 {
716 	return (fourcc_internal == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : fourcc_internal;
717 }
718 
/*
 * Resolve a requested (format, use_flags) pair into the concrete pair the
 * backend will allocate with. Every backend must provide this hook.
 */
void drv_resolve_format_and_use_flags(struct driver *drv, uint32_t format, uint64_t use_flags,
				      uint32_t *out_format, uint64_t *out_use_flags)
{
	assert(drv->backend->resolve_format_and_use_flags);

	drv->backend->resolve_format_and_use_flags(drv, format, use_flags, out_format,
						   out_use_flags);
}
727 
drv_num_buffers_per_bo(struct bo * bo)728 uint32_t drv_num_buffers_per_bo(struct bo *bo)
729 {
730 	uint32_t count = 0;
731 	size_t plane, p;
732 
733 	if (bo->is_test_buffer)
734 		return 0;
735 
736 	for (plane = 0; plane < bo->meta.num_planes; plane++) {
737 		for (p = 0; p < plane; p++)
738 			if (bo->handles[p].u32 == bo->handles[plane].u32)
739 				break;
740 		if (p == plane)
741 			count++;
742 	}
743 
744 	return count;
745 }
746 
/*
 * Log a formatted message prefixed with "[prefix:file(line)]". On Android the
 * message goes to logcat at ERROR priority; elsewhere to stderr.
 */
void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...)
{
	char buf[50];
	const char *base;

	/*
	 * Strip the directory with strrchr() rather than basename(): POSIX
	 * basename() may modify its argument, and @file is typically the
	 * string literal __FILE__, which must not be written to.
	 */
	base = strrchr(file, '/');
	base = base ? base + 1 : file;

	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, base, line);

	va_list args;
	va_start(args, format);
#ifdef __ANDROID__
	__android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
#else
	fprintf(stderr, "%s ", buf);
	vfprintf(stderr, format, args);
#endif
	va_end(args);
}
762 
drv_resource_info(struct bo * bo,uint32_t strides[DRV_MAX_PLANES],uint32_t offsets[DRV_MAX_PLANES],uint64_t * format_modifier)763 int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
764 		      uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
765 {
766 	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
767 		strides[plane] = bo->meta.strides[plane];
768 		offsets[plane] = bo->meta.offsets[plane];
769 	}
770 	*format_modifier = bo->meta.format_modifier;
771 
772 	if (bo->drv->backend->resource_info)
773 		return bo->drv->backend->resource_info(bo, strides, offsets, format_modifier);
774 
775 	return 0;
776 }
777 
drv_get_max_texture_2d_size(struct driver * drv)778 uint32_t drv_get_max_texture_2d_size(struct driver *drv)
779 {
780 	if (drv->backend->get_max_texture_2d_size)
781 		return drv->backend->get_max_texture_2d_size(drv);
782 
783 	return UINT32_MAX;
784 }
785