• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2016 The Chromium OS Authors. All rights reserved.
3  * Use of this source code is governed by a BSD-style license that can be
4  * found in the LICENSE file.
5  */
6 #include <assert.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <pthread.h>
10 #include <stdint.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <sys/mman.h>
15 #include <sys/types.h>
16 #include <unistd.h>
17 #include <xf86drm.h>
18 
19 #ifdef __ANDROID__
20 #include <cutils/log.h>
21 #include <libgen.h>
22 #endif
23 
24 #include "drv_priv.h"
25 #include "helpers.h"
26 #include "util.h"
27 
28 #ifdef DRV_AMDGPU
29 extern const struct backend backend_amdgpu;
30 #endif
31 #ifdef DRV_EXYNOS
32 extern const struct backend backend_exynos;
33 #endif
34 #ifdef DRV_I915
35 extern const struct backend backend_i915;
36 #endif
37 #ifdef DRV_MEDIATEK
38 extern const struct backend backend_mediatek;
39 #endif
40 #ifdef DRV_MSM
41 extern const struct backend backend_msm;
42 #endif
43 #ifdef DRV_ROCKCHIP
44 extern const struct backend backend_rockchip;
45 #endif
46 #ifdef DRV_VC4
47 extern const struct backend backend_vc4;
48 #endif
49 
50 // Dumb / generic drivers
51 extern const struct backend backend_evdi;
52 extern const struct backend backend_marvell;
53 extern const struct backend backend_meson;
54 extern const struct backend backend_nouveau;
55 extern const struct backend backend_komeda;
56 extern const struct backend backend_radeon;
57 extern const struct backend backend_synaptics;
58 extern const struct backend backend_virtgpu;
59 extern const struct backend backend_udl;
60 extern const struct backend backend_vkms;
61 
drv_get_backend(int fd)62 static const struct backend *drv_get_backend(int fd)
63 {
64 	drmVersionPtr drm_version;
65 	unsigned int i;
66 
67 	drm_version = drmGetVersion(fd);
68 
69 	if (!drm_version)
70 		return NULL;
71 
72 	const struct backend *backend_list[] = {
73 #ifdef DRV_AMDGPU
74 		&backend_amdgpu,
75 #endif
76 #ifdef DRV_EXYNOS
77 		&backend_exynos,
78 #endif
79 #ifdef DRV_I915
80 		&backend_i915,
81 #endif
82 #ifdef DRV_MEDIATEK
83 		&backend_mediatek,
84 #endif
85 #ifdef DRV_MSM
86 		&backend_msm,
87 #endif
88 #ifdef DRV_ROCKCHIP
89 		&backend_rockchip,
90 #endif
91 #ifdef DRV_VC4
92 		&backend_vc4,
93 #endif
94 		&backend_evdi,	   &backend_marvell, &backend_meson,	 &backend_nouveau,
95 		&backend_komeda,   &backend_radeon,  &backend_synaptics, &backend_virtgpu,
96 		&backend_udl,	   &backend_virtgpu, &backend_vkms
97 	};
98 
99 	for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
100 		const struct backend *b = backend_list[i];
101 		if (!strcmp(drm_version->name, b->name)) {
102 			drmFreeVersion(drm_version);
103 			return b;
104 		}
105 	}
106 
107 	drmFreeVersion(drm_version);
108 	return NULL;
109 }
110 
/*
 * Creates a driver instance bound to the already-open DRM device |fd|.
 *
 * Picks a backend by driver name, initializes the per-driver lock and the
 * buffer/mapping/combination bookkeeping, then runs the backend's optional
 * init hook.  Returns NULL on any failure.  The caller retains ownership
 * of |fd|; drv_destroy() does not close it.
 */
struct driver *drv_create(int fd)
{
	struct driver *drv;
	int ret;

	drv = (struct driver *)calloc(1, sizeof(*drv));

	if (!drv)
		return NULL;

	/* MINIGBM_DEBUG=nocompression disables the compression flag. */
	char *minigbm_debug;
	minigbm_debug = getenv("MINIGBM_DEBUG");
	drv->compression = (minigbm_debug == NULL) || (strcmp(minigbm_debug, "nocompression") != 0);

	drv->fd = fd;
	drv->backend = drv_get_backend(fd);

	if (!drv->backend)
		goto free_driver;

	if (pthread_mutex_init(&drv->driver_lock, NULL))
		goto free_driver;

	drv->buffer_table = drmHashCreate();
	if (!drv->buffer_table)
		goto free_lock;

	drv->mappings = drv_array_init(sizeof(struct mapping));
	if (!drv->mappings)
		goto free_buffer_table;

	drv->combos = drv_array_init(sizeof(struct combination));
	if (!drv->combos)
		goto free_mappings;

	/* init (if present) is expected to populate drv->combos. */
	if (drv->backend->init) {
		ret = drv->backend->init(drv);
		if (ret) {
			drv_array_destroy(drv->combos);
			goto free_mappings;
		}
	}

	return drv;

	/* Error unwinding in reverse order of acquisition. */
free_mappings:
	drv_array_destroy(drv->mappings);
free_buffer_table:
	drmHashDestroy(drv->buffer_table);
free_lock:
	pthread_mutex_destroy(&drv->driver_lock);
free_driver:
	free(drv);
	return NULL;
}
166 
drv_destroy(struct driver * drv)167 void drv_destroy(struct driver *drv)
168 {
169 	pthread_mutex_lock(&drv->driver_lock);
170 
171 	if (drv->backend->close)
172 		drv->backend->close(drv);
173 
174 	drmHashDestroy(drv->buffer_table);
175 	drv_array_destroy(drv->mappings);
176 	drv_array_destroy(drv->combos);
177 
178 	pthread_mutex_unlock(&drv->driver_lock);
179 	pthread_mutex_destroy(&drv->driver_lock);
180 
181 	free(drv);
182 }
183 
/* Returns the DRM device fd this driver instance was created with. */
int drv_get_fd(struct driver *drv)
{
	return drv->fd;
}
188 
/* Returns the selected backend's DRM driver name (e.g. "i915"). */
const char *drv_get_name(struct driver *drv)
{
	return drv->backend->name;
}
193 
drv_get_combination(struct driver * drv,uint32_t format,uint64_t use_flags)194 struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
195 {
196 	struct combination *curr, *best;
197 
198 	if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
199 		return 0;
200 
201 	best = NULL;
202 	uint32_t i;
203 	for (i = 0; i < drv_array_size(drv->combos); i++) {
204 		curr = drv_array_at_idx(drv->combos, i);
205 		if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
206 			if (!best || best->metadata.priority < curr->metadata.priority)
207 				best = curr;
208 	}
209 
210 	return best;
211 }
212 
drv_bo_new(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,uint64_t use_flags,bool is_test_buffer)213 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
214 		      uint64_t use_flags, bool is_test_buffer)
215 {
216 
217 	struct bo *bo;
218 	bo = (struct bo *)calloc(1, sizeof(*bo));
219 
220 	if (!bo)
221 		return NULL;
222 
223 	bo->drv = drv;
224 	bo->meta.width = width;
225 	bo->meta.height = height;
226 	bo->meta.format = format;
227 	bo->meta.use_flags = use_flags;
228 	bo->meta.num_planes = drv_num_planes_from_format(format);
229 	bo->is_test_buffer = is_test_buffer;
230 
231 	if (!bo->meta.num_planes) {
232 		free(bo);
233 		return NULL;
234 	}
235 
236 	return bo;
237 }
238 
/*
 * Allocates a buffer of |width| x |height| in |format| for |use_flags|.
 *
 * BO_USE_TEST_ALLOC requests a dry run: layout metadata is computed but
 * no backing memory is allocated.  On success, a reference is taken on
 * every plane.  Returns NULL on failure.
 */
struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
			 uint64_t use_flags)
{
	int ret;
	size_t plane;
	struct bo *bo;
	bool is_test_alloc;

	/* BO_USE_TEST_ALLOC is consumed here; backends never see it. */
	is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
	use_flags &= ~BO_USE_TEST_ALLOC;

	bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		/* Two-stage path: compute the layout, then (unless this is a
		 * test allocation) actually allocate from that metadata. */
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
							0);
		if (!is_test_alloc && ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else if (!is_test_alloc) {
		ret = drv->backend->bo_create(bo, width, height, format, use_flags);
	}

	if (ret) {
		free(bo);
		return NULL;
	}

	pthread_mutex_lock(&drv->driver_lock);

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		/* Backends are expected to report planes in offset order. */
		if (plane > 0)
			assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);

		drv_increment_reference_count(drv, bo, plane);
	}

	pthread_mutex_unlock(&drv->driver_lock);

	return bo;
}
283 
/*
 * Like drv_bo_create(), but the layout is chosen from the caller-supplied
 * |modifiers| list (|count| entries) instead of from use flags.  Fails
 * with ENOENT when the backend supports neither modifier-aware path.
 * Returns NULL on failure.
 */
struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
					uint32_t format, const uint64_t *modifiers, uint32_t count)
{
	int ret;
	size_t plane;
	struct bo *bo;

	if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
		errno = ENOENT;
		return NULL;
	}

	/* Use flags are irrelevant here; the modifier dictates the layout. */
	bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
							modifiers, count);
		if (ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else {
		ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
							     count);
	}

	if (ret) {
		free(bo);
		return NULL;
	}

	pthread_mutex_lock(&drv->driver_lock);

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		/* Backends are expected to report planes in offset order. */
		if (plane > 0)
			assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);

		drv_increment_reference_count(drv, bo, plane);
	}

	pthread_mutex_unlock(&drv->driver_lock);

	return bo;
}
330 
/*
 * Drops one reference on each plane of |bo| and frees the wrapper.
 * The backing GEM buffers and CPU mappings are destroyed only when no
 * other bo in this driver still references the same plane handles.
 */
void drv_bo_destroy(struct bo *bo)
{
	int ret;
	size_t plane;
	uintptr_t total = 0;
	struct driver *drv = bo->drv;

	/* Test-allocation buffers never had backing memory to release. */
	if (!bo->is_test_buffer) {
		pthread_mutex_lock(&drv->driver_lock);

		for (plane = 0; plane < bo->meta.num_planes; plane++)
			drv_decrement_reference_count(drv, bo, plane);

		/* Sum the remaining refcounts across all planes. */
		for (plane = 0; plane < bo->meta.num_planes; plane++)
			total += drv_get_reference_count(drv, bo, plane);

		pthread_mutex_unlock(&drv->driver_lock);

		if (total == 0) {
			/* NOTE(review): |ret| is consumed only by assert(), so
			 * it is unused when compiled with NDEBUG. */
			ret = drv_mapping_destroy(bo);
			assert(ret == 0);
			bo->drv->backend->bo_destroy(bo);
		}
	}

	free(bo);
}
358 
/*
 * Wraps externally-provided dma-buf fds (|data|) in a new bo.
 *
 * Plane sizes are not part of the import metadata, so each plane's size
 * is derived from the fd length (lseek to SEEK_END) and the next plane's
 * offset.  Returns NULL on failure; partially-imported buffers are
 * released via drv_bo_destroy().
 */
struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct bo *bo;
	off_t seek_end;

	bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);

	if (!bo)
		return NULL;

	ret = drv->backend->bo_import(bo, data);
	if (ret) {
		free(bo);
		return NULL;
	}

	/* Import succeeded: take a reference on every plane so that the
	 * destroy-path refcounting below stays balanced. */
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		pthread_mutex_lock(&bo->drv->driver_lock);
		drv_increment_reference_count(bo->drv, bo, plane);
		pthread_mutex_unlock(&bo->drv->driver_lock);
	}

	bo->meta.format_modifier = data->format_modifier;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		bo->meta.strides[plane] = data->strides[plane];
		bo->meta.offsets[plane] = data->offsets[plane];

		/* Measure the fd's length to bound and derive plane sizes. */
		seek_end = lseek(data->fds[plane], 0, SEEK_END);
		if (seek_end == (off_t)(-1)) {
			drv_log("lseek() failed with %s\n", strerror(errno));
			goto destroy_bo;
		}

		lseek(data->fds[plane], 0, SEEK_SET);
		/* The last plane (or one with no recorded next offset) extends
		 * to the end of the fd; otherwise size is the offset delta. */
		if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
			bo->meta.sizes[plane] = seek_end - data->offsets[plane];
		else
			bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];

		/* Reject metadata claiming more bytes than the fd holds. */
		if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
			drv_log("buffer size is too large.\n");
			goto destroy_bo;
		}

		bo->meta.total_size += bo->meta.sizes[plane];
	}

	return bo;

destroy_bo:
	drv_bo_destroy(bo);
	return NULL;
}
414 
drv_bo_map(struct bo * bo,const struct rectangle * rect,uint32_t map_flags,struct mapping ** map_data,size_t plane)415 void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
416 		 struct mapping **map_data, size_t plane)
417 {
418 	uint32_t i;
419 	uint8_t *addr;
420 	struct mapping mapping = { 0 };
421 
422 	assert(rect->width >= 0);
423 	assert(rect->height >= 0);
424 	assert(rect->x + rect->width <= drv_bo_get_width(bo));
425 	assert(rect->y + rect->height <= drv_bo_get_height(bo));
426 	assert(BO_MAP_READ_WRITE & map_flags);
427 	/* No CPU access for protected buffers. */
428 	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
429 
430 	if (bo->is_test_buffer)
431 		return MAP_FAILED;
432 
433 	mapping.rect = *rect;
434 	mapping.refcount = 1;
435 
436 	pthread_mutex_lock(&bo->drv->driver_lock);
437 
438 	for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
439 		struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
440 		if (prior->vma->handle != bo->handles[plane].u32 ||
441 		    prior->vma->map_flags != map_flags)
442 			continue;
443 
444 		if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
445 		    rect->width != prior->rect.width || rect->height != prior->rect.height)
446 			continue;
447 
448 		prior->refcount++;
449 		*map_data = prior;
450 		goto exact_match;
451 	}
452 
453 	for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
454 		struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
455 		if (prior->vma->handle != bo->handles[plane].u32 ||
456 		    prior->vma->map_flags != map_flags)
457 			continue;
458 
459 		prior->vma->refcount++;
460 		mapping.vma = prior->vma;
461 		goto success;
462 	}
463 
464 	mapping.vma = calloc(1, sizeof(*mapping.vma));
465 	memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
466 	addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
467 	if (addr == MAP_FAILED) {
468 		*map_data = NULL;
469 		free(mapping.vma);
470 		pthread_mutex_unlock(&bo->drv->driver_lock);
471 		return MAP_FAILED;
472 	}
473 
474 	mapping.vma->refcount = 1;
475 	mapping.vma->addr = addr;
476 	mapping.vma->handle = bo->handles[plane].u32;
477 	mapping.vma->map_flags = map_flags;
478 
479 success:
480 	*map_data = drv_array_append(bo->drv->mappings, &mapping);
481 exact_match:
482 	drv_bo_invalidate(bo, *map_data);
483 	addr = (uint8_t *)((*map_data)->vma->addr);
484 	addr += drv_bo_get_plane_offset(bo, plane);
485 	pthread_mutex_unlock(&bo->drv->driver_lock);
486 	return (void *)addr;
487 }
488 
drv_bo_unmap(struct bo * bo,struct mapping * mapping)489 int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
490 {
491 	uint32_t i;
492 	int ret = 0;
493 
494 	pthread_mutex_lock(&bo->drv->driver_lock);
495 
496 	if (--mapping->refcount)
497 		goto out;
498 
499 	if (!--mapping->vma->refcount) {
500 		ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
501 		free(mapping->vma);
502 	}
503 
504 	for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
505 		if (mapping == (struct mapping *)drv_array_at_idx(bo->drv->mappings, i)) {
506 			drv_array_remove(bo->drv->mappings, i);
507 			break;
508 		}
509 	}
510 
511 out:
512 	pthread_mutex_unlock(&bo->drv->driver_lock);
513 	return ret;
514 }
515 
drv_bo_invalidate(struct bo * bo,struct mapping * mapping)516 int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
517 {
518 	int ret = 0;
519 
520 	assert(mapping);
521 	assert(mapping->vma);
522 	assert(mapping->refcount > 0);
523 	assert(mapping->vma->refcount > 0);
524 
525 	if (bo->drv->backend->bo_invalidate)
526 		ret = bo->drv->backend->bo_invalidate(bo, mapping);
527 
528 	return ret;
529 }
530 
drv_bo_flush(struct bo * bo,struct mapping * mapping)531 int drv_bo_flush(struct bo *bo, struct mapping *mapping)
532 {
533 	int ret = 0;
534 
535 	assert(mapping);
536 	assert(mapping->vma);
537 	assert(mapping->refcount > 0);
538 	assert(mapping->vma->refcount > 0);
539 
540 	if (bo->drv->backend->bo_flush)
541 		ret = bo->drv->backend->bo_flush(bo, mapping);
542 
543 	return ret;
544 }
545 
drv_bo_flush_or_unmap(struct bo * bo,struct mapping * mapping)546 int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
547 {
548 	int ret = 0;
549 
550 	assert(mapping);
551 	assert(mapping->vma);
552 	assert(mapping->refcount > 0);
553 	assert(mapping->vma->refcount > 0);
554 	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
555 
556 	if (bo->drv->backend->bo_flush)
557 		ret = bo->drv->backend->bo_flush(bo, mapping);
558 	else
559 		ret = drv_bo_unmap(bo, mapping);
560 
561 	return ret;
562 }
563 
/* Returns the buffer width in pixels. */
uint32_t drv_bo_get_width(struct bo *bo)
{
	return bo->meta.width;
}
568 
/* Returns the buffer height in pixels. */
uint32_t drv_bo_get_height(struct bo *bo)
{
	return bo->meta.height;
}
573 
/* Returns the number of planes implied by the buffer's format. */
size_t drv_bo_get_num_planes(struct bo *bo)
{
	return bo->meta.num_planes;
}
578 
/* Returns the driver-level buffer handle backing |plane|.  The caller
 * must pass a valid plane index; it is not range-checked here. */
union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
{
	return bo->handles[plane];
}
583 
584 #ifndef DRM_RDWR
585 #define DRM_RDWR O_RDWR
586 #endif
587 
drv_bo_get_plane_fd(struct bo * bo,size_t plane)588 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
589 {
590 
591 	int ret, fd;
592 	assert(plane < bo->meta.num_planes);
593 
594 	if (bo->is_test_buffer)
595 		return -EINVAL;
596 
597 	ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
598 
599 	// Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
600 	if (ret)
601 		ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);
602 
603 	if (ret)
604 		drv_log("Failed to get plane fd: %s\n", strerror(errno));
605 
606 	return (ret) ? ret : fd;
607 }
608 
/* Returns the byte offset of |plane| within the buffer. */
uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.offsets[plane];
}
614 
/* Returns the size of |plane| in bytes. */
uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.sizes[plane];
}
620 
/* Returns the row stride of |plane| in bytes. */
uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.strides[plane];
}
626 
/* Returns the DRM format modifier describing the buffer's layout. */
uint64_t drv_bo_get_format_modifier(struct bo *bo)
{
	return bo->meta.format_modifier;
}
631 
/* Returns the buffer's DRM fourcc format code. */
uint32_t drv_bo_get_format(struct bo *bo)
{
	return bo->meta.format;
}
636 
/* Returns the total buffer size in bytes (sum over all planes). */
size_t drv_bo_get_total_size(struct bo *bo)
{
	return bo->meta.total_size;
}
641 
drv_resolve_format(struct driver * drv,uint32_t format,uint64_t use_flags)642 uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
643 {
644 	if (drv->backend->resolve_format)
645 		return drv->backend->resolve_format(drv, format, use_flags);
646 
647 	return format;
648 }
649 
drv_num_buffers_per_bo(struct bo * bo)650 uint32_t drv_num_buffers_per_bo(struct bo *bo)
651 {
652 	uint32_t count = 0;
653 	size_t plane, p;
654 
655 	if (bo->is_test_buffer)
656 		return 0;
657 
658 	for (plane = 0; plane < bo->meta.num_planes; plane++) {
659 		for (p = 0; p < plane; p++)
660 			if (bo->handles[p].u32 == bo->handles[plane].u32)
661 				break;
662 		if (p == plane)
663 			count++;
664 	}
665 
666 	return count;
667 }
668 
/*
 * Shared implementation behind the logging macros: prints
 * "[prefix:file(line)] <formatted message>" to logcat on Android or to
 * stderr elsewhere.
 * NOTE(review): the tag is silently truncated past sizeof(buf) - 1
 * characters; va_list is used but <stdarg.h> is not included directly
 * here — presumably it arrives via <stdio.h> or a project header;
 * confirm for portability.
 */
void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...)
{
	char buf[50];
	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);

	va_list args;
	va_start(args, format);
#ifdef __ANDROID__
	__android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
#else
	fprintf(stderr, "%s ", buf);
	vfprintf(stderr, format, args);
#endif
	va_end(args);
}
684 
drv_resource_info(struct bo * bo,uint32_t strides[DRV_MAX_PLANES],uint32_t offsets[DRV_MAX_PLANES],uint64_t * format_modifier)685 int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
686 		      uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
687 {
688 	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
689 		strides[plane] = bo->meta.strides[plane];
690 		offsets[plane] = bo->meta.offsets[plane];
691 	}
692 	*format_modifier = bo->meta.format_modifier;
693 
694 	if (bo->drv->backend->resource_info)
695 		return bo->drv->backend->resource_info(bo, strides, offsets, format_modifier);
696 
697 	return 0;
698 }
699