/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#ifdef __ANDROID__
#include <cutils/log.h>
#include <libgen.h>
#endif

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

#ifdef DRV_AMDGPU
extern const struct backend backend_amdgpu;
#endif
extern const struct backend backend_evdi;
#ifdef DRV_EXYNOS
extern const struct backend backend_exynos;
#endif
#ifdef DRV_I915
extern const struct backend backend_i915;
#endif
#ifdef DRV_MARVELL
extern const struct backend backend_marvell;
#endif
#ifdef DRV_MEDIATEK
extern const struct backend backend_mediatek;
#endif
#ifdef DRV_MESON
extern const struct backend backend_meson;
#endif
#ifdef DRV_MSM
extern const struct backend backend_msm;
#endif
extern const struct backend backend_nouveau;
#ifdef DRV_RADEON
extern const struct backend backend_radeon;
#endif
#ifdef DRV_ROCKCHIP
extern const struct backend backend_rockchip;
#endif
#ifdef DRV_SYNAPTICS
extern const struct backend backend_synaptics;
#endif
#ifdef DRV_TEGRA
extern const struct backend backend_tegra;
#endif
extern const struct backend backend_udl;
#ifdef DRV_VC4
extern const struct backend backend_vc4;
#endif
extern const struct backend backend_vgem;
extern const struct backend backend_virtio_gpu;

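/*
 * Match the kernel driver reported by drmGetVersion() against the list of
 * compiled-in backends. Returns NULL if the fd belongs to an unsupported driver.
 */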
static const struct backend *drv_get_backend(int fd)
{
	drmVersionPtr drm_version;
	unsigned int i;

	drm_version = drmGetVersion(fd);

	if (!drm_version)
		return NULL;

	const struct backend *backend_list[] = {
#ifdef DRV_AMDGPU
		&backend_amdgpu,
#endif
		&backend_evdi,
#ifdef DRV_EXYNOS
		&backend_exynos,
#endif
#ifdef DRV_I915
		&backend_i915,
#endif
#ifdef DRV_MARVELL
		&backend_marvell,
#endif
#ifdef DRV_MEDIATEK
		&backend_mediatek,
#endif
#ifdef DRV_MESON
		&backend_meson,
#endif
#ifdef DRV_MSM
		&backend_msm,
#endif
		&backend_nouveau,
#ifdef DRV_RADEON
		&backend_radeon,
#endif
#ifdef DRV_ROCKCHIP
		&backend_rockchip,
#endif
#ifdef DRV_SYNAPTICS
		&backend_synaptics,
#endif
#ifdef DRV_TEGRA
		&backend_tegra,
#endif
		&backend_udl,
#ifdef DRV_VC4
		&backend_vc4,
#endif
		&backend_vgem, &backend_virtio_gpu,
	};

	for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
		const struct backend *b = backend_list[i];
		// Exactly one of the main create functions must be defined.
		assert((b->bo_create != NULL) ^ (b->bo_create_from_metadata != NULL));
		// Either both or neither must be implemented.
		assert((b->bo_compute_metadata != NULL) == (b->bo_create_from_metadata != NULL));
		// Both can't be defined, but it's okay for neither to be (i.e. only bo_create).
		assert((b->bo_create_with_modifiers == NULL) ||
		       (b->bo_create_from_metadata == NULL));

		if (!strcmp(drm_version->name, b->name)) {
			drmFreeVersion(drm_version);
			return b;
		}
	}

	drmFreeVersion(drm_version);
	return NULL;
}

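/*
 * Allocate a driver instance for the given DRM fd: pick a backend, set up the
 * buffer table, mapping and combination arrays, and run the backend's init hook.
 * Returns NULL on failure; the caller keeps ownership of the fd.
 */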
struct driver *drv_create(int fd)
{
	struct driver *drv;
	int ret;

	drv = (struct driver *)calloc(1, sizeof(*drv));

	if (!drv)
		return NULL;

	drv->fd = fd;
	drv->backend = drv_get_backend(fd);

	if (!drv->backend)
		goto free_driver;

	if (pthread_mutex_init(&drv->driver_lock, NULL))
		goto free_driver;

	drv->buffer_table = drmHashCreate();
	if (!drv->buffer_table)
		goto free_lock;

	drv->mappings = drv_array_init(sizeof(struct mapping));
	if (!drv->mappings)
		goto free_buffer_table;

	drv->combos = drv_array_init(sizeof(struct combination));
	if (!drv->combos)
		goto free_mappings;

	if (drv->backend->init) {
		ret = drv->backend->init(drv);
		if (ret) {
			drv_array_destroy(drv->combos);
			goto free_mappings;
		}
	}

	return drv;

free_mappings:
	drv_array_destroy(drv->mappings);
free_buffer_table:
	drmHashDestroy(drv->buffer_table);
free_lock:
	pthread_mutex_destroy(&drv->driver_lock);
free_driver:
	free(drv);
	return NULL;
}

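/*
 * Tear down a driver instance created by drv_create(). The backend's close hook
 * runs first; the DRM fd itself is not closed here.
 */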
void drv_destroy(struct driver *drv)
{
	pthread_mutex_lock(&drv->driver_lock);

	if (drv->backend->close)
		drv->backend->close(drv);

	drmHashDestroy(drv->buffer_table);
	drv_array_destroy(drv->mappings);
	drv_array_destroy(drv->combos);

	pthread_mutex_unlock(&drv->driver_lock);
	pthread_mutex_destroy(&drv->driver_lock);

	free(drv);
}

int drv_get_fd(struct driver *drv)
{
	return drv->fd;
}

const char *drv_get_name(struct driver *drv)
{
	return drv->backend->name;
}

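/*
 * Return the highest-priority combination that supports the requested format and
 * all of the requested use flags, or NULL if no such combination exists.
 */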
struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	struct combination *curr, *best;

	if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
		return NULL;

	best = NULL;
	uint32_t i;
	for (i = 0; i < drv_array_size(drv->combos); i++) {
		curr = drv_array_at_idx(drv->combos, i);
		if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
			if (!best || best->metadata.priority < curr->metadata.priority)
				best = curr;
	}

	return best;
}

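/*
 * Allocate and fill in buffer metadata only; no backend memory is allocated yet.
 */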
struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
		      uint64_t use_flags, bool is_test_buffer)
{

	struct bo *bo;
	bo = (struct bo *)calloc(1, sizeof(*bo));

	if (!bo)
		return NULL;

	bo->drv = drv;
	bo->meta.width = width;
	bo->meta.height = height;
	bo->meta.format = format;
	bo->meta.use_flags = use_flags;
	bo->meta.num_planes = drv_num_planes_from_format(format);
	bo->is_test_buffer = is_test_buffer;

	if (!bo->meta.num_planes) {
		free(bo);
		return NULL;
	}

	return bo;
}

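/*
 * Allocate a buffer through the backend and take a reference on each plane.
 * With BO_USE_TEST_ALLOC, only metadata is computed and no memory is allocated.
 */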
struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
			 uint64_t use_flags)
{
	int ret;
	size_t plane;
	struct bo *bo;
	bool is_test_alloc;

	is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
	use_flags &= ~BO_USE_TEST_ALLOC;

	bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
							0);
		if (!is_test_alloc && ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else if (!is_test_alloc) {
		ret = drv->backend->bo_create(bo, width, height, format, use_flags);
	}

	if (ret) {
		free(bo);
		return NULL;
	}

	pthread_mutex_lock(&drv->driver_lock);

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		if (plane > 0)
			assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);

		drv_increment_reference_count(drv, bo, plane);
	}

	pthread_mutex_unlock(&drv->driver_lock);

	return bo;
}

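/*
 * Like drv_bo_create(), but restricts the allocation to the supplied list of
 * acceptable format modifiers. Fails with ENOENT if the backend has no
 * modifier-aware allocation path.
 */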
struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
					uint32_t format, const uint64_t *modifiers, uint32_t count)
{
	int ret;
	size_t plane;
	struct bo *bo;

	if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
		errno = ENOENT;
		return NULL;
	}

	bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
							modifiers, count);
		if (ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else {
		ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
							     count);
	}

	if (ret) {
		free(bo);
		return NULL;
	}

	pthread_mutex_lock(&drv->driver_lock);

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		if (plane > 0)
			assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]);

		drv_increment_reference_count(drv, bo, plane);
	}

	pthread_mutex_unlock(&drv->driver_lock);

	return bo;
}

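/*
 * Drop the per-plane references taken at create/import time and destroy the
 * backend buffer once no plane is referenced anymore.
 */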
void drv_bo_destroy(struct bo *bo)
{
	int ret;
	size_t plane;
	uintptr_t total = 0;
	struct driver *drv = bo->drv;

	if (!bo->is_test_buffer) {
		pthread_mutex_lock(&drv->driver_lock);

		for (plane = 0; plane < bo->meta.num_planes; plane++)
			drv_decrement_reference_count(drv, bo, plane);

		for (plane = 0; plane < bo->meta.num_planes; plane++)
			total += drv_get_reference_count(drv, bo, plane);

		pthread_mutex_unlock(&drv->driver_lock);

		if (total == 0) {
			ret = drv_mapping_destroy(bo);
			assert(ret == 0);
			bo->drv->backend->bo_destroy(bo);
		}
	}

	free(bo);
}

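/*
 * Wrap a set of imported prime fds in a new buffer object. Plane sizes are
 * derived from the plane offsets and the fd length reported by lseek(SEEK_END).
 */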
struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct bo *bo;
	off_t seek_end;

	bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);

	if (!bo)
		return NULL;

	ret = drv->backend->bo_import(bo, data);
	if (ret) {
		free(bo);
		return NULL;
	}

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		pthread_mutex_lock(&bo->drv->driver_lock);
		drv_increment_reference_count(bo->drv, bo, plane);
		pthread_mutex_unlock(&bo->drv->driver_lock);
	}

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		bo->meta.strides[plane] = data->strides[plane];
		bo->meta.offsets[plane] = data->offsets[plane];
		bo->meta.format_modifiers[plane] = data->format_modifiers[plane];

		seek_end = lseek(data->fds[plane], 0, SEEK_END);
		if (seek_end == (off_t)(-1)) {
			drv_log("lseek() failed with %s\n", strerror(errno));
			goto destroy_bo;
		}

		lseek(data->fds[plane], 0, SEEK_SET);
		if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
			bo->meta.sizes[plane] = seek_end - data->offsets[plane];
		else
			bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];

		if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
			drv_log("buffer size is too large.\n");
			goto destroy_bo;
		}

		bo->meta.total_size += bo->meta.sizes[plane];
	}

	return bo;

destroy_bo:
	drv_bo_destroy(bo);
	return NULL;
}

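/*
 * Map a plane for CPU access. An existing mapping with the same rectangle and
 * flags is reused, and an existing VMA for the same handle and flags is shared;
 * otherwise the backend creates a new mapping. The returned pointer already
 * includes the plane offset.
 */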
void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
		 struct mapping **map_data, size_t plane)
{
	uint32_t i;
	uint8_t *addr;
	struct mapping mapping;

	assert(rect->width >= 0);
	assert(rect->height >= 0);
	assert(rect->x + rect->width <= drv_bo_get_width(bo));
	assert(rect->y + rect->height <= drv_bo_get_height(bo));
	assert(BO_MAP_READ_WRITE & map_flags);
	/* No CPU access for protected buffers. */
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->is_test_buffer) {
		return MAP_FAILED;
	}

	memset(&mapping, 0, sizeof(mapping));
	mapping.rect = *rect;
	mapping.refcount = 1;

	pthread_mutex_lock(&bo->drv->driver_lock);

	for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
		if (prior->vma->handle != bo->handles[plane].u32 ||
		    prior->vma->map_flags != map_flags)
			continue;

		if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
		    rect->width != prior->rect.width || rect->height != prior->rect.height)
			continue;

		prior->refcount++;
		*map_data = prior;
		goto exact_match;
	}

	for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
		if (prior->vma->handle != bo->handles[plane].u32 ||
		    prior->vma->map_flags != map_flags)
			continue;

		prior->vma->refcount++;
		mapping.vma = prior->vma;
		goto success;
	}

	mapping.vma = calloc(1, sizeof(*mapping.vma));
	/* calloc() can fail; bail out instead of dereferencing NULL below. */
	if (!mapping.vma) {
		*map_data = NULL;
		pthread_mutex_unlock(&bo->drv->driver_lock);
		return MAP_FAILED;
	}

	memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
	addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
	if (addr == MAP_FAILED) {
		*map_data = NULL;
		free(mapping.vma);
		pthread_mutex_unlock(&bo->drv->driver_lock);
		return MAP_FAILED;
	}

	mapping.vma->refcount = 1;
	mapping.vma->addr = addr;
	mapping.vma->handle = bo->handles[plane].u32;
	mapping.vma->map_flags = map_flags;

success:
	*map_data = drv_array_append(bo->drv->mappings, &mapping);
exact_match:
	drv_bo_invalidate(bo, *map_data);
	addr = (uint8_t *)((*map_data)->vma->addr);
	addr += drv_bo_get_plane_offset(bo, plane);
	pthread_mutex_unlock(&bo->drv->driver_lock);
	return (void *)addr;
}

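/*
 * Drop one reference on a mapping; the underlying VMA is unmapped and the
 * mapping removed from the driver's list once the last user is gone.
 */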
int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
	uint32_t i;
	int ret = 0;

	pthread_mutex_lock(&bo->drv->driver_lock);

	if (--mapping->refcount)
		goto out;

	if (!--mapping->vma->refcount) {
		ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
		free(mapping->vma);
	}

	for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
		if (mapping == (struct mapping *)drv_array_at_idx(bo->drv->mappings, i)) {
			drv_array_remove(bo->drv->mappings, i);
			break;
		}
	}

out:
	pthread_mutex_unlock(&bo->drv->driver_lock);
	return ret;
}

int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);

	if (bo->drv->backend->bo_invalidate)
		ret = bo->drv->backend->bo_invalidate(bo, mapping);

	return ret;
}

int drv_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);

	if (bo->drv->backend->bo_flush)
		ret = bo->drv->backend->bo_flush(bo, mapping);

	return ret;
}

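/*
 * Flush CPU writes when the backend supports an explicit flush; otherwise fall
 * back to unmapping the buffer.
 */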
int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->drv->backend->bo_flush)
		ret = bo->drv->backend->bo_flush(bo, mapping);
	else
		ret = drv_bo_unmap(bo, mapping);

	return ret;
}

uint32_t drv_bo_get_width(struct bo *bo)
{
	return bo->meta.width;
}

uint32_t drv_bo_get_height(struct bo *bo)
{
	return bo->meta.height;
}

size_t drv_bo_get_num_planes(struct bo *bo)
{
	return bo->meta.num_planes;
}

union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
{
	return bo->handles[plane];
}

#ifndef DRM_RDWR
#define DRM_RDWR O_RDWR
#endif

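/*
 * Export a plane as a prime fd. DRM_RDWR is requested first and dropped for
 * older kernels that reject it.
 */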
int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
{

	int ret, fd;
	assert(plane < bo->meta.num_planes);

	if (bo->is_test_buffer) {
		return -EINVAL;
	}

	ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);

	// Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyway.
	if (ret)
		ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);

	return (ret) ? ret : fd;
}

uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.offsets[plane];
}

uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.sizes[plane];
}

uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.strides[plane];
}

uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.format_modifiers[plane];
}

uint32_t drv_bo_get_format(struct bo *bo)
{
	return bo->meta.format;
}

size_t drv_bo_get_total_size(struct bo *bo)
{
	return bo->meta.total_size;
}

uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	if (drv->backend->resolve_format)
		return drv->backend->resolve_format(drv, format, use_flags);

	return format;
}

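/*
 * Count the distinct GEM handles backing this buffer; planes may share a handle.
 */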
uint32_t drv_num_buffers_per_bo(struct bo *bo)
{
	uint32_t count = 0;
	size_t plane, p;

	if (bo->is_test_buffer) {
		return 0;
	}

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		for (p = 0; p < plane; p++)
			if (bo->handles[p].u32 == bo->handles[plane].u32)
				break;
		if (p == plane)
			count++;
	}

	return count;
}

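/*
 * Format a "[prefix:file(line)]" tag and forward the message to logcat on
 * Android or to stderr elsewhere.
 */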
void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...)
{
	char buf[50];
	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);

	va_list args;
	va_start(args, format);
#ifdef __ANDROID__
	__android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
#else
	fprintf(stderr, "%s ", buf);
	vfprintf(stderr, format, args);
#endif
	va_end(args);
}

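/*
 * Report per-plane strides and offsets, giving the backend a chance to override
 * the defaults stored in the buffer metadata.
 */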
int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
		      uint32_t offsets[DRV_MAX_PLANES])
{
	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		strides[plane] = bo->meta.strides[plane];
		offsets[plane] = bo->meta.offsets[plane];
	}

	if (bo->drv->backend->resource_info)
		return bo->drv->backend->resource_info(bo, strides, offsets);

	return 0;
}