1 /*
2 * Copyright 2016 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
5 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>
18
19 #ifdef __ANDROID__
20 #include <cutils/log.h>
21 #include <libgen.h>
22 #define MINIGBM_DEBUG "vendor.minigbm.debug"
23 #else
24 #define MINIGBM_DEBUG "MINIGBM_DEBUG"
25 #endif
26
27 #include "drv_helpers.h"
28 #include "drv_priv.h"
29 #include "util.h"
30
31 #ifdef DRV_AMDGPU
32 extern const struct backend backend_amdgpu;
33 #endif
34 #ifdef DRV_I915
35 extern const struct backend backend_i915;
36 #endif
37 #ifdef DRV_XE
38 extern const struct backend backend_xe;
39 #endif
40 #ifdef DRV_MSM
41 extern const struct backend backend_msm;
42 #endif
43 #ifdef DRV_VC4
44 extern const struct backend backend_vc4;
45 #endif
46
47 // Dumb / generic drivers
48 extern const struct backend backend_evdi;
49 extern const struct backend backend_marvell;
50 extern const struct backend backend_mediatek;
51 extern const struct backend backend_meson;
52 extern const struct backend backend_nouveau;
53 extern const struct backend backend_komeda;
54 extern const struct backend backend_radeon;
55 extern const struct backend backend_rockchip;
56 extern const struct backend backend_sun4i_drm;
57 extern const struct backend backend_synaptics;
58 extern const struct backend backend_virtgpu;
59 extern const struct backend backend_udl;
60 extern const struct backend backend_vkms;
61
62 extern const struct backend backend_mock;
63
64 static const struct backend *drv_backend_list[] = {
65 #ifdef DRV_AMDGPU
66 &backend_amdgpu,
67 #endif
68 #ifdef DRV_I915
69 &backend_i915,
70 #endif
71 #ifdef DRV_MSM
72 &backend_msm,
73 #endif
74 #ifdef DRV_VC4
75 &backend_vc4,
76 #endif
77 #ifdef DRV_XE
78 &backend_xe,
79 #endif
80 &backend_evdi, &backend_komeda, &backend_marvell, &backend_mediatek,
81 &backend_meson, &backend_nouveau, &backend_radeon, &backend_rockchip,
82 &backend_sun4i_drm, &backend_synaptics, &backend_udl, &backend_virtgpu,
83 &backend_vkms, &backend_mock
84 };
85
/*
 * Broadcast a preload (or unload, when |load| is false) notification to every
 * compiled-in backend that implements the optional preload hook.
 */
void drv_preload(bool load)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(drv_backend_list); i++) {
		const struct backend *backend = drv_backend_list[i];

		if (backend->preload)
			backend->preload(load);
	}
}
95
drv_get_backend(int fd)96 static const struct backend *drv_get_backend(int fd)
97 {
98 drmVersionPtr drm_version;
99 unsigned int i;
100
101 drm_version = drmGetVersion(fd);
102
103 if (!drm_version)
104 return NULL;
105
106 for (i = 0; i < ARRAY_SIZE(drv_backend_list); i++) {
107 const struct backend *b = drv_backend_list[i];
108 if (!strcmp(drm_version->name, b->name)) {
109 drmFreeVersion(drm_version);
110 return b;
111 }
112 }
113
114 drmFreeVersion(drm_version);
115 return NULL;
116 }
117
/*
 * Create a driver instance for the DRM device behind |fd|: pick a backend,
 * set up the per-driver locks, buffer-handle refcount table, mappings array
 * and combination list, then run the backend's init hook.  Returns NULL on
 * any failure.  The caller retains ownership of |fd|.
 */
struct driver *drv_create(int fd)
{
	struct driver *drv;
	int ret;

	drv = (struct driver *)calloc(1, sizeof(*drv));

	if (!drv)
		return NULL;

	/* Debug options come from the environment / Android property. */
	const char *minigbm_debug;
	minigbm_debug = drv_get_os_option(MINIGBM_DEBUG);
	/* Compression stays on unless "nocompression" appears in the option. */
	drv->compression =
	    (minigbm_debug == NULL) || (strstr(minigbm_debug, "nocompression") == NULL);
	drv->log_bos = (minigbm_debug && strstr(minigbm_debug, "log_bos") != NULL);

	drv->fd = fd;
	drv->backend = drv_get_backend(fd);

	if (!drv->backend)
		goto free_driver;

	if (pthread_mutex_init(&drv->buffer_table_lock, NULL))
		goto free_driver;

	drv->buffer_table = drmHashCreate();
	if (!drv->buffer_table)
		goto free_buffer_table_lock;

	if (pthread_mutex_init(&drv->mappings_lock, NULL))
		goto free_buffer_table;

	drv->mappings = drv_array_init(sizeof(struct mapping));
	if (!drv->mappings)
		goto free_mappings_lock;

	drv->combos = drv_array_init(sizeof(struct combination));
	if (!drv->combos)
		goto free_mappings;

	if (drv->backend->init) {
		ret = drv->backend->init(drv);
		if (ret) {
			/* combos has no dedicated cleanup label; free it here. */
			drv_array_destroy(drv->combos);
			goto free_mappings;
		}
	}

	return drv;

	/* Error unwinding, in reverse order of acquisition. */
free_mappings:
	drv_array_destroy(drv->mappings);
free_mappings_lock:
	pthread_mutex_destroy(&drv->mappings_lock);
free_buffer_table:
	drmHashDestroy(drv->buffer_table);
free_buffer_table_lock:
	pthread_mutex_destroy(&drv->buffer_table_lock);
free_driver:
	free(drv);
	return NULL;
}
180
/*
 * Tear down a driver created by drv_create(): run the backend close hook,
 * then release resources in reverse order of creation.  Does not close the
 * fd that was passed to drv_create().
 */
void drv_destroy(struct driver *drv)
{
	if (drv->backend->close)
		drv->backend->close(drv);

	drv_array_destroy(drv->combos);

	drv_array_destroy(drv->mappings);
	pthread_mutex_destroy(&drv->mappings_lock);

	drmHashDestroy(drv->buffer_table);
	pthread_mutex_destroy(&drv->buffer_table_lock);

	free(drv);
}
196
/* Return the DRM device fd this driver was created with. */
int drv_get_fd(struct driver *drv)
{
	return drv->fd;
}
201
/* Return the matched backend's name (same string as the kernel driver name). */
const char *drv_get_name(struct driver *drv)
{
	return drv->backend->name;
}
206
drv_get_combination(struct driver * drv,uint32_t format,uint64_t use_flags)207 struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
208 {
209 struct combination *curr, *best;
210
211 if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
212 return 0;
213
214 best = NULL;
215 uint32_t i;
216 for (i = 0; i < drv_array_size(drv->combos); i++) {
217 curr = drv_array_at_idx(drv->combos, i);
218 if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
219 if (!best || best->metadata.priority < curr->metadata.priority)
220 best = curr;
221 }
222
223 return best;
224 }
225
drv_bo_new(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,uint64_t use_flags,bool is_test_buffer)226 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
227 uint64_t use_flags, bool is_test_buffer)
228 {
229
230 struct bo *bo;
231 bo = (struct bo *)calloc(1, sizeof(*bo));
232
233 if (!bo)
234 return NULL;
235
236 bo->drv = drv;
237 bo->meta.width = width;
238 bo->meta.height = height;
239 bo->meta.format = format;
240 bo->meta.use_flags = use_flags;
241 bo->meta.num_planes = drv_num_planes_from_format(format);
242 bo->is_test_buffer = is_test_buffer;
243
244 if (!bo->meta.num_planes) {
245 free(bo);
246 errno = EINVAL;
247 return NULL;
248 }
249
250 return bo;
251 }
252
/*
 * Free every mapping that still references |bo|, unmapping each vma whose
 * refcount drops to zero.  Called right before the buffer is destroyed.
 */
static void drv_bo_mapping_destroy(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uint32_t idx = 0;

	/*
	 * This function is called right before the buffer is destroyed. It will free any mappings
	 * associated with the buffer.
	 */
	pthread_mutex_lock(&drv->mappings_lock);
	/*
	 * NOTE(review): |idx| is not reset per plane, so only the first
	 * iteration of the plane loop does any work (later iterations start
	 * with idx at the array end) — presumably intentional since all
	 * planes share bo->handle; confirm.
	 */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		while (idx < drv_array_size(drv->mappings)) {
			struct mapping *mapping =
			    (struct mapping *)drv_array_at_idx(drv->mappings, idx);
			if (mapping->vma->handle != bo->handle.u32) {
				idx++;
				continue;
			}

			if (!--mapping->vma->refcount) {
				int ret = drv->backend->bo_unmap(bo, mapping->vma);
				if (ret) {
					pthread_mutex_unlock(&drv->mappings_lock);
					/*
					 * NOTE(review): assert(ret) can never
					 * fire here (ret is nonzero in this
					 * branch) — looks like it was meant to
					 * flag the failure; confirm intent.
					 */
					assert(ret);
					drv_loge("munmap failed\n");
					return;
				}

				free(mapping->vma);
			}

			/* This shrinks and shifts the array, so don't increment idx. */
			drv_array_remove(drv->mappings, idx);
		}
	}
	pthread_mutex_unlock(&drv->mappings_lock);
}
290
/*
 * Acquire a reference on plane buffers of the bo: bump the per-handle count
 * in the driver's buffer table once per plane.  Since all planes share
 * bo->handle, a bo contributes num_planes references to that handle.
 */
static void drv_bo_acquire(struct bo *bo)
{
	struct driver *drv = bo->drv;

	pthread_mutex_lock(&drv->buffer_table_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		uintptr_t num = 0;

		/* drmHash has no update operation: delete then re-insert with count + 1. */
		if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num))
			drmHashDelete(drv->buffer_table, bo->handle.u32);

		drmHashInsert(drv->buffer_table, bo->handle.u32, (void *)(num + 1));
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);
}
309
/*
 * Release a reference on plane buffers of the bo. Return true when the bo has lost all its
 * references. Otherwise, return false.
 */
static bool drv_bo_release(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uintptr_t num;

	/* Give the backend a chance to release per-bo resources first. */
	if (drv->backend->bo_release)
		drv->backend->bo_release(bo);

	pthread_mutex_lock(&drv->buffer_table_lock);
	/* Drop one reference per plane; the entry disappears when it hits zero. */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num)) {
			drmHashDelete(drv->buffer_table, bo->handle.u32);

			if (num > 1) {
				drmHashInsert(drv->buffer_table, bo->handle.u32, (void *)(num - 1));
			}
		}
	}

	/* The same buffer can back multiple planes with different offsets. */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num)) {
			/* num is positive if found in the hashmap. */
			pthread_mutex_unlock(&drv->buffer_table_lock);
			return false;
		}
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);

	return true;
}
345
/*
 * Allocate a buffer object via the legacy (no-modifier) path.  When
 * BO_USE_TEST_ALLOC is set, only metadata is computed and no backend
 * allocation occurs.  On failure returns NULL with errno set from the
 * backend's negative error code.
 */
struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
			 uint64_t use_flags)
{
	int ret;
	struct bo *bo;
	bool is_test_alloc;

	is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
	use_flags &= ~BO_USE_TEST_ALLOC;

	bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	/* Prefer the two-step metadata path when the backend provides it. */
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
							0);
		if (!is_test_alloc && ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else if (!is_test_alloc) {
		ret = drv->backend->bo_create(bo, width, height, format, use_flags);
	}

	if (ret) {
		errno = -ret;
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	if (drv->log_bos)
		drv_bo_log_info(bo, "legacy created");

	return bo;
}
384
/*
 * Allocate a buffer object, letting the backend pick a format modifier from
 * |modifiers| (|count| entries).  Use flags are intentionally BO_USE_NONE on
 * this path.  Returns NULL on failure (errno = ENOENT when the backend
 * supports neither modifier path).
 */
struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
					uint32_t format, const uint64_t *modifiers, uint32_t count)
{
	int ret;
	struct bo *bo;

	if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
		errno = ENOENT;
		return NULL;
	}

	bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
							modifiers, count);
		if (ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else {
		ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
							     count);
	}

	if (ret) {
		/* NOTE(review): unlike drv_bo_create(), errno is not set from
		 * |ret| here — confirm whether callers rely on errno. */
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	if (drv->log_bos)
		drv_bo_log_info(bo, "created");

	return bo;
}
424
drv_bo_destroy(struct bo * bo)425 void drv_bo_destroy(struct bo *bo)
426 {
427 if (!bo->is_test_buffer && drv_bo_release(bo)) {
428 drv_bo_mapping_destroy(bo);
429 bo->drv->backend->bo_destroy(bo);
430 }
431
432 free(bo);
433 }
434
/*
 * Wrap imported dma-buf fds (|data|) in a new bo.  Plane sizes are derived
 * from the fd length (via lseek) and the next plane's offset; total_size is
 * the sum of plane sizes.  Returns NULL on failure.  The fds in |data| remain
 * owned by the caller.
 */
struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct bo *bo;
	off_t seek_end;

	bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);

	if (!bo)
		return NULL;

	ret = drv->backend->bo_import(bo, data);
	if (ret) {
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	bo->meta.format_modifier = data->format_modifier;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		bo->meta.strides[plane] = data->strides[plane];
		bo->meta.offsets[plane] = data->offsets[plane];

		/* Measure the backing buffer by seeking to its end. */
		seek_end = lseek(data->fds[plane], 0, SEEK_END);
		if (seek_end == (off_t)(-1)) {
			drv_loge("lseek() failed with %s\n", strerror(errno));
			goto destroy_bo;
		}

		lseek(data->fds[plane], 0, SEEK_SET);
		/* Last plane (or unknown next offset): size runs to the end of
		 * the buffer; otherwise to the next plane's offset. */
		if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
			bo->meta.sizes[plane] = seek_end - data->offsets[plane];
		else
			bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];

		/* Reject planes that claim to extend past the backing buffer. */
		if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
			drv_loge("buffer size is too large.\n");
			goto destroy_bo;
		}

		bo->meta.total_size += bo->meta.sizes[plane];
	}

	if (drv->log_bos)
		drv_bo_log_info(bo, "imported");

	return bo;

destroy_bo:
	drv_bo_destroy(bo);
	return NULL;
}
489
/*
 * Map |bo| for CPU access and return a pointer to the start of |plane|.
 * Reuses an existing mapping when one with the same handle, flags and
 * rectangle exists (refcounted); otherwise reuses a compatible vma or asks
 * the backend for a new one.  *|map_data| receives the mapping record to pass
 * to drv_bo_unmap()/drv_bo_flush().  Returns MAP_FAILED on error or for test
 * buffers.
 */
void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
		 struct mapping **map_data, size_t plane)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	uint8_t *addr;
	struct mapping mapping = { 0 };

	assert(rect->width >= 0);
	assert(rect->height >= 0);
	assert(rect->x + rect->width <= drv_bo_get_width(bo));
	assert(rect->y + rect->height <= drv_bo_get_height(bo));
	assert(BO_MAP_READ_WRITE & map_flags);
	/* No CPU access for protected buffers. */
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->is_test_buffer)
		return MAP_FAILED;

	mapping.rect = *rect;
	mapping.refcount = 1;

	pthread_mutex_lock(&drv->mappings_lock);

	/* Pass 1: identical mapping (same handle, flags, rectangle) — share it. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handle.u32 || prior->vma->map_flags != map_flags)
			continue;

		if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
		    rect->width != prior->rect.width || rect->height != prior->rect.height)
			continue;

		prior->refcount++;
		*map_data = prior;
		goto exact_match;
	}

	/* Pass 2: same handle and flags but different rectangle — share the vma only. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handle.u32 || prior->vma->map_flags != map_flags)
			continue;

		prior->vma->refcount++;
		mapping.vma = prior->vma;
		goto success;
	}

	/* No reusable vma: create one and ask the backend to map the buffer. */
	mapping.vma = calloc(1, sizeof(*mapping.vma));
	if (!mapping.vma) {
		*map_data = NULL;
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
	addr = drv->backend->bo_map(bo, mapping.vma, map_flags);
	if (addr == MAP_FAILED) {
		*map_data = NULL;
		free(mapping.vma);
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	mapping.vma->refcount = 1;
	mapping.vma->addr = addr;
	mapping.vma->handle = bo->handle.u32;
	mapping.vma->map_flags = map_flags;

success:
	*map_data = drv_array_append(drv->mappings, &mapping);
exact_match:
	/* Sync CPU caches before handing out the pointer. */
	drv_bo_invalidate(bo, *map_data);
	addr = (uint8_t *)((*map_data)->vma->addr);
	addr += drv_bo_get_plane_offset(bo, plane);
	pthread_mutex_unlock(&drv->mappings_lock);
	return (void *)addr;
}
568
/*
 * Drop one reference on a mapping returned by drv_bo_map().  When the mapping
 * hits zero it is removed from the driver's list, and when its vma hits zero
 * the backend unmaps it.  Returns the backend's unmap result (0 otherwise).
 */
int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	int ret = 0;

	pthread_mutex_lock(&drv->mappings_lock);

	/* Other users of this exact mapping remain; nothing to tear down. */
	if (--mapping->refcount)
		goto out;

	if (!--mapping->vma->refcount) {
		ret = drv->backend->bo_unmap(bo, mapping->vma);
		free(mapping->vma);
	}

	/* Remove the mapping record from the driver's list. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		if (mapping == (struct mapping *)drv_array_at_idx(drv->mappings, i)) {
			drv_array_remove(drv->mappings, i);
			break;
		}
	}

out:
	pthread_mutex_unlock(&drv->mappings_lock);
	return ret;
}
596
/* Whether the buffer's CPU mapping is cached (per backend-set metadata). */
bool drv_bo_cached(struct bo *bo)
{
	return bo->meta.cached;
}
601
drv_bo_invalidate(struct bo * bo,struct mapping * mapping)602 int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
603 {
604 int ret = 0;
605
606 assert(mapping);
607 assert(mapping->vma);
608 assert(mapping->refcount > 0);
609 assert(mapping->vma->refcount > 0);
610
611 if (bo->drv->backend->bo_invalidate)
612 ret = bo->drv->backend->bo_invalidate(bo, mapping);
613
614 return ret;
615 }
616
drv_bo_flush(struct bo * bo,struct mapping * mapping)617 int drv_bo_flush(struct bo *bo, struct mapping *mapping)
618 {
619 int ret = 0;
620
621 assert(mapping);
622 assert(mapping->vma);
623 assert(mapping->refcount > 0);
624 assert(mapping->vma->refcount > 0);
625
626 if (bo->drv->backend->bo_flush)
627 ret = bo->drv->backend->bo_flush(bo, mapping);
628
629 return ret;
630 }
631
drv_bo_flush_or_unmap(struct bo * bo,struct mapping * mapping)632 int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
633 {
634 int ret = 0;
635
636 assert(mapping);
637 assert(mapping->vma);
638 assert(mapping->refcount > 0);
639 assert(mapping->vma->refcount > 0);
640 assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
641
642 if (bo->drv->backend->bo_flush)
643 ret = bo->drv->backend->bo_flush(bo, mapping);
644 else
645 ret = drv_bo_unmap(bo, mapping);
646
647 return ret;
648 }
649
/* Buffer width in pixels. */
uint32_t drv_bo_get_width(struct bo *bo)
{
	return bo->meta.width;
}
654
/* Buffer height in pixels. */
uint32_t drv_bo_get_height(struct bo *bo)
{
	return bo->meta.height;
}
659
/* Number of format planes in the buffer. */
size_t drv_bo_get_num_planes(struct bo *bo)
{
	return bo->meta.num_planes;
}
664
/*
 * GEM handle for |plane|.  |plane| is unused: all planes share the bo's
 * single handle.
 */
union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
{
	return bo->handle;
}
669
670 #ifndef DRM_RDWR
671 #define DRM_RDWR O_RDWR
672 #endif
673
drv_bo_get_plane_fd(struct bo * bo,size_t plane)674 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
675 {
676
677 int ret, fd;
678 assert(plane < bo->meta.num_planes);
679
680 if (bo->is_test_buffer)
681 return -EINVAL;
682
683 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handle.u32, DRM_CLOEXEC | DRM_RDWR, &fd);
684
685 // Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
686 if (ret)
687 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handle.u32, DRM_CLOEXEC, &fd);
688
689 if (ret)
690 drv_loge("Failed to get plane fd: %s\n", strerror(errno));
691
692 return (ret) ? ret : fd;
693 }
694
/* Byte offset of |plane| within the buffer. */
uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.offsets[plane];
}
700
/* Size in bytes of |plane|. */
uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.sizes[plane];
}
706
/* Row stride in bytes of |plane|. */
uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.strides[plane];
}
712
/* DRM format modifier of the buffer. */
uint64_t drv_bo_get_format_modifier(struct bo *bo)
{
	return bo->meta.format_modifier;
}
717
/* Fourcc format of the buffer (may be a minigbm-internal code). */
uint32_t drv_bo_get_format(struct bo *bo)
{
	return bo->meta.format;
}
722
/* Backend-specific tiling value recorded in the buffer metadata. */
uint32_t drv_bo_get_tiling(struct bo *bo)
{
	return bo->meta.tiling;
}
727
/* BO_USE_* flags the buffer was created with. */
uint64_t drv_bo_get_use_flags(struct bo *bo)
{
	return bo->meta.use_flags;
}
732
/* Total size in bytes across all planes. */
size_t drv_bo_get_total_size(struct bo *bo)
{
	return bo->meta.total_size;
}
737
/*
 * Log a one-line summary of |bo| (dimensions, fourcc, tiling, modifier, use
 * flags, size) prefixed by |prefix|, plus one line per plane.
 */
void drv_bo_log_info(const struct bo *bo, const char *prefix)
{
	const struct bo_metadata *meta = &bo->meta;

	/* The '%c%c%c%c' pulls the fourcc apart byte by byte. */
	drv_logd("%s %s bo %p: %dx%d '%c%c%c%c' tiling %d plane %zu mod 0x%" PRIx64
		 " use 0x%" PRIx64 " size %zu\n",
		 prefix, bo->drv->backend->name, bo, meta->width, meta->height, meta->format & 0xff,
		 (meta->format >> 8) & 0xff, (meta->format >> 16) & 0xff,
		 (meta->format >> 24) & 0xff, meta->tiling, meta->num_planes, meta->format_modifier,
		 meta->use_flags, meta->total_size);
	for (uint32_t i = 0; i < meta->num_planes; i++) {
		drv_logd(" bo %p plane %d: offset %d size %d stride %d\n", bo, i, meta->offsets[i],
			 meta->sizes[i], meta->strides[i]);
	}
}
753
754 /*
755 * Map internal fourcc codes back to standard fourcc codes.
756 */
drv_get_standard_fourcc(uint32_t fourcc_internal)757 uint32_t drv_get_standard_fourcc(uint32_t fourcc_internal)
758 {
759 return (fourcc_internal == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : fourcc_internal;
760 }
761
/*
 * Resolve a requested (format, use_flags) pair to what the backend will
 * actually allocate, written to |out_format| / |out_use_flags|.  Every
 * backend must provide this hook.
 */
void drv_resolve_format_and_use_flags(struct driver *drv, uint32_t format, uint64_t use_flags,
				      uint32_t *out_format, uint64_t *out_use_flags)
{
	assert(drv->backend->resolve_format_and_use_flags);

	drv->backend->resolve_format_and_use_flags(drv, format, use_flags, out_format,
						   out_use_flags);
}
770
/*
 * Backend for the drv_log*() macros: formats a "[prefix:function(line)]" tag
 * and forwards the message at |level| to the Android log (on Android) or to
 * stderr (elsewhere).
 */
void drv_log_prefix(enum drv_log_level level, const char *prefix, const char *func, int line,
		    const char *format, ...)
{
	/* Tag is truncated to fit; 50 bytes is plenty for file/function names. */
	char buf[50];
	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, func, line);

	va_list args;
	va_start(args, format);
#ifdef __ANDROID__
	int prio = ANDROID_LOG_ERROR;
	switch (level) {
	case DRV_LOGV:
		prio = ANDROID_LOG_VERBOSE;
		break;
	case DRV_LOGD:
		prio = ANDROID_LOG_DEBUG;
		break;
	case DRV_LOGI:
		prio = ANDROID_LOG_INFO;
		break;
	case DRV_LOGE:
	default:
		break;
	}
	__android_log_vprint(prio, buf, format, args);
#else
	fprintf(stderr, "%s ", buf);
	vfprintf(stderr, format, args);
#endif
	va_end(args);
}
802
drv_resource_info(struct bo * bo,uint32_t strides[DRV_MAX_PLANES],uint32_t offsets[DRV_MAX_PLANES],uint64_t * format_modifier)803 int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
804 uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
805 {
806 for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
807 strides[plane] = bo->meta.strides[plane];
808 offsets[plane] = bo->meta.offsets[plane];
809 }
810 *format_modifier = bo->meta.format_modifier;
811
812 if (bo->drv->backend->resource_info)
813 return bo->drv->backend->resource_info(bo, strides, offsets, format_modifier);
814
815 return 0;
816 }
817
drv_get_max_texture_2d_size(struct driver * drv)818 uint32_t drv_get_max_texture_2d_size(struct driver *drv)
819 {
820 if (drv->backend->get_max_texture_2d_size)
821 return drv->backend->get_max_texture_2d_size(drv);
822
823 return UINT32_MAX;
824 }
825