/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#ifdef __ANDROID__
#include <cutils/log.h>
#include <libgen.h>
#define MINIGBM_DEBUG "vendor.minigbm.debug"
#else
#define MINIGBM_DEBUG "MINIGBM_DEBUG"
#endif

#include "drv_helpers.h"
#include "drv_priv.h"
#include "util.h"
30
31 #ifdef DRV_AMDGPU
32 extern const struct backend backend_amdgpu;
33 #endif
34 #ifdef DRV_I915
35 extern const struct backend backend_i915;
36 #endif
37 #ifdef DRV_MSM
38 extern const struct backend backend_msm;
39 #endif
40 #ifdef DRV_VC4
41 extern const struct backend backend_vc4;
42 #endif
43
44 // Dumb / generic drivers
45 extern const struct backend backend_evdi;
46 extern const struct backend backend_marvell;
47 extern const struct backend backend_mediatek;
48 extern const struct backend backend_meson;
49 extern const struct backend backend_nouveau;
50 extern const struct backend backend_komeda;
51 extern const struct backend backend_radeon;
52 extern const struct backend backend_rockchip;
53 extern const struct backend backend_sun4i_drm;
54 extern const struct backend backend_synaptics;
55 extern const struct backend backend_virtgpu;
56 extern const struct backend backend_udl;
57 extern const struct backend backend_vkms;
58
59 extern const struct backend backend_mock;
60
61 static const struct backend *drv_backend_list[] = {
62 #ifdef DRV_AMDGPU
63 &backend_amdgpu,
64 #endif
65 #ifdef DRV_I915
66 &backend_i915,
67 #endif
68 #ifdef DRV_MSM
69 &backend_msm,
70 #endif
71 #ifdef DRV_VC4
72 &backend_vc4,
73 #endif
74 &backend_evdi, &backend_komeda, &backend_marvell, &backend_mediatek,
75 &backend_meson, &backend_nouveau, &backend_radeon, &backend_rockchip,
76 &backend_sun4i_drm, &backend_synaptics, &backend_udl, &backend_virtgpu,
77 &backend_vkms, &backend_mock
78 };
79
drv_preload(bool load)80 void drv_preload(bool load)
81 {
82 unsigned int i;
83 for (i = 0; i < ARRAY_SIZE(drv_backend_list); i++) {
84 const struct backend *b = drv_backend_list[i];
85 if (b->preload)
86 b->preload(load);
87 }
88 }
89
drv_get_backend(int fd)90 static const struct backend *drv_get_backend(int fd)
91 {
92 drmVersionPtr drm_version;
93 unsigned int i;
94
95 drm_version = drmGetVersion(fd);
96
97 if (!drm_version)
98 return NULL;
99
100 for (i = 0; i < ARRAY_SIZE(drv_backend_list); i++) {
101 const struct backend *b = drv_backend_list[i];
102 if (!strcmp(drm_version->name, b->name)) {
103 drmFreeVersion(drm_version);
104 return b;
105 }
106 }
107
108 drmFreeVersion(drm_version);
109 return NULL;
110 }
111
/*
 * Create a driver instance for the DRM device behind fd.
 *
 * Resolves the backend by kernel driver name, then sets up the
 * handle-refcount hash table, the mapping list, and the combination list,
 * and finally runs the backend's optional init hook. Returns NULL on any
 * failure. The caller keeps ownership of fd; drv_destroy() does not close
 * it.
 */
struct driver *drv_create(int fd)
{
	struct driver *drv;
	int ret;

	drv = (struct driver *)calloc(1, sizeof(*drv));

	if (!drv)
		return NULL;

	/*
	 * Debug options come from one option string (Android system property
	 * or environment variable named MINIGBM_DEBUG): compression is on
	 * unless it contains "nocompression"; per-bo logging is on only if
	 * it contains "log_bos".
	 */
	const char *minigbm_debug;
	minigbm_debug = drv_get_os_option(MINIGBM_DEBUG);
	drv->compression = (minigbm_debug == NULL) || (strstr(minigbm_debug, "nocompression") == NULL);
	drv->log_bos = (minigbm_debug && strstr(minigbm_debug, "log_bos") != NULL);

	drv->fd = fd;
	drv->backend = drv_get_backend(fd);

	if (!drv->backend)
		goto free_driver;

	if (pthread_mutex_init(&drv->buffer_table_lock, NULL))
		goto free_driver;

	drv->buffer_table = drmHashCreate();
	if (!drv->buffer_table)
		goto free_buffer_table_lock;

	if (pthread_mutex_init(&drv->mappings_lock, NULL))
		goto free_buffer_table;

	drv->mappings = drv_array_init(sizeof(struct mapping));
	if (!drv->mappings)
		goto free_mappings_lock;

	drv->combos = drv_array_init(sizeof(struct combination));
	if (!drv->combos)
		goto free_mappings;

	if (drv->backend->init) {
		ret = drv->backend->init(drv);
		if (ret) {
			/* combos is the only resource the labels below don't free. */
			drv_array_destroy(drv->combos);
			goto free_mappings;
		}
	}

	return drv;

	/* Error unwind, in reverse order of acquisition. */
free_mappings:
	drv_array_destroy(drv->mappings);
free_mappings_lock:
	pthread_mutex_destroy(&drv->mappings_lock);
free_buffer_table:
	drmHashDestroy(drv->buffer_table);
free_buffer_table_lock:
	pthread_mutex_destroy(&drv->buffer_table_lock);
free_driver:
	free(drv);
	return NULL;
}
173
drv_destroy(struct driver * drv)174 void drv_destroy(struct driver *drv)
175 {
176 if (drv->backend->close)
177 drv->backend->close(drv);
178
179 drv_array_destroy(drv->combos);
180
181 drv_array_destroy(drv->mappings);
182 pthread_mutex_destroy(&drv->mappings_lock);
183
184 drmHashDestroy(drv->buffer_table);
185 pthread_mutex_destroy(&drv->buffer_table_lock);
186
187 free(drv);
188 }
189
drv_get_fd(struct driver * drv)190 int drv_get_fd(struct driver *drv)
191 {
192 return drv->fd;
193 }
194
drv_get_name(struct driver * drv)195 const char *drv_get_name(struct driver *drv)
196 {
197 return drv->backend->name;
198 }
199
drv_get_combination(struct driver * drv,uint32_t format,uint64_t use_flags)200 struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
201 {
202 struct combination *curr, *best;
203
204 if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
205 return 0;
206
207 best = NULL;
208 uint32_t i;
209 for (i = 0; i < drv_array_size(drv->combos); i++) {
210 curr = drv_array_at_idx(drv->combos, i);
211 if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
212 if (!best || best->metadata.priority < curr->metadata.priority)
213 best = curr;
214 }
215
216 return best;
217 }
218
drv_bo_new(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,uint64_t use_flags,bool is_test_buffer)219 struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
220 uint64_t use_flags, bool is_test_buffer)
221 {
222
223 struct bo *bo;
224 bo = (struct bo *)calloc(1, sizeof(*bo));
225
226 if (!bo)
227 return NULL;
228
229 bo->drv = drv;
230 bo->meta.width = width;
231 bo->meta.height = height;
232 bo->meta.format = format;
233 bo->meta.use_flags = use_flags;
234 bo->meta.num_planes = drv_num_planes_from_format(format);
235 bo->is_test_buffer = is_test_buffer;
236
237 if (!bo->meta.num_planes) {
238 free(bo);
239 errno = EINVAL;
240 return NULL;
241 }
242
243 return bo;
244 }
245
/*
 * Free every CPU mapping record associated with bo. Runs under
 * mappings_lock; for each record targeting bo's GEM handle, one vma
 * reference is dropped and the backend unmap runs once the vma's
 * refcount reaches zero.
 */
static void drv_bo_mapping_destroy(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uint32_t idx = 0;

	/*
	 * This function is called right before the buffer is destroyed. It will free any mappings
	 * associated with the buffer.
	 */
	pthread_mutex_lock(&drv->mappings_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/*
		 * NOTE(review): idx is not reset per plane, so the inner scan
		 * effectively runs once; all planes share bo->handle.u32 here,
		 * and a single pass already removes every matching entry.
		 */
		while (idx < drv_array_size(drv->mappings)) {
			struct mapping *mapping =
			    (struct mapping *)drv_array_at_idx(drv->mappings, idx);
			if (mapping->vma->handle != bo->handle.u32) {
				idx++;
				continue;
			}

			if (!--mapping->vma->refcount) {
				int ret = drv->backend->bo_unmap(bo, mapping->vma);
				if (ret) {
					/*
					 * Bail out, leaving remaining entries in place.
					 * ret is non-zero here, so this assert never fires.
					 */
					pthread_mutex_unlock(&drv->mappings_lock);
					assert(ret);
					drv_loge("munmap failed\n");
					return;
				}

				free(mapping->vma);
			}

			/* This shrinks and shifts the array, so don't increment idx. */
			drv_array_remove(drv->mappings, idx);
		}
	}
	pthread_mutex_unlock(&drv->mappings_lock);
}
283
/*
 * Acquire a reference on plane buffers of the bo.
 */
static void drv_bo_acquire(struct bo *bo)
{
	struct driver *drv = bo->drv;

	pthread_mutex_lock(&drv->buffer_table_lock);
	/*
	 * buffer_table maps GEM handle -> refcount. All planes use the same
	 * bo->handle.u32 here, so the count is bumped once per plane.
	 */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		uintptr_t num = 0;

		/* drmHashLookup returns 0 on a hit; delete, then reinsert num+1. */
		if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num))
			drmHashDelete(drv->buffer_table, bo->handle.u32);

		drmHashInsert(drv->buffer_table, bo->handle.u32, (void *)(num + 1));
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);
}
302
/*
 * Release a reference on plane buffers of the bo. Return true when the bo has lost all its
 * references. Otherwise, return false.
 */
static bool drv_bo_release(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uintptr_t num;

	/* Let the backend drop any per-bo state before refcounts change. */
	if (drv->backend->bo_release)
		drv->backend->bo_release(bo);

	pthread_mutex_lock(&drv->buffer_table_lock);
	/*
	 * Decrement the handle's refcount once per plane; the entry is
	 * removed entirely when the count would reach zero.
	 */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num)) {
			drmHashDelete(drv->buffer_table, bo->handle.u32);

			if (num > 1) {
				drmHashInsert(drv->buffer_table, bo->handle.u32, (void *)(num - 1));
			}
		}
	}

	/* The same buffer can back multiple planes with different offsets. */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handle.u32, (void **)&num)) {
			/* num is positive if found in the hashmap. */
			pthread_mutex_unlock(&drv->buffer_table_lock);
			return false;
		}
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);

	return true;
}
338
drv_bo_create(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,uint64_t use_flags)339 struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
340 uint64_t use_flags)
341 {
342 int ret;
343 struct bo *bo;
344 bool is_test_alloc;
345
346 is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
347 use_flags &= ~BO_USE_TEST_ALLOC;
348
349 bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);
350
351 if (!bo)
352 return NULL;
353
354 ret = -EINVAL;
355 if (drv->backend->bo_compute_metadata) {
356 ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
357 0);
358 if (!is_test_alloc && ret == 0)
359 ret = drv->backend->bo_create_from_metadata(bo);
360 } else if (!is_test_alloc) {
361 ret = drv->backend->bo_create(bo, width, height, format, use_flags);
362 }
363
364 if (ret) {
365 errno = -ret;
366 free(bo);
367 return NULL;
368 }
369
370 drv_bo_acquire(bo);
371
372 if (drv->log_bos)
373 drv_bo_log_info(bo, "legacy created");
374
375 return bo;
376 }
377
drv_bo_create_with_modifiers(struct driver * drv,uint32_t width,uint32_t height,uint32_t format,const uint64_t * modifiers,uint32_t count)378 struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
379 uint32_t format, const uint64_t *modifiers, uint32_t count)
380 {
381 int ret;
382 struct bo *bo;
383
384 if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
385 errno = ENOENT;
386 return NULL;
387 }
388
389 bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);
390
391 if (!bo)
392 return NULL;
393
394 ret = -EINVAL;
395 if (drv->backend->bo_compute_metadata) {
396 ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
397 modifiers, count);
398 if (ret == 0)
399 ret = drv->backend->bo_create_from_metadata(bo);
400 } else {
401 ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
402 count);
403 }
404
405 if (ret) {
406 free(bo);
407 return NULL;
408 }
409
410 drv_bo_acquire(bo);
411
412 if (drv->log_bos)
413 drv_bo_log_info(bo, "created");
414
415 return bo;
416 }
417
drv_bo_destroy(struct bo * bo)418 void drv_bo_destroy(struct bo *bo)
419 {
420 if (!bo->is_test_buffer && drv_bo_release(bo)) {
421 drv_bo_mapping_destroy(bo);
422 bo->drv->backend->bo_destroy(bo);
423 }
424
425 free(bo);
426 }
427
/*
 * Wrap externally-created dma-buf fds (one per plane) in a new bo.
 * Strides and offsets are copied from data; per-plane sizes are derived
 * from the next plane's offset, or from the fd length (via lseek) for the
 * last plane / planes in separate buffers. Returns NULL on failure.
 */
struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct bo *bo;
	off_t seek_end;

	bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);

	if (!bo)
		return NULL;

	ret = drv->backend->bo_import(bo, data);
	if (ret) {
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	bo->meta.format_modifier = data->format_modifier;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		bo->meta.strides[plane] = data->strides[plane];
		bo->meta.offsets[plane] = data->offsets[plane];

		/* fd length serves as the upper bound for size validation below. */
		seek_end = lseek(data->fds[plane], 0, SEEK_END);
		if (seek_end == (off_t)(-1)) {
			drv_loge("lseek() failed with %s\n", strerror(errno));
			goto destroy_bo;
		}

		/* Rewind the fd; the return value is ignored here. */
		lseek(data->fds[plane], 0, SEEK_SET);
		/*
		 * A zero offset for the next plane means it lives in a
		 * separate buffer, so this plane extends to the end of its fd.
		 */
		if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
			bo->meta.sizes[plane] = seek_end - data->offsets[plane];
		else
			bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];

		if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
			drv_loge("buffer size is too large.\n");
			goto destroy_bo;
		}

		bo->meta.total_size += bo->meta.sizes[plane];
	}

	if (drv->log_bos)
		drv_bo_log_info(bo, "imported");

	return bo;

destroy_bo:
	/* Full destroy: the backend import and acquire above already succeeded. */
	drv_bo_destroy(bo);
	return NULL;
}
482
/*
 * Map bo for CPU access and return a pointer to the start of `plane`
 * within the mapping. Existing mappings are reused where possible:
 *   - identical handle/flags/rect: the record just gains a refcount;
 *   - identical handle/flags only: the record shares the prior vma;
 *   - otherwise a fresh vma is created through the backend.
 * *map_data receives the record to pass to drv_bo_unmap(). Returns
 * MAP_FAILED on error or for test buffers.
 */
void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
		 struct mapping **map_data, size_t plane)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	uint8_t *addr;
	struct mapping mapping = { 0 };

	assert(rect->width >= 0);
	assert(rect->height >= 0);
	assert(rect->x + rect->width <= drv_bo_get_width(bo));
	assert(rect->y + rect->height <= drv_bo_get_height(bo));
	assert(BO_MAP_READ_WRITE & map_flags);
	/* No CPU access for protected buffers. */
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->is_test_buffer)
		return MAP_FAILED;

	mapping.rect = *rect;
	mapping.refcount = 1;

	pthread_mutex_lock(&drv->mappings_lock);

	/* First pass: exact match (handle, flags, rect) — share the record. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handle.u32 || prior->vma->map_flags != map_flags)
			continue;

		if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
		    rect->width != prior->rect.width || rect->height != prior->rect.height)
			continue;

		prior->refcount++;
		*map_data = prior;
		goto exact_match;
	}

	/* Second pass: same handle/flags, different rect — share the vma only. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handle.u32 || prior->vma->map_flags != map_flags)
			continue;

		prior->vma->refcount++;
		mapping.vma = prior->vma;
		goto success;
	}

	/* Nothing reusable: create a new vma through the backend. */
	mapping.vma = calloc(1, sizeof(*mapping.vma));
	if (!mapping.vma) {
		*map_data = NULL;
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
	addr = drv->backend->bo_map(bo, mapping.vma, map_flags);
	if (addr == MAP_FAILED) {
		*map_data = NULL;
		free(mapping.vma);
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	mapping.vma->refcount = 1;
	mapping.vma->addr = addr;
	mapping.vma->handle = bo->handle.u32;
	mapping.vma->map_flags = map_flags;

success:
	*map_data = drv_array_append(drv->mappings, &mapping);
exact_match:
	drv_bo_invalidate(bo, *map_data);
	addr = (uint8_t *)((*map_data)->vma->addr);
	/* Offset into the shared mapping to reach the requested plane. */
	addr += drv_bo_get_plane_offset(bo, plane);
	pthread_mutex_unlock(&drv->mappings_lock);
	return (void *)addr;
}
561
/*
 * Drop one reference on a mapping obtained from drv_bo_map(). When the
 * record's last reference goes, the vma refcount drops too; the backend
 * unmap runs once no other record shares the vma, and the record is
 * removed from drv->mappings. Returns the backend unmap result, or 0
 * when nothing was actually unmapped.
 */
int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	int ret = 0;

	pthread_mutex_lock(&drv->mappings_lock);

	if (--mapping->refcount)
		goto out;

	if (!--mapping->vma->refcount) {
		ret = drv->backend->bo_unmap(bo, mapping->vma);
		free(mapping->vma);
	}

	/* Remove this record from the global mapping list. */
	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		if (mapping == (struct mapping *)drv_array_at_idx(drv->mappings, i)) {
			drv_array_remove(drv->mappings, i);
			break;
		}
	}

out:
	pthread_mutex_unlock(&drv->mappings_lock);
	return ret;
}
589
drv_bo_cached(struct bo * bo)590 bool drv_bo_cached(struct bo *bo)
591 {
592 return bo->meta.cached;
593 }
594
drv_bo_invalidate(struct bo * bo,struct mapping * mapping)595 int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
596 {
597 int ret = 0;
598
599 assert(mapping);
600 assert(mapping->vma);
601 assert(mapping->refcount > 0);
602 assert(mapping->vma->refcount > 0);
603
604 if (bo->drv->backend->bo_invalidate)
605 ret = bo->drv->backend->bo_invalidate(bo, mapping);
606
607 return ret;
608 }
609
drv_bo_flush(struct bo * bo,struct mapping * mapping)610 int drv_bo_flush(struct bo *bo, struct mapping *mapping)
611 {
612 int ret = 0;
613
614 assert(mapping);
615 assert(mapping->vma);
616 assert(mapping->refcount > 0);
617 assert(mapping->vma->refcount > 0);
618
619 if (bo->drv->backend->bo_flush)
620 ret = bo->drv->backend->bo_flush(bo, mapping);
621
622 return ret;
623 }
624
drv_bo_flush_or_unmap(struct bo * bo,struct mapping * mapping)625 int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
626 {
627 int ret = 0;
628
629 assert(mapping);
630 assert(mapping->vma);
631 assert(mapping->refcount > 0);
632 assert(mapping->vma->refcount > 0);
633 assert(!(bo->meta.use_flags & BO_USE_PROTECTED));
634
635 if (bo->drv->backend->bo_flush)
636 ret = bo->drv->backend->bo_flush(bo, mapping);
637 else
638 ret = drv_bo_unmap(bo, mapping);
639
640 return ret;
641 }
642
drv_bo_get_width(struct bo * bo)643 uint32_t drv_bo_get_width(struct bo *bo)
644 {
645 return bo->meta.width;
646 }
647
drv_bo_get_height(struct bo * bo)648 uint32_t drv_bo_get_height(struct bo *bo)
649 {
650 return bo->meta.height;
651 }
652
drv_bo_get_num_planes(struct bo * bo)653 size_t drv_bo_get_num_planes(struct bo *bo)
654 {
655 return bo->meta.num_planes;
656 }
657
drv_bo_get_plane_handle(struct bo * bo,size_t plane)658 union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
659 {
660 return bo->handle;
661 }
662
663 #ifndef DRM_RDWR
664 #define DRM_RDWR O_RDWR
665 #endif
666
drv_bo_get_plane_fd(struct bo * bo,size_t plane)667 int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
668 {
669
670 int ret, fd;
671 assert(plane < bo->meta.num_planes);
672
673 if (bo->is_test_buffer)
674 return -EINVAL;
675
676 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handle.u32, DRM_CLOEXEC | DRM_RDWR, &fd);
677
678 // Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
679 if (ret)
680 ret = drmPrimeHandleToFD(bo->drv->fd, bo->handle.u32, DRM_CLOEXEC, &fd);
681
682 if (ret)
683 drv_loge("Failed to get plane fd: %s\n", strerror(errno));
684
685 return (ret) ? ret : fd;
686 }
687
/* Byte offset of `plane` within the buffer. */
uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
	const struct bo_metadata *meta = &bo->meta;

	assert(plane < meta->num_planes);
	return meta->offsets[plane];
}
693
/* Size in bytes of `plane`. */
uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
	const struct bo_metadata *meta = &bo->meta;

	assert(plane < meta->num_planes);
	return meta->sizes[plane];
}
699
/* Row stride in bytes of `plane`. */
uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
	const struct bo_metadata *meta = &bo->meta;

	assert(plane < meta->num_planes);
	return meta->strides[plane];
}
705
drv_bo_get_format_modifier(struct bo * bo)706 uint64_t drv_bo_get_format_modifier(struct bo *bo)
707 {
708 return bo->meta.format_modifier;
709 }
710
drv_bo_get_format(struct bo * bo)711 uint32_t drv_bo_get_format(struct bo *bo)
712 {
713 return bo->meta.format;
714 }
715
drv_bo_get_tiling(struct bo * bo)716 uint32_t drv_bo_get_tiling(struct bo *bo)
717 {
718 return bo->meta.tiling;
719 }
720
drv_bo_get_use_flags(struct bo * bo)721 uint64_t drv_bo_get_use_flags(struct bo *bo)
722 {
723 return bo->meta.use_flags;
724 }
725
drv_bo_get_total_size(struct bo * bo)726 size_t drv_bo_get_total_size(struct bo *bo)
727 {
728 return bo->meta.total_size;
729 }
730
drv_bo_log_info(const struct bo * bo,const char * prefix)731 void drv_bo_log_info(const struct bo *bo, const char *prefix)
732 {
733 const struct bo_metadata *meta = &bo->meta;
734
735 drv_logd("%s %s bo %p: %dx%d '%c%c%c%c' tiling %d plane %zu mod 0x%" PRIx64 " use 0x%" PRIx64 " size %zu\n",
736 prefix, bo->drv->backend->name, bo,
737 meta->width, meta->height,
738 meta->format & 0xff,
739 (meta->format >> 8) & 0xff,
740 (meta->format >> 16) & 0xff,
741 (meta->format >> 24) & 0xff,
742 meta->tiling, meta->num_planes, meta->format_modifier,
743 meta->use_flags, meta->total_size);
744 for (uint32_t i = 0; i < meta->num_planes; i++) {
745 drv_logd(" bo %p plane %d: offset %d size %d stride %d\n",
746 bo, i, meta->offsets[i], meta->sizes[i],
747 meta->strides[i]);
748 }
749 }
750
751 /*
752 * Map internal fourcc codes back to standard fourcc codes.
753 */
drv_get_standard_fourcc(uint32_t fourcc_internal)754 uint32_t drv_get_standard_fourcc(uint32_t fourcc_internal)
755 {
756 return (fourcc_internal == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : fourcc_internal;
757 }
758
drv_resolve_format_and_use_flags(struct driver * drv,uint32_t format,uint64_t use_flags,uint32_t * out_format,uint64_t * out_use_flags)759 void drv_resolve_format_and_use_flags(struct driver *drv, uint32_t format, uint64_t use_flags,
760 uint32_t *out_format, uint64_t *out_use_flags)
761 {
762 assert(drv->backend->resolve_format_and_use_flags);
763
764 drv->backend->resolve_format_and_use_flags(drv, format, use_flags, out_format,
765 out_use_flags);
766 }
767
/*
 * Common sink for the drv_log*() macros. Builds a "[prefix:func(line)]"
 * tag (truncated to fit buf) and forwards the message to logcat on
 * Android (priority derived from level) or to stderr elsewhere.
 *
 * Fix: removed the stray ';' after the switch's closing brace (an empty
 * statement). Also depends on <stdarg.h>, now included directly.
 */
void drv_log_prefix(enum drv_log_level level, const char *prefix, const char *func, int line,
		    const char *format, ...)
{
	char buf[50];
	va_list args;

	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, func, line);

	va_start(args, format);
#ifdef __ANDROID__
	int prio = ANDROID_LOG_ERROR;
	switch (level) {
	case DRV_LOGV:
		prio = ANDROID_LOG_VERBOSE;
		break;
	case DRV_LOGD:
		prio = ANDROID_LOG_DEBUG;
		break;
	case DRV_LOGI:
		prio = ANDROID_LOG_INFO;
		break;
	case DRV_LOGE:
	default:
		break;
	}
	__android_log_vprint(prio, buf, format, args);
#else
	fprintf(stderr, "%s ", buf);
	vfprintf(stderr, format, args);
#endif
	va_end(args);
}
799
drv_resource_info(struct bo * bo,uint32_t strides[DRV_MAX_PLANES],uint32_t offsets[DRV_MAX_PLANES],uint64_t * format_modifier)800 int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
801 uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
802 {
803 for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
804 strides[plane] = bo->meta.strides[plane];
805 offsets[plane] = bo->meta.offsets[plane];
806 }
807 *format_modifier = bo->meta.format_modifier;
808
809 if (bo->drv->backend->resource_info)
810 return bo->drv->backend->resource_info(bo, strides, offsets, format_modifier);
811
812 return 0;
813 }
814
drv_get_max_texture_2d_size(struct driver * drv)815 uint32_t drv_get_max_texture_2d_size(struct driver *drv)
816 {
817 if (drv->backend->get_max_texture_2d_size)
818 return drv->backend->get_max_texture_2d_size(drv);
819
820 return UINT32_MAX;
821 }
822