/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 */

#ifndef TU_DRM_H
#define TU_DRM_H

#include "tu_common.h"

/* Keep tu_syncobj until the KGSL backend is also ported to the common
 * synchronization code.
 */
#ifdef TU_USE_KGSL
struct tu_syncobj;
/* for TU_FROM_HANDLE with both VkFence and VkSemaphore: */
#define tu_syncobj_from_handle(x) ((struct tu_syncobj *) (uintptr_t) (x))
#endif
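
/* Illustrative only: how the cast macro above is meant to be used. The
 * fence value is a placeholder, not a real call site:
 *
 *    VkFence fence = ...;
 *    struct tu_syncobj *sync = tu_syncobj_from_handle(fence);
 */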

struct tu_u_trace_syncobj;

enum tu_bo_alloc_flags
{
   TU_BO_ALLOC_NO_FLAGS = 0,
   TU_BO_ALLOC_ALLOW_DUMP = 1 << 0,
   TU_BO_ALLOC_GPU_READ_ONLY = 1 << 1,
};
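
/* A sketch of combining the flags above; the size and call site are
 * placeholders, not taken from the driver:
 *
 *    struct tu_bo *bo;
 *    VkResult result =
 *       tu_bo_init_new(dev, &bo, 4096,
 *                      TU_BO_ALLOC_ALLOW_DUMP | TU_BO_ALLOC_GPU_READ_ONLY);
 */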

/* tu_timeline_sync is the point type for vk_sync_timeline, built on a DRM
 * syncobj. Its handling logic is mostly copied from anv_bo_sync, since it
 * can be used here in much the same way as in anv.
 */
enum tu_timeline_sync_state {
   /** Indicates that this is a new (or newly reset) fence */
   TU_TIMELINE_SYNC_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   TU_TIMELINE_SYNC_STATE_SUBMITTED,

   /** Indicates that the fence has signaled and the GPU is done with it. */
   TU_TIMELINE_SYNC_STATE_SIGNALED,
};
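
/* Expected lifecycle, mirroring anv_bo_sync (a sketch, not an invariant
 * enforced by this header):
 *
 *    RESET --queue submit--> SUBMITTED --GPU completes--> SIGNALED
 *      ^                                                     |
 *      +------------------- vk_sync reset -------------------+
 */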

struct tu_bo
{
   uint32_t gem_handle;
   uint64_t size;
   uint64_t iova;    /* GPU virtual address */
   void *map;        /* CPU mapping, NULL until tu_bo_map() */
   int32_t refcnt;   /* incremented by tu_bo_get_ref() */

#ifndef TU_USE_KGSL
   uint32_t bo_list_idx;
#endif

   bool implicit_sync : 1;
};

struct tu_timeline_sync {
   struct vk_sync base;

   enum tu_timeline_sync_state state;
   uint32_t syncobj;
};

/* Allocate a new BO. */
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo **bo, uint64_t size,
               enum tu_bo_alloc_flags flags);

/* Import a BO from a dma-buf fd. */
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo **bo,
                  uint64_t size,
                  int fd);

/* Export a BO as a dma-buf fd, or return a negative value on failure. */
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);

/* Drop a reference; the BO is destroyed when the last reference is gone. */
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);

/* Create the CPU mapping for a BO (see tu_bo::map). */
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
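
/* A sketch of the allocate/map/release flow using the declarations above;
 * the size and the data written are placeholders, not driver code:
 *
 *    struct tu_bo *bo;
 *    if (tu_bo_init_new(dev, &bo, 4096, TU_BO_ALLOC_NO_FLAGS) != VK_SUCCESS)
 *       return;
 *    if (tu_bo_map(dev, bo) == VK_SUCCESS)
 *       memcpy(bo->map, data, data_size);   // write through the CPU mapping
 *    tu_bo_finish(dev, bo);
 */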

/* Take an extra reference on a BO; pair each call with tu_bo_finish(). */
static inline struct tu_bo *
tu_bo_get_ref(struct tu_bo *bo)
{
   p_atomic_inc(&bo->refcnt);
   return bo;
}
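
/* Sharing a BO between two owners, as a sketch (the names are illustrative,
 * not from the driver):
 *
 *    cache->bo = tu_bo_get_ref(bo);   // second owner
 *    ...
 *    tu_bo_finish(dev, cache->bo);    // drops only this owner's reference
 */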

VkResult
tu_enumerate_devices(struct tu_instance *instance);

int
tu_device_get_gpu_timestamp(struct tu_device *dev,
                            uint64_t *ts);

int
tu_device_get_suspend_count(struct tu_device *dev,
                            uint64_t *suspend_count);

VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj);

VkResult
tu_device_check_status(struct vk_device *vk_device);

int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);
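
/* A sketch of the submitqueue lifetime; priority 0 is only assumed here to
 * be a valid value, check the backend for the real range:
 *
 *    uint32_t queue_id;
 *    if (tu_drm_submitqueue_new(dev, 0, &queue_id) == 0) {
 *       ... submit work against queue_id ...
 *       tu_drm_submitqueue_close(dev, queue_id);
 *    }
 */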

int
tu_syncobj_to_fd(struct tu_device *device, struct vk_sync *sync);

VkResult
tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit);

#endif /* TU_DRM_H */