/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 */

#ifndef TU_DRM_H
#define TU_DRM_H

#include "tu_common.h"

struct tu_u_trace_syncobj;
struct vdrm_bo;

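/* Allocation flags for tu_bo. Judging by their uses in this header:
 * ALLOW_DUMP marks the BO for inclusion in device crash dumps (see
 * tu_bo_allow_dump below), GPU_READ_ONLY maps the BO read-only on the GPU,
 * and REPLAYABLE presumably permits allocating at a client-provided iova
 * (e.g. for capture/replay); the exact semantics live in each kernel
 * backend's bo_init implementation.
 */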
enum tu_bo_alloc_flags
{
   TU_BO_ALLOC_NO_FLAGS = 0,
   TU_BO_ALLOC_ALLOW_DUMP = 1 << 0,
   TU_BO_ALLOC_GPU_READ_ONLY = 1 << 1,
   TU_BO_ALLOC_REPLAYABLE = 1 << 2,
};

/* Define the tu_timeline_sync type, a point type for vk_sync_timeline built
 * on drm syncobjs. The handling logic is mostly copied from anv_bo_sync,
 * since it seems it can be used in a similar way here.
 */
enum tu_timeline_sync_state {
   /** Indicates that this is a new (or newly reset) fence. */
   TU_TIMELINE_SYNC_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   TU_TIMELINE_SYNC_STATE_SUBMITTED,

   TU_TIMELINE_SYNC_STATE_SIGNALED,
};
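
/* Sketch of the lifecycle implied by the states above: a sync starts in
 * RESET, moves to SUBMITTED when work referencing it is handed to the
 * kernel, and ends in SIGNALED once that work is known to have completed.
 */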

struct tu_bo {
   uint32_t gem_handle;
#ifdef TU_HAS_VIRTIO
   uint32_t res_id;
#endif
   uint64_t size;
   uint64_t iova;
   void *map;
   const char *name; /* pointer to device->bo_sizes's entry's name */
   int32_t refcnt;

   uint32_t bo_list_idx;

   bool implicit_sync : 1;
};

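/* Vtable for the kernel-mode driver backend. Each backend (kgsl, msm, or
 * virtio; see the tu_knl_*_load entrypoints below) fills this in so the
 * rest of the driver can stay backend-agnostic.
 */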
struct tu_knl {
   const char *name;

   VkResult (*device_init)(struct tu_device *dev);
   void (*device_finish)(struct tu_device *dev);
   int (*device_get_gpu_timestamp)(struct tu_device *dev, uint64_t *ts);
   int (*device_get_suspend_count)(struct tu_device *dev, uint64_t *suspend_count);
   VkResult (*device_check_status)(struct tu_device *dev);
   int (*submitqueue_new)(struct tu_device *dev, int priority, uint32_t *queue_id);
   void (*submitqueue_close)(struct tu_device *dev, uint32_t queue_id);
   VkResult (*bo_init)(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
                       uint64_t client_iova, VkMemoryPropertyFlags mem_property,
                       enum tu_bo_alloc_flags flags, const char *name);
   VkResult (*bo_init_dmabuf)(struct tu_device *dev, struct tu_bo **out_bo,
                              uint64_t size, int prime_fd);
   int (*bo_export_dmabuf)(struct tu_device *dev, struct tu_bo *bo);
   VkResult (*bo_map)(struct tu_device *dev, struct tu_bo *bo);
   void (*bo_allow_dump)(struct tu_device *dev, struct tu_bo *bo);
   void (*bo_finish)(struct tu_device *dev, struct tu_bo *bo);
   void (*bo_set_metadata)(struct tu_device *dev, struct tu_bo *bo,
                           void *metadata, uint32_t metadata_size);
   int (*bo_get_metadata)(struct tu_device *dev, struct tu_bo *bo,
                          void *metadata, uint32_t metadata_size);
   VkResult (*device_wait_u_trace)(struct tu_device *dev,
                                   struct tu_u_trace_syncobj *syncobj);
   VkResult (*queue_submit)(struct tu_queue *queue,
                            struct vk_queue_submit *submit);

   const struct vk_device_entrypoint_table *device_entrypoints;
};

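/* A BO whose iova must outlive the BO itself: judging by the fence field,
 * the VMA is kept around as a "zombie" until the fence passes, so the
 * address range is not reused while the GPU may still reference it.
 */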
struct tu_zombie_vma {
   int fence;
   uint32_t gem_handle;
#ifdef TU_HAS_VIRTIO
   uint32_t res_id;
#endif
   uint64_t iova;
   uint64_t size;
};

struct tu_timeline_sync {
   struct vk_sync base;

   enum tu_timeline_sync_state state;
   uint32_t syncobj;
};

VkResult
tu_bo_init_new_explicit_iova(struct tu_device *dev,
                             struct tu_bo **out_bo,
                             uint64_t size,
                             uint64_t client_iova,
                             VkMemoryPropertyFlags mem_property,
                             enum tu_bo_alloc_flags flags,
                             const char *name);

static inline VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
               enum tu_bo_alloc_flags flags, const char *name)
{
   /* TODO: don't mark everything HOST_VISIBLE; anything that never gets
    * CPU access should not have this bit set.
    */
   return tu_bo_init_new_explicit_iova(
      dev, out_bo, size, 0,
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
      flags, name);
}
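
/* Typical allocation flow, as a sketch of how the helpers above compose
 * (illustrative only; real callers check each VkResult):
 *
 *    struct tu_bo *bo;
 *    VkResult result =
 *       tu_bo_init_new(dev, &bo, 4096, TU_BO_ALLOC_NO_FLAGS, "example");
 *    if (result == VK_SUCCESS && tu_bo_map(dev, bo) == VK_SUCCESS)
 *       memset(bo->map, 0, bo->size);
 *    tu_bo_finish(dev, bo);
 */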

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo **bo,
                  uint64_t size,
                  int fd);

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);

void tu_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo);

void tu_bo_set_metadata(struct tu_device *dev, struct tu_bo *bo,
                        void *metadata, uint32_t metadata_size);
int tu_bo_get_metadata(struct tu_device *dev, struct tu_bo *bo,
                       void *metadata, uint32_t metadata_size);

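/* Take an extra reference on a BO; given the refcnt field and the atomic
 * increment here, each reference is presumably dropped via tu_bo_finish().
 */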
static inline struct tu_bo *
tu_bo_get_ref(struct tu_bo *bo)
{
   p_atomic_inc(&bo->refcnt);
   return bo;
}

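/* Backend loaders: each probes the given fd and, on success, wires up the
 * corresponding tu_knl implementation (kgsl, msm, or virtio).
 */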
VkResult tu_knl_kgsl_load(struct tu_instance *instance, int fd);

struct _drmVersion;
VkResult tu_knl_drm_msm_load(struct tu_instance *instance,
                             int fd, struct _drmVersion *version,
                             struct tu_physical_device **out);
VkResult tu_knl_drm_virtio_load(struct tu_instance *instance,
                                int fd, struct _drmVersion *version,
                                struct tu_physical_device **out);

VkResult
tu_enumerate_devices(struct vk_instance *vk_instance);
VkResult
tu_physical_device_try_create(struct vk_instance *vk_instance,
                              struct _drmDevice *drm_device,
                              struct vk_physical_device **out);

VkResult
tu_drm_device_init(struct tu_device *dev);

void
tu_drm_device_finish(struct tu_device *dev);

int
tu_device_get_gpu_timestamp(struct tu_device *dev,
                            uint64_t *ts);

int
tu_device_get_suspend_count(struct tu_device *dev,
                            uint64_t *suspend_count);

VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj);

VkResult
tu_device_check_status(struct vk_device *vk_device);

int
tu_drm_submitqueue_new(struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

void
tu_drm_submitqueue_close(struct tu_device *dev, uint32_t queue_id);

VkResult
tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit);

#endif /* TU_DRM_H */