/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 */

#ifndef TU_DRM_H
#define TU_DRM_H

#include "tu_common.h"

struct tu_u_trace_syncobj;
struct vdrm_bo;

enum tu_bo_alloc_flags {
   TU_BO_ALLOC_NO_FLAGS = 0,
   TU_BO_ALLOC_ALLOW_DUMP = 1 << 0,
   TU_BO_ALLOC_GPU_READ_ONLY = 1 << 1,
   TU_BO_ALLOC_REPLAYABLE = 1 << 2,
   TU_BO_ALLOC_INTERNAL_RESOURCE = 1 << 3,
   TU_BO_ALLOC_DMABUF = 1 << 4,
   TU_BO_ALLOC_SHAREABLE = 1 << 5,
};
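
/* The flags form a bitmask and can be OR'd together. An illustrative
 * combination (the specific pairing is an example, not a recommendation):
 *
 *    enum tu_bo_alloc_flags flags = (enum tu_bo_alloc_flags)
 *       (TU_BO_ALLOC_ALLOW_DUMP | TU_BO_ALLOC_SHAREABLE);
 */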

/* tu_timeline_sync is a point type for vk_sync_timeline, implemented on top
 * of a DRM syncobj. Its handling logic is largely copied from anv_bo_sync,
 * since it can be used in a similar way to anv.
 */
enum tu_timeline_sync_state {
   /** Indicates that this is a new (or newly reset) fence */
   TU_TIMELINE_SYNC_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   TU_TIMELINE_SYNC_STATE_SUBMITTED,

   TU_TIMELINE_SYNC_STATE_SIGNALED,
};
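
/* A sync normally advances RESET -> SUBMITTED -> SIGNALED and returns to
 * RESET when it is reused (summary of the states above; the reuse step is
 * inferred from the "newly reset" wording).
 */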

enum tu_mem_sync_op {
   TU_MEM_SYNC_CACHE_TO_GPU,
   TU_MEM_SYNC_CACHE_FROM_GPU,
};

struct tu_bo {
   uint32_t gem_handle;
#ifdef TU_HAS_VIRTIO
   uint32_t res_id;
#endif
   uint64_t size;
   uint64_t iova;
   void *map;
   const char *name; /* points at the name in the matching device->bo_sizes entry */
   int32_t refcnt;

   uint32_t submit_bo_list_idx;
   uint32_t dump_bo_list_idx;

#ifdef TU_HAS_KGSL
   /* We have to store the fd returned by ion_fd_data in order to be able
    * to mmap this buffer and to export a file descriptor.
    */
   int shared_fd;
#endif

   bool implicit_sync : 1;
   bool never_unmap : 1;
   bool cached_non_coherent : 1;

   bool dump;

   /* Pointer to the vk_object_base associated with the BO,
    * for the purposes of VK_EXT_device_address_binding_report.
    */
   struct vk_object_base *base;
};

struct tu_knl {
   const char *name;

   VkResult (*device_init)(struct tu_device *dev);
   void (*device_finish)(struct tu_device *dev);
   int (*device_get_gpu_timestamp)(struct tu_device *dev, uint64_t *ts);
   int (*device_get_suspend_count)(struct tu_device *dev, uint64_t *suspend_count);
   VkResult (*device_check_status)(struct tu_device *dev);
   int (*submitqueue_new)(struct tu_device *dev, int priority, uint32_t *queue_id);
   void (*submitqueue_close)(struct tu_device *dev, uint32_t queue_id);
   VkResult (*bo_init)(struct tu_device *dev, struct vk_object_base *base,
                       struct tu_bo **out_bo, uint64_t size, uint64_t client_iova,
                       VkMemoryPropertyFlags mem_property,
                       enum tu_bo_alloc_flags flags, const char *name);
   VkResult (*bo_init_dmabuf)(struct tu_device *dev, struct tu_bo **out_bo,
                              uint64_t size, int prime_fd);
   int (*bo_export_dmabuf)(struct tu_device *dev, struct tu_bo *bo);
   VkResult (*bo_map)(struct tu_device *dev, struct tu_bo *bo, void *placed_addr);
   void (*bo_allow_dump)(struct tu_device *dev, struct tu_bo *bo);
   void (*bo_finish)(struct tu_device *dev, struct tu_bo *bo);
   void (*bo_set_metadata)(struct tu_device *dev, struct tu_bo *bo,
                           void *metadata, uint32_t metadata_size);
   int (*bo_get_metadata)(struct tu_device *dev, struct tu_bo *bo,
                          void *metadata, uint32_t metadata_size);
   void *(*submit_create)(struct tu_device *device);
   void (*submit_finish)(struct tu_device *device, void *_submit);
   void (*submit_add_entries)(struct tu_device *device, void *_submit,
                              struct tu_cs_entry *entries,
                              unsigned num_entries);
   VkResult (*queue_submit)(struct tu_queue *queue, void *_submit,
                            struct vk_sync_wait *waits, uint32_t wait_count,
                            struct vk_sync_signal *signals, uint32_t signal_count,
                            struct tu_u_trace_submission_data *u_trace_submission_data);
   VkResult (*queue_wait_fence)(struct tu_queue *queue, uint32_t fence,
                                uint64_t timeout_ns);

   const struct vk_device_entrypoint_table *device_entrypoints;
};
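
/* tu_knl is the per-backend (msm, virtio, kgsl) kernel interface vtable;
 * the free functions declared below are thin wrappers that dispatch through
 * it. A minimal sketch of one wrapper (the exact field path to the vtable
 * is assumed, not defined in this header):
 *
 *    VkResult
 *    tu_bo_map(struct tu_device *dev, struct tu_bo *bo, void *placed_addr)
 *    {
 *       return dev->instance->knl->bo_map(dev, bo, placed_addr);
 *    }
 */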

struct tu_zombie_vma {
   int fence;
   uint32_t gem_handle;
#ifdef TU_HAS_VIRTIO
   uint32_t res_id;
#endif
   uint64_t iova;
   uint64_t size;
};

struct tu_timeline_sync {
   struct vk_sync base;

   enum tu_timeline_sync_state state;
   uint32_t syncobj;
};
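
/* Because vk_sync is the first member, a generic vk_sync pointer can be
 * converted back to the containing struct (illustrative, assuming Mesa's
 * container_of() macro is available):
 *
 *    struct tu_timeline_sync *sync =
 *       container_of(vk_sync_ptr, struct tu_timeline_sync, base);
 */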

VkResult
tu_bo_init_new_explicit_iova(struct tu_device *dev,
                             struct vk_object_base *base,
                             struct tu_bo **out_bo,
                             uint64_t size,
                             uint64_t client_iova,
                             VkMemoryPropertyFlags mem_property,
                             enum tu_bo_alloc_flags flags,
                             const char *name);

static inline VkResult
tu_bo_init_new(struct tu_device *dev, struct vk_object_base *base,
               struct tu_bo **out_bo, uint64_t size,
               enum tu_bo_alloc_flags flags, const char *name)
{
   // TODO don't mark everything with HOST_VISIBLE !!! Anything that
   // never gets CPU access should not have this bit set
   return tu_bo_init_new_explicit_iova(
      dev, base, out_bo, size, 0,
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
      flags, name);
}
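
/* Example call (illustrative; the device pointer, sizing, and error
 * handling are assumed):
 *
 *    struct tu_bo *bo = NULL;
 *    VkResult result = tu_bo_init_new(dev, NULL, &bo, 4096,
 *                                     TU_BO_ALLOC_NO_FLAGS, "example");
 *    if (result == VK_SUCCESS)
 *       tu_bo_finish(dev, bo);
 */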

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo **bo,
                  uint64_t size,
                  int fd);

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo, void *placed_addr);

VkResult
tu_bo_unmap(struct tu_device *dev, struct tu_bo *bo, bool reserve);

void
tu_bo_sync_cache(struct tu_device *dev,
                 struct tu_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size,
                 enum tu_mem_sync_op op);
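
/* Typical pairing on cached, non-coherent memory (illustrative; offsets
 * and sizes are assumed):
 *
 *    CPU writes, GPU will read:
 *       tu_bo_sync_cache(dev, bo, offset, size, TU_MEM_SYNC_CACHE_TO_GPU);
 *    GPU wrote, CPU will read:
 *       tu_bo_sync_cache(dev, bo, offset, size, TU_MEM_SYNC_CACHE_FROM_GPU);
 */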

uint32_t tu_get_l1_dcache_size();

void tu_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo);

void tu_bo_set_metadata(struct tu_device *dev, struct tu_bo *bo,
                        void *metadata, uint32_t metadata_size);
int tu_bo_get_metadata(struct tu_device *dev, struct tu_bo *bo,
                       void *metadata, uint32_t metadata_size);

static inline struct tu_bo *
tu_bo_get_ref(struct tu_bo *bo)
{
   p_atomic_inc(&bo->refcnt);
   return bo;
}
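
/* Each tu_bo_get_ref() should be balanced by a tu_bo_finish(), which drops
 * a reference and releases the BO once refcnt reaches zero (pairing sketch;
 * the drop-to-zero behavior is inferred from refcnt, not stated here):
 *
 *    struct tu_bo *ref = tu_bo_get_ref(bo);
 *    ... use ref ...
 *    tu_bo_finish(dev, ref);
 */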

VkResult tu_knl_kgsl_load(struct tu_instance *instance, int fd);

struct _drmVersion;
VkResult tu_knl_drm_msm_load(struct tu_instance *instance,
                             int fd, struct _drmVersion *version,
                             struct tu_physical_device **out);
VkResult tu_knl_drm_virtio_load(struct tu_instance *instance,
                                int fd, struct _drmVersion *version,
                                struct tu_physical_device **out);

VkResult
tu_enumerate_devices(struct vk_instance *vk_instance);
VkResult
tu_physical_device_try_create(struct vk_instance *vk_instance,
                              struct _drmDevice *drm_device,
                              struct vk_physical_device **out);

VkResult
tu_drm_device_init(struct tu_device *dev);

void
tu_drm_device_finish(struct tu_device *dev);

int
tu_device_get_gpu_timestamp(struct tu_device *dev,
                            uint64_t *ts);

int
tu_device_get_suspend_count(struct tu_device *dev,
                            uint64_t *suspend_count);

VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj);

VkResult
tu_device_check_status(struct vk_device *vk_device);

int
tu_drm_submitqueue_new(struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

void
tu_drm_submitqueue_close(struct tu_device *dev, uint32_t queue_id);

void *
tu_submit_create(struct tu_device *dev);

void
tu_submit_finish(struct tu_device *dev, void *submit);

void
tu_submit_add_entries(struct tu_device *dev, void *submit,
                      struct tu_cs_entry *entries,
                      unsigned num_entries);

VkResult
tu_queue_submit(struct tu_queue *queue, void *submit,
                struct vk_sync_wait *waits, uint32_t wait_count,
                struct vk_sync_signal *signals, uint32_t signal_count,
                struct tu_u_trace_submission_data *u_trace_submission_data);

VkResult
tu_queue_wait_fence(struct tu_queue *queue, uint32_t fence,
                    uint64_t timeout_ns);
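
/* Submission lifecycle sketch (illustrative ordering of the functions
 * above; the entry array, sync setup, and NULL u_trace data are assumed):
 *
 *    void *submit = tu_submit_create(dev);
 *    tu_submit_add_entries(dev, submit, entries, num_entries);
 *    VkResult result = tu_queue_submit(queue, submit, waits, wait_count,
 *                                      signals, signal_count, NULL);
 *    tu_submit_finish(dev, submit);
 */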

#endif /* TU_DRM_H */