/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#ifdef MAJOR_IN_MKDEV
#include <sys/mkdev.h>
#endif
#ifdef MAJOR_IN_SYSMACROS
#include <sys/sysmacros.h>
#endif
#include <sys/stat.h>

#include "util/libdrm.h"

#include "tu_device.h"
#include "tu_knl.h"

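/* Each entrypoint below is a thin wrapper that dispatches through the
 * tu_knl function table installed at enumeration time by the msm, virtio,
 * or kgsl backend.
 */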
VkResult
tu_bo_init_new_explicit_iova(struct tu_device *dev,
                             struct tu_bo **out_bo,
                             uint64_t size,
                             uint64_t client_iova,
                             VkMemoryPropertyFlags mem_property,
                             enum tu_bo_alloc_flags flags, const char *name)
{
   return dev->instance->knl->bo_init(dev, out_bo, size, client_iova,
                                      mem_property, flags, name);
}

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo **bo,
                  uint64_t size,
                  int fd)
{
   return dev->instance->knl->bo_init_dmabuf(dev, bo, size, fd);
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return dev->instance->knl->bo_export_dmabuf(dev, bo);
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   dev->instance->knl->bo_finish(dev, bo);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   return dev->instance->knl->bo_map(dev, bo);
}

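/* Mark the BO to be included in kernel GPU crash dumps. */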
void
tu_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo)
{
   dev->instance->knl->bo_allow_dump(dev, bo);
}

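/* The metadata hooks are optional: backends that do not implement them
 * make tu_bo_set_metadata() a no-op and tu_bo_get_metadata() fail with
 * -ENOSYS.
 */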
void
tu_bo_set_metadata(struct tu_device *dev, struct tu_bo *bo,
                   void *metadata, uint32_t metadata_size)
{
   if (!dev->instance->knl->bo_set_metadata)
      return;
   dev->instance->knl->bo_set_metadata(dev, bo, metadata, metadata_size);
}

int
tu_bo_get_metadata(struct tu_device *dev, struct tu_bo *bo,
                   void *metadata, uint32_t metadata_size)
{
   if (!dev->instance->knl->bo_get_metadata)
      return -ENOSYS;
   return dev->instance->knl->bo_get_metadata(dev, bo, metadata, metadata_size);
}

VkResult
tu_drm_device_init(struct tu_device *dev)
{
   return dev->instance->knl->device_init(dev);
}

void
tu_drm_device_finish(struct tu_device *dev)
{
   dev->instance->knl->device_finish(dev);
}

int
tu_device_get_gpu_timestamp(struct tu_device *dev,
                            uint64_t *ts)
{
   return dev->instance->knl->device_get_gpu_timestamp(dev, ts);
}

int
tu_device_get_suspend_count(struct tu_device *dev,
                            uint64_t *suspend_count)
{
   return dev->instance->knl->device_get_suspend_count(dev, suspend_count);
}

VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
{
   return dev->instance->knl->device_wait_u_trace(dev, syncobj);
}

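/* vk_device::check_status hook: lets the common Vulkan runtime poll the
 * kernel for device loss (e.g. after a GPU fault or reset).
 */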
VkResult
tu_device_check_status(struct vk_device *vk_device)
{
   struct tu_device *dev = container_of(vk_device, struct tu_device, vk);
   return dev->instance->knl->device_check_status(dev);
}

int
tu_drm_submitqueue_new(struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id)
{
   return dev->instance->knl->submitqueue_new(dev, priority, queue_id);
}

void
tu_drm_submitqueue_close(struct tu_device *dev, uint32_t queue_id)
{
   dev->instance->knl->submitqueue_close(dev, queue_id);
}

VkResult
tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit)
{
   struct tu_queue *queue = container_of(vk_queue, struct tu_queue, vk);
   return queue->device->instance->knl->queue_submit(queue, submit);
}

/**
 * Enumeration entrypoint specific to non-DRM devices (i.e. kgsl)
 */
VkResult
tu_enumerate_devices(struct vk_instance *vk_instance)
{
#ifdef TU_HAS_KGSL
   struct tu_instance *instance =
      container_of(vk_instance, struct tu_instance, vk);

   static const char path[] = "/dev/kgsl-3d0";
   int fd;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      if (errno == ENOENT)
         return VK_ERROR_INCOMPATIBLE_DRIVER;

      return vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                       "failed to open device %s", path);
   }

   VkResult result = tu_knl_kgsl_load(instance, fd);
   if (result != VK_SUCCESS) {
      close(fd);
      return result;
   }

   if (TU_DEBUG(STARTUP))
      mesa_logi("Found compatible device '%s'.", path);

   return result;
#else
   return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
}

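/* Despite the name, this returns the L1 data-cache *line* size in bytes
 * (or 0 if unknown).  On aarch64, CTR_EL0.DminLine (bits 19:16) holds
 * log2 of the smallest data-cache line size in 4-byte words, hence
 * 4 << DminLine bytes.
 */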
static long
l1_dcache_size(void)
{
   if (!(DETECT_ARCH_AARCH64 || DETECT_ARCH_X86 || DETECT_ARCH_X86_64))
      return 0;

#if DETECT_ARCH_AARCH64 && \
   (!defined(_SC_LEVEL1_DCACHE_LINESIZE) || DETECT_OS_ANDROID)
   /* Bionic does not implement _SC_LEVEL1_DCACHE_LINESIZE properly: */
   uint64_t ctr_el0;
   asm("mrs\t%x0, ctr_el0" : "=r"(ctr_el0));
   return 4 << ((ctr_el0 >> 16) & 0xf);
#elif defined(_SC_LEVEL1_DCACHE_LINESIZE)
   return sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
#else
   return 0;
#endif
}

/**
 * Enumeration entrypoint for DRM devices
 */
VkResult
tu_physical_device_try_create(struct vk_instance *vk_instance,
                              struct _drmDevice *drm_device,
                              struct vk_physical_device **out)
{
   struct tu_instance *instance =
      container_of(vk_instance, struct tu_instance, vk);

   /* Note that "msm" is a platform device, but "virtio_gpu" is a pci
    * device. In general we shouldn't care about the bus type.
    */
   if (!(drm_device->available_nodes & (1 << DRM_NODE_RENDER)))
      return VK_ERROR_INCOMPATIBLE_DRIVER;

   const char *primary_path = drm_device->nodes[DRM_NODE_PRIMARY];
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_startup_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                               "failed to open device %s", path);
   }

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_startup_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                               "failed to query kernel driver version for device %s",
                               path);
   }

   struct tu_physical_device *device = NULL;

   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   if (strcmp(version->name, "msm") == 0) {
#ifdef TU_HAS_MSM
      result = tu_knl_drm_msm_load(instance, fd, version, &device);
#endif
   } else if (strcmp(version->name, "virtio_gpu") == 0) {
#ifdef TU_HAS_VIRTIO
      result = tu_knl_drm_virtio_load(instance, fd, version, &device);
#endif
   } else if (TU_DEBUG(STARTUP)) {
      result = vk_startup_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                                 "device %s (%s) is not compatible with turnip",
                                 path, version->name);
   }

   if (result != VK_SUCCESS)
      goto out;

   assert(device);

   device->level1_dcache_size = l1_dcache_size();
   device->has_cached_non_coherent_memory = device->level1_dcache_size > 0;

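   /* The primary (master) node is only needed when VK_KHR_display is
    * enabled; failure to open it is tolerated and leaves master_fd at -1.
    */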
   if (instance->vk.enabled_extensions.KHR_display) {
      master_fd = open(primary_path, O_RDWR | O_CLOEXEC);
   }

   device->master_fd = master_fd;

   assert(strlen(path) < ARRAY_SIZE(device->fd_path));
   snprintf(device->fd_path, ARRAY_SIZE(device->fd_path), "%s", path);

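   /* Record the device numbers of the primary and render nodes; these back
    * VK_EXT_physical_device_drm. A missing primary node is not fatal, but
    * the render node must be stat-able.
    */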
   struct stat st;

   if (stat(primary_path, &st) == 0) {
      device->has_master = true;
      device->master_major = major(st.st_rdev);
      device->master_minor = minor(st.st_rdev);
   } else {
      device->has_master = false;
      device->master_major = 0;
      device->master_minor = 0;
   }

   if (stat(path, &st) == 0) {
      device->has_local = true;
      device->local_major = major(st.st_rdev);
      device->local_minor = minor(st.st_rdev);
   } else {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "failed to stat DRM render node %s", path);
      goto out;
   }

   result = tu_physical_device_init(device, instance);
   if (result != VK_SUCCESS)
      goto out;

   if (TU_DEBUG(STARTUP))
      mesa_logi("Found compatible device '%s' (%s).", path, version->name);

   *out = &device->vk;

out:
   if (result != VK_SUCCESS) {
      if (master_fd != -1)
         close(master_fd);
      close(fd);
      vk_free(&instance->vk.alloc, device);
   }

   drmFreeVersion(version);

   return result;
}