/*
 * Copyright © 2023 Collabora, Ltd.
 *
 * SPDX-License-Identifier: MIT
 */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <xf86drm.h>

#include "util/hash_table.h"
#include "util/macros.h"
#include "util/simple_mtx.h"

#include "drm-uapi/panfrost_drm.h"

#include "pan_kmod_backend.h"

const struct pan_kmod_ops panfrost_kmod_ops;

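/* Backend-private wrappers around the generic pan_kmod objects. Panfrost
 * exposes a single kernel-managed VM per device, hence the VM pointer
 * cached in the device wrapper.
 */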
struct panfrost_kmod_vm {
   struct pan_kmod_vm base;
};

struct panfrost_kmod_dev {
   struct pan_kmod_dev base;
   struct panfrost_kmod_vm *vm;
};

struct panfrost_kmod_bo {
   struct pan_kmod_bo base;

   /* This is actually the VA assigned to the BO at creation/import time.
    * We don't control it; it's automatically assigned by the kernel driver.
    */
   uint64_t offset;
};

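/* Allocate and initialize a pan_kmod_dev wrapping a panfrost DRM file
 * descriptor.
 */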
static struct pan_kmod_dev *
panfrost_kmod_dev_create(int fd, uint32_t flags, drmVersionPtr version,
                         const struct pan_kmod_allocator *allocator)
{
   struct panfrost_kmod_dev *panfrost_dev =
      pan_kmod_alloc(allocator, sizeof(*panfrost_dev));
   if (!panfrost_dev) {
      mesa_loge("failed to allocate a panfrost_kmod_dev object");
      return NULL;
   }

   pan_kmod_dev_init(&panfrost_dev->base, fd, flags, version,
                     &panfrost_kmod_ops, allocator);
   return &panfrost_dev->base;
}

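/* Tear down a device created by panfrost_kmod_dev_create(). */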
static void
panfrost_kmod_dev_destroy(struct pan_kmod_dev *dev)
{
   struct panfrost_kmod_dev *panfrost_dev =
      container_of(dev, struct panfrost_kmod_dev, base);

   pan_kmod_dev_cleanup(dev);
   pan_kmod_free(dev->allocator, panfrost_dev);
}

/* Abstraction over the raw drm_panfrost_get_param ioctl for fetching
 * information about devices.
 */
static __u64
panfrost_query_raw(int fd, enum drm_panfrost_param param, bool required,
                   unsigned default_value)
{
   struct drm_panfrost_get_param get_param = {};
   ASSERTED int ret;

   get_param.param = param;
   ret = drmIoctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);

   if (ret) {
      assert(!required);
      return default_value;
   }

   return get_param.value;
}

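/* Fill out pan_kmod_dev_props by querying the kernel. Properties an older
 * kernel doesn't report (required=false) fall back to hardcoded defaults.
 */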
static void
panfrost_dev_query_props(const struct pan_kmod_dev *dev,
                         struct pan_kmod_dev_props *props)
{
   int fd = dev->fd;

   memset(props, 0, sizeof(*props));
   props->gpu_prod_id =
      panfrost_query_raw(fd, DRM_PANFROST_PARAM_GPU_PROD_ID, true, 0);
   props->gpu_revision =
      panfrost_query_raw(fd, DRM_PANFROST_PARAM_GPU_REVISION, true, 0);
   props->shader_present =
      panfrost_query_raw(fd, DRM_PANFROST_PARAM_SHADER_PRESENT, false, 0xffff);
   props->tiler_features =
      panfrost_query_raw(fd, DRM_PANFROST_PARAM_TILER_FEATURES, false, 0x809);
   props->mem_features =
      panfrost_query_raw(fd, DRM_PANFROST_PARAM_MEM_FEATURES, true, 0);
   props->mmu_features =
      panfrost_query_raw(fd, DRM_PANFROST_PARAM_MMU_FEATURES, false, 0);

   for (unsigned i = 0; i < ARRAY_SIZE(props->texture_features); i++) {
      /* If unspecified, assume ETC/ASTC only. This is the factory default
       * on Juno and should exist on any Mali configuration. All hardware
       * should report these texture formats, but the kernel might not be
       * new enough to expose them.
       */
      static const uint32_t default_tex_features[4] = {
         (1 << 1) |  // ETC2 RGB8
         (1 << 2) |  // ETC2 R11 UNORM
         (1 << 3) |  // ETC2 RGBA8
         (1 << 4) |  // ETC2 RG11 UNORM
         (1 << 17) | // ETC2 R11 SNORM
         (1 << 18) | // ETC2 RG11 SNORM
         (1 << 19) | // ETC2 RGB8A1
         (1 << 20) | // ASTC 3D LDR
         (1 << 21) | // ASTC 3D HDR
         (1 << 22) | // ASTC 2D LDR
         (1 << 23),  // ASTC 2D HDR
         0,
         0,
         0,
      };

      props->texture_features[i] =
         panfrost_query_raw(fd, DRM_PANFROST_PARAM_TEXTURE_FEATURES0 + i,
                            false, default_tex_features[i]);
   }

   props->thread_tls_alloc =
      panfrost_query_raw(fd, DRM_PANFROST_PARAM_THREAD_TLS_ALLOC, false, 0);
   props->afbc_features =
      panfrost_query_raw(fd, DRM_PANFROST_PARAM_AFBC_FEATURES, false, 0);
}

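/* Translate pan_kmod BO flags into panfrost UAPI flags. The version check
 * matters because the PANFROST_BO_* flags are only honored from kernel
 * driver version 1.1 onwards; older kernels get no flags at all.
 */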
static uint32_t
to_panfrost_bo_flags(struct pan_kmod_dev *dev, uint32_t flags)
{
   uint32_t panfrost_flags = 0;

   if (dev->driver.version.major > 1 || dev->driver.version.minor >= 1) {
      /* The alloc-on-fault feature is only used for the tiler heap object,
       * hence the name of the flag in the panfrost UAPI.
       */
      if (flags & PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT)
         panfrost_flags |= PANFROST_BO_HEAP;

      if (!(flags & PAN_KMOD_BO_FLAG_EXECUTABLE))
         panfrost_flags |= PANFROST_BO_NOEXEC;
   }

   return panfrost_flags;
}

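/* Allocate a BO through DRM_IOCTL_PANFROST_CREATE_BO. The kernel picks the
 * GPU VA at creation time and returns it in req.offset; we stash it so
 * panfrost_kmod_vm_bind() can report it back later.
 */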
static struct pan_kmod_bo *
panfrost_kmod_bo_alloc(struct pan_kmod_dev *dev,
                       struct pan_kmod_vm *exclusive_vm, size_t size,
                       uint32_t flags)
{
   /* We can't map GPU uncached. */
   if (flags & PAN_KMOD_BO_FLAG_GPU_UNCACHED)
      return NULL;

   struct panfrost_kmod_bo *bo = pan_kmod_dev_alloc(dev, sizeof(*bo));
   if (!bo)
      return NULL;

   struct drm_panfrost_create_bo req = {
      .size = size,
      .flags = to_panfrost_bo_flags(dev, flags),
   };

   int ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_CREATE_BO, &req);
   if (ret) {
      mesa_loge("DRM_IOCTL_PANFROST_CREATE_BO failed (err=%d)", errno);
      goto err_free_bo;
   }

   pan_kmod_bo_init(&bo->base, dev, exclusive_vm, req.size, flags, req.handle);
   bo->offset = req.offset;
   return &bo->base;

err_free_bo:
   pan_kmod_dev_free(dev, bo);
   return NULL;
}

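/* Release a BO handle. The kernel unmaps the BO from the GPU VA space when
 * the last reference to the handle goes away.
 */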
static void
panfrost_kmod_bo_free(struct pan_kmod_bo *bo)
{
   drmCloseBufferHandle(bo->dev->fd, bo->handle);
   pan_kmod_dev_free(bo->dev, bo);
}

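/* Wrap an imported GEM handle. Imported BOs don't go through CREATE_BO, so
 * the kernel-assigned VA has to be queried explicitly with GET_BO_OFFSET.
 */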
static struct pan_kmod_bo *
panfrost_kmod_bo_import(struct pan_kmod_dev *dev, uint32_t handle, size_t size,
                        uint32_t flags)
{
   struct panfrost_kmod_bo *panfrost_bo =
      pan_kmod_dev_alloc(dev, sizeof(*panfrost_bo));
   if (!panfrost_bo) {
      mesa_loge("failed to allocate a panfrost_kmod_bo object");
      return NULL;
   }

   struct drm_panfrost_get_bo_offset get_bo_offset = {.handle = handle};
   int ret =
      drmIoctl(dev->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
   if (ret) {
      mesa_loge("DRM_IOCTL_PANFROST_GET_BO_OFFSET failed (err=%d)", errno);
      goto err_free_bo;
   }

   panfrost_bo->offset = get_bo_offset.offset;

   pan_kmod_bo_init(&panfrost_bo->base, dev, NULL, size,
                    flags | PAN_KMOD_BO_FLAG_IMPORTED, handle);
   return &panfrost_bo->base;

err_free_bo:
   pan_kmod_dev_free(dev, panfrost_bo);
   return NULL;
}

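/* Return the fake offset to pass to mmap() for CPU access to the BO. */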
static off_t
panfrost_kmod_bo_get_mmap_offset(struct pan_kmod_bo *bo)
{
   struct drm_panfrost_mmap_bo mmap_bo = {.handle = bo->handle};
   int ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
   if (ret) {
      mesa_loge("DRM_IOCTL_PANFROST_MMAP_BO failed (err=%d)", errno);
      assert(0);
   }

   return mmap_bo.offset;
}

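/* Wait for pending GPU accesses to a BO to complete, or for timeout_ns to
 * expire. for_read_only_access is ignored: the panfrost kernel driver
 * doesn't distinguish read waits from write waits.
 */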
static bool
panfrost_kmod_bo_wait(struct pan_kmod_bo *bo, int64_t timeout_ns,
                      bool for_read_only_access)
{
   struct drm_panfrost_wait_bo req = {
      .handle = bo->handle,
      .timeout_ns = timeout_ns,
   };

   /* The ioctl returns a value >= 0 when the BO we are waiting for is
    * ready, and -1 otherwise.
    */
   if (drmIoctl(bo->dev->fd, DRM_IOCTL_PANFROST_WAIT_BO, &req) != -1)
      return true;

   assert(errno == ETIMEDOUT || errno == EBUSY);
   return false;
}

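/* Tell the kernel it may reclaim the BO's pages under memory pressure.
 * The handle stays valid; the content does not.
 */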
static void
panfrost_kmod_bo_make_evictable(struct pan_kmod_bo *bo)
{
   struct drm_panfrost_madvise req = {
      .handle = bo->handle,
      .madv = PANFROST_MADV_DONTNEED,
   };

   drmIoctl(bo->dev->fd, DRM_IOCTL_PANFROST_MADVISE, &req);
}

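/* Pin the BO's pages again. Returns false if the madvise ioctl succeeded
 * but the kernel already reclaimed the pages (req.retained == 0), in which
 * case the BO content is lost.
 */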
static bool
panfrost_kmod_bo_make_unevictable(struct pan_kmod_bo *bo)
{
   struct drm_panfrost_madvise req = {
      .handle = bo->handle,
      .madv = PANFROST_MADV_WILLNEED,
   };

   if (drmIoctl(bo->dev->fd, DRM_IOCTL_PANFROST_MADVISE, &req) == 0 &&
       req.retained == 0)
      return false;

   return true;
}

/* The VA range is restricted by the kernel driver: the lower 32 MB are
 * reserved, and the address space is limited to 32 bits.
 */
#define PANFROST_KMOD_VA_START 0x2000000ull
#define PANFROST_KMOD_VA_END   (1ull << 32)

static struct pan_kmod_va_range
panfrost_kmod_dev_query_user_va_range(const struct pan_kmod_dev *dev)
{
   return (struct pan_kmod_va_range){
      .start = PANFROST_KMOD_VA_START,
      .size = PANFROST_KMOD_VA_END - PANFROST_KMOD_VA_START,
   };
}

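/* Create the (unique) VM for a device. The panfrost kernel driver manages
 * the GPU VA space itself, so the only supported configuration is a single
 * VM with automatic VA assignment.
 */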
static struct pan_kmod_vm *
panfrost_kmod_vm_create(struct pan_kmod_dev *dev, uint32_t flags,
                        uint64_t va_start, uint64_t va_range)
{
   struct panfrost_kmod_dev *panfrost_dev =
      container_of(dev, struct panfrost_kmod_dev, base);

   /* Only one VM per device. */
   if (panfrost_dev->vm) {
      mesa_loge("panfrost_kmod only supports one VM per device");
      return NULL;
   }

   /* The panfrost kernel driver doesn't support userspace VA management. */
   if (!(flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
      mesa_loge("panfrost_kmod only supports PAN_KMOD_VM_FLAG_AUTO_VA");
      assert(0);
      return NULL;
   }

   struct panfrost_kmod_vm *vm = pan_kmod_dev_alloc(dev, sizeof(*vm));
   if (!vm) {
      mesa_loge("failed to allocate a panfrost_kmod_vm object");
      return NULL;
   }

   pan_kmod_vm_init(&vm->base, dev, 0, flags);
   panfrost_dev->vm = vm;
   return &vm->base;
}

static void
panfrost_kmod_vm_destroy(struct pan_kmod_vm *vm)
{
   struct panfrost_kmod_dev *panfrost_dev =
      container_of(vm->dev, struct panfrost_kmod_dev, base);

   panfrost_dev->vm = NULL;
   pan_kmod_dev_free(vm->dev, vm);
}

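/* VM bind is mostly a formality on panfrost: the kernel assigns the GPU VA
 * at BO creation/import time, so a MAP op just reports the kernel-chosen
 * address back through ops[i].va.start, and UNMAP is a no-op (unmapping
 * happens when the BO is destroyed).
 */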
static int
panfrost_kmod_vm_bind(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
                      struct pan_kmod_vm_op *ops, uint32_t op_count)
{
   UNUSED struct panfrost_kmod_vm *panfrost_vm =
      container_of(vm, struct panfrost_kmod_vm, base);

   /* We only support the IMMEDIATE and DEFER_TO_NEXT_IDLE_POINT modes. In
    * practice we always defer to the next idle point, but that shouldn't
    * matter.
    */
   if (mode != PAN_KMOD_VM_OP_MODE_IMMEDIATE &&
       mode != PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT) {
      mesa_loge("panfrost_kmod doesn't support mode=%d", mode);
      assert(0);
      return -1;
   }

   for (uint32_t i = 0; i < op_count; i++) {
      if (ops[i].type == PAN_KMOD_VM_OP_TYPE_MAP) {
         struct panfrost_kmod_bo *panfrost_bo =
            container_of(ops[i].map.bo, struct panfrost_kmod_bo, base);

         /* The panfrost kernel driver doesn't support userspace VA
          * management.
          */
         if (ops[i].va.start != PAN_KMOD_VM_MAP_AUTO_VA) {
            mesa_loge("panfrost_kmod can only do auto-VA allocation");
            assert(0);
            return -1;
         }

         /* The panfrost kernel driver only supports full BO mapping. */
         if (ops[i].map.bo_offset != 0 ||
             ops[i].va.size != ops[i].map.bo->size) {
            mesa_loge("panfrost_kmod doesn't support partial BO mapping");
            assert(0);
            return -1;
         }

         ops[i].va.start = panfrost_bo->offset;
      } else if (ops[i].type == PAN_KMOD_VM_OP_TYPE_UNMAP) {
         /* Do nothing, unmapping is done at BO destruction time. */
      } else {
         /* We reject PAN_KMOD_VM_OP_TYPE_SYNC_ONLY as it implies support
          * for PAN_KMOD_VM_OP_MODE_ASYNC, which we don't have.
          */
         mesa_loge("panfrost_kmod doesn't support op=%d", ops[i].type);
         assert(0);
         return -1;
      }
   }

   return 0;
}

const struct pan_kmod_ops panfrost_kmod_ops = {
   .dev_create = panfrost_kmod_dev_create,
   .dev_destroy = panfrost_kmod_dev_destroy,
   .dev_query_props = panfrost_dev_query_props,
   .dev_query_user_va_range = panfrost_kmod_dev_query_user_va_range,
   .bo_alloc = panfrost_kmod_bo_alloc,
   .bo_free = panfrost_kmod_bo_free,
   .bo_import = panfrost_kmod_bo_import,
   .bo_get_mmap_offset = panfrost_kmod_bo_get_mmap_offset,
   .bo_wait = panfrost_kmod_bo_wait,
   .bo_make_evictable = panfrost_kmod_bo_make_evictable,
   .bo_make_unevictable = panfrost_kmod_bo_make_unevictable,
   .vm_create = panfrost_kmod_vm_create,
   .vm_destroy = panfrost_kmod_vm_destroy,
   .vm_bind = panfrost_kmod_vm_bind,
};