/*
 * Copyright © 2023 Collabora, Ltd.
 *
 * SPDX-License-Identifier: MIT
 *
 * This file exposes some core KMD functionality in a driver-agnostic way.
 * The drivers are still assumed to be regular DRM drivers, such that some
 * operations can be handled generically.
 *
 * Any operation that's too specific to be abstracted can either have a
 * backend-specific helper exposed through pan_kmod_<backend>.h, or no helper
 * at all (in the latter case, users are expected to call the ioctl directly).
 *
 * If some operations are not natively supported by a KMD, the kmod backend
 * should fail or emulate the functionality (if deemed necessary).
 */

#pragma once

#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drm-uapi/drm.h"

#include "util/log.h"
#include "util/macros.h"
#include "util/os_file.h"
#include "util/os_mman.h"
#include "util/ralloc.h"
#include "util/simple_mtx.h"
#include "util/sparse_array.h"
#include "util/u_atomic.h"

#if defined(__cplusplus)
extern "C" {
#endif

struct pan_kmod_dev;

/* GPU VM creation flags. */
enum pan_kmod_vm_flags {
   /* Set if you want the VM to automatically assign virtual addresses when
    * pan_kmod_vm_map() is called. If this flag is set, all pan_kmod_vm_map()
    * calls must pass va=PAN_KMOD_VM_MAP_AUTO_VA.
    */
   PAN_KMOD_VM_FLAG_AUTO_VA = BITFIELD_BIT(0),
};
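
/* Non-normative usage sketch: create an auto-VA VM spanning the whole user
 * VA range, then let the kernel pick addresses at map time. The variable
 * names are illustrative only.
 *
 *    struct pan_kmod_va_range range = pan_kmod_dev_query_user_va_range(dev);
 *    struct pan_kmod_vm *vm =
 *       pan_kmod_vm_create(dev, PAN_KMOD_VM_FLAG_AUTO_VA,
 *                          range.start, range.size);
 *
 * Map operations on such a VM must use va.start = PAN_KMOD_VM_MAP_AUTO_VA;
 * the kernel-chosen address is written back to va.start.
 */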

/* Object representing a GPU VM. */
struct pan_kmod_vm {
   /* Combination of pan_kmod_vm_flags flags. */
   uint32_t flags;

   /* The VM handle returned by the KMD. If the KMD supports only one VM per
    * context, this should be zero.
    */
   uint32_t handle;

   /* Device this VM was created from. */
   struct pan_kmod_dev *dev;
};

/* Buffer object flags. */
enum pan_kmod_bo_flags {
   /* Allow GPU execution on this buffer. */
   PAN_KMOD_BO_FLAG_EXECUTABLE = BITFIELD_BIT(0),

   /* Allocate memory when a GPU fault occurs instead of allocating
    * up-front.
    */
   PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT = BITFIELD_BIT(1),

   /* If set, the buffer object will never be CPU-mapped in userspace. */
   PAN_KMOD_BO_FLAG_NO_MMAP = BITFIELD_BIT(2),

   /* Set when the buffer object has been exported. Users don't directly
    * control this flag, it's set when pan_kmod_bo_export() is called.
    */
   PAN_KMOD_BO_FLAG_EXPORTED = BITFIELD_BIT(3),

   /* Set when the buffer object has been imported. Users don't directly
    * control this flag, it's set when pan_kmod_bo_import() is called.
    */
   PAN_KMOD_BO_FLAG_IMPORTED = BITFIELD_BIT(4),

   /* If set, the buffer is mapped GPU-uncached when pan_kmod_vm_map()
    * is called.
    */
   PAN_KMOD_BO_FLAG_GPU_UNCACHED = BITFIELD_BIT(5),
};
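
/* Non-normative sketch: allocate a CPU-invisible heap buffer that is grown on
 * GPU fault and is private to one VM. The "dev"/"vm" pointers and the size
 * are assumptions made for the example.
 *
 *    struct pan_kmod_bo *heap =
 *       pan_kmod_bo_alloc(dev, vm, 16 * 1024 * 1024,
 *                         PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT |
 *                         PAN_KMOD_BO_FLAG_NO_MMAP);
 *    if (!heap)
 *       return; // allocation failed
 */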

/* Buffer object. */
struct pan_kmod_bo {
   /* Atomic reference count. The only reason we need to refcnt BOs at this
    * level is because of how DRM prime import works: the import logic
    * returns the handle of an existing object if the object was previously
    * imported or was created by the driver.
    * In order to avoid calling GEM_CLOSE on an object that's still supposed
    * to be active, we need to count the number of users left.
    */
   int32_t refcnt;

   /* Size of the buffer object. */
   size_t size;

   /* Handle attached to the buffer object. */
   uint32_t handle;

   /* Combination of pan_kmod_bo_flags flags. */
   uint32_t flags;

   /* If non-NULL, the buffer object can only be mapped on this VM. Typically
    * the case for all internal/non-shareable buffers. The backend can
    * optimize things based on this information. Calling pan_kmod_bo_export()
    * on such buffer objects is forbidden.
    */
   struct pan_kmod_vm *exclusive_vm;

   /* The device this buffer object was created from. */
   struct pan_kmod_dev *dev;

   /* User private data. Use pan_kmod_bo_{set,get}_user_priv() to access it. */
   void *user_priv;
};

/* List of GPU properties needed by the UMD. */
struct pan_kmod_dev_props {
   /* GPU product ID. */
   uint32_t gpu_prod_id;

   /* GPU revision. */
   uint32_t gpu_revision;

   /* Bitmask encoding the set of shader cores exposed by the GPU. */
   uint64_t shader_present;

   /* Tiler feature bits. */
   uint32_t tiler_features;

   /* Memory-related feature bits. */
   uint32_t mem_features;

   /* MMU feature bits. */
   uint32_t mmu_features;
#define MMU_FEATURES_VA_BITS(mmu_features) ((mmu_features) & 0xff)

   /* Texture feature bits. */
   uint32_t texture_features[4];

   /* Maximum number of threads per core. */
   uint32_t thread_tls_alloc;

   /* AFBC feature bits. */
   uint32_t afbc_features;
};
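
/* Non-normative sketch: derive the size of the GPU virtual address space from
 * the MMU feature bits, mirroring what pan_kmod_dev_query_user_va_range()
 * does below when the backend doesn't provide its own implementation.
 *
 *    struct pan_kmod_dev_props props;
 *
 *    pan_kmod_dev_query_props(dev, &props);
 *    uint64_t va_size = 1ull << MMU_FEATURES_VA_BITS(props.mmu_features);
 */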

/* Memory allocator for kmod internal allocations. */
struct pan_kmod_allocator {
   /* Allocate and zero-initialize. */
   void *(*zalloc)(const struct pan_kmod_allocator *allocator, size_t size,
                   bool transient);

   /* Free. */
   void (*free)(const struct pan_kmod_allocator *allocator, void *data);

   /* Allocator private data. Can be NULL if unused. */
   void *priv;
};
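
/* Non-normative sketch: a minimal allocator backed by the C runtime that
 * ignores the "transient" hint. A real user would typically plug in something
 * smarter (a ralloc context, a dedicated pool for transient allocations, ...).
 *
 *    static void *
 *    example_zalloc(const struct pan_kmod_allocator *allocator, size_t size,
 *                   bool transient)
 *    {
 *       return calloc(1, size);
 *    }
 *
 *    static void
 *    example_free(const struct pan_kmod_allocator *allocator, void *data)
 *    {
 *       free(data);
 *    }
 *
 *    static const struct pan_kmod_allocator example_allocator = {
 *       .zalloc = example_zalloc,
 *       .free = example_free,
 *    };
 */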

/* Synchronization type. */
enum pan_kmod_sync_type {
   PAN_KMOD_SYNC_TYPE_WAIT = 0,
   PAN_KMOD_SYNC_TYPE_SIGNAL,
};

/* Synchronization operation. */
struct pan_kmod_sync_op {
   /* Type of operation. */
   enum pan_kmod_sync_type type;

   /* Syncobj handle. */
   uint32_t handle;

   /* Syncobj point. Zero for binary syncobjs. */
   uint64_t point;
};
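
/* Non-normative sketch: wait on point 1 of a timeline syncobj and signal a
 * binary syncobj when the VM operation completes. The syncobj handles are
 * assumed to have been created with drmSyncobjCreate().
 *
 *    const struct pan_kmod_sync_op syncs[] = {
 *       { .type = PAN_KMOD_SYNC_TYPE_WAIT, .handle = in_syncobj, .point = 1 },
 *       { .type = PAN_KMOD_SYNC_TYPE_SIGNAL, .handle = out_syncobj, .point = 0 },
 *    };
 */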

/* Special value passed to pan_kmod_vm_map() to signify that the VM should
 * automatically allocate a VA. Only valid if the VM was created with
 * PAN_KMOD_VM_FLAG_AUTO_VA.
 */
#define PAN_KMOD_VM_MAP_AUTO_VA ~0ull

/* Special value returned when a vm_map() operation fails. */
#define PAN_KMOD_VM_MAP_FAILED ~0ull

/* VM operations can be executed in different modes. */
enum pan_kmod_vm_op_mode {
   /* The map/unmap operation is executed immediately, which might cause
    * GPU faults if the GPU was still accessing buffers when we unmap or
    * remap.
    */
   PAN_KMOD_VM_OP_MODE_IMMEDIATE,

   /* The map/unmap operation is executed asynchronously, and the user
    * provides explicit wait/signal sync operations.
    */
   PAN_KMOD_VM_OP_MODE_ASYNC,

   /* The map/unmap operation is executed when the next GPU/VM idle-point
    * is reached. This guarantees fault-free unmap/remap operations when the
    * kmod user doesn't want to deal with synchronization explicitly.
    */
   PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT,
};

/* VM operation type. */
enum pan_kmod_vm_op_type {
   /* Map a buffer object. */
   PAN_KMOD_VM_OP_TYPE_MAP,

   /* Unmap a VA range. */
   PAN_KMOD_VM_OP_TYPE_UNMAP,

   /* Do nothing. Used as a way to execute sync operations on a VM queue,
    * without touching the VM.
    */
   PAN_KMOD_VM_OP_TYPE_SYNC_ONLY,
};

/* VM operation data. */
struct pan_kmod_vm_op {
   /* The type of operation being requested. */
   enum pan_kmod_vm_op_type type;

   /* VA range. */
   struct {
      /* Start of the VA range.
       * Must be PAN_KMOD_VM_MAP_AUTO_VA if PAN_KMOD_VM_FLAG_AUTO_VA was set
       * at VM creation time. In that case, the allocated VA is returned
       * in this field.
       */
      uint64_t start;

      /* Size of the VA range. */
      size_t size;
   } va;

   union {
      /* Arguments specific to map operations. */
      struct {
         /* Buffer object to map. */
         struct pan_kmod_bo *bo;

         /* Offset in the buffer object. */
         off_t bo_offset;
      } map;
   };

   /* Synchronization operations attached to the VM operation. */
   struct {
      /* Number of synchronization operations. Must be zero if mode is
       * PAN_KMOD_VM_OP_MODE_IMMEDIATE or
       * PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT.
       */
      uint32_t count;

      /* Array of synchronization operation descriptors. NULL if count is
       * zero.
       */
      const struct pan_kmod_sync_op *array;
   } syncs;
};
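
/* Non-normative sketch: synchronously map a whole buffer object at a
 * kernel-chosen address on an auto-VA VM. "vm" and "bo" are assumed to exist;
 * on success the chosen GPU VA is found in op.va.start. In IMMEDIATE mode the
 * syncs field is left zero-initialized, as required.
 *
 *    struct pan_kmod_vm_op op = {
 *       .type = PAN_KMOD_VM_OP_TYPE_MAP,
 *       .va = {
 *          .start = PAN_KMOD_VM_MAP_AUTO_VA,
 *          .size = pan_kmod_bo_size(bo),
 *       },
 *       .map = {
 *          .bo = bo,
 *          .bo_offset = 0,
 *       },
 *    };
 *
 *    if (pan_kmod_vm_bind(vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1))
 *       return; // bind failed
 *
 *    uint64_t gpu_va = op.va.start;
 */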

/* VM state. */
enum pan_kmod_vm_state {
   PAN_KMOD_VM_USABLE,
   PAN_KMOD_VM_FAULTY,
};

/* Device flags. */
enum pan_kmod_dev_flags {
   /* Set when the fd passed to pan_kmod_dev_create() should be owned (and
    * closed) by the device, if and only if device creation succeeds.
    */
   PAN_KMOD_DEV_FLAG_OWNS_FD = (1 << 0),
};
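
/* Non-normative sketch: open a render node and hand ownership of the fd over
 * to the kmod device. The device path is an example, and "allocator" is
 * assumed to be a struct pan_kmod_allocator set up as in the allocator sketch
 * above.
 *
 *    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *    struct pan_kmod_dev *dev =
 *       pan_kmod_dev_create(fd, PAN_KMOD_DEV_FLAG_OWNS_FD, &allocator);
 *    if (!dev)
 *       close(fd); // creation failed, so the fd is still ours to close
 */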

/* Encode a virtual address range. */
struct pan_kmod_va_range {
   /* Start of the VA range. */
   uint64_t start;

   /* Size of the VA range. */
   uint64_t size;
};

/* KMD backend vtable.
 *
 * All methods described here are mandatory, unless explicitly flagged as
 * optional.
 */
struct pan_kmod_ops {
   /* Create a pan_kmod_dev object.
    * Return NULL if the creation fails for any reason.
    */
   struct pan_kmod_dev *(*dev_create)(
      int fd, uint32_t flags, const drmVersionPtr version,
      const struct pan_kmod_allocator *allocator);

   /* Destroy a pan_kmod_dev object. */
   void (*dev_destroy)(struct pan_kmod_dev *dev);

   /* Query device properties. */
   void (*dev_query_props)(const struct pan_kmod_dev *dev,
                           struct pan_kmod_dev_props *props);

   /* Query the maximum user VA range.
    * Users are free to use a subset of this range if they need less VA space.
    * This method is optional; when not specified, kmod assumes the whole VA
    * space (extracted from MMU_FEATURES.VA_BITS) is usable.
    */
   struct pan_kmod_va_range (*dev_query_user_va_range)(
      const struct pan_kmod_dev *dev);

   /* Allocate a buffer object.
    * Return NULL if the creation fails for any reason.
    */
   struct pan_kmod_bo *(*bo_alloc)(struct pan_kmod_dev *dev,
                                   struct pan_kmod_vm *exclusive_vm,
                                   size_t size, uint32_t flags);

   /* Free a buffer object. */
   void (*bo_free)(struct pan_kmod_bo *bo);

   /* Import a buffer object.
    * Return NULL if the import fails for any reason.
    */
   struct pan_kmod_bo *(*bo_import)(struct pan_kmod_dev *dev, uint32_t handle,
                                    size_t size, uint32_t flags);

   /* Post-export operations.
    * Return 0 on success, -1 otherwise.
    * This method is optional.
    */
   int (*bo_export)(struct pan_kmod_bo *bo, int dmabuf_fd);

   /* Get the file offset to use to mmap() a buffer object. */
   off_t (*bo_get_mmap_offset)(struct pan_kmod_bo *bo);

   /* Wait for a buffer object to be ready for read or read/write accesses. */
   bool (*bo_wait)(struct pan_kmod_bo *bo, int64_t timeout_ns,
                   bool for_read_only_access);

   /* Make a buffer object evictable. This method is optional. */
   void (*bo_make_evictable)(struct pan_kmod_bo *bo);

   /* Make the buffer object unevictable. This method is optional. */
   bool (*bo_make_unevictable)(struct pan_kmod_bo *bo);

   /* Create a VM object. */
   struct pan_kmod_vm *(*vm_create)(struct pan_kmod_dev *dev, uint32_t flags,
                                    uint64_t va_start, uint64_t va_range);

   /* Destroy a VM object. */
   void (*vm_destroy)(struct pan_kmod_vm *vm);

   /* Execute VM operations.
    * Return 0 if the submission succeeds, -1 otherwise.
    * For PAN_KMOD_VM_OP_MODE_IMMEDIATE submissions, the return value also
    * reflects the success of the VM operations; for other modes, if any of
    * the VM operations fails, the VM might be flagged as unusable and users
    * should create a new VM to recover.
    */
   int (*vm_bind)(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
                  struct pan_kmod_vm_op *ops, uint32_t op_count);

   /* Query the VM state.
    * This method is optional. When missing, the VM is assumed to always be
    * usable.
    */
   enum pan_kmod_vm_state (*vm_query_state)(struct pan_kmod_vm *vm);
};
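
/* Non-normative sketch: a backend fills a pan_kmod_ops table with its
 * implementations and leaves the optional hooks NULL. All "mybackend_*" names
 * are hypothetical.
 *
 *    static const struct pan_kmod_ops mybackend_kmod_ops = {
 *       .dev_create = mybackend_dev_create,
 *       .dev_destroy = mybackend_dev_destroy,
 *       .dev_query_props = mybackend_dev_query_props,
 *       .bo_alloc = mybackend_bo_alloc,
 *       .bo_free = mybackend_bo_free,
 *       .bo_import = mybackend_bo_import,
 *       .bo_get_mmap_offset = mybackend_bo_get_mmap_offset,
 *       .bo_wait = mybackend_bo_wait,
 *       .vm_create = mybackend_vm_create,
 *       .vm_destroy = mybackend_vm_destroy,
 *       .vm_bind = mybackend_vm_bind,
 *       // optional hooks (dev_query_user_va_range, bo_export, ...) left NULL
 *    };
 */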

/* KMD information. */
struct pan_kmod_driver {
   /* KMD version. */
   struct {
      uint32_t major;
      uint32_t minor;
   } version;
};

/* Device object. */
struct pan_kmod_dev {
   /* FD attached to the device. */
   int fd;

   /* Device flags. */
   uint32_t flags;

   /* KMD backing this device. */
   struct pan_kmod_driver driver;

   /* kmod backend ops assigned at device creation. */
   const struct pan_kmod_ops *ops;

   /* DRM prime import returns the handle of a pre-existing GEM object if we
    * are importing an object that was created by us or previously imported.
    * We need to make sure we return the same pan_kmod_bo in that case,
    * otherwise freeing one pan_kmod_bo would make all other BOs sharing
    * the same handle invalid.
    */
   struct {
      struct util_sparse_array array;
      simple_mtx_t lock;
   } handle_to_bo;

   /* Allocator attached to the device. */
   const struct pan_kmod_allocator *allocator;

   /* User private data. Use pan_kmod_dev_{set,get}_user_priv() to access it. */
   void *user_priv;
};

struct pan_kmod_dev *
pan_kmod_dev_create(int fd, uint32_t flags,
                    const struct pan_kmod_allocator *allocator);

void pan_kmod_dev_destroy(struct pan_kmod_dev *dev);

static inline void
pan_kmod_dev_query_props(const struct pan_kmod_dev *dev,
                         struct pan_kmod_dev_props *props)
{
   dev->ops->dev_query_props(dev, props);
}

static inline struct pan_kmod_va_range
pan_kmod_dev_query_user_va_range(const struct pan_kmod_dev *dev)
{
   if (dev->ops->dev_query_user_va_range)
      return dev->ops->dev_query_user_va_range(dev);

   struct pan_kmod_dev_props props;

   pan_kmod_dev_query_props(dev, &props);
   return (struct pan_kmod_va_range){
      .start = 0,
      .size = 1ull << MMU_FEATURES_VA_BITS(props.mmu_features),
   };
}

static inline void
pan_kmod_dev_set_user_priv(struct pan_kmod_dev *dev, void *data)
{
   dev->user_priv = data;
}

static inline void *
pan_kmod_dev_get_user_priv(struct pan_kmod_dev *dev)
{
   return dev->user_priv;
}

struct pan_kmod_bo *pan_kmod_bo_alloc(struct pan_kmod_dev *dev,
                                      struct pan_kmod_vm *exclusive_vm,
                                      size_t size, uint32_t flags);

static inline struct pan_kmod_bo *
pan_kmod_bo_get(struct pan_kmod_bo *bo)
{
   if (!bo)
      return NULL;

   ASSERTED int32_t refcnt = p_atomic_inc_return(&bo->refcnt);

   /* If refcnt was zero before our increment, we're in trouble. */
   assert(refcnt > 1);

   return bo;
}

void pan_kmod_bo_put(struct pan_kmod_bo *bo);

static inline void *
pan_kmod_bo_cmdxchg_user_priv(struct pan_kmod_bo *bo, void *old_data,
                              void *new_data)
{
   return (void *)p_atomic_cmpxchg((uintptr_t *)&bo->user_priv,
                                   (uintptr_t)old_data, (uintptr_t)new_data);
}

static inline void
pan_kmod_bo_set_user_priv(struct pan_kmod_bo *bo, void *data)
{
   bo->user_priv = data;
}

static inline void *
pan_kmod_bo_get_user_priv(const struct pan_kmod_bo *bo)
{
   return bo->user_priv;
}

struct pan_kmod_bo *pan_kmod_bo_import(struct pan_kmod_dev *dev, int fd,
                                       uint32_t flags);

static inline int
pan_kmod_bo_export(struct pan_kmod_bo *bo)
{
   int fd;

   if (drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &fd)) {
      mesa_loge("drmPrimeHandleToFD() failed (err=%d)", errno);
      return -1;
   }

   if (bo->dev->ops->bo_export && bo->dev->ops->bo_export(bo, fd)) {
      close(fd);
      return -1;
   }

   bo->flags |= PAN_KMOD_BO_FLAG_EXPORTED;
   return fd;
}
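
/* Non-normative sketch: share a buffer with another device through a dma-buf
 * fd. "other_dev" is assumed to be a second pan_kmod_dev, and flags=0 is used
 * for simplicity.
 *
 *    int dmabuf_fd = pan_kmod_bo_export(bo);
 *    if (dmabuf_fd < 0)
 *       return; // export failed
 *
 *    struct pan_kmod_bo *imported =
 *       pan_kmod_bo_import(other_dev, dmabuf_fd, 0);
 *    close(dmabuf_fd); // the imported BO holds its own reference
 */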

static inline bool
pan_kmod_bo_wait(struct pan_kmod_bo *bo, int64_t timeout_ns,
                 bool for_read_only_access)
{
   return bo->dev->ops->bo_wait(bo, timeout_ns, for_read_only_access);
}

static inline void
pan_kmod_bo_make_evictable(struct pan_kmod_bo *bo)
{
   if (bo->dev->ops->bo_make_evictable)
      bo->dev->ops->bo_make_evictable(bo);
}

static inline bool
pan_kmod_bo_make_unevictable(struct pan_kmod_bo *bo)
{
   if (bo->dev->ops->bo_make_unevictable)
      return bo->dev->ops->bo_make_unevictable(bo);

   return true;
}

static inline void *
pan_kmod_bo_mmap(struct pan_kmod_bo *bo, off_t bo_offset, size_t size, int prot,
                 int flags, void *host_addr)
{
   off_t mmap_offset;

   if (bo_offset + size > bo->size)
      return MAP_FAILED;

   mmap_offset = bo->dev->ops->bo_get_mmap_offset(bo);
   if (mmap_offset < 0)
      return MAP_FAILED;

   host_addr = os_mmap(host_addr, size, prot, flags, bo->dev->fd,
                       mmap_offset + bo_offset);
   if (host_addr == MAP_FAILED)
      mesa_loge("mmap() failed (err=%d)", errno);

   return host_addr;
}
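
/* Non-normative sketch: CPU-map a whole buffer for read/write access. The
 * buffer must not have been allocated with PAN_KMOD_BO_FLAG_NO_MMAP.
 *
 *    void *cpu = pan_kmod_bo_mmap(bo, 0, pan_kmod_bo_size(bo),
 *                                 PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
 *    if (cpu == MAP_FAILED)
 *       return; // mapping failed
 *
 *    // ... CPU accesses ...
 *
 *    os_munmap(cpu, pan_kmod_bo_size(bo));
 */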

static inline size_t
pan_kmod_bo_size(struct pan_kmod_bo *bo)
{
   return bo->size;
}

static inline uint32_t
pan_kmod_bo_handle(struct pan_kmod_bo *bo)
{
   return bo->handle;
}

static inline struct pan_kmod_vm *
pan_kmod_vm_create(struct pan_kmod_dev *dev, uint32_t flags, uint64_t va_start,
                   uint64_t va_range)
{
   return dev->ops->vm_create(dev, flags, va_start, va_range);
}

static inline void
pan_kmod_vm_destroy(struct pan_kmod_vm *vm)
{
   vm->dev->ops->vm_destroy(vm);
}

static inline int
pan_kmod_vm_bind(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
                 struct pan_kmod_vm_op *ops, uint32_t op_count)
{
   return vm->dev->ops->vm_bind(vm, mode, ops, op_count);
}

static inline enum pan_kmod_vm_state
pan_kmod_vm_query_state(struct pan_kmod_vm *vm)
{
   if (vm->dev->ops->vm_query_state)
      return vm->dev->ops->vm_query_state(vm);

   return PAN_KMOD_VM_USABLE;
}

static inline uint32_t
pan_kmod_vm_handle(struct pan_kmod_vm *vm)
{
   return vm->handle;
}

#if defined(__cplusplus)
} // extern "C"
#endif