/*
 * Copyright © 2023 Collabora, Ltd.
 *
 * SPDX-License-Identifier: MIT
 *
 * This file exposes some core KMD functionalities in a driver-agnostic way.
 * The drivers are still assumed to be regular DRM drivers, such that some
 * operations can be handled generically.
 *
 * Any operation that's too specific to be abstracted can either have a backend
 * specific helper exposed through pan_kmod_<backend>.h, or no helper at all
 * (in the latter case, users are expected to call the ioctl directly).
 *
 * If some operations are not natively supported by a KMD, the kmod backend
 * should fail or emulate the functionality (if deemed necessary).
 */

#pragma once

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drm-uapi/drm.h"

#include "util/log.h"
#include "util/macros.h"
#include "util/os_file.h"
#include "util/os_mman.h"
#include "util/ralloc.h"
#include "util/simple_mtx.h"
#include "util/sparse_array.h"
#include "util/u_atomic.h"

#include "kmod/panthor_kmod.h"

#if defined(__cplusplus)
extern "C" {
#endif

struct pan_kmod_dev;

/* GPU VM creation flags. */
enum pan_kmod_vm_flags {
   /* Set if you want the VM to automatically assign virtual addresses on
    * map operations. If this flag is set, all map operations must have
    * va.start = PAN_KMOD_VM_MAP_AUTO_VA.
    */
   PAN_KMOD_VM_FLAG_AUTO_VA = BITFIELD_BIT(0),

   /* Let the backend know whether it should track the VM activity or not.
    * Needed if PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT is used.
    */
   PAN_KMOD_VM_FLAG_TRACK_ACTIVITY = BITFIELD_BIT(1),
};
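
/* Example: creating a VM that auto-assigns GPU virtual addresses. This is an
 * illustrative sketch only: "dev" and "bo" are assumed to already exist, and
 * error handling is omitted. With PAN_KMOD_VM_FLAG_AUTO_VA set, map
 * operations pass PAN_KMOD_VM_MAP_AUTO_VA and read back the VA picked by the
 * backend:
 *
 *    struct pan_kmod_va_range range = pan_kmod_dev_query_user_va_range(dev);
 *    struct pan_kmod_vm *vm = pan_kmod_vm_create(
 *       dev, PAN_KMOD_VM_FLAG_AUTO_VA, range.start, range.size);
 *
 *    struct pan_kmod_vm_op op = {
 *       .type = PAN_KMOD_VM_OP_TYPE_MAP,
 *       .va = {
 *          .start = PAN_KMOD_VM_MAP_AUTO_VA,
 *          .size = pan_kmod_bo_size(bo),
 *       },
 *       .map = { .bo = bo, .bo_offset = 0 },
 *    };
 *
 *    uint64_t gpu_va = PAN_KMOD_VM_MAP_FAILED;
 *    if (!pan_kmod_vm_bind(vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1))
 *       gpu_va = op.va.start;
 */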

/* Object representing a GPU VM. */
struct pan_kmod_vm {
   /* Combination of pan_kmod_vm_flags flags. */
   uint32_t flags;

   /* The VM handle returned by the KMD. If the KMD supports only one VM per
    * context, this should be zero.
    */
   uint32_t handle;

   /* Device this VM was created from. */
   struct pan_kmod_dev *dev;
};

/* Buffer object flags. */
enum pan_kmod_bo_flags {
   /* Allow GPU execution on this buffer. */
   PAN_KMOD_BO_FLAG_EXECUTABLE = BITFIELD_BIT(0),

   /* Allocate memory when a GPU fault occurs instead of allocating
    * up-front.
    */
   PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT = BITFIELD_BIT(1),

   /* If set, the buffer object will never be CPU-mapped in userspace. */
   PAN_KMOD_BO_FLAG_NO_MMAP = BITFIELD_BIT(2),

   /* Set when the buffer object has been exported. Users don't directly
    * control this flag, it's set when pan_kmod_bo_export() is called.
    */
   PAN_KMOD_BO_FLAG_EXPORTED = BITFIELD_BIT(3),

   /* Set when the buffer object has been imported. Users don't directly
    * control this flag, it's set when pan_kmod_bo_import() is called.
    */
   PAN_KMOD_BO_FLAG_IMPORTED = BITFIELD_BIT(4),

   /* If set, the buffer is mapped GPU-uncached when it is mapped on a VM. */
   PAN_KMOD_BO_FLAG_GPU_UNCACHED = BITFIELD_BIT(5),
};
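
/* Example: allocating a CPU-invisible heap buffer that gets physical memory
 * on demand. Illustrative sketch only; "dev" and "vm" are assumed to already
 * exist, and the 16 MiB size is arbitrary:
 *
 *    struct pan_kmod_bo *heap_bo =
 *       pan_kmod_bo_alloc(dev, vm, 16 * 1024 * 1024,
 *                         PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT |
 *                            PAN_KMOD_BO_FLAG_NO_MMAP);
 */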

/* Allowed group priority flags. */
enum pan_kmod_group_allow_priority_flags {
   /* Allow low priority group. */
   PAN_KMOD_GROUP_ALLOW_PRIORITY_LOW = BITFIELD_BIT(0),

   /* Allow medium priority group. */
   PAN_KMOD_GROUP_ALLOW_PRIORITY_MEDIUM = BITFIELD_BIT(1),

   /* Allow high priority group. */
   PAN_KMOD_GROUP_ALLOW_PRIORITY_HIGH = BITFIELD_BIT(2),

   /* Allow realtime priority group. */
   PAN_KMOD_GROUP_ALLOW_PRIORITY_REALTIME = BITFIELD_BIT(3),
};

/* Buffer object. */
struct pan_kmod_bo {
   /* Atomic reference count. The only reason we need to refcnt BOs at this
    * level is because of how DRM prime import works: the import logic
    * returns the handle of an existing object if the object was previously
    * imported or was created by the driver.
    * In order to prevent calling GEM_CLOSE on an object that's still supposed
    * to be active, we need to count the number of users left.
    */
   int32_t refcnt;

   /* Size of the buffer object. */
   size_t size;

   /* Handle attached to the buffer object. */
   uint32_t handle;

   /* Combination of pan_kmod_bo_flags flags. */
   uint32_t flags;

   /* If non-NULL, the buffer object can only be mapped on this VM. Typically
    * the case for all internal/non-shareable buffers. The backend can
    * optimize things based on this information. Calling pan_kmod_bo_export()
    * on such buffer objects is forbidden.
    */
   struct pan_kmod_vm *exclusive_vm;

   /* The device this buffer object was created from. */
   struct pan_kmod_dev *dev;

   /* User private data. Use pan_kmod_bo_{set,get}_user_priv() to access it. */
   void *user_priv;
};

/* List of GPU properties needed by the UMD. */
struct pan_kmod_dev_props {
   /* GPU product ID. */
   uint32_t gpu_prod_id;

   /* GPU revision. */
   uint32_t gpu_revision;

   /* GPU variant. */
   uint32_t gpu_variant;

   /* Bitmask encoding the number of shader cores exposed by the GPU. */
   uint64_t shader_present;

   /* Tiler feature bits. */
   uint32_t tiler_features;

   /* Memory-related feature bits. */
   uint32_t mem_features;

   /* MMU feature bits. */
   uint32_t mmu_features;
#define MMU_FEATURES_VA_BITS(mmu_features) ((mmu_features) & 0xff)

   /* Texture feature bits. */
   uint32_t texture_features[4];

   /* Maximum number of threads per core. */
   uint32_t max_threads_per_core;

   /* Maximum number of compute tasks per core. */
   uint8_t max_tasks_per_core;

   /* Maximum number of threads per workgroup. */
   uint32_t max_threads_per_wg;

   /* Number of registers per core. Can be used to determine the maximum
    * number of threads that can be allocated for a specific shader based on
    * the number of registers assigned to this shader.
    */
   uint32_t num_registers_per_core;

   /* Maximum number of thread-local storage instances per core.
    * If the GPU doesn't have a THREAD_TLS_ALLOC register, or the register
    * value is zero, the backend should assign the value of max_threads_per_core
    * here.
    */
   uint32_t max_tls_instance_per_core;

   /* AFBC feature bits. */
   uint32_t afbc_features;

   /* Support for cycle count and timestamp propagation as a job requirement. */
   bool gpu_can_query_timestamp;

   /* GPU timestamp frequency. */
   uint64_t timestamp_frequency;

   /* A mask of flags containing the allowed group priorities. */
   enum pan_kmod_group_allow_priority_flags allowed_group_priorities_mask;
};
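
/* Example: querying device properties, deriving the usable VA width and
 * checking an allowed group priority. Illustrative sketch only; "dev" is
 * assumed to be a valid device:
 *
 *    struct pan_kmod_dev_props props;
 *
 *    pan_kmod_dev_query_props(dev, &props);
 *
 *    unsigned va_bits = MMU_FEATURES_VA_BITS(props.mmu_features);
 *    bool has_high_prio =
 *       props.allowed_group_priorities_mask & PAN_KMOD_GROUP_ALLOW_PRIORITY_HIGH;
 */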

/* Memory allocator for kmod internal allocations. */
struct pan_kmod_allocator {
   /* Allocate and set to zero. */
   void *(*zalloc)(const struct pan_kmod_allocator *allocator, size_t size,
                   bool transient);

   /* Free. */
   void (*free)(const struct pan_kmod_allocator *allocator, void *data);

   /* Private allocator data. Can be NULL if unused. */
   void *priv;
};
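
/* Example: a minimal allocator implementation backed by the C runtime.
 * Illustrative sketch only; the "simple_*" names are hypothetical, and a real
 * UMD would typically use something smarter for transient allocations:
 *
 *    static void *
 *    simple_zalloc(const struct pan_kmod_allocator *allocator, size_t size,
 *                  bool transient)
 *    {
 *       return calloc(1, size);
 *    }
 *
 *    static void
 *    simple_free(const struct pan_kmod_allocator *allocator, void *data)
 *    {
 *       free(data);
 *    }
 *
 *    static const struct pan_kmod_allocator simple_allocator = {
 *       .zalloc = simple_zalloc,
 *       .free = simple_free,
 *    };
 */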

/* Synchronization type. */
enum pan_kmod_sync_type {
   PAN_KMOD_SYNC_TYPE_WAIT = 0,
   PAN_KMOD_SYNC_TYPE_SIGNAL,
};

/* Synchronization operation. */
struct pan_kmod_sync_op {
   /* Type of operation. */
   enum pan_kmod_sync_type type;

   /* Syncobj handle. */
   uint32_t handle;

   /* Syncobj point. Zero for binary syncobjs. */
   uint64_t point;
};

/* Special value passed as the VA start of a map operation to signify that
 * the VM should automatically allocate a VA. Only valid if the VM was
 * created with PAN_KMOD_VM_FLAG_AUTO_VA.
 */
#define PAN_KMOD_VM_MAP_AUTO_VA ~0ull

/* Special value returned when a map operation failed. */
#define PAN_KMOD_VM_MAP_FAILED ~0ull

/* VM operations can be executed in different modes. */
enum pan_kmod_vm_op_mode {
   /* The map/unmap operation is executed immediately, which might cause
    * GPU faults if the GPU was still accessing buffers when we unmap or
    * remap.
    */
   PAN_KMOD_VM_OP_MODE_IMMEDIATE,

   /* The map/unmap operation is executed asynchronously, and the user
    * provides explicit wait/signal sync operations.
    */
   PAN_KMOD_VM_OP_MODE_ASYNC,

   /* The map/unmap operation is executed when the next GPU/VM idle-point
    * is reached. This guarantees fault-free unmap/remap operations when the
    * kmod user doesn't want to deal with synchronizations explicitly.
    */
   PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT,
};

/* VM operation type. */
enum pan_kmod_vm_op_type {
   /* Map a buffer object. */
   PAN_KMOD_VM_OP_TYPE_MAP,

   /* Unmap a VA range. */
   PAN_KMOD_VM_OP_TYPE_UNMAP,

   /* Do nothing. Used as a way to execute sync operations on a VM queue,
    * without touching the VM.
    */
   PAN_KMOD_VM_OP_TYPE_SYNC_ONLY,
};

/* VM operation data. */
struct pan_kmod_vm_op {
   /* The type of operation being requested. */
   enum pan_kmod_vm_op_type type;

   /* VA range. */
   struct {
      /* Start of the VA range.
       * Must be PAN_KMOD_VM_MAP_AUTO_VA if PAN_KMOD_VM_FLAG_AUTO_VA was set
       * at VM creation time. In that case, the allocated VA is returned
       * in this field.
       */
      uint64_t start;

      /* Size of the VA range. */
      size_t size;
   } va;

   union {
      /* Arguments specific to map operations. */
      struct {
         /* Buffer object to map. */
         struct pan_kmod_bo *bo;

         /* Offset in the buffer object. */
         off_t bo_offset;
      } map;
   };

   /* Synchronization operations attached to the VM operation. */
   struct {
      /* Number of synchronization operations. Must be zero if mode is
       * PAN_KMOD_VM_OP_MODE_IMMEDIATE or
       * PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT.
       */
      uint32_t count;

      /* Array of synchronization operation descriptors. NULL if count is
       * zero.
       */
      const struct pan_kmod_sync_op *array;
   } syncs;
};
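
/* Example: asynchronously mapping a buffer object, waiting on one timeline
 * syncobj point and signaling another. Illustrative sketch only; "vm", "bo",
 * "gpu_va", "in_syncobj" and "out_syncobj" are assumed to already exist:
 *
 *    struct pan_kmod_sync_op syncs[] = {
 *       { .type = PAN_KMOD_SYNC_TYPE_WAIT, .handle = in_syncobj, .point = 1 },
 *       { .type = PAN_KMOD_SYNC_TYPE_SIGNAL, .handle = out_syncobj, .point = 2 },
 *    };
 *
 *    struct pan_kmod_vm_op op = {
 *       .type = PAN_KMOD_VM_OP_TYPE_MAP,
 *       .va = { .start = gpu_va, .size = pan_kmod_bo_size(bo) },
 *       .map = { .bo = bo, .bo_offset = 0 },
 *       .syncs = { .count = ARRAY_SIZE(syncs), .array = syncs },
 *    };
 *
 *    if (pan_kmod_vm_bind(vm, PAN_KMOD_VM_OP_MODE_ASYNC, &op, 1))
 *       return -1;
 */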

/* VM state. */
enum pan_kmod_vm_state {
   PAN_KMOD_VM_USABLE,
   PAN_KMOD_VM_FAULTY,
};

/* Device flags. */
enum pan_kmod_dev_flags {
   /* Set when the fd passed to pan_kmod_dev_create() is expected to be
    * owned by the device, iff the device creation succeeded.
    */
   PAN_KMOD_DEV_FLAG_OWNS_FD = (1 << 0),
};

/* Encode a virtual address range. */
struct pan_kmod_va_range {
   /* Start of the VA range. */
   uint64_t start;

   /* Size of the VA range. */
   uint64_t size;
};

/* KMD backend vtable.
 *
 * All methods described here are mandatory, unless explicitly flagged as
 * optional.
 */
struct pan_kmod_ops {
   /* Create a pan_kmod_dev object.
    * Return NULL if the creation fails for any reason.
    */
   struct pan_kmod_dev *(*dev_create)(
      int fd, uint32_t flags, const drmVersionPtr version,
      const struct pan_kmod_allocator *allocator);

   /* Destroy a pan_kmod_dev object. */
   void (*dev_destroy)(struct pan_kmod_dev *dev);

   /* Query device properties. */
   void (*dev_query_props)(const struct pan_kmod_dev *dev,
                           struct pan_kmod_dev_props *props);

   /* Query the maximum user VA range.
    * Users are free to use a subset of this range if they need less VA space.
    * This method is optional; when not specified, kmod assumes the whole VA
    * space (extracted from MMU_FEATURES.VA_BITS) is usable.
    */
   struct pan_kmod_va_range (*dev_query_user_va_range)(
      const struct pan_kmod_dev *dev);

   /* Allocate a buffer object.
    * Return NULL if the creation fails for any reason.
    */
   struct pan_kmod_bo *(*bo_alloc)(struct pan_kmod_dev *dev,
                                   struct pan_kmod_vm *exclusive_vm,
                                   size_t size, uint32_t flags);

   /* Free a buffer object. */
   void (*bo_free)(struct pan_kmod_bo *bo);

   /* Import a buffer object.
    * Return NULL if the import fails for any reason.
    */
   struct pan_kmod_bo *(*bo_import)(struct pan_kmod_dev *dev, uint32_t handle,
                                    size_t size, uint32_t flags);

   /* Post-export operations.
    * Return 0 on success, -1 otherwise.
    * This method is optional.
    */
   int (*bo_export)(struct pan_kmod_bo *bo, int dmabuf_fd);

   /* Get the file offset to use to mmap() a buffer object. */
   off_t (*bo_get_mmap_offset)(struct pan_kmod_bo *bo);

   /* Wait for a buffer object to be ready for read or read/write accesses. */
   bool (*bo_wait)(struct pan_kmod_bo *bo, int64_t timeout_ns,
                   bool for_read_only_access);

   /* Make a buffer object evictable. This method is optional. */
   void (*bo_make_evictable)(struct pan_kmod_bo *bo);

   /* Make the buffer object unevictable. This method is optional. */
   bool (*bo_make_unevictable)(struct pan_kmod_bo *bo);

   /* Create a VM object. */
   struct pan_kmod_vm *(*vm_create)(struct pan_kmod_dev *dev, uint32_t flags,
                                    uint64_t va_start, uint64_t va_range);

   /* Destroy a VM object. */
   void (*vm_destroy)(struct pan_kmod_vm *vm);

   /* Execute VM operations.
    * Return 0 if the submission succeeds, -1 otherwise.
    * For PAN_KMOD_VM_OP_MODE_IMMEDIATE submissions, the return value also
    * reflects the success of the VM operations. For other modes, if any of
    * the VM operations fails, the VM might be flagged as unusable and users
    * should create a new VM to recover.
    */
   int (*vm_bind)(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
                  struct pan_kmod_vm_op *ops, uint32_t op_count);

   /* Query the VM state.
    * This method is optional. When missing, the VM is assumed to always be
    * usable.
    */
   enum pan_kmod_vm_state (*vm_query_state)(struct pan_kmod_vm *vm);

   /* Query the current GPU timestamp. */
   uint64_t (*query_timestamp)(const struct pan_kmod_dev *dev);
};
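
/* Example: skeleton of a backend vtable. Illustrative sketch only; the
 * "mydrv_*" callbacks are hypothetical stand-ins for whatever a real backend
 * implements, and the optional hooks are simply left out (NULL):
 *
 *    const struct pan_kmod_ops mydrv_kmod_ops = {
 *       .dev_create = mydrv_dev_create,
 *       .dev_destroy = mydrv_dev_destroy,
 *       .dev_query_props = mydrv_dev_query_props,
 *       .bo_alloc = mydrv_bo_alloc,
 *       .bo_free = mydrv_bo_free,
 *       .bo_import = mydrv_bo_import,
 *       .bo_get_mmap_offset = mydrv_bo_get_mmap_offset,
 *       .bo_wait = mydrv_bo_wait,
 *       .vm_create = mydrv_vm_create,
 *       .vm_destroy = mydrv_vm_destroy,
 *       .vm_bind = mydrv_vm_bind,
 *       .query_timestamp = mydrv_query_timestamp,
 *    };
 */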

/* KMD information. */
struct pan_kmod_driver {
   /* KMD version. */
   struct {
      uint32_t major;
      uint32_t minor;
   } version;
};

/* Device object. */
struct pan_kmod_dev {
   /* FD attached to the device. */
   int fd;

   /* Device flags. */
   uint32_t flags;

   /* KMD backing this device. */
   struct pan_kmod_driver driver;

   /* kmod backend ops assigned at device creation. */
   const struct pan_kmod_ops *ops;

   /* DRM prime import returns the handle of a pre-existing GEM if we are
    * importing an object that was created by us or previously imported.
    * We need to make sure we return the same pan_kmod_bo in that case,
    * otherwise freeing one pan_kmod_bo will make all other BOs sharing
    * the same handle invalid.
    */
   struct {
      struct util_sparse_array array;
      simple_mtx_t lock;
   } handle_to_bo;

   /* Allocator attached to the device. */
   const struct pan_kmod_allocator *allocator;

   /* User private data. Use pan_kmod_dev_{set,get}_user_priv() to access it. */
   void *user_priv;
};

struct pan_kmod_dev *
pan_kmod_dev_create(int fd, uint32_t flags,
                    const struct pan_kmod_allocator *allocator);

void pan_kmod_dev_destroy(struct pan_kmod_dev *dev);

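/* Example: opening a render node and wrapping it in a pan_kmod_dev.
 * Illustrative sketch only; the device path is an assumption, and
 * "simple_allocator" refers to the hypothetical allocator sketched earlier.
 * With PAN_KMOD_DEV_FLAG_OWNS_FD the fd belongs to the device only if the
 * creation succeeded, so the caller still closes it on failure:
 *
 *    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *    struct pan_kmod_dev *dev =
 *       pan_kmod_dev_create(fd, PAN_KMOD_DEV_FLAG_OWNS_FD, &simple_allocator);
 *
 *    if (!dev) {
 *       close(fd);
 *       return -1;
 *    }
 *
 *    ... use the device ...
 *
 *    pan_kmod_dev_destroy(dev);
 */
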
static inline void
pan_kmod_dev_query_props(const struct pan_kmod_dev *dev,
                         struct pan_kmod_dev_props *props)
{
   dev->ops->dev_query_props(dev, props);
}

static inline struct pan_kmod_va_range
pan_kmod_dev_query_user_va_range(const struct pan_kmod_dev *dev)
{
   if (dev->ops->dev_query_user_va_range)
      return dev->ops->dev_query_user_va_range(dev);

   struct pan_kmod_dev_props props;

   pan_kmod_dev_query_props(dev, &props);
   return (struct pan_kmod_va_range){
      .start = 0,
      .size = 1ull << MMU_FEATURES_VA_BITS(props.mmu_features),
   };
}

static inline void
pan_kmod_dev_set_user_priv(struct pan_kmod_dev *dev, void *data)
{
   dev->user_priv = data;
}

static inline void *
pan_kmod_dev_get_user_priv(struct pan_kmod_dev *dev)
{
   return dev->user_priv;
}

struct pan_kmod_bo *pan_kmod_bo_alloc(struct pan_kmod_dev *dev,
                                      struct pan_kmod_vm *exclusive_vm,
                                      size_t size, uint32_t flags);

static inline struct pan_kmod_bo *
pan_kmod_bo_get(struct pan_kmod_bo *bo)
{
   if (!bo)
      return NULL;

   ASSERTED int32_t refcnt = p_atomic_inc_return(&bo->refcnt);

   /* If refcnt was zero before our increment, we're in trouble. */
   assert(refcnt > 1);

   return bo;
}

void pan_kmod_bo_put(struct pan_kmod_bo *bo);

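/* Example: sharing a buffer object between two logical owners through the
 * embedded reference count. Illustrative sketch only; "dev" is assumed to be
 * a valid device and the freshly allocated BO is assumed to start with a
 * single reference:
 *
 *    struct pan_kmod_bo *bo = pan_kmod_bo_alloc(dev, NULL, 4096, 0);
 *    struct pan_kmod_bo *extra_ref = pan_kmod_bo_get(bo);
 *
 *    pan_kmod_bo_put(extra_ref);
 *    pan_kmod_bo_put(bo);
 */
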
static inline void *
pan_kmod_bo_cmdxchg_user_priv(struct pan_kmod_bo *bo, void *old_data,
                              void *new_data)
{
   return (void *)p_atomic_cmpxchg((uintptr_t *)&bo->user_priv,
                                   (uintptr_t)old_data, (uintptr_t)new_data);
}

static inline void
pan_kmod_bo_set_user_priv(struct pan_kmod_bo *bo, void *data)
{
   bo->user_priv = data;
}

static inline void *
pan_kmod_bo_get_user_priv(const struct pan_kmod_bo *bo)
{
   return bo->user_priv;
}

struct pan_kmod_bo *pan_kmod_bo_import(struct pan_kmod_dev *dev, int fd,
                                       uint32_t flags);

static inline int
pan_kmod_bo_export(struct pan_kmod_bo *bo)
{
   int fd;

   if (drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &fd)) {
      mesa_loge("drmPrimeHandleToFD() failed (err=%d)", errno);
      return -1;
   }

   if (bo->dev->ops->bo_export && bo->dev->ops->bo_export(bo, fd)) {
      close(fd);
      return -1;
   }

   bo->flags |= PAN_KMOD_BO_FLAG_EXPORTED;
   return fd;
}

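/* Example: exporting a buffer object as a dma-buf and importing it on another
 * device. Illustrative sketch only; "bo" must not have an exclusive VM,
 * "other_dev" is assumed to exist, and the fd can typically be closed once
 * the import has taken its own reference on the underlying object:
 *
 *    int dmabuf_fd = pan_kmod_bo_export(bo);
 *
 *    if (dmabuf_fd >= 0) {
 *       struct pan_kmod_bo *imported =
 *          pan_kmod_bo_import(other_dev, dmabuf_fd, 0);
 *
 *       close(dmabuf_fd);
 *    }
 */
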
static inline bool
pan_kmod_bo_wait(struct pan_kmod_bo *bo, int64_t timeout_ns,
                 bool for_read_only_access)
{
   return bo->dev->ops->bo_wait(bo, timeout_ns, for_read_only_access);
}

static inline void
pan_kmod_bo_make_evictable(struct pan_kmod_bo *bo)
{
   if (bo->dev->ops->bo_make_evictable)
      bo->dev->ops->bo_make_evictable(bo);
}

static inline bool
pan_kmod_bo_make_unevictable(struct pan_kmod_bo *bo)
{
   if (bo->dev->ops->bo_make_unevictable)
      return bo->dev->ops->bo_make_unevictable(bo);

   return true;
}

static inline void *
pan_kmod_bo_mmap(struct pan_kmod_bo *bo, off_t bo_offset, size_t size, int prot,
                 int flags, void *host_addr)
{
   off_t mmap_offset;

   if (bo_offset + size > bo->size)
      return MAP_FAILED;

   mmap_offset = bo->dev->ops->bo_get_mmap_offset(bo);
   if (mmap_offset < 0)
      return MAP_FAILED;

   host_addr = os_mmap(host_addr, size, prot, flags, bo->dev->fd,
                       mmap_offset + bo_offset);
   if (host_addr == MAP_FAILED) {
      mesa_loge("mmap(..., size=%zu, prot=%d, flags=0x%x) failed: %s",
                size, prot, flags, strerror(errno));
      return NULL;
   }

   return host_addr;
}

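/* Example: CPU-mapping a whole buffer object for writing, then unmapping it.
 * Illustrative sketch only; "bo" is assumed to have been allocated without
 * PAN_KMOD_BO_FLAG_NO_MMAP. Both MAP_FAILED and NULL are treated as failures
 * here, matching the two error paths above:
 *
 *    void *cpu_ptr = pan_kmod_bo_mmap(bo, 0, pan_kmod_bo_size(bo),
 *                                     PROT_READ | PROT_WRITE, MAP_SHARED,
 *                                     NULL);
 *
 *    if (cpu_ptr != MAP_FAILED && cpu_ptr != NULL) {
 *       memset(cpu_ptr, 0, pan_kmod_bo_size(bo));
 *       os_munmap(cpu_ptr, pan_kmod_bo_size(bo));
 *    }
 */
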
static inline size_t
pan_kmod_bo_size(struct pan_kmod_bo *bo)
{
   return bo->size;
}

static inline uint32_t
pan_kmod_bo_handle(struct pan_kmod_bo *bo)
{
   return bo->handle;
}

static inline struct pan_kmod_vm *
pan_kmod_vm_create(struct pan_kmod_dev *dev, uint32_t flags, uint64_t va_start,
                   uint64_t va_range)
{
   return dev->ops->vm_create(dev, flags, va_start, va_range);
}

static inline void
pan_kmod_vm_destroy(struct pan_kmod_vm *vm)
{
   vm->dev->ops->vm_destroy(vm);
}

static inline int
pan_kmod_vm_bind(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
                 struct pan_kmod_vm_op *ops, uint32_t op_count)
{
   return vm->dev->ops->vm_bind(vm, mode, ops, op_count);
}

static inline enum pan_kmod_vm_state
pan_kmod_vm_query_state(struct pan_kmod_vm *vm)
{
   if (vm->dev->ops->vm_query_state)
      return vm->dev->ops->vm_query_state(vm);

   return PAN_KMOD_VM_USABLE;
}

static inline uint32_t
pan_kmod_vm_handle(struct pan_kmod_vm *vm)
{
   return vm->handle;
}

static inline uint64_t
pan_kmod_query_timestamp(const struct pan_kmod_dev *dev)
{
   return dev->ops->query_timestamp(dev);
}

#if defined(__cplusplus)
} // extern "C"
#endif