/*
 * Copyright © 2023 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

/* A simple helper layer for virtgpu drm native context, which also
 * abstracts the differences between vtest (communicating via socket
 * with a vtest server) and virtgpu (communicating via the drm/virtio
 * driver in the guest).
 */
11 
12 #ifndef __VDRM_H__
13 #define __VDRM_H__
14 
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "util/simple_mtx.h"

#include "virglrenderer_hw.h"
20 
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24 
/* Forward declarations; both structs are defined later in this header. */
struct vdrm_device;
struct vdrm_execbuf_params;
27 
/**
 * Backend ops table for a vdrm_device.  Per the comment at the top of this
 * header there are two transports: the virtgpu guest kernel driver, and the
 * vtest protocol (socket to a vtest server).
 *
 * The "_locked" entry points are called with vdrm_device::eb_lock held —
 * NOTE(review): inferred from the naming and the eb_lock member; confirm
 * against vdrm.c.
 */
struct vdrm_device_funcs {
   /* Note flush_locked and execbuf_locked are similar, and on top of virtgpu
    * guest kernel driver are basically the same.  But with vtest, only cmds
    * that result in host kernel cmd submission can take and/or return fence
    * and/or syncobj fd's.
    */
   int (*execbuf_locked)(struct vdrm_device *vdev, struct vdrm_execbuf_params *p,
                         void *command, unsigned size);
   int (*flush_locked)(struct vdrm_device *vdev, uintptr_t *fencep);

   /* Wait on a fence value previously returned in *fencep by flush_locked. */
   void (*wait_fence)(struct vdrm_device *vdev, uintptr_t fence);

   /* Import a dmabuf fd, returning a GEM handle for it. */
   uint32_t (*dmabuf_to_handle)(struct vdrm_device *vdev, int fd);
   /* Translate a GEM handle to its resource id. */
   uint32_t (*handle_to_res_id)(struct vdrm_device *vdev, uint32_t handle);

   /* Buffer management; bo_create returns a GEM handle for the new blob. */
   uint32_t (*bo_create)(struct vdrm_device *vdev, size_t size, uint32_t blob_flags,
                         uint64_t blob_id, struct vdrm_ccmd_req *req);
   int (*bo_wait)(struct vdrm_device *vdev, uint32_t handle);
   void *(*bo_map)(struct vdrm_device *vdev, uint32_t handle, size_t size, void *placed_addr);
   int (*bo_export_dmabuf)(struct vdrm_device *vdev, uint32_t handle);
   void (*bo_close)(struct vdrm_device *vdev, uint32_t handle);

   /* Tear down the connection; backend counterpart of vdrm_device_close(). */
   void (*close)(struct vdrm_device *vdev);
};
52 
/**
 * A connection to the host for virtgpu drm native context (or vtest).
 */
struct vdrm_device {
   const struct vdrm_device_funcs *funcs;

   /* Host capabilities (drm capset). */
   struct virgl_renderer_capset_drm caps;
   bool supports_cross_device;

   /* Shared memory and response buffer state; next_rsp_off is presumably
    * protected by rsp_lock — TODO confirm in vdrm.c.
    */
   struct vdrm_shmem *shmem;
   uint8_t *rsp_mem;
   uint32_t rsp_mem_len;
   uint32_t next_rsp_off;
   simple_mtx_t rsp_lock;
   simple_mtx_t eb_lock;      /* see the "_locked" funcs-table entries */

   uint32_t next_seqno;

   /*
    * Buffering for requests to host:
    */
   uint32_t reqbuf_len;       /* bytes currently buffered in reqbuf */
   uint32_t reqbuf_cnt;       /* number of buffered requests */
   uint8_t reqbuf[0x4000];
};
74 
/* Establish a connection over the given fd for the requested context type. */
struct vdrm_device *vdrm_device_connect(int fd, uint32_t context_type);

/* Tear down a connection created by vdrm_device_connect(). */
void vdrm_device_close(struct vdrm_device *vdev);

/* Allocate 'sz' bytes of response space for 'req' — presumably carved out of
 * vdrm_device::rsp_mem; confirm ownership/lifetime rules in vdrm.c.
 */
void * vdrm_alloc_rsp(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, uint32_t sz);

/* Queue 'req' to the host; if 'sync', presumably waits for it to be
 * processed — TODO confirm in vdrm.c.
 */
int vdrm_send_req(struct vdrm_device *vdev, struct vdrm_ccmd_req *req, bool sync);

/* Flush buffered requests (vdrm_device::reqbuf) to the host. */
int vdrm_flush(struct vdrm_device *vdev);
81 
/**
 * Parameters for vdrm_execbuf().
 */
struct vdrm_execbuf_params {
   int ring_idx;

   struct vdrm_ccmd_req *req;     /* Note, must be host kernel cmd submit */

   /* GEM handles referenced by the submit: */
   uint32_t *handles;
   uint32_t num_handles;

   /* Syncobjs to wait on / signal; counts are in num_in/out_syncobjs below: */
   struct drm_virtgpu_execbuffer_syncobj *in_syncobjs;
   struct drm_virtgpu_execbuffer_syncobj *out_syncobjs;

   /* Whether fence_fd carries an in-fence / should receive an out-fence: */
   bool has_in_fence_fd : 1;
   bool needs_out_fence_fd : 1;

   int fence_fd;                  /* in/out fence */

   uint32_t num_in_syncobjs;
   uint32_t num_out_syncobjs;
};
101 
/**
 * Note, must be a host cmd submission, to which the specified in/out
 * fence/syncobj can be passed.  In the vtest case, we can't get
 * fences/syncobjs for other host cmds.
 */
int vdrm_execbuf(struct vdrm_device *vdev, struct vdrm_execbuf_params *p);

/* Synchronize with the host on the given request — presumably blocks until
 * the host has processed it; confirm against vdrm.c.
 */
void vdrm_host_sync(struct vdrm_device *vdev, const struct vdrm_ccmd_req *req);
110 
111 /**
112  * Import dmabuf fd returning a GEM handle
113  */
114 static inline uint32_t
vdrm_dmabuf_to_handle(struct vdrm_device * vdev,int fd)115 vdrm_dmabuf_to_handle(struct vdrm_device *vdev, int fd)
116 {
117    return vdev->funcs->dmabuf_to_handle(vdev, fd);
118 }
119 
120 static inline uint32_t
vdrm_handle_to_res_id(struct vdrm_device * vdev,uint32_t handle)121 vdrm_handle_to_res_id(struct vdrm_device *vdev, uint32_t handle)
122 {
123    return vdev->funcs->handle_to_res_id(vdev, handle);
124 }
125 
/* Create a blob buffer of 'size' bytes, returning its GEM handle.  'req' is
 * forwarded to the backend's bo_create — presumably the ccmd describing the
 * buffer to the host; confirm exact semantics in vdrm.c.
 */
uint32_t vdrm_bo_create(struct vdrm_device *vdev, size_t size,
                        uint32_t blob_flags, uint64_t blob_id,
                        struct vdrm_ccmd_req *req);
129 
130 static inline int
vdrm_bo_wait(struct vdrm_device * vdev,uint32_t handle)131 vdrm_bo_wait(struct vdrm_device *vdev, uint32_t handle)
132 {
133    return vdev->funcs->bo_wait(vdev, handle);
134 }
135 
136 static inline void *
vdrm_bo_map(struct vdrm_device * vdev,uint32_t handle,size_t size,void * placed_addr)137 vdrm_bo_map(struct vdrm_device *vdev, uint32_t handle, size_t size, void *placed_addr)
138 {
139    return vdev->funcs->bo_map(vdev, handle, size, placed_addr);
140 }
141 
142 static inline int
vdrm_bo_export_dmabuf(struct vdrm_device * vdev,uint32_t handle)143 vdrm_bo_export_dmabuf(struct vdrm_device *vdev, uint32_t handle)
144 {
145    return vdev->funcs->bo_export_dmabuf(vdev, handle);
146 }
147 
148 static inline void
vdrm_bo_close(struct vdrm_device * vdev,uint32_t handle)149 vdrm_bo_close(struct vdrm_device *vdev, uint32_t handle)
150 {
151    vdev->funcs->bo_close(vdev, handle);
152 }
153 
154 #ifdef __cplusplus
155 } /* end of extern "C" */
156 #endif
157 
158 #endif /* __VDRM_H__ */
159