/*
 * Copyright © 2022 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef VIRTIO_PRIV_H_
#define VIRTIO_PRIV_H_

#include <poll.h>

#include "freedreno_priv.h"

#include "util/u_atomic.h"
#include "util/slab.h"
#include "util/timespec.h"
#include "util/vma.h"

#include "pipe/p_defines.h"

#include "drm-uapi/virtgpu_drm.h"
/* We also use some types/defines from the host drm/msm uabi: */
#include "drm-uapi/msm_drm.h"

#define VIRGL_RENDERER_UNSTABLE_APIS 1
#include "virglrenderer_hw.h"
#include "msm_proto.h"

struct virtio_device {
   struct fd_device base;

   struct fd_bo *shmem_bo;
   struct msm_shmem *shmem;
   uint8_t *rsp_mem;
   uint32_t rsp_mem_len;
   uint32_t next_rsp_off;
   simple_mtx_t rsp_lock;
   simple_mtx_t eb_lock;

   uint32_t next_blob_id;
   uint32_t next_seqno;

   struct virgl_renderer_capset_drm caps;

   /*
    * Notes on address space allocation:
    *
    * In both the import (GEM_INFO) and new (GEM_NEW) paths we allocate
    * the iova.  Since the iova (vma on the kernel side) is local to the
    * address space, and that is 1:1 with the drm fd (which is 1:1 with
    * the virtio_device and therefore the address_space) which is not
    * shared with anything outside of the driver, and because of handle
    * de-duplication, we can safely assume that an iova has not yet
    * been set on imported buffers.
    *
    * The other complication with userspace-allocated iova is that
    * the kernel holds a reference to the bo (and the GPU is still
    * using its iova) until the submit retires.  So a per-pipe
    * retire_queue is used to hold an extra reference to the submit
    * (and indirectly all the bo's referenced) until the out-fence is
    * signaled.
    */
   struct util_vma_heap address_space;
   simple_mtx_t address_space_lock;

   uint32_t reqbuf_len;
   uint32_t reqbuf_cnt;
   uint8_t reqbuf[0x4000];
};
FD_DEFINE_CAST(fd_device, virtio_device);

struct fd_device *virtio_device_new(int fd, drmVersionPtr version);

static inline void
virtio_dev_free_iova(struct fd_device *dev, uint64_t iova, uint32_t size)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);

   simple_mtx_lock(&virtio_dev->address_space_lock);
   util_vma_heap_free(&virtio_dev->address_space, iova, size);
   simple_mtx_unlock(&virtio_dev->address_space_lock);
}

static inline uint64_t
virtio_dev_alloc_iova(struct fd_device *dev, uint32_t size)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   uint64_t iova;

   simple_mtx_lock(&virtio_dev->address_space_lock);
   iova = util_vma_heap_alloc(&virtio_dev->address_space, size, 0x1000);
   simple_mtx_unlock(&virtio_dev->address_space_lock);

   return iova;
}
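
/*
 * Illustrative usage sketch of the two helpers above (not part of the
 * driver; `bo` is a hypothetical buffer object with a `size` field):
 *
 *    uint64_t iova = virtio_dev_alloc_iova(dev, bo->size);
 *    if (!iova)
 *       return NULL;   // VMA heap exhausted
 *    ...
 *    // later, once no in-flight submit references the bo any more:
 *    virtio_dev_free_iova(dev, iova, bo->size);
 *
 * util_vma_heap_alloc() returns 0 on failure, which is why a zero iova
 * can be treated as an allocation error.
 */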

struct virtio_pipe {
   struct fd_pipe base;
   uint32_t pipe;
   uint32_t gpu_id;
   uint64_t chip_id;
   uint64_t gmem_base;
   uint32_t gmem;
   uint32_t queue_id;
   uint32_t ring_idx;
   struct slab_parent_pool ring_pool;

   /**
    * If we *ever* see an in-fence-fd, assume that userspace is
    * not relying on implicit fences.
    */
   bool no_implicit_sync;

   /**
    * We know that the kernel allocates fence seqnos sequentially per
    * submitqueue, in the range 1..INT_MAX, and that the counter is
    * incremented *after* any point where the submit ioctl could be
    * restarted.  So we just *guess* what the next fence seqno will be,
    * to avoid having to synchronize the submit with the host.
    *
    * TODO maybe we need a version_minor bump so we can rely on the
    * 1..INT_MAX assumption.. it is only really true after:
    *
    *   ca3ffcbeb0c8 ("drm/msm/gpu: Don't allow zero fence_id")
    */
   int32_t next_submit_fence;
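
   /*
    * Illustrative sketch of the guess (assumes p_atomic_inc_return()
    * from util/u_atomic.h, which is already included; `pipe` is the
    * fd_pipe being submitted to, and the real submit path may look
    * different):
    *
    *    struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
    *    uint32_t guessed_fence =
    *       p_atomic_inc_return(&virtio_pipe->next_submit_fence);
    */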

   /**
    * When userspace allocates the iova, we need to defer deleting bo's
    * (and therefore releasing their address) until submits referencing
    * them have completed.  This is accomplished by enqueueing a job that
    * holds a reference to the submit and waits on the submit's out-fence
    * before dropping that reference.  The submit holds a reference to
    * the associated ring buffers, which in turn hold a ref to the
    * associated bo's.
    */
   struct util_queue retire_queue;
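
   /*
    * Illustrative sketch of such a deferred-retire job (helper names
    * like retire_execute()/retire_cleanup() are hypothetical; the real
    * code may structure this differently):
    *
    *    // enqueue a job that owns one reference to `submit`:
    *    util_queue_add_job(&virtio_pipe->retire_queue, submit, &fence,
    *                       retire_execute, retire_cleanup, 0);
    *
    * where retire_execute() blocks on the submit's out-fence and
    * retire_cleanup() drops the submit reference (and with it the ring
    * buffer and bo references).
    */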
};
FD_DEFINE_CAST(fd_pipe, virtio_pipe);

struct fd_pipe *virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id,
                                uint32_t prio);

struct fd_submit *virtio_submit_new(struct fd_pipe *pipe);

struct virtio_bo {
   struct fd_bo base;
   uint64_t offset;
   uint32_t res_id;
   uint32_t blob_id;
};
FD_DEFINE_CAST(fd_bo, virtio_bo);

struct fd_bo *virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags);
struct fd_bo *virtio_bo_from_handle(struct fd_device *dev, uint32_t size,
                                    uint32_t handle);

/*
 * Internal helpers:
 */
void *virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *hdr, uint32_t sz);
int virtio_execbuf_fenced(struct fd_device *dev, struct msm_ccmd_req *req,
                          uint32_t *handles, uint32_t num_handles,
                          int in_fence_fd, int *out_fence_fd, int ring_idx);
int virtio_execbuf_flush(struct fd_device *dev);
int virtio_execbuf(struct fd_device *dev, struct msm_ccmd_req *req, bool sync);
void virtio_host_sync(struct fd_device *dev, const struct msm_ccmd_req *req);
int virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *req);
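
/*
 * Illustrative request/response flow using the helpers above (the
 * msm_ccmd_foo_req/rsp names are placeholders for whichever msm_ccmd_*
 * message from msm_proto.h is being sent; error handling omitted):
 *
 *    struct msm_ccmd_foo_req req = { ... };
 *    struct msm_ccmd_foo_rsp *rsp;
 *
 *    // reserve space in rsp_mem and point the request header at it:
 *    rsp = virtio_alloc_rsp(dev, &req.hdr, sizeof(*rsp));
 *
 *    // sync=true is assumed here to mean the host has processed the
 *    // request (and filled in `rsp`) before we read it:
 *    virtio_execbuf(dev, &req.hdr, true);
 */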

#endif /* VIRTIO_PRIV_H_ */