/*
 * Copyright © 2022 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef VIRTIO_PRIV_H_
#define VIRTIO_PRIV_H_

#include <poll.h>

#include "freedreno_priv.h"

#include "util/perf/cpu_trace.h"
#include "util/u_atomic.h"
#include "util/slab.h"
#include "util/timespec.h"
#include "util/vma.h"

#include "drm-uapi/virtgpu_drm.h"
/* We also use some types/defines from the host drm/msm uabi: */
#include "drm-uapi/msm_drm.h"

#define VIRGL_RENDERER_UNSTABLE_APIS 1
#include "virglrenderer_hw.h"
#include "msm_proto.h"

#include "vdrm.h"

struct virtio_device {
   struct fd_device base;

   struct vdrm_device *vdrm;

   uint32_t next_blob_id;
   struct msm_shmem *shmem;

   /*
    * Notes on address space allocation:
    *
    * In both the import (GEM_INFO) and new (GEM_NEW) paths we allocate
    * the iova.  Since the iova (vma on the kernel side) is local to the
    * address space, which is 1:1 with the drm fd (which in turn is 1:1
    * with the virtio_device and therefore the address_space) and not
    * shared with anything outside of the driver, and because of handle
    * de-duplication, we can safely assume that an iova has not yet been
    * set on imported buffers.
    *
    * The other complication with userspace-allocated iova is that the
    * kernel holds on to a reference to the bo (and the GPU is still
    * using its iova) until the submit retires.  So a per-pipe
    * retire_queue is used to hold an extra reference to the submit (and
    * indirectly all the bo's it references) until the out-fence is
    * signaled.  See the usage sketch after the iova helpers below.
    */
   struct util_vma_heap address_space;
   simple_mtx_t address_space_lock;
};
FD_DEFINE_CAST(fd_device, virtio_device);

struct fd_device *virtio_device_new(int fd, drmVersionPtr version);

static inline void
virtio_dev_free_iova(struct fd_device *dev, uint64_t iova, uint32_t size)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);

   simple_mtx_lock(&virtio_dev->address_space_lock);
   util_vma_heap_free(&virtio_dev->address_space, iova, size);
   simple_mtx_unlock(&virtio_dev->address_space_lock);
}

static inline uint64_t
virtio_dev_alloc_iova(struct fd_device *dev, uint32_t size)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   uint64_t iova;

   simple_mtx_lock(&virtio_dev->address_space_lock);
   iova = util_vma_heap_alloc(&virtio_dev->address_space, size, 0x1000);
   simple_mtx_unlock(&virtio_dev->address_space_lock);

   return iova;
}
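
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): an
 * allocation or import path is expected to pair virtio_dev_alloc_iova()
 * with virtio_dev_free_iova() if a later step fails, so the range goes
 * back to the heap; on success the range is only returned once the bo is
 * really dead, ie. after any submit referencing it has retired.  The
 * bo-creation step itself is elided here.
 */
static inline uint64_t
virtio_example_assign_iova(struct fd_device *dev, uint32_t size,
                           bool creation_failed)
{
   /* Page-aligned ranges come from the per-device vma heap: */
   uint64_t iova = virtio_dev_alloc_iova(dev, size);

   if (!iova)
      return 0;      /* heap exhausted, the allocation must fail */

   /* ... the real path would create/import the bo here ... */

   if (creation_failed) {
      /* Nothing else will give the range back, so do it now: */
      virtio_dev_free_iova(dev, iova, size);
      return 0;
   }

   return iova;
}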

struct virtio_pipe {
   struct fd_pipe base;
   uint32_t pipe;
   uint32_t gpu_id;
   uint64_t chip_id;
   uint64_t gmem_base;
   uint32_t gmem;
   uint32_t queue_id;
   uint32_t ring_idx;
   struct slab_parent_pool ring_pool;

   /**
    * We know that the kernel allocates fence seqnos sequentially per
    * submitqueue, in the range 1..INT_MAX, and increments them *after*
    * any point where the submit ioctl could be restarted.  So we just
    * *guess* what the next seqno fence will be to avoid having to
    * synchronize the submit with the host.  (See the sketch below the
    * struct.)
    *
    * TODO maybe we need a version_minor bump so we can rely on the
    * 1..INT_MAX assumption.. it is only really true after:
    *
    *   ca3ffcbeb0c8 ("drm/msm/gpu: Don't allow zero fence_id")
    */
   int32_t next_submit_fence;

   /**
    * When userspace allocates the iova, we need to defer deleting bo's
    * (and therefore releasing their address) until any submits
    * referencing them have completed.  This is accomplished by
    * enqueueing a job, holding a reference to the submit, that waits on
    * the submit's out-fence before dropping the reference to the submit.
    * The submit holds a reference to the associated ring buffers, which
    * in turn hold a ref to the associated bo's.  (See the sketch after
    * virtio_submit_new() below.)
    */
   struct util_queue retire_queue;
};
FD_DEFINE_CAST(fd_pipe, virtio_pipe);
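
/*
 * Illustrative sketch (hypothetical helper, not a declaration from this
 * header): since the kernel hands out fence seqnos sequentially per
 * submitqueue, the guest side can guess the seqno of the submit it is
 * about to send without a round trip to the host:
 */
static inline uint32_t
virtio_example_guess_fence(struct fd_pipe *pipe)
{
   /* Atomically reserve the next value in the 1..INT_MAX range that the
    * kernel will assign to this submit:
    */
   return p_atomic_inc_return(&to_virtio_pipe(pipe)->next_submit_fence);
}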

struct fd_pipe *virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id,
                                uint32_t prio);

struct fd_submit *virtio_submit_new(struct fd_pipe *pipe);
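
/*
 * Illustrative sketch (hypothetical names, not part of the driver): the job
 * enqueued on virtio_pipe::retire_queue is conceptually just "wait for the
 * out-fence, then drop the submit reference".  The example_* helpers below
 * are assumptions used only for illustration:
 *
 *    static void
 *    example_retire_job(void *job)
 *    {
 *       struct fd_submit *submit = job;
 *
 *       // Block until the GPU is done with every bo the submit references:
 *       example_wait_out_fence(submit);
 *
 *       // Dropping the last reference lets the ring buffers and bo's be
 *       // freed, returning their iovas via virtio_dev_free_iova():
 *       example_submit_unref(submit);
 *    }
 */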

struct virtio_bo {
   struct fd_bo base;
   uint64_t alloc_time_ns;
   uint64_t offset;
   uint32_t res_id;
   uint32_t blob_id;
   uint32_t upload_seqno;
   bool has_upload_seqno;
};
FD_DEFINE_CAST(fd_bo, virtio_bo);

struct fd_bo *virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags);
struct fd_bo *virtio_bo_from_handle(struct fd_device *dev, uint32_t size,
                                    uint32_t handle);

/*
 * Internal helpers:
 */
int virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *req);

#endif /* VIRTIO_PRIV_H_ */