/*
 * Copyright © 2022 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "util/libsync.h"

#include "virtio_priv.h"

static void *
virtio_bo_mmap(struct fd_bo *bo)
{
   struct vdrm_device *vdrm = to_virtio_device(bo->dev)->vdrm;
   struct virtio_bo *virtio_bo = to_virtio_bo(bo);

   /* If we have uploaded, we need to wait for the host to handle that
    * before we can allow guest-side CPU access:
    */
   if (virtio_bo->has_upload_seqno) {

      virtio_bo->has_upload_seqno = false;

      vdrm_flush(vdrm);
      vdrm_host_sync(vdrm, &(struct vdrm_ccmd_req) {
         .seqno = virtio_bo->upload_seqno,
      });
   }

   return vdrm_bo_map(vdrm, bo->handle, bo->size);
}

static int
virtio_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
   MESA_TRACE_FUNC();
   struct vdrm_device *vdrm = to_virtio_device(bo->dev)->vdrm;
   int ret;

   /*
    * Wait first in the guest, to avoid a blocking call into the host.
    * If implicit sync is used, we still need to *also* wait in the
    * host if it is a shared buffer, because the guest doesn't know
    * about usage of the bo in the host (or by other guests).
    */

   ret = vdrm_bo_wait(vdrm, bo->handle);
   if (ret)
      goto out;

   /*
    * The buffer could be shared with other things on the host side,
    * so we have to poll the host.  But we only get here for shared
    * buffers with implicit sync.  Hopefully that is rare enough.
    */

   struct msm_ccmd_gem_cpu_prep_req req = {
      .hdr = MSM_CCMD(GEM_CPU_PREP, sizeof(req)),
      .res_id = to_virtio_bo(bo)->res_id,
      .op = op,
   };
   struct msm_ccmd_gem_cpu_prep_rsp *rsp;

   /* We can't do a blocking wait in the host, so we have to poll: */
   do {
      rsp = vdrm_alloc_rsp(vdrm, &req.hdr, sizeof(*rsp));

      ret = vdrm_send_req(vdrm, &req.hdr, true);
      if (ret)
         goto out;

      ret = rsp->ret;
   } while (ret == -EBUSY);

out:
   return ret;
}

static int
virtio_bo_madvise(struct fd_bo *bo, int willneed)
{
   /* TODO:
    * Currently unsupported; synchronous WILLNEED calls would introduce too
    * much latency.. ideally we'd keep state in the guest and only flush
    * down to the host when the host is under memory pressure.  (Perhaps
    * virtio-balloon could signal this?)
    */
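   /* For now just report WILLNEED buffers as still resident: */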
   return willneed;
}

static uint64_t
virtio_bo_iova(struct fd_bo *bo)
{
   /* The shmem bo is allowed to have no iova, as it is only used for
    * guest<->host communications:
    */
   assert(bo->iova || (to_virtio_bo(bo)->blob_id == 0));
   return bo->iova;
}

static void
virtio_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
   char name[32];
   int sz;

   /* Note, we cannot set name on the host for the shmem bo, as
    * that isn't a real gem obj on the host side.. not having
    * an iova is a convenient way to detect this case:
    */
   if (!bo->iova)
      return;

   sz = vsnprintf(name, sizeof(name), fmt, ap);
   sz = MIN2(sz, sizeof(name));

   unsigned req_len = sizeof(struct msm_ccmd_gem_set_name_req) + align(sz, 4);

   uint8_t buf[req_len];
   struct msm_ccmd_gem_set_name_req *req = (void *)buf;

   req->hdr = MSM_CCMD(GEM_SET_NAME, req_len);
   req->res_id = to_virtio_bo(bo)->res_id;
   req->len = sz;

   memcpy(req->payload, name, sz);

   vdrm_send_req(to_virtio_device(bo->dev)->vdrm, &req->hdr, false);
}

static int
virtio_bo_dmabuf(struct fd_bo *bo)
{
   struct virtio_device *virtio_dev = to_virtio_device(bo->dev);

   return vdrm_bo_export_dmabuf(virtio_dev->vdrm, bo->handle);
}

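/* Upload a single chunk by inlining the data into a GEM_UPLOAD ccmd
 * payload, remembering the seqno so a later mmap can sync against the
 * upload (see virtio_bo_mmap()):
 */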
static void
bo_upload(struct fd_bo *bo, unsigned off, void *src, unsigned len)
{
   MESA_TRACE_FUNC();
   unsigned req_len = sizeof(struct msm_ccmd_gem_upload_req) + align(len, 4);
   struct virtio_bo *virtio_bo = to_virtio_bo(bo);

   uint8_t buf[req_len];
   struct msm_ccmd_gem_upload_req *req = (void *)buf;

   req->hdr = MSM_CCMD(GEM_UPLOAD, req_len);
   req->res_id = virtio_bo->res_id;
   req->pad = 0;
   req->off = off;
   req->len = len;

   memcpy(req->payload, src, len);

   vdrm_send_req(to_virtio_device(bo->dev)->vdrm, &req->hdr, false);

   virtio_bo->upload_seqno = req->hdr.seqno;
   virtio_bo->has_upload_seqno = true;
}

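/* Split uploads into 4 KiB chunks, which keeps the stack-allocated
 * request in bo_upload() small:
 */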
static void
virtio_bo_upload(struct fd_bo *bo, void *src, unsigned off, unsigned len)
{
   while (len > 0) {
      unsigned sz = MIN2(len, 0x1000);
      bo_upload(bo, off, src, sz);
      off += sz;
      src += sz;
      len -= sz;
   }
}

/**
 * For recently allocated buffers, an immediate mmap would stall waiting
 * for the host to handle the allocation and map it to the guest, which
 * could take a few ms.  So for small transfers to recently allocated
 * buffers, we'd prefer to use the upload path instead.
 */
static bool
virtio_bo_prefer_upload(struct fd_bo *bo, unsigned len)
{
   struct virtio_bo *virtio_bo = to_virtio_bo(bo);

   /* If we've already taken the hit of mmap'ing the buffer, then there is
    * no reason to take the upload path:
    */
   if (bo->map)
      return false;

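   /* Only small transfers (16 KiB or less) are worth copying through the
    * upload path:
    */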
   if (len > 0x4000)
      return false;

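   /* ...and only while the allocation is fresh (less than ~5 ms old), ie.
    * while the host is still likely to be working on it:
    */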
   int64_t age_ns = os_time_get_nano() - virtio_bo->alloc_time_ns;
   if (age_ns > 5000000)
      return false;

   return true;
}

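/* Tell the host the guest-allocated iova for this bo.  An iova of zero
 * releases the mapping (see virtio_bo_finalize()):
 */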
static void
set_iova(struct fd_bo *bo, uint64_t iova)
{
   struct msm_ccmd_gem_set_iova_req req = {
      .hdr = MSM_CCMD(GEM_SET_IOVA, sizeof(req)),
      .res_id = to_virtio_bo(bo)->res_id,
      .iova = iova,
   };

   vdrm_send_req(to_virtio_device(bo->dev)->vdrm, &req.hdr, false);
}

static void
virtio_bo_finalize(struct fd_bo *bo)
{
   /* Release iova by setting to zero: */
   if (bo->iova) {
      set_iova(bo, 0);

      virtio_dev_free_iova(bo->dev, bo->iova, bo->size);
   }
}

static const struct fd_bo_funcs funcs = {
   .map = virtio_bo_mmap,
   .cpu_prep = virtio_bo_cpu_prep,
   .madvise = virtio_bo_madvise,
   .iova = virtio_bo_iova,
   .set_name = virtio_bo_set_name,
   .dmabuf = virtio_bo_dmabuf,
   .upload = virtio_bo_upload,
   .prefer_upload = virtio_bo_prefer_upload,
   .finalize = virtio_bo_finalize,
   .destroy = fd_bo_fini_common,
};

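/* Common bo setup shared by virtio_bo_new() and virtio_bo_from_handle(): */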
static struct fd_bo *
bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   struct virtio_bo *virtio_bo;
   struct fd_bo *bo;

   virtio_bo = calloc(1, sizeof(*virtio_bo));
   if (!virtio_bo)
      return NULL;

   virtio_bo->alloc_time_ns = os_time_get_nano();

   bo = &virtio_bo->base;

   /* Note we need to set these because allocation_wait_execute() could
    * run before fd_bo_init_common():
    */
   bo->dev = dev;
   p_atomic_set(&bo->refcnt, 1);

   bo->size = size;
   bo->funcs = &funcs;
   bo->handle = handle;

   /* Don't assume we can mmap an imported bo: */
   bo->alloc_flags = FD_BO_NOMAP;

   virtio_bo->res_id = vdrm_handle_to_res_id(virtio_dev->vdrm, handle);

   fd_bo_init_common(bo, dev);

   return bo;
}

/* allocate a new buffer object from an existing handle */
struct fd_bo *
virtio_bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
   struct fd_bo *bo = bo_from_handle(dev, size, handle);

   if (!bo)
      return NULL;

   bo->iova = virtio_dev_alloc_iova(dev, size);
   if (!bo->iova)
      goto fail;

   set_iova(bo, bo->iova);

   return bo;

fail:
   virtio_bo_finalize(bo);
   fd_bo_fini_common(bo);
   return NULL;
}

/* allocate a buffer object: */
struct fd_bo *
virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   struct msm_ccmd_gem_new_req req = {
      .hdr = MSM_CCMD(GEM_NEW, sizeof(req)),
      .size = size,
   };

   if (flags & FD_BO_SCANOUT)
      req.flags |= MSM_BO_SCANOUT;

   if (flags & FD_BO_GPUREADONLY)
      req.flags |= MSM_BO_GPU_READONLY;

   if (flags & FD_BO_CACHED_COHERENT) {
      req.flags |= MSM_BO_CACHED_COHERENT;
   } else {
      req.flags |= MSM_BO_WC;
   }

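   /* Buffers that will be shared or scanned out must also be usable by
    * other devices/contexts on the host:
    */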
   uint32_t blob_flags = 0;
   if (flags & (FD_BO_SHARED | FD_BO_SCANOUT)) {
      blob_flags = VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE |
                   VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
   }

   if (!(flags & FD_BO_NOMAP)) {
      blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   }

   uint32_t blob_id = p_atomic_inc_return(&virtio_dev->next_blob_id);

   /* tunneled cmds are processed separately on the host side,
    * before the renderer->get_blob() callback.. the blob_id
    * is used to link the created bo to the get_blob() call
    */
   req.blob_id = blob_id;
   req.iova = virtio_dev_alloc_iova(dev, size);
   if (!req.iova)
      goto fail;

   uint32_t handle =
      vdrm_bo_create(virtio_dev->vdrm, size, blob_flags, blob_id, &req.hdr);
   if (!handle)
      goto fail;

   struct fd_bo *bo = bo_from_handle(dev, size, handle);
   struct virtio_bo *virtio_bo = to_virtio_bo(bo);

   virtio_bo->blob_id = blob_id;
   bo->iova = req.iova;

   return bo;

fail:
   if (req.iova)
      virtio_dev_free_iova(dev, req.iova, size);
   return NULL;
}