/*
 * Copyright © 2022 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <fcntl.h>
#include <sched.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "util/libsync.h"
#include "util/u_process.h"

#include "virtio_priv.h"

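/**
 * Device destructor, called via funcs->destroy; releases the shmem
 * buffer and the guest-managed address-space heap.
 */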
static void
virtio_device_destroy(struct fd_device *dev)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);

   fd_bo_del_locked(virtio_dev->shmem_bo);
   util_vma_heap_finish(&virtio_dev->address_space);
}

static const struct fd_device_funcs funcs = {
   .bo_new = virtio_bo_new,
   .bo_from_handle = virtio_bo_from_handle,
   .pipe_new = virtio_pipe_new,
   .destroy = virtio_device_destroy,
};

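/**
 * Query the VIRGL_RENDERER_CAPSET_DRM capset, which describes the host
 * driver (wire format version, native driver version, GPU params, and
 * the VA range available to the guest).
 */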
static int
get_capset(int fd, struct virgl_renderer_capset_drm *caps)
{
   struct drm_virtgpu_get_caps args = {
         .cap_set_id = VIRGL_RENDERER_CAPSET_DRM,
         .cap_set_ver = 0,
         .addr = VOID2U64(caps),
         .size = sizeof(*caps),
   };

   memset(caps, 0, sizeof(*caps));

   return drmIoctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
}

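/**
 * Initialize the virtgpu context, selecting the DRM capset and
 * requesting 64 timeline rings (ring_idx 0 is reserved as the "CPU
 * ring", see the comment above virtio_execbuf_fenced()).
 */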
static int
set_context(int fd)
{
   struct drm_virtgpu_context_set_param params[] = {
         { VIRTGPU_CONTEXT_PARAM_CAPSET_ID, VIRGL_RENDERER_CAPSET_DRM },
         { VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 64 },
   };
   struct drm_virtgpu_context_init args = {
      .num_params = ARRAY_SIZE(params),
      .ctx_set_params = VOID2U64(params),
   };

   return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &args);
}

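/**
 * Send the guest process name and cmdline to the host, for use in
 * host-side debug logging.
 */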
static void
set_debuginfo(struct fd_device *dev)
{
   const char *comm = util_get_process_name();
   static char cmdline[0x1000+1];
   int fd = open("/proc/self/cmdline", O_RDONLY);
   if (fd < 0)
      return;

   int n = read(fd, cmdline, sizeof(cmdline) - 1);
   close(fd);
   if (n < 0)
      return;

   /* arguments are separated by NUL bytes, convert to spaces: */
   for (int i = 0; i < n; i++) {
      if (cmdline[i] == '\0') {
         cmdline[i] = ' ';
      }
   }

   cmdline[n] = '\0';

   unsigned comm_len = strlen(comm) + 1;
   unsigned cmdline_len = strlen(cmdline) + 1;

   struct msm_ccmd_set_debuginfo_req *req;

   unsigned req_len = align(sizeof(*req) + comm_len + cmdline_len, 4);

   req = malloc(req_len);
   if (!req)
      return;

   req->hdr         = MSM_CCMD(SET_DEBUGINFO, req_len);
   req->comm_len    = comm_len;
   req->cmdline_len = cmdline_len;

   memcpy(&req->payload[0], comm, comm_len);
   memcpy(&req->payload[comm_len], cmdline, cmdline_len);

   virtio_execbuf(dev, &req->hdr, false);

   free(req);
}

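/**
 * Create an fd_device on top of a virtgpu fd, if the host exposes a
 * compatible DRM context (VIRTGPU_DRM_CONTEXT_MSM).  Returning NULL
 * makes the caller fall back to another backend (e.g. virgl).
 */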
struct fd_device *
virtio_device_new(int fd, drmVersionPtr version)
{
   struct virgl_renderer_capset_drm caps;
   struct virtio_device *virtio_dev;
   struct fd_device *dev;
   int ret;

   STATIC_ASSERT(FD_BO_PREP_READ == MSM_PREP_READ);
   STATIC_ASSERT(FD_BO_PREP_WRITE == MSM_PREP_WRITE);
   STATIC_ASSERT(FD_BO_PREP_NOSYNC == MSM_PREP_NOSYNC);

   /* Debug option to force fallback to virgl: */
   if (debug_get_bool_option("FD_NO_VIRTIO", false))
      return NULL;

   ret = get_capset(fd, &caps);
   if (ret) {
      INFO_MSG("could not get caps: %s", strerror(errno));
      return NULL;
   }

   if (caps.context_type != VIRTGPU_DRM_CONTEXT_MSM) {
      INFO_MSG("wrong context_type: %u", caps.context_type);
      return NULL;
   }

   INFO_MSG("wire_format_version: %u", caps.wire_format_version);
   INFO_MSG("version_major:       %u", caps.version_major);
   INFO_MSG("version_minor:       %u", caps.version_minor);
   INFO_MSG("version_patchlevel:  %u", caps.version_patchlevel);
   INFO_MSG("has_cached_coherent: %u", caps.u.msm.has_cached_coherent);
   INFO_MSG("va_start:            0x%0" PRIx64, caps.u.msm.va_start);
   INFO_MSG("va_size:             0x%0" PRIx64, caps.u.msm.va_size);
   INFO_MSG("gpu_id:              %u", caps.u.msm.gpu_id);
   INFO_MSG("gmem_size:           %u", caps.u.msm.gmem_size);
   INFO_MSG("gmem_base:           0x%0" PRIx64, caps.u.msm.gmem_base);
   INFO_MSG("chip_id:             0x%0" PRIx64, caps.u.msm.chip_id);
   INFO_MSG("max_freq:            %u", caps.u.msm.max_freq);

   if (caps.wire_format_version != 2) {
      ERROR_MSG("Unsupported protocol version: %u", caps.wire_format_version);
      return NULL;
   }

   if ((caps.version_major != 1) || (caps.version_minor < FD_VERSION_SOFTPIN)) {
      ERROR_MSG("unsupported version: %u.%u.%u", caps.version_major,
                caps.version_minor, caps.version_patchlevel);
      return NULL;
   }

   if (!caps.u.msm.va_size) {
      ERROR_MSG("No address space");
      return NULL;
   }

   ret = set_context(fd);
   if (ret) {
      INFO_MSG("Could not set context type: %s", strerror(errno));
      return NULL;
   }

   virtio_dev = calloc(1, sizeof(*virtio_dev));
   if (!virtio_dev)
      return NULL;

   dev = &virtio_dev->base;
   dev->funcs = &funcs;
   dev->fd = fd;
   dev->version = caps.version_minor;
   dev->has_cached_coherent = caps.u.msm.has_cached_coherent;

   p_atomic_set(&virtio_dev->next_blob_id, 1);

   virtio_dev->caps = caps;

   util_queue_init(&dev->submit_queue, "sq", 8, 1, 0, NULL);

   dev->bo_size = sizeof(struct virtio_bo);

   simple_mtx_init(&virtio_dev->rsp_lock, mtx_plain);
   simple_mtx_init(&virtio_dev->eb_lock, mtx_plain);

   set_debuginfo(dev);

   util_vma_heap_init(&virtio_dev->address_space,
                      caps.u.msm.va_start,
                      caps.u.msm.va_size);
   simple_mtx_init(&virtio_dev->address_space_lock, mtx_plain);

   return dev;
}

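/**
 * Allocate a slot for the host's response in the shared response
 * memory, recording its offset in the request header.  The allocator
 * simply wraps around, assuming earlier responses have been consumed
 * by the time the buffer wraps.
 */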
void *
virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *req, uint32_t sz)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   unsigned off;

   simple_mtx_lock(&virtio_dev->rsp_lock);

   sz = align(sz, 8);

   if ((virtio_dev->next_rsp_off + sz) >= virtio_dev->rsp_mem_len)
      virtio_dev->next_rsp_off = 0;

   off = virtio_dev->next_rsp_off;
   virtio_dev->next_rsp_off += sz;

   simple_mtx_unlock(&virtio_dev->rsp_lock);

   req->rsp_off = off;

   struct msm_ccmd_rsp *rsp = (void *)&virtio_dev->rsp_mem[off];
   rsp->len = sz;

   return rsp;
}

static int execbuf_flush_locked(struct fd_device *dev, int *out_fence_fd);

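/**
 * Submit a command buffer to the host via DRM_IOCTL_VIRTGPU_EXECBUFFER,
 * optionally passing in and/or returning a fence fd.  Caller must hold
 * eb_lock.
 */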
static int
execbuf_locked(struct fd_device *dev, void *cmd, uint32_t cmd_size,
               uint32_t *handles, uint32_t num_handles,
               int in_fence_fd, int *out_fence_fd, int ring_idx)
{
#define COND(bool, val) ((bool) ? (val) : 0)
   struct drm_virtgpu_execbuffer eb = {
         .flags = COND(out_fence_fd, VIRTGPU_EXECBUF_FENCE_FD_OUT) |
                  COND(in_fence_fd != -1, VIRTGPU_EXECBUF_FENCE_FD_IN) |
                  VIRTGPU_EXECBUF_RING_IDX,
         .fence_fd = in_fence_fd,
         .size  = cmd_size,
         .command = VOID2U64(cmd),
         .ring_idx = ring_idx,
         .bo_handles = VOID2U64(handles),
         .num_bo_handles = num_handles,
   };

   int ret = drmIoctl(dev->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret) {
      ERROR_MSG("EXECBUFFER failed: %s", strerror(errno));
      return ret;
   }

   if (out_fence_fd)
      *out_fence_fd = eb.fence_fd;

   return 0;
}

/**
 * Helper for the "execbuf" ioctl.  Note that in virtgpu, execbuf is
 * just a generic "send commands to host" mechanism, not necessarily
 * specific to cmdstream execution.
 *
 * Note that ring_idx 0 is the "CPU ring", i.e. for synchronizing
 * between guest and host CPU.
 */
int
virtio_execbuf_fenced(struct fd_device *dev, struct msm_ccmd_req *req,
                      uint32_t *handles, uint32_t num_handles,
                      int in_fence_fd, int *out_fence_fd, int ring_idx)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   int ret;

   simple_mtx_lock(&virtio_dev->eb_lock);
   execbuf_flush_locked(dev, NULL);
   req->seqno = ++virtio_dev->next_seqno;

   ret = execbuf_locked(dev, req, req->len, handles, num_handles,
                        in_fence_fd, out_fence_fd, ring_idx);

   simple_mtx_unlock(&virtio_dev->eb_lock);

   return ret;
}

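/**
 * Flush the locally batched requests out to the host on the CPU ring
 * (ring_idx 0).  Caller must hold eb_lock.
 */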
static int
execbuf_flush_locked(struct fd_device *dev, int *out_fence_fd)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   int ret;

   if (!virtio_dev->reqbuf_len)
      return 0;

   ret = execbuf_locked(dev, virtio_dev->reqbuf, virtio_dev->reqbuf_len,
                        NULL, 0, -1, out_fence_fd, 0);
   if (ret)
      return ret;

   virtio_dev->reqbuf_len = 0;
   virtio_dev->reqbuf_cnt = 0;

   return 0;
}

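/**
 * Flush any queued requests to the host, without waiting.
 */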
int
virtio_execbuf_flush(struct fd_device *dev)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   simple_mtx_lock(&virtio_dev->eb_lock);
   int ret = execbuf_flush_locked(dev, NULL);
   simple_mtx_unlock(&virtio_dev->eb_lock);
   return ret;
}

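/**
 * Append a request to the local request buffer, flushing first if it
 * would overflow.  If 'sync' is set, flush immediately and block until
 * the host has processed this request.
 */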
int
virtio_execbuf(struct fd_device *dev, struct msm_ccmd_req *req, bool sync)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);
   int fence_fd, ret = 0;

   simple_mtx_lock(&virtio_dev->eb_lock);
   req->seqno = ++virtio_dev->next_seqno;

   if ((virtio_dev->reqbuf_len + req->len) > sizeof(virtio_dev->reqbuf)) {
      ret = execbuf_flush_locked(dev, NULL);
      if (ret)
         goto out_unlock;
   }

   memcpy(&virtio_dev->reqbuf[virtio_dev->reqbuf_len], req, req->len);
   virtio_dev->reqbuf_len += req->len;
   virtio_dev->reqbuf_cnt++;

   if (!sync)
      goto out_unlock;

   ret = execbuf_flush_locked(dev, &fence_fd);

out_unlock:
   simple_mtx_unlock(&virtio_dev->eb_lock);

   if (ret)
      return ret;

   if (sync) {
      sync_wait(fence_fd, -1);
      close(fence_fd);
      virtio_host_sync(dev, req);
   }

   return 0;
}

/**
 * Wait until the host has processed the specified request.
 */
void
virtio_host_sync(struct fd_device *dev, const struct msm_ccmd_req *req)
{
   struct virtio_device *virtio_dev = to_virtio_device(dev);

   while (fd_fence_before(virtio_dev->shmem->seqno, req->seqno))
      sched_yield();
}

/**
 * Helper for simple pass-thru ioctls
 */
int
virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *_req)
{
   unsigned req_len = sizeof(struct msm_ccmd_ioctl_simple_req);
   unsigned rsp_len = sizeof(struct msm_ccmd_ioctl_simple_rsp);

   req_len += _IOC_SIZE(cmd);
   if (cmd & IOC_OUT)
      rsp_len += _IOC_SIZE(cmd);

   uint8_t buf[req_len];
   struct msm_ccmd_ioctl_simple_req *req = (void *)buf;
   struct msm_ccmd_ioctl_simple_rsp *rsp;

   req->hdr = MSM_CCMD(IOCTL_SIMPLE, req_len);
   req->cmd = cmd;
   memcpy(req->payload, _req, _IOC_SIZE(cmd));

   rsp = virtio_alloc_rsp(dev, &req->hdr, rsp_len);

   int ret = virtio_execbuf(dev, &req->hdr, true);
   if (ret)
      return ret;

   if (cmd & IOC_OUT)
      memcpy(_req, rsp->payload, _IOC_SIZE(cmd));

   return rsp->ret;
}
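
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might use virtio_simple_ioctl() to pass a guest ioctl through to the
 * host kernel driver.  DRM_IOCTL_MSM_GET_PARAM and struct drm_msm_param
 * come from the msm UAPI; whether a given caller forwards this exact
 * ioctl is an assumption here.
 *
 *    struct drm_msm_param p = {
 *          .pipe = MSM_PIPE_3D0,
 *          .param = MSM_PARAM_GPU_ID,
 *    };
 *    int ret = virtio_simple_ioctl(dev, DRM_IOCTL_MSM_GET_PARAM, &p);
 *    if (!ret)
 *       printf("gpu_id: %u\n", (uint32_t)p.value);
 *
 * Since DRM_IOCTL_MSM_GET_PARAM is _IOWR (IOC_IN | IOC_OUT), the
 * payload is copied to the host and the host's response payload is
 * copied back into 'p' before the host-side return code is returned.
 */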