/*
 * Copyright 2024 Sergio Lopez
 * SPDX-License-Identifier: MIT
 */

#include "agx_device_virtio.h"

#include <inttypes.h>
#include <sys/mman.h>

#include "drm-uapi/virtgpu_drm.h"
#include "unstable_asahi_drm.h"

#define VIRGL_RENDERER_UNSTABLE_APIS 1
#include "vdrm.h"
#include "virglrenderer_hw.h"

#include "asahi_proto.h"

/**
 * Helper for simple pass-thru ioctls
 */
int
agx_virtio_simple_ioctl(struct agx_device *dev, unsigned cmd, void *_req)
{
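   /* Wrap the raw ioctl payload in an asahi_ccmd_ioctl_simple_req and
    * forward it to the host. For ioctls with an output direction (IOC_OUT),
    * the host's response payload is copied back into the caller's struct.
    */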
   struct vdrm_device *vdrm = dev->vdrm;
   unsigned req_len = sizeof(struct asahi_ccmd_ioctl_simple_req);
   unsigned rsp_len = sizeof(struct asahi_ccmd_ioctl_simple_rsp);

   req_len += _IOC_SIZE(cmd);
   if (cmd & IOC_OUT)
      rsp_len += _IOC_SIZE(cmd);

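   /* The request lives in a stack buffer sized for the wrapper header plus
    * the ioctl payload; the response is allocated by vdrm_alloc_rsp() from
    * vdrm's response memory.
    */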
   uint8_t buf[req_len];
   struct asahi_ccmd_ioctl_simple_req *req = (void *)buf;
   struct asahi_ccmd_ioctl_simple_rsp *rsp;

   req->hdr = ASAHI_CCMD(IOCTL_SIMPLE, req_len);
   req->cmd = cmd;
   memcpy(req->payload, _req, _IOC_SIZE(cmd));

   rsp = vdrm_alloc_rsp(vdrm, &req->hdr, rsp_len);

   int ret = vdrm_send_req(vdrm, &req->hdr, true);
   if (ret) {
      fprintf(stderr, "simple_ioctl: vdrm_send_req failed\n");
      return ret;
   }

   if (cmd & IOC_OUT)
      memcpy(_req, rsp->payload, _IOC_SIZE(cmd));

   return rsp->ret;
}

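/*
 * Allocate a BO: reserve a GPU VA range guest-side, then ask the host to
 * create a blob of the requested size and bind it at that address in the
 * guest's VM.
 */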
static struct agx_bo *
agx_virtio_bo_alloc(struct agx_device *dev, size_t size, size_t align,
                    enum agx_bo_flags flags)
{
   struct agx_bo *bo;
   unsigned handle = 0;

   /* executable implies low va */
   assert(!(flags & AGX_BO_EXEC) || (flags & AGX_BO_LOW_VA));

   struct asahi_ccmd_gem_new_req req = {
      .hdr = ASAHI_CCMD(GEM_NEW, sizeof(req)),
      .size = size,
   };

   if (flags & AGX_BO_WRITEBACK)
      req.flags |= ASAHI_GEM_WRITEBACK;

   uint32_t blob_flags =
      VIRTGPU_BLOB_FLAG_USE_MAPPABLE | VIRTGPU_BLOB_FLAG_USE_SHAREABLE;

   req.bind_flags = ASAHI_BIND_READ;
   if (!(flags & AGX_BO_READONLY)) {
      req.bind_flags |= ASAHI_BIND_WRITE;
   }

   uint32_t blob_id = p_atomic_inc_return(&dev->next_blob_id);

   enum agx_va_flags va_flags = flags & AGX_BO_LOW_VA ? AGX_VA_USC : 0;
   struct agx_va *va = agx_va_alloc(dev, size, align, va_flags, 0);
   if (!va) {
      fprintf(stderr, "Failed to allocate BO VMA\n");
      return NULL;
   }

   /* Note: the VM bind is optional; for sparse BOs, addr can be zeroed to
    * skip mapping.
    */
   req.addr = va->addr;
   req.blob_id = blob_id;
   req.vm_id = dev->vm_id;

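   /* vdrm_bo_create() sends the GEM_NEW ccmd along with the virtgpu blob
    * creation, so the host allocates and binds the object in one round trip.
    */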
   handle = vdrm_bo_create(dev->vdrm, size, blob_flags, blob_id, &req.hdr);
   if (!handle) {
      fprintf(stderr, "vdrm_bo_create failed\n");
      return NULL;
   }

   pthread_mutex_lock(&dev->bo_map_lock);
   bo = agx_lookup_bo(dev, handle);
   dev->max_handle = MAX2(dev->max_handle, handle);
   pthread_mutex_unlock(&dev->bo_map_lock);

   /* Fresh handle */
   assert(!memcmp(bo, &((struct agx_bo){}), sizeof(*bo)));

   bo->dev = dev;
   bo->size = size;
   bo->align = align;
   bo->flags = flags;
   bo->handle = handle;
   bo->prime_fd = -1;
   bo->blob_id = blob_id;
   bo->va = va;
   bo->vbo_res_id = vdrm_handle_to_res_id(dev->vdrm, handle);
   return bo;
}

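/*
 * Bind or unbind a BO range in the device VM. This is fire-and-forget: the
 * request is queued to the host without waiting for a response.
 */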
static int
agx_virtio_bo_bind(struct agx_device *dev, struct agx_bo *bo, uint64_t addr,
                   size_t size_B, uint64_t offset_B, uint32_t flags,
                   bool unbind)
{
   struct asahi_ccmd_gem_bind_req req = {
      .hdr.cmd = ASAHI_CCMD_GEM_BIND,
      .hdr.len = sizeof(struct asahi_ccmd_gem_bind_req),
      .bind = {
         .op = unbind ? ASAHI_BIND_OP_UNBIND : ASAHI_BIND_OP_BIND,
         .flags = flags,
         .vm_id = dev->vm_id,
         .handle = bo->vbo_res_id,
         .offset = offset_B,
         .range = size_B,
         .addr = addr,
      }};

   int ret = vdrm_send_req(dev->vdrm, &req.hdr, false);
   if (ret) {
      fprintf(stderr, "ASAHI_CCMD_GEM_BIND failed: %d (handle=%d)\n", ret,
              bo->handle);
   }

   return ret;
}

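/*
 * Bind a BO as a kernel object (e.g. a user timestamp buffer). Unlike plain
 * VM binds this is synchronous, since the host returns the newly created
 * object handle in the response.
 */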
static int
agx_virtio_bo_bind_object(struct agx_device *dev, struct agx_bo *bo,
                          uint32_t *object_handle, size_t size_B,
                          uint64_t offset_B, uint32_t flags)
{
   struct asahi_ccmd_gem_bind_object_req req = {
      .hdr.cmd = ASAHI_CCMD_GEM_BIND_OBJECT,
      .hdr.len = sizeof(struct asahi_ccmd_gem_bind_object_req),
      .bind = {
         .op = ASAHI_BIND_OBJECT_OP_BIND,
         .flags = flags,
         .vm_id = 0,
         .handle = bo->vbo_res_id,
         .offset = offset_B,
         .range = size_B,
      }};

   struct asahi_ccmd_gem_bind_object_rsp *rsp;

   rsp = vdrm_alloc_rsp(dev->vdrm, &req.hdr,
                        sizeof(struct asahi_ccmd_gem_bind_object_rsp));

   int ret = vdrm_send_req(dev->vdrm, &req.hdr, true);
   if (ret || rsp->ret) {
      fprintf(stderr,
              "ASAHI_CCMD_GEM_BIND_OBJECT bind failed: %d:%d (handle=%d)\n",
              ret, rsp->ret, bo->handle);
   }

   if (!rsp->ret)
      *object_handle = rsp->object_handle;

   return rsp->ret;
}

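/* Release a previously bound kernel object; fire-and-forget like GEM_BIND. */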
static int
agx_virtio_bo_unbind_object(struct agx_device *dev, uint32_t object_handle,
                            uint32_t flags)
{
   struct asahi_ccmd_gem_bind_object_req req = {
      .hdr.cmd = ASAHI_CCMD_GEM_BIND_OBJECT,
      .hdr.len = sizeof(struct asahi_ccmd_gem_bind_object_req),
      .bind = {
         .op = ASAHI_BIND_OBJECT_OP_UNBIND,
         .flags = flags,
         .object_handle = object_handle,
      }};

   int ret = vdrm_send_req(dev->vdrm, &req.hdr, false);
   if (ret) {
      fprintf(stderr,
              "ASAHI_CCMD_GEM_BIND_OBJECT unbind failed: %d (handle=%d)\n", ret,
              object_handle);
   }

   return ret;
}

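/* Map a BO into the guest's address space via its virtgpu blob. */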
static void
agx_virtio_bo_mmap(struct agx_device *dev, struct agx_bo *bo)
{
   bo->_map = vdrm_bo_map(dev->vdrm, bo->handle, bo->size, NULL);
   if (bo->_map == MAP_FAILED) {
      bo->_map = NULL;
      fprintf(stderr, "mmap failed: size=0x%llx fd=%i\n", (long long)bo->size,
              dev->fd);
   }
}

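/*
 * Fetch device parameters from the host, verifying that the host's virtio
 * protocol version matches the one this build was compiled against.
 */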
static ssize_t
agx_virtio_get_params(struct agx_device *dev, void *buf, size_t size)
{
   struct vdrm_device *vdrm = dev->vdrm;
   struct asahi_ccmd_get_params_req req = {
      .params.size = size,
      .hdr.cmd = ASAHI_CCMD_GET_PARAMS,
      .hdr.len = sizeof(struct asahi_ccmd_get_params_req),
   };
   struct asahi_ccmd_get_params_rsp *rsp;

   rsp = vdrm_alloc_rsp(vdrm, &req.hdr,
                        sizeof(struct asahi_ccmd_get_params_rsp) + size);

   int ret = vdrm_send_req(vdrm, &req.hdr, true);
   if (ret)
      goto out;

   if (rsp->virt_uabi_version != ASAHI_PROTO_UNSTABLE_UABI_VERSION) {
      fprintf(stderr, "Virt UABI mismatch: Host %d, Mesa %d\n",
              rsp->virt_uabi_version, ASAHI_PROTO_UNSTABLE_UABI_VERSION);
      return -1;
   }

   ret = rsp->ret;
   if (!ret) {
      memcpy(buf, &rsp->payload, size);
      return size;
   }

out:
   return ret;
}

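/* Append an attachment array (passed as a guest pointer) to the request. */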
static void
agx_virtio_serialize_attachments(char **ptr, uint64_t attachments,
                                 uint32_t count)
{
   if (!count)
      return;

   size_t attachments_size = sizeof(struct drm_asahi_attachment) * count;
   memcpy(*ptr, (char *)(uintptr_t)attachments, attachments_size);
   *ptr += attachments_size;
}

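/*
 * Submit commands to a queue. The variable-length submit request is built in
 * two passes: first size the commands, attachments, extensions and external
 * resources, then serialize them into one contiguous ccmd.
 */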
static int
agx_virtio_submit(struct agx_device *dev, struct drm_asahi_submit *submit,
                  struct agx_submit_virt *virt)
{
   struct drm_asahi_command *commands =
      (struct drm_asahi_command *)(uintptr_t)submit->commands;
   struct drm_asahi_sync *in_syncs =
      (struct drm_asahi_sync *)(uintptr_t)submit->in_syncs;
   struct drm_asahi_sync *out_syncs =
      (struct drm_asahi_sync *)(uintptr_t)submit->out_syncs;
   size_t req_len = sizeof(struct asahi_ccmd_submit_req);

   for (int i = 0; i < submit->command_count; i++) {
      switch (commands[i].cmd_type) {
      case DRM_ASAHI_CMD_COMPUTE: {
         struct drm_asahi_cmd_compute *compute =
            (struct drm_asahi_cmd_compute *)(uintptr_t)commands[i].cmd_buffer;
         req_len += sizeof(struct drm_asahi_command) +
                    sizeof(struct drm_asahi_cmd_compute);
         req_len +=
            compute->attachment_count * sizeof(struct drm_asahi_attachment);

         if (compute->extensions) {
            assert(*(uint32_t *)(uintptr_t)compute->extensions ==
                   ASAHI_COMPUTE_EXT_TIMESTAMPS);
            req_len += sizeof(struct drm_asahi_cmd_compute_user_timestamps);
         }
         break;
      }

      case DRM_ASAHI_CMD_RENDER: {
         struct drm_asahi_cmd_render *render =
            (struct drm_asahi_cmd_render *)(uintptr_t)commands[i].cmd_buffer;
         req_len += sizeof(struct drm_asahi_command) +
                    sizeof(struct drm_asahi_cmd_render);
         req_len += render->fragment_attachment_count *
                    sizeof(struct drm_asahi_attachment);
         req_len += render->vertex_attachment_count *
                    sizeof(struct drm_asahi_attachment);

         if (render->extensions) {
            assert(*(uint32_t *)(uintptr_t)render->extensions ==
                   ASAHI_RENDER_EXT_TIMESTAMPS);
            req_len += sizeof(struct drm_asahi_cmd_render_user_timestamps);
         }
         break;
      }

      default:
         return EINVAL;
      }
   }

   size_t extres_size =
      sizeof(struct asahi_ccmd_submit_res) * virt->extres_count;
   req_len += extres_size;

   struct asahi_ccmd_submit_req *req =
      (struct asahi_ccmd_submit_req *)calloc(1, req_len);
   if (!req)
      return ENOMEM;

   req->queue_id = submit->queue_id;
   req->result_res_id = virt->vbo_res_id;
   req->command_count = submit->command_count;
   req->extres_count = virt->extres_count;

   char *ptr = (char *)&req->payload;

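   /* Second pass: pack each command header and command buffer, followed by
    * any attachment arrays and extension structs it references.
    */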
   for (int i = 0; i < submit->command_count; i++) {
      memcpy(ptr, &commands[i], sizeof(struct drm_asahi_command));
      ptr += sizeof(struct drm_asahi_command);

      memcpy(ptr, (char *)(uintptr_t)commands[i].cmd_buffer,
             commands[i].cmd_buffer_size);
      ptr += commands[i].cmd_buffer_size;

      switch (commands[i].cmd_type) {
      case DRM_ASAHI_CMD_RENDER: {
         struct drm_asahi_cmd_render *render =
            (struct drm_asahi_cmd_render *)(uintptr_t)commands[i].cmd_buffer;
         agx_virtio_serialize_attachments(&ptr, render->vertex_attachments,
                                          render->vertex_attachment_count);
         agx_virtio_serialize_attachments(&ptr, render->fragment_attachments,
                                          render->fragment_attachment_count);
         if (render->extensions) {
            struct drm_asahi_cmd_render_user_timestamps *ext =
               (struct drm_asahi_cmd_render_user_timestamps *)(uintptr_t)
                  render->extensions;
            assert(!ext->next);
            memcpy(ptr, (void *)ext, sizeof(*ext));
            ptr += sizeof(*ext);
         }
         break;
      }
      case DRM_ASAHI_CMD_COMPUTE: {
         struct drm_asahi_cmd_compute *compute =
            (struct drm_asahi_cmd_compute *)(uintptr_t)commands[i].cmd_buffer;
         agx_virtio_serialize_attachments(&ptr, compute->attachments,
                                          compute->attachment_count);
         if (compute->extensions) {
            struct drm_asahi_cmd_compute_user_timestamps *ext =
               (struct drm_asahi_cmd_compute_user_timestamps *)(uintptr_t)
                  compute->extensions;
            assert(!ext->next);
            memcpy(ptr, (void *)ext, sizeof(*ext));
            ptr += sizeof(*ext);
         }
         break;
      }
      }
   }

   memcpy(ptr, virt->extres, extres_size);
   ptr += extres_size;

   req->hdr.cmd = ASAHI_CCMD_SUBMIT;
   req->hdr.len = req_len;

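   /* Translate the DRM syncobjs into virtgpu execbuffer syncobjs, carrying
    * the timeline points across.
    */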
   struct drm_virtgpu_execbuffer_syncobj *vdrm_in_syncs = calloc(
      submit->in_sync_count, sizeof(struct drm_virtgpu_execbuffer_syncobj));
   for (int i = 0; i < submit->in_sync_count; i++) {
      vdrm_in_syncs[i].handle = in_syncs[i].handle;
      vdrm_in_syncs[i].point = in_syncs[i].timeline_value;
   }

   struct drm_virtgpu_execbuffer_syncobj *vdrm_out_syncs = calloc(
      submit->out_sync_count, sizeof(struct drm_virtgpu_execbuffer_syncobj));
   for (int i = 0; i < submit->out_sync_count; i++) {
      vdrm_out_syncs[i].handle = out_syncs[i].handle;
      vdrm_out_syncs[i].point = out_syncs[i].timeline_value;
   }

   struct vdrm_execbuf_params p = {
      /* Signal the host we want to wait for the command to complete */
      .ring_idx = 1,
      .req = &req->hdr,
      .num_in_syncobjs = submit->in_sync_count,
      .in_syncobjs = vdrm_in_syncs,
      .num_out_syncobjs = submit->out_sync_count,
      .out_syncobjs = vdrm_out_syncs,
   };

   int ret = vdrm_execbuf(dev->vdrm, &p);

   free(vdrm_out_syncs);
   free(vdrm_in_syncs);
   free(req);
   return ret;
}

const agx_device_ops_t agx_virtio_device_ops = {
   .bo_alloc = agx_virtio_bo_alloc,
   .bo_bind = agx_virtio_bo_bind,
   .bo_mmap = agx_virtio_bo_mmap,
   .get_params = agx_virtio_get_params,
   .submit = agx_virtio_submit,
   .bo_bind_object = agx_virtio_bo_bind_object,
   .bo_unbind_object = agx_virtio_bo_unbind_object,
};

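/*
 * Connect to the host's asahi native context. The second argument to
 * vdrm_device_connect() is the expected virtgpu context type (presumably the
 * asahi native-context id).
 */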
bool
agx_virtio_open_device(struct agx_device *dev)
{
   struct vdrm_device *vdrm;

   vdrm = vdrm_device_connect(dev->fd, 2);
   if (!vdrm) {
      fprintf(stderr, "could not connect vdrm\n");
      return false;
   }

   dev->vdrm = vdrm;
   dev->ops = agx_virtio_device_ops;
   return true;
}