/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

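/*
 * Commands and responses small enough to fit the inline slots below are
 * stored directly in the vbuffer allocation; larger response buffers
 * must be supplied by the caller.
 */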
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

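/* Allocate a new resource id (starting at 1) from the device-wide IDR. */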
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                                uint32_t *resid)
{
        int handle;

        idr_preload(GFP_KERNEL);
        spin_lock(&vgdev->resource_idr_lock);
        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&vgdev->resource_idr_lock);
        idr_preload_end();
        *resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        spin_lock(&vgdev->resource_idr_lock);
        idr_remove(&vgdev->resource_idr, id);
        spin_unlock(&vgdev->resource_idr_lock);
}

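/*
 * Virtqueue callbacks, called in interrupt context: just kick off the
 * dequeue work, which does the actual response processing.
 */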
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

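/*
 * The command always lives in the inline slot right behind the vbuffer
 * struct.  The response shares the allocation too when it fits in the
 * inline slot, otherwise the caller-provided resp_buf is used.
 */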
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);
        memset(vbuf, 0, VBUFFER_SIZE);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

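/* Allocate a command buffer that expects a plain ctrl_hdr response. */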
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

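/* Release a vbuffer, including any out-of-line response and data buffers. */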
static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

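/* Called with the queue lock held: collect all completed buffers. */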
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

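/*
 * Control queue work function: drain completed buffers under the queue
 * lock, then run response callbacks and fence processing without it.
 */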
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

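/*
 * Queue a buffer on the control ring.  May drop and re-take the queue
 * lock while waiting for ring space, hence the sparse annotations.
 */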
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
                __releases(&vgdev->ctrlq.qlock)
                __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue.  If not,
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

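/* Queue a cursor command; the cursor ring carries no response buffers. */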
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}

/* just create gem objects for userspace and long lived objects,
   just use dma_alloced pages for the queue objects? */

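/*
 * Rough sketch of the typical 2D flow built from the commands below:
 * create a host resource, attach guest backing pages, point a scanout
 * at it, then transfer dirty regions and flush them to the display.
 */
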
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                           uint32_t resource_id)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

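/*
 * Takes ownership of 'ents': it is handed to the ring via data_buf and
 * kfree'd by free_vbuf once the host has consumed the command.
 */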
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

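/*
 * The display-info response is larger than the inline slot, so it gets
 * a kzalloc'd buffer which free_vbuf releases after the callback runs.
 */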
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

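/*
 * Fetch a capset from the host.  The cache entry is published on
 * cap_cache before the command is queued; the response callback copies
 * the data in and then marks the entry valid.
 */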
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t resource_id, uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

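/*
 * Takes ownership of 'data': the command stream is attached as the
 * out-of-line data buffer and kfree'd when the vbuffer is reclaimed.
 */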
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

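/*
 * Build the mem entry table for the object's backing pages and attach
 * it to the host resource.
 */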
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(obj->pages->nents,
                             sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
                ents[si].addr = cpu_to_le64(sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
                                               ents, obj->pages->nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
}

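/* Push the current cursor state for one output to the cursor ring. */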
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}