/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)
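
/* Convert a userspace drm_virtgpu_3d_box to the device's little-endian layout. */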
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}
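
/*
 * Virtqueue callbacks, typically invoked in interrupt context.  They only
 * kick the per-queue dequeue work; the actual buffer reclaim happens in
 * process context in the dequeue functions below.
 */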
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}
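
/*
 * One slab cache per device holds all in-flight command buffers; each
 * object is a vbuffer followed by MAX_INLINE_CMD_SIZE bytes of command
 * space and MAX_INLINE_RESP_SIZE bytes of response space (VBUFFER_SIZE).
 */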
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}
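
/*
 * Allocate a vbuffer.  The command is always stored inline right after the
 * struct (size must fit MAX_INLINE_CMD_SIZE); small responses are stored
 * inline as well, larger ones use the caller-provided resp_buf.
 */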
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}
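
/*
 * Command allocation helpers: virtio_gpu_alloc_cmd() expects just a
 * virtio_gpu_ctrl_hdr response, virtio_gpu_alloc_cmd_cb() additionally
 * runs a callback on completion, and virtio_gpu_alloc_cmd_resp() allows
 * a custom response buffer and size.
 */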
static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}
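
/*
 * Control-queue dequeue work.  Pulls all completed buffers off the ring
 * under the queue lock, then (outside the lock) logs error responses,
 * tracks the highest completed fence id and runs per-command response
 * callbacks before freeing the buffers.
 */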
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}
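
/*
 * Add a prepared scatterlist set to the control queue.  If the ring is
 * full, notify the host and sleep until enough descriptors have been
 * reclaimed.  The fence is emitted under the queue lock so fence ids
 * match the order of commands in the ring.
 */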
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -1;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}
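
/*
 * Build the scatterlists for one command: the command itself
 * (device-readable), optional extra payload in data_buf (device-readable,
 * via an sg_table if it is vmalloc memory), and the response
 * (device-writable).
 */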
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -1;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}
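
/*
 * Kick the host once for all commands queued since the last notification.
 * Queuing a command only bumps pending_commands, so callers can batch
 * several commands and then issue a single (comparatively expensive)
 * notification.
 */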
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}
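
/*
 * Resource teardown: the object must stay around until the host has
 * acknowledged the UNREF, so the actual cleanup happens in the response
 * callback (or immediately if queuing the command failed).
 */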
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
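
/*
 * use_dma_api is true when the device lacks the legacy "bypass the DMA
 * API" quirk; in that case shmem-backed pages must be synced for the
 * device before the host reads them.
 */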
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
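
/*
 * The mem-entry array is handed over to the vbuffer (vbuf->data_buf) and
 * kvfree'd by free_vbuf() once the command has been processed.
 */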
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}
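
/*
 * Fetch (or reuse) a cached capset.  A new cache entry is allocated
 * optimistically and only inserted under display_info_lock if no other
 * task raced in with the same id/version; the response callback fills
 * caps_cache and flips is_valid.
 */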
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);
	}

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}
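
/*
 * UUID export: uuid_state moves from STATE_INITIALIZING to STATE_OK (uuid
 * imported from the response) or STATE_ERR, and waiters on resp_wq are
 * woken either way.
 */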
static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}