1 /*
2  * Copyright (C) 2015 Red Hat, Inc.
3  * All Rights Reserved.
4  *
5  * Authors:
6  *    Dave Airlie <airlied@redhat.com>
7  *    Gerd Hoffmann <kraxel@redhat.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26  * OTHER DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <linux/dma-mapping.h>
30 #include <linux/virtio.h>
31 #include <linux/virtio_config.h>
32 #include <linux/virtio_ring.h>
33 
34 #include "virtgpu_drv.h"
35 #include "virtgpu_trace.h"
36 
37 #define MAX_INLINE_CMD_SIZE   96
38 #define MAX_INLINE_RESP_SIZE  24
39 #define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
40 			       + MAX_INLINE_CMD_SIZE		 \
41 			       + MAX_INLINE_RESP_SIZE)
42 
43 static void convert_to_hw_box(struct virtio_gpu_box *dst,
44 			      const struct drm_virtgpu_3d_box *src)
45 {
46 	dst->x = cpu_to_le32(src->x);
47 	dst->y = cpu_to_le32(src->y);
48 	dst->z = cpu_to_le32(src->z);
49 	dst->w = cpu_to_le32(src->w);
50 	dst->h = cpu_to_le32(src->h);
51 	dst->d = cpu_to_le32(src->d);
52 }
53 
54 void virtio_gpu_ctrl_ack(struct virtqueue *vq)
55 {
56 	struct drm_device *dev = vq->vdev->priv;
57 	struct virtio_gpu_device *vgdev = dev->dev_private;
58 
59 	schedule_work(&vgdev->ctrlq.dequeue_work);
60 }
61 
62 void virtio_gpu_cursor_ack(struct virtqueue *vq)
63 {
64 	struct drm_device *dev = vq->vdev->priv;
65 	struct virtio_gpu_device *vgdev = dev->dev_private;
66 
67 	schedule_work(&vgdev->cursorq.dequeue_work);
68 }
69 
70 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
71 {
72 	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
73 					 VBUFFER_SIZE,
74 					 __alignof__(struct virtio_gpu_vbuffer),
75 					 0, NULL);
76 	if (!vgdev->vbufs)
77 		return -ENOMEM;
78 	return 0;
79 }
80 
81 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
82 {
83 	kmem_cache_destroy(vgdev->vbufs);
84 	vgdev->vbufs = NULL;
85 }
86 
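/* The command and a small response live inline in the vbuffer slab object;
 * responses larger than MAX_INLINE_RESP_SIZE use the caller-supplied buffer.
 */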
87 static struct virtio_gpu_vbuffer*
88 virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
89 		    int size, int resp_size, void *resp_buf,
90 		    virtio_gpu_resp_cb resp_cb)
91 {
92 	struct virtio_gpu_vbuffer *vbuf;
93 
94 	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
95 
96 	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
97 	       size < sizeof(struct virtio_gpu_ctrl_hdr));
98 	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
99 	vbuf->size = size;
100 
101 	vbuf->resp_cb = resp_cb;
102 	vbuf->resp_size = resp_size;
103 	if (resp_size <= MAX_INLINE_RESP_SIZE)
104 		vbuf->resp_buf = (void *)vbuf->buf + size;
105 	else
106 		vbuf->resp_buf = resp_buf;
107 	BUG_ON(!vbuf->resp_buf);
108 	return vbuf;
109 }
110 
111 static struct virtio_gpu_ctrl_hdr *
112 virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
113 {
114 	/* this assumes a vbuf contains a command that starts with a
115 	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
116 	 * virtqueues.
117 	 */
118 	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
119 }
120 
121 static struct virtio_gpu_update_cursor*
122 virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
123 			struct virtio_gpu_vbuffer **vbuffer_p)
124 {
125 	struct virtio_gpu_vbuffer *vbuf;
126 
127 	vbuf = virtio_gpu_get_vbuf
128 		(vgdev, sizeof(struct virtio_gpu_update_cursor),
129 		 0, NULL, NULL);
130 	if (IS_ERR(vbuf)) {
131 		*vbuffer_p = NULL;
132 		return ERR_CAST(vbuf);
133 	}
134 	*vbuffer_p = vbuf;
135 	return (struct virtio_gpu_update_cursor *)vbuf->buf;
136 }
137 
138 static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
139 				       virtio_gpu_resp_cb cb,
140 				       struct virtio_gpu_vbuffer **vbuffer_p,
141 				       int cmd_size, int resp_size,
142 				       void *resp_buf)
143 {
144 	struct virtio_gpu_vbuffer *vbuf;
145 
146 	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
147 				   resp_size, resp_buf, cb);
148 	*vbuffer_p = vbuf;
149 	return (struct virtio_gpu_command *)vbuf->buf;
150 }
151 
152 static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
153 				  struct virtio_gpu_vbuffer **vbuffer_p,
154 				  int size)
155 {
156 	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
157 					 sizeof(struct virtio_gpu_ctrl_hdr),
158 					 NULL);
159 }
160 
161 static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
162 				     struct virtio_gpu_vbuffer **vbuffer_p,
163 				     int size,
164 				     virtio_gpu_resp_cb cb)
165 {
166 	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
167 					 sizeof(struct virtio_gpu_ctrl_hdr),
168 					 NULL);
169 }
170 
171 static void free_vbuf(struct virtio_gpu_device *vgdev,
172 		      struct virtio_gpu_vbuffer *vbuf)
173 {
174 	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
175 		kfree(vbuf->resp_buf);
176 	kvfree(vbuf->data_buf);
177 	kmem_cache_free(vgdev->vbufs, vbuf);
178 }
179 
180 static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
181 {
182 	struct virtio_gpu_vbuffer *vbuf;
183 	unsigned int len;
184 	int freed = 0;
185 
186 	while ((vbuf = virtqueue_get_buf(vq, &len))) {
187 		list_add_tail(&vbuf->list, reclaim_list);
188 		freed++;
189 	}
190 	if (freed == 0)
191 		DRM_DEBUG("Huh? zero vbufs reclaimed");
192 }
193 
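/* Worker scheduled from virtio_gpu_ctrl_ack(): drains completed buffers from
 * the ctrl virtqueue, logs error responses, tracks the highest fence id seen,
 * runs per-command response callbacks, and frees the vbuffers.
 */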
194 void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
195 {
196 	struct virtio_gpu_device *vgdev =
197 		container_of(work, struct virtio_gpu_device,
198 			     ctrlq.dequeue_work);
199 	struct list_head reclaim_list;
200 	struct virtio_gpu_vbuffer *entry, *tmp;
201 	struct virtio_gpu_ctrl_hdr *resp;
202 	u64 fence_id = 0;
203 
204 	INIT_LIST_HEAD(&reclaim_list);
205 	spin_lock(&vgdev->ctrlq.qlock);
206 	do {
207 		virtqueue_disable_cb(vgdev->ctrlq.vq);
208 		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
209 
210 	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
211 	spin_unlock(&vgdev->ctrlq.qlock);
212 
213 	list_for_each_entry(entry, &reclaim_list, list) {
214 		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
215 
216 		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
217 
218 		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
219 			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
220 				struct virtio_gpu_ctrl_hdr *cmd;
221 				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
222 				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
223 						      le32_to_cpu(resp->type),
224 						      le32_to_cpu(cmd->type));
225 			} else
226 				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
227 		}
228 		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
229 			u64 f = le64_to_cpu(resp->fence_id);
230 
231 			if (fence_id > f) {
232 				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
233 					  __func__, fence_id, f);
234 			} else {
235 				fence_id = f;
236 			}
237 		}
238 		if (entry->resp_cb)
239 			entry->resp_cb(vgdev, entry);
240 	}
241 	wake_up(&vgdev->ctrlq.ack_queue);
242 
243 	if (fence_id)
244 		virtio_gpu_fence_event_process(vgdev, fence_id);
245 
246 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
247 		if (entry->objs)
248 			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
249 		list_del(&entry->list);
250 		free_vbuf(vgdev, entry);
251 	}
252 }
253 
254 void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
255 {
256 	struct virtio_gpu_device *vgdev =
257 		container_of(work, struct virtio_gpu_device,
258 			     cursorq.dequeue_work);
259 	struct list_head reclaim_list;
260 	struct virtio_gpu_vbuffer *entry, *tmp;
261 
262 	INIT_LIST_HEAD(&reclaim_list);
263 	spin_lock(&vgdev->cursorq.qlock);
264 	do {
265 		virtqueue_disable_cb(vgdev->cursorq.vq);
266 		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
267 	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
268 	spin_unlock(&vgdev->cursorq.qlock);
269 
270 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
271 		list_del(&entry->list);
272 		free_vbuf(vgdev, entry);
273 	}
274 	wake_up(&vgdev->cursorq.ack_queue);
275 }
276 
277 /* Create sg_table from a vmalloc'd buffer. */
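/* Each page of the vmalloc area gets its own scatterlist entry, since
 * vmalloc memory is not guaranteed to be physically contiguous.
 */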
278 static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
279 {
280 	int ret, s, i;
281 	struct sg_table *sgt;
282 	struct scatterlist *sg;
283 	struct page *pg;
284 
285 	if (WARN_ON(!PAGE_ALIGNED(data)))
286 		return NULL;
287 
288 	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
289 	if (!sgt)
290 		return NULL;
291 
292 	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
293 	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
294 	if (ret) {
295 		kfree(sgt);
296 		return NULL;
297 	}
298 
299 	for_each_sgtable_sg(sgt, sg, i) {
300 		pg = vmalloc_to_page(data);
301 		if (!pg) {
302 			sg_free_table(sgt);
303 			kfree(sgt);
304 			return NULL;
305 		}
306 
307 		s = min_t(int, PAGE_SIZE, size);
308 		sg_set_page(sg, pg, s, 0);
309 
310 		size -= s;
311 		data += s;
312 	}
313 
314 	return sgt;
315 }
316 
317 static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
318 				     struct virtio_gpu_vbuffer *vbuf,
319 				     struct virtio_gpu_fence *fence,
320 				     int elemcnt,
321 				     struct scatterlist **sgs,
322 				     int outcnt,
323 				     int incnt)
324 {
325 	struct virtqueue *vq = vgdev->ctrlq.vq;
326 	int ret, idx;
327 
328 	if (!drm_dev_enter(vgdev->ddev, &idx)) {
329 		if (fence && vbuf->objs)
330 			virtio_gpu_array_unlock_resv(vbuf->objs);
331 		free_vbuf(vgdev, vbuf);
332 		return -1;
333 	}
334 
335 	if (vgdev->has_indirect)
336 		elemcnt = 1;
337 
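	/* If the virtqueue is short on free descriptors, kick the device and
	 * sleep on ack_queue until the dequeue worker reclaims completed
	 * buffers, then retry.
	 */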
338 again:
339 	spin_lock(&vgdev->ctrlq.qlock);
340 
341 	if (vq->num_free < elemcnt) {
342 		spin_unlock(&vgdev->ctrlq.qlock);
343 		virtio_gpu_notify(vgdev);
344 		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
345 		goto again;
346 	}
347 
348 	/* now that the position of the vbuf in the virtqueue is known, we can
349 	 * finally set the fence id
350 	 */
351 	if (fence) {
352 		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
353 				      fence);
354 		if (vbuf->objs) {
355 			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
356 			virtio_gpu_array_unlock_resv(vbuf->objs);
357 		}
358 	}
359 
360 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
361 	WARN_ON(ret);
362 
363 	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
364 
365 	atomic_inc(&vgdev->pending_commands);
366 
367 	spin_unlock(&vgdev->ctrlq.qlock);
368 
369 	drm_dev_exit(idx);
370 	return 0;
371 }
372 
373 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
374 					       struct virtio_gpu_vbuffer *vbuf,
375 					       struct virtio_gpu_fence *fence)
376 {
377 	struct scatterlist *sgs[3], vcmd, vout, vresp;
378 	struct sg_table *sgt = NULL;
379 	int elemcnt = 0, outcnt = 0, incnt = 0, ret;
380 
381 	/* set up vcmd */
382 	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
383 	elemcnt++;
384 	sgs[outcnt] = &vcmd;
385 	outcnt++;
386 
387 	/* set up vout */
388 	if (vbuf->data_size) {
389 		if (is_vmalloc_addr(vbuf->data_buf)) {
390 			int sg_ents;
391 			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
392 					     &sg_ents);
393 			if (!sgt) {
394 				if (fence && vbuf->objs)
395 					virtio_gpu_array_unlock_resv(vbuf->objs);
396 				return -1;
397 			}
398 
399 			elemcnt += sg_ents;
400 			sgs[outcnt] = sgt->sgl;
401 		} else {
402 			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
403 			elemcnt++;
404 			sgs[outcnt] = &vout;
405 		}
406 		outcnt++;
407 	}
408 
409 	/* set up vresp */
410 	if (vbuf->resp_size) {
411 		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
412 		elemcnt++;
413 		sgs[outcnt + incnt] = &vresp;
414 		incnt++;
415 	}
416 
417 	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
418 					incnt);
419 
420 	if (sgt) {
421 		sg_free_table(sgt);
422 		kfree(sgt);
423 	}
424 	return ret;
425 }
426 
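/* Queuing a command only bumps pending_commands; the doorbell is rung here,
 * so a burst of commands can be submitted with a single notification.
 */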
427 void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
428 {
429 	bool notify;
430 
431 	if (!atomic_read(&vgdev->pending_commands))
432 		return;
433 
434 	spin_lock(&vgdev->ctrlq.qlock);
435 	atomic_set(&vgdev->pending_commands, 0);
436 	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
437 	spin_unlock(&vgdev->ctrlq.qlock);
438 
439 	if (notify)
440 		virtqueue_notify(vgdev->ctrlq.vq);
441 }
442 
443 static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
444 					struct virtio_gpu_vbuffer *vbuf)
445 {
446 	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
447 }
448 
449 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
450 				    struct virtio_gpu_vbuffer *vbuf)
451 {
452 	struct virtqueue *vq = vgdev->cursorq.vq;
453 	struct scatterlist *sgs[1], ccmd;
454 	int idx, ret, outcnt;
455 	bool notify;
456 
457 	if (!drm_dev_enter(vgdev->ddev, &idx)) {
458 		free_vbuf(vgdev, vbuf);
459 		return;
460 	}
461 
462 	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
463 	sgs[0] = &ccmd;
464 	outcnt = 1;
465 
466 	spin_lock(&vgdev->cursorq.qlock);
467 retry:
468 	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
469 	if (ret == -ENOSPC) {
470 		spin_unlock(&vgdev->cursorq.qlock);
471 		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
472 		spin_lock(&vgdev->cursorq.qlock);
473 		goto retry;
474 	} else {
475 		trace_virtio_gpu_cmd_queue(vq,
476 			virtio_gpu_vbuf_ctrl_hdr(vbuf));
477 
478 		notify = virtqueue_kick_prepare(vq);
479 	}
480 
481 	spin_unlock(&vgdev->cursorq.qlock);
482 
483 	if (notify)
484 		virtqueue_notify(vq);
485 
486 	drm_dev_exit(idx);
487 }
488 
489 /* just create gem objects for userspace and long lived objects,
490  * just use dma_alloced pages for the queue objects?
491  */
492 
493 /* create a basic resource */
494 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
495 				    struct virtio_gpu_object *bo,
496 				    struct virtio_gpu_object_params *params,
497 				    struct virtio_gpu_object_array *objs,
498 				    struct virtio_gpu_fence *fence)
499 {
500 	struct virtio_gpu_resource_create_2d *cmd_p;
501 	struct virtio_gpu_vbuffer *vbuf;
502 
503 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
504 	memset(cmd_p, 0, sizeof(*cmd_p));
505 	vbuf->objs = objs;
506 
507 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
508 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
509 	cmd_p->format = cpu_to_le32(params->format);
510 	cmd_p->width = cpu_to_le32(params->width);
511 	cmd_p->height = cpu_to_le32(params->height);
512 
513 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
514 	bo->created = true;
515 }
516 
517 static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
518 				    struct virtio_gpu_vbuffer *vbuf)
519 {
520 	struct virtio_gpu_object *bo;
521 
522 	bo = vbuf->resp_cb_data;
523 	vbuf->resp_cb_data = NULL;
524 
525 	virtio_gpu_cleanup_object(bo);
526 }
527 
528 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
529 				   struct virtio_gpu_object *bo)
530 {
531 	struct virtio_gpu_resource_unref *cmd_p;
532 	struct virtio_gpu_vbuffer *vbuf;
533 	int ret;
534 
535 	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
536 					virtio_gpu_cmd_unref_cb);
537 	memset(cmd_p, 0, sizeof(*cmd_p));
538 
539 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
540 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
541 
542 	vbuf->resp_cb_data = bo;
543 	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
544 	if (ret < 0)
545 		virtio_gpu_cleanup_object(bo);
546 }
547 
548 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
549 				uint32_t scanout_id, uint32_t resource_id,
550 				uint32_t width, uint32_t height,
551 				uint32_t x, uint32_t y)
552 {
553 	struct virtio_gpu_set_scanout *cmd_p;
554 	struct virtio_gpu_vbuffer *vbuf;
555 
556 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
557 	memset(cmd_p, 0, sizeof(*cmd_p));
558 
559 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
560 	cmd_p->resource_id = cpu_to_le32(resource_id);
561 	cmd_p->scanout_id = cpu_to_le32(scanout_id);
562 	cmd_p->r.width = cpu_to_le32(width);
563 	cmd_p->r.height = cpu_to_le32(height);
564 	cmd_p->r.x = cpu_to_le32(x);
565 	cmd_p->r.y = cpu_to_le32(y);
566 
567 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
568 }
569 
570 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
571 				   uint32_t resource_id,
572 				   uint32_t x, uint32_t y,
573 				   uint32_t width, uint32_t height)
574 {
575 	struct virtio_gpu_resource_flush *cmd_p;
576 	struct virtio_gpu_vbuffer *vbuf;
577 
578 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
579 	memset(cmd_p, 0, sizeof(*cmd_p));
580 
581 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
582 	cmd_p->resource_id = cpu_to_le32(resource_id);
583 	cmd_p->r.width = cpu_to_le32(width);
584 	cmd_p->r.height = cpu_to_le32(height);
585 	cmd_p->r.x = cpu_to_le32(x);
586 	cmd_p->r.y = cpu_to_le32(y);
587 
588 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
589 }
590 
591 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
592 					uint64_t offset,
593 					uint32_t width, uint32_t height,
594 					uint32_t x, uint32_t y,
595 					struct virtio_gpu_object_array *objs,
596 					struct virtio_gpu_fence *fence)
597 {
598 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
599 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
600 	struct virtio_gpu_vbuffer *vbuf;
601 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
602 	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
603 
604 	if (use_dma_api)
605 		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
606 					    shmem->pages, DMA_TO_DEVICE);
607 
608 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
609 	memset(cmd_p, 0, sizeof(*cmd_p));
610 	vbuf->objs = objs;
611 
612 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
613 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
614 	cmd_p->offset = cpu_to_le64(offset);
615 	cmd_p->r.width = cpu_to_le32(width);
616 	cmd_p->r.height = cpu_to_le32(height);
617 	cmd_p->r.x = cpu_to_le32(x);
618 	cmd_p->r.y = cpu_to_le32(y);
619 
620 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
621 }
622 
623 static void
624 virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
625 				       uint32_t resource_id,
626 				       struct virtio_gpu_mem_entry *ents,
627 				       uint32_t nents,
628 				       struct virtio_gpu_fence *fence)
629 {
630 	struct virtio_gpu_resource_attach_backing *cmd_p;
631 	struct virtio_gpu_vbuffer *vbuf;
632 
633 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
634 	memset(cmd_p, 0, sizeof(*cmd_p));
635 
636 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
637 	cmd_p->resource_id = cpu_to_le32(resource_id);
638 	cmd_p->nr_entries = cpu_to_le32(nents);
639 
640 	vbuf->data_buf = ents;
641 	vbuf->data_size = sizeof(*ents) * nents;
642 
643 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
644 }
645 
646 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
647 					       struct virtio_gpu_vbuffer *vbuf)
648 {
649 	struct virtio_gpu_resp_display_info *resp =
650 		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
651 	int i;
652 
653 	spin_lock(&vgdev->display_info_lock);
654 	for (i = 0; i < vgdev->num_scanouts; i++) {
655 		vgdev->outputs[i].info = resp->pmodes[i];
656 		if (resp->pmodes[i].enabled) {
657 			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
658 				  le32_to_cpu(resp->pmodes[i].r.width),
659 				  le32_to_cpu(resp->pmodes[i].r.height),
660 				  le32_to_cpu(resp->pmodes[i].r.x),
661 				  le32_to_cpu(resp->pmodes[i].r.y));
662 		} else {
663 			DRM_DEBUG("output %d: disabled", i);
664 		}
665 	}
666 
667 	vgdev->display_info_pending = false;
668 	spin_unlock(&vgdev->display_info_lock);
669 	wake_up(&vgdev->resp_wq);
670 
671 	if (!drm_helper_hpd_irq_event(vgdev->ddev))
672 		drm_kms_helper_hotplug_event(vgdev->ddev);
673 }
674 
675 static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
676 					      struct virtio_gpu_vbuffer *vbuf)
677 {
678 	struct virtio_gpu_get_capset_info *cmd =
679 		(struct virtio_gpu_get_capset_info *)vbuf->buf;
680 	struct virtio_gpu_resp_capset_info *resp =
681 		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
682 	int i = le32_to_cpu(cmd->capset_index);
683 
684 	spin_lock(&vgdev->display_info_lock);
685 	if (vgdev->capsets) {
686 		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
687 		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
688 		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
689 	} else {
690 		DRM_ERROR("invalid capset memory.");
691 	}
692 	spin_unlock(&vgdev->display_info_lock);
693 	wake_up(&vgdev->resp_wq);
694 }
695 
696 static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
697 				     struct virtio_gpu_vbuffer *vbuf)
698 {
699 	struct virtio_gpu_get_capset *cmd =
700 		(struct virtio_gpu_get_capset *)vbuf->buf;
701 	struct virtio_gpu_resp_capset *resp =
702 		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
703 	struct virtio_gpu_drv_cap_cache *cache_ent;
704 
705 	spin_lock(&vgdev->display_info_lock);
706 	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
707 		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
708 		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
709 			memcpy(cache_ent->caps_cache, resp->capset_data,
710 			       cache_ent->size);
711 			/* Copy must occur before is_valid is signalled. */
712 			smp_wmb();
713 			atomic_set(&cache_ent->is_valid, 1);
714 			break;
715 		}
716 	}
717 	spin_unlock(&vgdev->display_info_lock);
718 	wake_up_all(&vgdev->resp_wq);
719 }
720 
721 static int virtio_get_edid_block(void *data, u8 *buf,
722 				 unsigned int block, size_t len)
723 {
724 	struct virtio_gpu_resp_edid *resp = data;
725 	size_t start = block * EDID_LENGTH;
726 
727 	if (start + len > le32_to_cpu(resp->size))
728 		return -1;
729 	memcpy(buf, resp->edid + start, len);
730 	return 0;
731 }
732 
733 static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
734 				       struct virtio_gpu_vbuffer *vbuf)
735 {
736 	struct virtio_gpu_cmd_get_edid *cmd =
737 		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
738 	struct virtio_gpu_resp_edid *resp =
739 		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
740 	uint32_t scanout = le32_to_cpu(cmd->scanout);
741 	struct virtio_gpu_output *output;
742 	struct edid *new_edid, *old_edid;
743 
744 	if (scanout >= vgdev->num_scanouts)
745 		return;
746 	output = vgdev->outputs + scanout;
747 
748 	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
749 	drm_connector_update_edid_property(&output->conn, new_edid);
750 
751 	spin_lock(&vgdev->display_info_lock);
752 	old_edid = output->edid;
753 	output->edid = new_edid;
754 	spin_unlock(&vgdev->display_info_lock);
755 
756 	kfree(old_edid);
757 	wake_up(&vgdev->resp_wq);
758 }
759 
760 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
761 {
762 	struct virtio_gpu_ctrl_hdr *cmd_p;
763 	struct virtio_gpu_vbuffer *vbuf;
764 	void *resp_buf;
765 
766 	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
767 			   GFP_KERNEL);
768 	if (!resp_buf)
769 		return -ENOMEM;
770 
771 	cmd_p = virtio_gpu_alloc_cmd_resp
772 		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
773 		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
774 		 resp_buf);
775 	memset(cmd_p, 0, sizeof(*cmd_p));
776 
777 	vgdev->display_info_pending = true;
778 	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
779 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
780 	return 0;
781 }
782 
783 int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
784 {
785 	struct virtio_gpu_get_capset_info *cmd_p;
786 	struct virtio_gpu_vbuffer *vbuf;
787 	void *resp_buf;
788 
789 	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
790 			   GFP_KERNEL);
791 	if (!resp_buf)
792 		return -ENOMEM;
793 
794 	cmd_p = virtio_gpu_alloc_cmd_resp
795 		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
796 		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
797 		 resp_buf);
798 	memset(cmd_p, 0, sizeof(*cmd_p));
799 
800 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
801 	cmd_p->capset_index = cpu_to_le32(idx);
802 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
803 	return 0;
804 }
805 
806 int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
807 			      int idx, int version,
808 			      struct virtio_gpu_drv_cap_cache **cache_p)
809 {
810 	struct virtio_gpu_get_capset *cmd_p;
811 	struct virtio_gpu_vbuffer *vbuf;
812 	int max_size;
813 	struct virtio_gpu_drv_cap_cache *cache_ent;
814 	struct virtio_gpu_drv_cap_cache *search_ent;
815 	void *resp_buf;
816 
817 	*cache_p = NULL;
818 
819 	if (idx >= vgdev->num_capsets)
820 		return -EINVAL;
821 
822 	if (version > vgdev->capsets[idx].max_version)
823 		return -EINVAL;
824 
825 	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
826 	if (!cache_ent)
827 		return -ENOMEM;
828 
829 	max_size = vgdev->capsets[idx].max_size;
830 	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
831 	if (!cache_ent->caps_cache) {
832 		kfree(cache_ent);
833 		return -ENOMEM;
834 	}
835 
836 	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
837 			   GFP_KERNEL);
838 	if (!resp_buf) {
839 		kfree(cache_ent->caps_cache);
840 		kfree(cache_ent);
841 		return -ENOMEM;
842 	}
843 
844 	cache_ent->version = version;
845 	cache_ent->id = vgdev->capsets[idx].id;
846 	atomic_set(&cache_ent->is_valid, 0);
847 	cache_ent->size = max_size;
848 	spin_lock(&vgdev->display_info_lock);
849 	/* Search while under lock in case it was added by another task. */
850 	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
851 		if (search_ent->id == vgdev->capsets[idx].id &&
852 		    search_ent->version == version) {
853 			*cache_p = search_ent;
854 			break;
855 		}
856 	}
857 	if (!*cache_p)
858 		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
859 	spin_unlock(&vgdev->display_info_lock);
860 
861 	if (*cache_p) {
862 		/* Entry was found, so free everything that was just created. */
863 		kfree(resp_buf);
864 		kfree(cache_ent->caps_cache);
865 		kfree(cache_ent);
866 		return 0;
867 	}
868 
869 	cmd_p = virtio_gpu_alloc_cmd_resp
870 		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
871 		 sizeof(struct virtio_gpu_resp_capset) + max_size,
872 		 resp_buf);
873 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
874 	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
875 	cmd_p->capset_version = cpu_to_le32(version);
876 	*cache_p = cache_ent;
877 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
878 
879 	return 0;
880 }
881 
882 int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
883 {
884 	struct virtio_gpu_cmd_get_edid *cmd_p;
885 	struct virtio_gpu_vbuffer *vbuf;
886 	void *resp_buf;
887 	int scanout;
888 
889 	if (WARN_ON(!vgdev->has_edid))
890 		return -EINVAL;
891 
892 	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
893 		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
894 				   GFP_KERNEL);
895 		if (!resp_buf)
896 			return -ENOMEM;
897 
898 		cmd_p = virtio_gpu_alloc_cmd_resp
899 			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
900 			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
901 			 resp_buf);
902 		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
903 		cmd_p->scanout = cpu_to_le32(scanout);
904 		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
905 	}
906 
907 	return 0;
908 }
909 
910 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
911 				   uint32_t nlen, const char *name)
912 {
913 	struct virtio_gpu_ctx_create *cmd_p;
914 	struct virtio_gpu_vbuffer *vbuf;
915 
916 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
917 	memset(cmd_p, 0, sizeof(*cmd_p));
918 
919 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
920 	cmd_p->hdr.ctx_id = cpu_to_le32(id);
921 	cmd_p->nlen = cpu_to_le32(nlen);
922 	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
923 	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
924 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
925 }
926 
927 void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
928 				    uint32_t id)
929 {
930 	struct virtio_gpu_ctx_destroy *cmd_p;
931 	struct virtio_gpu_vbuffer *vbuf;
932 
933 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
934 	memset(cmd_p, 0, sizeof(*cmd_p));
935 
936 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
937 	cmd_p->hdr.ctx_id = cpu_to_le32(id);
938 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
939 }
940 
941 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
942 					    uint32_t ctx_id,
943 					    struct virtio_gpu_object_array *objs)
944 {
945 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
946 	struct virtio_gpu_ctx_resource *cmd_p;
947 	struct virtio_gpu_vbuffer *vbuf;
948 
949 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
950 	memset(cmd_p, 0, sizeof(*cmd_p));
951 	vbuf->objs = objs;
952 
953 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
954 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
955 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
956 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
957 }
958 
959 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
960 					    uint32_t ctx_id,
961 					    struct virtio_gpu_object_array *objs)
962 {
963 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
964 	struct virtio_gpu_ctx_resource *cmd_p;
965 	struct virtio_gpu_vbuffer *vbuf;
966 
967 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
968 	memset(cmd_p, 0, sizeof(*cmd_p));
969 	vbuf->objs = objs;
970 
971 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
972 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
973 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
974 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
975 }
976 
977 void
978 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
979 				  struct virtio_gpu_object *bo,
980 				  struct virtio_gpu_object_params *params,
981 				  struct virtio_gpu_object_array *objs,
982 				  struct virtio_gpu_fence *fence)
983 {
984 	struct virtio_gpu_resource_create_3d *cmd_p;
985 	struct virtio_gpu_vbuffer *vbuf;
986 
987 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
988 	memset(cmd_p, 0, sizeof(*cmd_p));
989 	vbuf->objs = objs;
990 
991 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
992 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
993 	cmd_p->format = cpu_to_le32(params->format);
994 	cmd_p->width = cpu_to_le32(params->width);
995 	cmd_p->height = cpu_to_le32(params->height);
996 
997 	cmd_p->target = cpu_to_le32(params->target);
998 	cmd_p->bind = cpu_to_le32(params->bind);
999 	cmd_p->depth = cpu_to_le32(params->depth);
1000 	cmd_p->array_size = cpu_to_le32(params->array_size);
1001 	cmd_p->last_level = cpu_to_le32(params->last_level);
1002 	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
1003 	cmd_p->flags = cpu_to_le32(params->flags);
1004 
1005 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1006 
1007 	bo->created = true;
1008 }
1009 
1010 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
1011 					uint32_t ctx_id,
1012 					uint64_t offset, uint32_t level,
1013 					struct drm_virtgpu_3d_box *box,
1014 					struct virtio_gpu_object_array *objs,
1015 					struct virtio_gpu_fence *fence)
1016 {
1017 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1018 	struct virtio_gpu_transfer_host_3d *cmd_p;
1019 	struct virtio_gpu_vbuffer *vbuf;
1020 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
1021 	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
1022 
1023 	if (use_dma_api)
1024 		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
1025 					    shmem->pages, DMA_TO_DEVICE);
1026 
1027 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1028 	memset(cmd_p, 0, sizeof(*cmd_p));
1029 
1030 	vbuf->objs = objs;
1031 
1032 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
1033 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1034 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1035 	convert_to_hw_box(&cmd_p->box, box);
1036 	cmd_p->offset = cpu_to_le64(offset);
1037 	cmd_p->level = cpu_to_le32(level);
1038 
1039 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1040 }
1041 
1042 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
1043 					  uint32_t ctx_id,
1044 					  uint64_t offset, uint32_t level,
1045 					  struct drm_virtgpu_3d_box *box,
1046 					  struct virtio_gpu_object_array *objs,
1047 					  struct virtio_gpu_fence *fence)
1048 {
1049 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1050 	struct virtio_gpu_transfer_host_3d *cmd_p;
1051 	struct virtio_gpu_vbuffer *vbuf;
1052 
1053 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1054 	memset(cmd_p, 0, sizeof(*cmd_p));
1055 
1056 	vbuf->objs = objs;
1057 
1058 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
1059 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1060 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1061 	convert_to_hw_box(&cmd_p->box, box);
1062 	cmd_p->offset = cpu_to_le64(offset);
1063 	cmd_p->level = cpu_to_le32(level);
1064 
1065 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1066 }
1067 
1068 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
1069 			   void *data, uint32_t data_size,
1070 			   uint32_t ctx_id,
1071 			   struct virtio_gpu_object_array *objs,
1072 			   struct virtio_gpu_fence *fence)
1073 {
1074 	struct virtio_gpu_cmd_submit *cmd_p;
1075 	struct virtio_gpu_vbuffer *vbuf;
1076 
1077 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
1078 	memset(cmd_p, 0, sizeof(*cmd_p));
1079 
1080 	vbuf->data_buf = data;
1081 	vbuf->data_size = data_size;
1082 	vbuf->objs = objs;
1083 
1084 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
1085 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1086 	cmd_p->size = cpu_to_le32(data_size);
1087 
1088 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1089 }
1090 
1091 void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
1092 			      struct virtio_gpu_object *obj,
1093 			      struct virtio_gpu_mem_entry *ents,
1094 			      unsigned int nents)
1095 {
1096 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1097 					       ents, nents, NULL);
1098 }
1099 
1100 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1101 			    struct virtio_gpu_output *output)
1102 {
1103 	struct virtio_gpu_vbuffer *vbuf;
1104 	struct virtio_gpu_update_cursor *cur_p;
1105 
1106 	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
1107 	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1108 	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1109 	virtio_gpu_queue_cursor(vgdev, vbuf);
1110 }
1111 
1112 static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
1113 					    struct virtio_gpu_vbuffer *vbuf)
1114 {
1115 	struct virtio_gpu_object *obj =
1116 		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
1117 	struct virtio_gpu_resp_resource_uuid *resp =
1118 		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
1119 	uint32_t resp_type = le32_to_cpu(resp->hdr.type);
1120 
1121 	spin_lock(&vgdev->resource_export_lock);
1122 	WARN_ON(obj->uuid_state != UUID_INITIALIZING);
1123 
1124 	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
1125 	    obj->uuid_state == UUID_INITIALIZING) {
1126 		memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
1127 		obj->uuid_state = UUID_INITIALIZED;
1128 	} else {
1129 		obj->uuid_state = UUID_INITIALIZATION_FAILED;
1130 	}
1131 	spin_unlock(&vgdev->resource_export_lock);
1132 
1133 	wake_up_all(&vgdev->resp_wq);
1134 }
1135 
1136 int
1137 virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
1138 				    struct virtio_gpu_object_array *objs)
1139 {
1140 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
1141 	struct virtio_gpu_resource_assign_uuid *cmd_p;
1142 	struct virtio_gpu_vbuffer *vbuf;
1143 	struct virtio_gpu_resp_resource_uuid *resp_buf;
1144 
1145 	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
1146 	if (!resp_buf) {
1147 		spin_lock(&vgdev->resource_export_lock);
1148 		bo->uuid_state = UUID_INITIALIZATION_FAILED;
1149 		spin_unlock(&vgdev->resource_export_lock);
1150 		virtio_gpu_array_put_free(objs);
1151 		return -ENOMEM;
1152 	}
1153 
1154 	cmd_p = virtio_gpu_alloc_cmd_resp
1155 		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
1156 		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
1157 	memset(cmd_p, 0, sizeof(*cmd_p));
1158 
1159 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
1160 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
1161 
1162 	vbuf->objs = objs;
1163 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
1164 	return 0;
1165 }
1166