/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

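/* Convert a userspace drm_virtgpu_3d_box into the little-endian box layout
 * expected by the host.
 */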
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

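/* VIRTGPU_MAP: return the fake mmap offset of a GEM object so that
 * userspace can mmap() it through the DRM file.
 */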
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

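/* Reserve every buffer on the validate list and run ttm_bo_validate() on
 * each of them; on failure the reservations are backed off again.
 */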
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

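/* Drop the GEM references taken for the objects on a validate list. */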
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

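/* Copy a command stream from userspace, validate the referenced buffer
 * objects, submit the commands to the host and fence the buffers against
 * the submission.
 */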
static int virtio_gpu_execbuffer(struct drm_device *dev,
				 struct drm_virtgpu_execbuffer *exbuf,
				 struct drm_file *drm_file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
					   sizeof(uint32_t));
		buflist = drm_calloc_large(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer));
		if (!bo_handles || !buflist) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return ret;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(dev,
						     drm_file, bo_handles[i]);
			if (!gobj) {
				drm_free_large(bo_handles);
				drm_free_large(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		drm_free_large(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = kmalloc(exbuf->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unresv;
	}
	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
			   exbuf->size)) {
		kfree(buf);
		ret = -EFAULT;
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	return ret;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	struct drm_virtgpu_execbuffer *execbuffer = data;

	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
}

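/* VIRTGPU_GETPARAM: report driver parameters (currently whether virgl 3D is
 * available and whether the capset query fix is present) to userspace.
 */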
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int))) {
		return -EFAULT;
	}
	return 0;
}

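/* VIRTGPU_RESOURCE_CREATE: allocate a host resource plus a backing GEM
 * object.  Without virgl 3D only simple 2D resources are accepted; with
 * virgl the full 3D parameters are forwarded to the host.
 */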
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (!vgdev->has_virgl_3d) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
//fail_obj:
//	drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

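/* VIRTGPU_RESOURCE_INFO: return the size and host resource handle of the
 * GEM object identified by the given handle.
 */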
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

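/* VIRTGPU_TRANSFER_FROM_HOST: read a region of a resource back from the host
 * into the guest-side buffer object (virgl 3D only).
 */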
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

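/* VIRTGPU_TRANSFER_TO_HOST: push a region of a buffer object to the host,
 * using the 2D or 3D transfer command depending on virgl support.
 */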
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

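/* VIRTGPU_WAIT: wait (or poll, with VIRTGPU_WAIT_NOWAIT) until a buffer
 * object is idle.
 */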
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

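/* VIRTGPU_GET_CAPS: copy a cached capability set to userspace, fetching it
 * from the host first if it is not cached yet.
 */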
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);

	/* the is_valid check must precede the copy of the cache entry */
	smp_rmb();

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

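/* DRM ioctl table for virtio-gpu; all entries allow render nodes. */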
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};