/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <drm/drmP.h>
#include "virtgpu_drv.h"

static int virtio_gpu_fbdev = 1;

MODULE_PARM_DESC(fbdev, "Disable/Enable framebuffer device & console");
module_param_named(fbdev, virtio_gpu_fbdev, int, 0400);

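/*
 * Worker for config-space change notifications: read the pending event
 * bits, re-query the display info and raise a DRM hotplug event when a
 * display change is flagged, then acknowledge the handled bits by
 * writing them back to events_clear.
 */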
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		virtio_gpu_cmd_get_display_info(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
		      events_clear, &events_clear);
}

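/*
 * Allocate a context id (>= 1) from the idr.  idr_preload() sets aside
 * memory with GFP_KERNEL up front so the allocation itself can run
 * with GFP_NOWAIT while the spinlock is held.
 */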
static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
				  uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->ctx_id_idr_lock);
	handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->ctx_id_idr_lock);
	idr_preload_end();
	*resid = handle;
}

static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->ctx_id_idr_lock);
	idr_remove(&vgdev->ctx_id_idr, id);
	spin_unlock(&vgdev->ctx_id_idr_lock);
}

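/*
 * Pair a guest-allocated context id with a rendering context on the
 * host; the destroy path tears down the host context before releasing
 * the id.
 */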
static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
				      uint32_t nlen, const char *name,
				      uint32_t *ctx_id)
{
	virtio_gpu_ctx_id_get(vgdev, ctx_id);
	virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
}

static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
				       uint32_t ctx_id)
{
	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
	virtio_gpu_ctx_id_put(vgdev, ctx_id);
}

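/*
 * Common virtqueue bookkeeping: qlock serializes access to the queue,
 * ack_queue lets submitters sleep until descriptors are reclaimed, and
 * dequeue_work processes host responses.
 */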
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

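/*
 * Query the host for information about each capability set it
 * advertises, waiting up to five seconds per set for the response.
 * On timeout the whole capset array is dropped.
 */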
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		if (ret == 0) {
			DRM_ERROR("timed out waiting for cap set %d\n", i);
			spin_lock(&vgdev->display_info_lock);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}
	vgdev->num_capsets = num_capsets;
}

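/*
 * Main initialization: require a modern (VIRTIO_F_VERSION_1) device,
 * allocate the device structure, set up the control and cursor
 * virtqueues, initialize TTM and modesetting, then fetch the initial
 * display configuration from the host.
 */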
int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->ctx_id_idr_lock);
	idr_init(&vgdev->ctx_id_idr);
	spin_lock_init(&vgdev->resource_idr_lock);
	idr_init(&vgdev->resource_idr);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

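	/*
	 * The virgl command stream is defined in terms of little-endian
	 * data, so 3d acceleration is only offered to little-endian
	 * guests.
	 */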
#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
	DRM_INFO("virgl 3d acceleration %s\n",
		 vgdev->has_virgl_3d ? "enabled" : "not supported by host");
#else
	DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	ret = virtio_gpu_ttm_init(vgdev);
	if (ret) {
		DRM_ERROR("failed to init ttm %d\n", ret);
		goto err_ttm;
	}

	/* get display info */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret)
		goto err_modeset;

	virtio_device_ready(vgdev->vdev);
	vgdev->vqs_ready = true;

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	virtio_gpu_cmd_get_display_info(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	if (virtio_gpu_fbdev)
		virtio_gpu_fbdev_init(vgdev);

	return 0;

err_modeset:
err_scanouts:
	virtio_gpu_ttm_fini(vgdev);
err_ttm:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	kfree(vgdev);
	return ret;
}

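/* Free all cached capability set responses. */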
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

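/*
 * Teardown mirrors virtio_gpu_driver_load(): stop queue processing and
 * flush pending work before the virtqueues are deleted, then release
 * modesetting, TTM and the remaining allocations.
 */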
void virtio_gpu_driver_unload(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	vgdev->vqs_ready = false;
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	vgdev->vdev->config->del_vqs(vgdev->vdev);

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_ttm_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);
	kfree(vgdev->capsets);
	kfree(vgdev);
}

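/*
 * Per-open setup: when the host supports virgl, create a rendering
 * context named after the opening task so host-side work can be
 * attributed to a process.
 */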
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	uint32_t id;
	char dbgname[TASK_COMM_LEN];

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	get_task_comm(dbgname, current);
	virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);

	vfpriv->ctx_id = id;
	file->driver_priv = vfpriv;
	return 0;
}

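/* Destroy the per-open rendering context, if one was created. */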
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;

	if (!vgdev->has_virgl_3d)
		return;

	vfpriv = file->driver_priv;

	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
	kfree(vfpriv);
	file->driver_priv = NULL;
}