/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "vbox_drv.h"
#include "vbox_err.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"

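/**
 * Drop the framebuffer's reference to its backing GEM object (if any), then
 * unregister the framebuffer with DRM core and free it.
 */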
static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

	if (vbox_fb->obj)
		drm_gem_object_put_unlocked(vbox_fb->obj);

	drm_framebuffer_cleanup(fb);
	kfree(fb);
}

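/**
 * Enable a VBVA command buffer for each CRTC that does not have one active
 * yet.  The buffers themselves were set aside at the end of usable VRAM by
 * vbox_accel_init().
 */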
void vbox_enable_accel(struct vbox_private *vbox)
{
	unsigned int i;
	struct vbva_buffer *vbva;

	if (!vbox->vbva_info || !vbox->vbva_buffers) {
		/* Should never happen... */
		DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
		return;
	}

	for (i = 0; i < vbox->num_crtcs; ++i) {
		if (vbox->vbva_info[i].vbva)
			continue;

		vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
		if (!vbva_enable(&vbox->vbva_info[i],
				 vbox->guest_pool, vbva, i)) {
			/* very old host or driver error. */
			DRM_ERROR("vboxvideo: vbva_enable failed\n");
			return;
		}
	}
}

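/** Disable the VBVA command buffers for all CRTCs. */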
void vbox_disable_accel(struct vbox_private *vbox)
{
	unsigned int i;

	for (i = 0; i < vbox->num_crtcs; ++i)
		vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
}

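/**
 * Tell the host which VBVA capabilities we support.  Video mode hints are
 * only advertised once the initial mode has been queried.
 */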
void vbox_report_caps(struct vbox_private *vbox)
{
	u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
		   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;

	if (vbox->initial_mode_queried)
		caps |= VBVACAPS_VIDEO_MODE_HINTS;

	hgsmi_send_caps_info(vbox->guest_pool, caps);
}

/**
 * Send information about dirty rectangles to VBVA.  If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?).
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	struct vbox_private *vbox = fb->dev->dev_private;
	struct drm_crtc *crtc;
	unsigned int i;

	mutex_lock(&vbox->hw_mutex);
	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
		if (CRTC_FB(crtc) != fb)
			continue;

		vbox_enable_accel(vbox);

		for (i = 0; i < num_rects; ++i) {
			struct vbva_cmd_hdr cmd_hdr;
			unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;

			if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
			    (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
			    (rects[i].x2 < crtc->x) ||
			    (rects[i].y2 < crtc->y))
				continue;

			cmd_hdr.x = (s16)rects[i].x1;
			cmd_hdr.y = (s16)rects[i].y1;
			cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
			cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;

			if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
						      vbox->guest_pool))
				continue;

			vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
				   &cmd_hdr, sizeof(cmd_hdr));
			vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
		}
	}
	mutex_unlock(&vbox->hw_mutex);
}

static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned int flags, unsigned int color,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};

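/**
 * Fill in a vbox_framebuffer from the mode command and GEM object passed in,
 * and register it with DRM core.
 */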
int vbox_framebuffer_init(struct drm_device *dev,
			  struct vbox_framebuffer *vbox_fb,
			  const struct DRM_MODE_FB_CMD *mode_cmd,
			  struct drm_gem_object *obj)
{
	int ret;

	drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
	vbox_fb->obj = obj;
	ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

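/**
 * Create a framebuffer for user-space from a GEM handle, dropping the GEM
 * reference and freeing the framebuffer again on failure.
 */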
static struct drm_framebuffer *vbox_user_framebuffer_create(
		struct drm_device *dev,
		struct drm_file *filp,
		const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct vbox_framebuffer *vbox_fb;
	int ret = -ENOMEM;

	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
	if (!vbox_fb)
		goto err_unref_obj;

	ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
	if (ret)
		goto err_free_vbox_fb;

	return &vbox_fb->base;

err_free_vbox_fb:
	kfree(vbox_fb);
err_unref_obj:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs vbox_mode_funcs = {
	.fb_create = vbox_user_framebuffer_create,
};

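/**
 * Allocate the per-CRTC VBVA information structures and map the VBVA command
 * buffers, one of VBVA_MIN_BUFFER_SIZE bytes per CRTC, taken from the end of
 * usable VRAM.
 */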
static int vbox_accel_init(struct vbox_private *vbox)
{
	unsigned int i;

	vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
				       sizeof(*vbox->vbva_info), GFP_KERNEL);
	if (!vbox->vbva_info)
		return -ENOMEM;

	/* Take a command buffer for each screen from the end of usable VRAM. */
	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

	vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
					     vbox->available_vram_size,
					     vbox->num_crtcs *
					     VBVA_MIN_BUFFER_SIZE);
	if (!vbox->vbva_buffers)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i)
		vbva_setup_buffer_context(&vbox->vbva_info[i],
					  vbox->available_vram_size +
					  i * VBVA_MIN_BUFFER_SIZE,
					  VBVA_MIN_BUFFER_SIZE);

	return 0;
}

static void vbox_accel_fini(struct vbox_private *vbox)
{
	vbox_disable_accel(vbox);
	pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
}

/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
	u32 have_hints, have_cursor;
	int ret;

	ret = hgsmi_query_conf(vbox->guest_pool,
			       VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
			       &have_hints);
	if (ret)
		return false;

	ret = hgsmi_query_conf(vbox->guest_pool,
			       VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
			       &have_cursor);
	if (ret)
		return false;

	return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}

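/**
 * Probe for a DISPI interface version by writing its ID to the index register
 * and checking whether the same value reads back.
 */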
static bool vbox_check_supported(u16 id)
{
	u16 dispi_id;

	vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
	dispi_id = inw(VBE_DISPI_IOPORT_DATA);

	return dispi_id == id;
}

/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
	int ret = -ENOMEM;

	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
	vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);

	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

	/* Map guest-heap at end of vram */
	vbox->guest_heap =
	    pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
			    GUEST_HEAP_SIZE);
	if (!vbox->guest_heap)
		return -ENOMEM;

	/* Create the guest-heap mem-pool, using 2^4 = 16 byte chunks. */
	vbox->guest_pool = gen_pool_create(4, -1);
	if (!vbox->guest_pool)
		goto err_unmap_guest_heap;

	ret = gen_pool_add_virt(vbox->guest_pool,
				(unsigned long)vbox->guest_heap,
				GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_USABLE_SIZE, -1);
	if (ret)
		goto err_destroy_guest_pool;

	ret = hgsmi_test_query_conf(vbox->guest_pool);
	if (ret) {
		DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
		goto err_destroy_guest_pool;
	}

	/* Reduce available VRAM size to reflect the guest heap. */
	vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
	/* Linux drm represents monitors as a 32-bit array. */
	hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
			 &vbox->num_crtcs);
	vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

	if (!have_hgsmi_mode_hints(vbox)) {
		ret = -ENOTSUPP;
		goto err_destroy_guest_pool;
	}

	vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
					     sizeof(struct vbva_modehint),
					     GFP_KERNEL);
	if (!vbox->last_mode_hints) {
		ret = -ENOMEM;
		goto err_destroy_guest_pool;
	}

	ret = vbox_accel_init(vbox);
	if (ret)
		goto err_destroy_guest_pool;

	return 0;

err_destroy_guest_pool:
	gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
	return ret;
}

static void vbox_hw_fini(struct vbox_private *vbox)
{
	vbox_accel_fini(vbox);
	gen_pool_destroy(vbox->guest_pool);
	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
}

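/**
 * Driver load entry point: check that the host supports HGSMI, then bring up
 * the hardware, memory manager, mode setting, interrupt handling and fbdev
 * emulation, unwinding in reverse order on failure.
 */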
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct vbox_private *vbox;
	int ret = 0;

	if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
		return -ENODEV;

	vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
	if (!vbox)
		return -ENOMEM;

	dev->dev_private = vbox;
	vbox->dev = dev;

	mutex_init(&vbox->hw_mutex);

	ret = vbox_hw_init(vbox);
	if (ret)
		return ret;

	ret = vbox_mm_init(vbox);
	if (ret)
		goto err_hw_fini;

	drm_mode_config_init(dev);

	dev->mode_config.funcs = (void *)&vbox_mode_funcs;
	dev->mode_config.min_width = 64;
	dev->mode_config.min_height = 64;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
	dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

	ret = vbox_mode_init(dev);
	if (ret)
		goto err_drm_mode_cleanup;

	ret = vbox_irq_init(vbox);
	if (ret)
		goto err_mode_fini;

	ret = vbox_fbdev_init(dev);
	if (ret)
		goto err_irq_fini;

	return 0;

err_irq_fini:
	vbox_irq_fini(vbox);
err_mode_fini:
	vbox_mode_fini(dev);
err_drm_mode_cleanup:
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
err_hw_fini:
	vbox_hw_fini(vbox);
	return ret;
}

void vbox_driver_unload(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	vbox_fbdev_fini(dev);
	vbox_irq_fini(vbox);
	vbox_mode_fini(dev);
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
	vbox_hw_fini(vbox);
}

/**
 * @note this is described in the DRM framework documentation.  AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
}

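/**
 * Create a GEM object of at least @size bytes, rounded up to whole pages and
 * backed by a vbox buffer object.
 */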
int vbox_gem_create(struct drm_device *dev,
		    u32 size, bool iskernel, struct drm_gem_object **obj)
{
	struct vbox_bo *vboxbo;
	int ret;

	*obj = NULL;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object\n");
		return ret;
	}

	*obj = &vboxbo->gem;

	return 0;
}

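/**
 * Create a dumb buffer for user-space: derive the pitch and size from the
 * requested width, height and bits per pixel, then hand back a handle to the
 * resulting GEM object.
 */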
int vbox_dumb_create(struct drm_file *file,
		     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	int ret;
	struct drm_gem_object *gobj;
	u32 handle;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = vbox_gem_create(dev, args->size, false, &gobj);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file, gobj, &handle);
	drm_gem_object_put_unlocked(gobj);
	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

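/**
 * Drop a reference to a vbox buffer object, clearing the caller's pointer
 * once the underlying TTM object is gone.
 */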
static void vbox_bo_unref(struct vbox_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	if (!tbo)
		*bo = NULL;
}

void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

	vbox_bo_unref(&vbox_bo);
}

static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
}

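/**
 * Look up the GEM object for @handle and return the fake offset user-space
 * should pass to mmap() in order to map the buffer.
 */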
int
vbox_dumb_mmap_offset(struct drm_file *file,
		      struct drm_device *dev,
		      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;
	struct vbox_bo *bo;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	bo = gem_to_vbox_bo(obj);
	*offset = vbox_bo_mmap_offset(bo);

	drm_gem_object_put(obj);
	ret = 0;

out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}