/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_placement.h>

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

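/*
 * Fill one entry of the 16-entry truecolor pseudo palette used by
 * fbcon. Only the depth-24 and depth-32 layouts set up by
 * vmw_fb_check_var() are accepted; the 16-bit color components are
 * packed down to 8 bits each.
 */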
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

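/*
 * Validate a mode requested through the fbdev interface. Only 32 bpp
 * is accepted (depth 24 without alpha, depth 32 with), panning
 * requires SVGA_CAP_DISPLAY_TOPOLOGY, and the requested extents must
 * fit both the allocated framebuffer and the available VRAM.
 */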
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Cannot handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry does not fit in the framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry does not fit in VRAM\n");
		return -EINVAL;
	}

	return 0;
}

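/*
 * Apply the current mode: program the dimensions, pitch, bpp and depth
 * through the SVGA registers and, when display topology is supported,
 * (re)position guest display 0 accordingly.
 */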
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	int ret;

	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;

	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
				 info->fix.line_length,
				 par->bpp, par->depth);
	if (ret)
		return ret;

	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO check if pitch and offset changes */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* Warn loudly here: if the framebuffer offset is nonzero, the
	 * user probably cannot see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}

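/*
 * Panning needs no extra work here: vmw_fb_set_par() applies the
 * validated offsets through the display position registers.
 */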
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

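/* Blanking is not supported; report success to keep fbcon happy. */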
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

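/*
 * Copy the dirty region from the system-memory shadow into the
 * VRAM-backed buffer object and emit an SVGA_CMD_UPDATE so the host
 * redraws it. The dirty extents are consumed (reset to an empty
 * rectangle) under the dirty lock before the copy starts.
 */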
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

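/*
 * Extend the pending dirty rectangle to cover (x1, y1)-(x2, y2).
 * If the rectangle was empty and dirty tracking is active, this also
 * schedules the deferred work so the damage is flushed within
 * VMW_DIRTY_DELAY.
 */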
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If dirty tracking is active, kick the delayed work;
		 * the work item is shared with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

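/*
 * Deferred-io callback: translate the list of touched pages into a
 * full-width span of dirty lines and flush it immediately.
 */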
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay = VMW_DIRTY_DELAY,
	.deferred_io = vmw_deferred_io,
};

/*
 * Draw code
 */

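/*
 * The drawing ops render into the system-memory shadow with the
 * generic cfb_* helpers and then mark the touched rectangle dirty for
 * the deferred flush.
 */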
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

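/*
 * Allocate the buffer object backing the framebuffer. The single
 * no-evict VRAM placement is capped via lpfn so the buffer ends up
 * within the first "size" bytes of VRAM.
 */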
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
	struct ttm_placement ne_placement;
	int ret;

	ne_placement.num_placement = 1;
	ne_placement.placement = &ne_place;
	ne_placement.num_busy_placement = 1;
	ne_placement.busy_placement = &ne_place;

	ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}

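/*
 * Create and register the fbdev device: allocate the system-memory
 * shadow and the VRAM buffer object, describe a 32 bpp truecolor
 * layout in the fixed and variable screen info, and hook up deferred
 * io before registering the framebuffer.
 */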
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX These 2048 limits shouldn't be hardcoded either. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(vmw_priv->initial_width, fb_width);
	initial_height = min(vmw_priv->initial_height, fb_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* depth 24 by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

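/*
 * Tear down the fbdev device: stop deferred io, unregister the
 * framebuffer and release the mapping, the buffer object and the
 * shadow memory.
 */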
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* FIXME: double-check this teardown order */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

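/*
 * Disable fbdev output: stop dirty tracking, wait for any pending
 * flush to finish, drop the kernel mapping and unpin the framebuffer
 * so its VRAM can be reused.
 */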
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

	return 0;
}

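/*
 * Re-enable fbdev output: stop all overlays, pin the framebuffer back
 * at the start of VRAM, remap it, restart dirty tracking and force a
 * full-screen refresh.
 */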
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If something was dirty already, the mark above won't schedule
	 * new work, so kick the deferred work immediately. */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}