// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>

#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "framebuffer.h"
#include "gtt.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

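/**
 *	psbfb_setcolreg		-	set a single colour register
 *	@regno: register index to set
 *	@red: red component
 *	@green: green component
 *	@blue: blue component
 *	@transp: transparency component
 *	@info: fbdev info for the framebuffer
 *
 *	Scale the 16 bit fbdev colour components down to the widths used by
 *	the framebuffer format and, for the first 16 registers, store the
 *	packed value in the pseudo palette used by the console drawing code.
 */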
static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

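/**
 *	psbfb_pan		-	pan the display
 *	@var: screen information with the requested yoffset
 *	@info: fbdev info for the framebuffer
 *
 *	Pan by rolling the framebuffer mapping within the GTT (in units of
 *	4K pages) rather than by moving the display base, as the core fbdev
 *	code would otherwise assume is possible.
 */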
static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_device *dev = fb->dev;
	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);

	/*
	 *	We have to poke our nose in here. The core fb code assumes
	 *	panning is part of the hardware that can be invoked before
	 *	the actual fb is mapped. In our case that isn't quite true.
	 */
	if (gtt->npage) {
		/* GTT roll shifts in 4K pages, we need to shift the right
		   number of pages */
		int pages = info->fix.line_length >> 12;
		psb_gtt_roll(dev, gtt, var->yoffset * pages);
	}
	return 0;
}

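/**
 *	psbfb_vm_fault		-	fault handler for the framebuffer mapping
 *	@vmf: fault details
 *
 *	On the first fault populate the whole VMA, inserting a mapping for
 *	each page of the stolen memory area that backs the framebuffer.
 */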
static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_framebuffer *fb = vma->vm_private_data;
	struct drm_device *dev = fb->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
	int page_num;
	int i;
	unsigned long address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long pfn;
	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
				  gtt->offset;

	page_num = vma_pages(vma);
	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vmf_insert_mixed(vma, address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(ret & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return ret;
}

static void psbfb_vm_open(struct vm_area_struct *vma)
{
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct psbfb_vm_ops = {
	.fault	= psbfb_vm_fault,
	.open	= psbfb_vm_open,
	.close	= psbfb_vm_close
};

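/**
 *	psbfb_mmap		-	map the framebuffer to user space
 *	@info: fbdev info for the framebuffer
 *	@vma: the memory map request
 *
 *	Set up a user space mapping of the stolen memory backed framebuffer.
 *	The pages themselves are inserted lazily by the fault handler.
 */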
static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;

	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psbfb_vm_ops;
	vma->vm_private_data = (void *)fb;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static const struct fb_ops psbfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = psbfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_sync = psbfb_sync,
};

static const struct fb_ops psbfb_roll_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = psbfb_pan,
	.fb_mmap = psbfb_mmap,
};

static const struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
};

/**
 *	psb_framebuffer_init	-	initialize a framebuffer
 *	@dev: our DRM device
 *	@fb: framebuffer to set up
 *	@mode_cmd: mode description
 *	@obj: backing GEM object
 *
 *	Configure and fill in the boilerplate for our frame buffer. Return
 *	0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
					struct drm_framebuffer *fb,
					const struct drm_mode_fb_cmd2 *mode_cmd,
					struct drm_gem_object *obj)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_get_format_info(dev, mode_cmd);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = obj;
	ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}

/**
 *	psb_framebuffer_create	-	create a framebuffer backed by a GEM object
 *	@dev: our DRM device
 *	@mode_cmd: the description of the requested mode
 *	@obj: the backing GEM object
 *
 *	Create a framebuffer object backed by the GEM object, and fill in the
 *	boilerplate required
 *
 *	TODO: review object references
 */

static struct drm_framebuffer *psb_framebuffer_create
			(struct drm_device *dev,
			 const struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}
	return fb;
}

/**
 *	psbfb_alloc		-	allocate frame buffer memory
 *	@dev: the DRM device
 *	@aligned_size: space needed
 *
 *	Allocate the frame buffer. In the usual case we get a GTT range that
 *	is stolen memory backed and life is simple. If there isn't sufficient
 *	stolen memory we fail, as we don't have the virtual mapping space to
 *	really vmap the object and the kernel console code can't handle
 *	non-linear framebuffers.
 *
 *	Re-address this as and if the framebuffer layer grows this ability.
 */
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
	struct gtt_range *backing;
	/* Begin by trying to use stolen memory backing */
	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
	if (backing) {
		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
		return backing;
	}
	return NULL;
}

/**
 *	psbfb_create		-	create a framebuffer
 *	@fb_helper: the fb helper
 *	@sizes: specification of the layout
 *
 *	Create a framebuffer to the specifications provided
 */
static int psbfb_create(struct drm_fb_helper *fb_helper,
				struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	int size;
	int ret;
	struct gtt_range *backing;
	u32 bpp, depth;
	int gtt_roll = 0;
	int pitch_lines = 0;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/* No 24bit packed */
	if (bpp == 24)
		bpp = 32;

	do {
		/*
		 * Acceleration via the GTT requires pitch to be
		 * power of two aligned. Preferably page but less
		 * is ok with some fonts
		 */
		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the fb in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);

		if (pitch_lines)
			pitch_lines *= 2;
		else
			pitch_lines = 1;
		gtt_roll++;
	} while (backing == NULL && pitch_lines <= 16);

	/* The final pitch we accepted if we succeeded */
	pitch_lines /= 2;

	if (backing == NULL) {
		/*
		 *	We couldn't get the space we wanted, fall back to the
		 *	display engine requirement instead.  The HW requires
		 *	the pitch to be 64 byte aligned
		 */

		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
		pitch_lines = 64;

		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the framebuffer in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);
		if (backing == NULL)
			return -ENOMEM;
	}

	memset(dev_priv->vram_addr + backing->offset, 0, size);

	info = drm_fb_helper_alloc_fbi(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto out;
	}

	fb_helper->fb = fb;

	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
		info->fbops = &psbfb_ops;
	else if (gtt_roll) {	/* GTT rolling seems best */
		info->fbops = &psbfb_roll_ops;
		info->flags |= FBINFO_HWACCEL_YPAN;
	} else	/* Software */
		info->fbops = &psbfb_unaccel_ops;

	info->fix.smem_start = dev->mode_config.fb_base;
	info->fix.smem_len = size;
	info->fix.ywrapstep = gtt_roll;
	info->fix.ypanstep = 0;

	/* Accessed stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	if (dev_priv->gtt.stolen_size) {
		info->apertures->ranges[0].base = dev->mode_config.fb_base;
		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);

	return 0;
out:
	psb_gtt_free_range(dev, backing);
	return ret;
}

/**
 *	psb_user_framebuffer_create	-	create framebuffer
 *	@dev: our DRM device
 *	@filp: client file
 *	@cmd: mode request
 *
 *	Create a new framebuffer backed by a userspace GEM object
 */
static struct drm_framebuffer *psb_user_framebuffer_create
			(struct drm_device *dev, struct drm_file *filp,
			 const struct drm_mode_fb_cmd2 *cmd)
{
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;

	/*
	 *	Find the GEM object and thus the gtt range object that is
	 *	to back this space
	 */
	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	/* Let the core code do all the work */
	fb = psb_framebuffer_create(dev, cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_put(obj);

	return fb;
}

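/**
 *	psbfb_probe		-	probe for a framebuffer
 *	@fb_helper: the fb helper
 *	@sizes: specification of the layout
 *
 *	Check that the requested console will fit in the stolen memory we
 *	have available, dropping back to 16bpp if it will not, and then
 *	create the framebuffer.
 */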
static int psbfb_probe(struct drm_fb_helper *fb_helper,
				struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned int fb_size;
	int bytespp;

	bytespp = sizes->surface_bpp / 8;
	if (bytespp == 3)	/* no 24bit packed */
		bytespp = 4;

	/* If the mode will not fit in 32bit then switch to 16bit to get
	   a console on full resolution. The X mode setting server will
	   allocate its own 32bit GEM framebuffer */
	fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
		  sizes->surface_height;
	fb_size = ALIGN(fb_size, PAGE_SIZE);

	if (fb_size > dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}

	return psbfb_create(fb_helper, sizes);
}

static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.fb_probe = psbfb_probe,
};

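/**
 *	psb_fbdev_destroy	-	tear down the fbdev emulation
 *	@dev: our DRM device
 *	@fb_helper: the fb helper to destroy
 *
 *	Unregister the fbdev device, release the fb helper and free the
 *	framebuffer along with its backing GEM object.
 */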
static int psb_fbdev_destroy(struct drm_device *dev,
			     struct drm_fb_helper *fb_helper)
{
	struct drm_framebuffer *fb = fb_helper->fb;

	drm_fb_helper_unregister_fbi(fb_helper);

	drm_fb_helper_fini(fb_helper);
	drm_framebuffer_unregister_private(fb);
	drm_framebuffer_cleanup(fb);

	if (fb->obj[0])
		drm_gem_object_put(fb->obj[0]);
	kfree(fb);

	return 0;
}

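/**
 *	psb_fbdev_init		-	set up the fbdev emulation
 *	@dev: our DRM device
 *
 *	Allocate and initialise an fb helper for the device and create the
 *	initial 32bpp console configuration.
 */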
int psb_fbdev_init(struct drm_device *dev)
{
	struct drm_fb_helper *fb_helper;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper) {
		dev_err(dev->dev, "no memory\n");
		return -ENOMEM;
	}

	dev_priv->fb_helper = fb_helper;

	drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto free;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper, 32);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(fb_helper);
free:
	kfree(fb_helper);
	return ret;
}

static void psb_fbdev_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (!dev_priv->fb_helper)
		return;

	psb_fbdev_destroy(dev, dev_priv->fb_helper);
	kfree(dev_priv->fb_helper);
	dev_priv->fb_helper = NULL;
}

static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

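/**
 *	psb_setup_outputs	-	set up the display outputs
 *	@dev: our DRM device
 *
 *	Create the scaling mode and backlight properties, let the platform
 *	specific code register its outputs, and then fill in the possible
 *	CRTC and clone masks for each encoder.
 */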
static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

		/* valid crtcs */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = 0;
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
		    gma_connector_clones(dev, clone_mask);
	}
}

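/**
 *	psb_modeset_init	-	initialise mode setting
 *	@dev: our DRM device
 *
 *	Set up the mode configuration, read the stolen memory base from the
 *	PCI configuration space, create a CRTC per pipe and set up the
 *	outputs.
 */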
void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* set memory base */
	/* Oaktrail and Poulsbo should use BAR 2 */
	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
					&(dev->mode_config.fb_base));

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);

	dev_priv->modeset = true;
}

void psb_modeset_cleanup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->modeset) {
		drm_kms_helper_poll_fini(dev);
		psb_fbdev_fini(dev);
		drm_mode_config_cleanup(dev);
	}
}