• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2010 Matt Turner.
3  * Copyright 2012 Red Hat
4  *
5  * This file is subject to the terms and conditions of the GNU General
6  * Public License version 2. See the file COPYING in the main
7  * directory of this archive for more details.
8  *
9  * Authors: Matthew Garrett
10  *          Matt Turner
11  *          Dave Airlie
12  */
13 #include <linux/module.h>
14 #include <drm/drmP.h>
15 #include <drm/drm_fb_helper.h>
16 #include <drm/drm_crtc_helper.h>
17 
18 #include <linux/fb.h>
19 
20 #include "mgag200_drv.h"
21 
/*
 * Flush a damaged rectangle of the vmalloc'ed shadow framebuffer
 * (mfbdev->sysram) into the scanout BO.
 *
 * The damage is (x, y) .. (x + width - 1, y + height - 1) in pixels.
 * If the BO cannot be reserved right now (it is being moved, or we are
 * in a context that cannot sleep), the rectangle is merged into the
 * pending-damage box kept in mfbdev and flushed by a later call.
 */
static void mga_dirty_update(struct mga_fbdev *mfbdev,
			     int x, int y, int width, int height)
{
	int i;
	struct drm_gem_object *obj;
	struct mgag200_bo *bo;
	int src_offset, dst_offset;
	/* bytes per pixel, rounded up for depths below 8 bpp */
	int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
	int ret = -EBUSY;
	bool unmap = false;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;

	obj = mfbdev->mfb.obj;
	bo = gem_to_mga_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = mgag200_bo_reserve(bo, true);
	if (ret) {
		/* any error other than -EBUSY is fatal; just drop the damage */
		if (ret != -EBUSY)
			return;

		store_for_later = true;
	}

	/* inclusive bottom-right corner of the new damage */
	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&mfbdev->dirty_lock, flags);

	/* grow the rectangle to cover any damage stored by earlier
	 * deferred updates */
	if (mfbdev->y1 < y)
		y = mfbdev->y1;
	if (mfbdev->y2 > y2)
		y2 = mfbdev->y2;
	if (mfbdev->x1 < x)
		x = mfbdev->x1;
	if (mfbdev->x2 > x2)
		x2 = mfbdev->x2;

	if (store_for_later) {
		/* BO busy: remember the merged box and flush it later */
		mfbdev->x1 = x;
		mfbdev->x2 = x2;
		mfbdev->y1 = y;
		mfbdev->y2 = y2;
		spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
		return;
	}

	/* reset the stored box to "empty" (x1/y1 = INT_MAX, x2/y2 = 0)
	 * before flushing, so new damage can accumulate meanwhile */
	mfbdev->x1 = mfbdev->y1 = INT_MAX;
	mfbdev->x2 = mfbdev->y2 = 0;
	spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);

	/* kmap the BO only if no mapping is cached; unmap again below */
	if (!bo->kmap.virtual) {
		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
		if (ret) {
			DRM_ERROR("failed to kmap fb updates\n");
			mgag200_bo_unreserve(bo);
			return;
		}
		unmap = true;
	}
	/* copy the damaged part of each row from sysram into the BO */
	for (i = y; i <= y2; i++) {
		/* assume equal stride for now */
		src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
		memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);

	}
	if (unmap)
		ttm_bo_kunmap(&bo->kmap);

	mgag200_bo_unreserve(bo);
}
99 
mga_fillrect(struct fb_info * info,const struct fb_fillrect * rect)100 static void mga_fillrect(struct fb_info *info,
101 			 const struct fb_fillrect *rect)
102 {
103 	struct mga_fbdev *mfbdev = info->par;
104 	sys_fillrect(info, rect);
105 	mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
106 			 rect->height);
107 }
108 
mga_copyarea(struct fb_info * info,const struct fb_copyarea * area)109 static void mga_copyarea(struct fb_info *info,
110 			 const struct fb_copyarea *area)
111 {
112 	struct mga_fbdev *mfbdev = info->par;
113 	sys_copyarea(info, area);
114 	mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
115 			 area->height);
116 }
117 
mga_imageblit(struct fb_info * info,const struct fb_image * image)118 static void mga_imageblit(struct fb_info *info,
119 			  const struct fb_image *image)
120 {
121 	struct mga_fbdev *mfbdev = info->par;
122 	sys_imageblit(info, image);
123 	mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
124 			 image->height);
125 }
126 
127 
/*
 * fbdev operations: drawing ops render into the system-RAM shadow
 * buffer via the sys_* helpers and are flushed to VRAM by
 * mga_dirty_update(); mode, blank, pan and palette handling is
 * delegated to the generic DRM fb helper functions.
 */
static struct fb_ops mgag200fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = mga_fillrect,
	.fb_copyarea = mga_copyarea,
	.fb_imageblit = mga_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};
139 
mgag200fb_create_object(struct mga_fbdev * afbdev,struct drm_mode_fb_cmd2 * mode_cmd,struct drm_gem_object ** gobj_p)140 static int mgag200fb_create_object(struct mga_fbdev *afbdev,
141 				   struct drm_mode_fb_cmd2 *mode_cmd,
142 				   struct drm_gem_object **gobj_p)
143 {
144 	struct drm_device *dev = afbdev->helper.dev;
145 	u32 size;
146 	struct drm_gem_object *gobj;
147 	int ret = 0;
148 
149 	size = mode_cmd->pitches[0] * mode_cmd->height;
150 	ret = mgag200_gem_create(dev, size, true, &gobj);
151 	if (ret)
152 		return ret;
153 
154 	*gobj_p = gobj;
155 	return ret;
156 }
157 
mgag200fb_create(struct drm_fb_helper * helper,struct drm_fb_helper_surface_size * sizes)158 static int mgag200fb_create(struct drm_fb_helper *helper,
159 			   struct drm_fb_helper_surface_size *sizes)
160 {
161 	struct mga_fbdev *mfbdev =
162 		container_of(helper, struct mga_fbdev, helper);
163 	struct drm_device *dev = mfbdev->helper.dev;
164 	struct drm_mode_fb_cmd2 mode_cmd;
165 	struct mga_device *mdev = dev->dev_private;
166 	struct fb_info *info;
167 	struct drm_framebuffer *fb;
168 	struct drm_gem_object *gobj = NULL;
169 	struct device *device = &dev->pdev->dev;
170 	struct mgag200_bo *bo;
171 	int ret;
172 	void *sysram;
173 	int size;
174 
175 	mode_cmd.width = sizes->surface_width;
176 	mode_cmd.height = sizes->surface_height;
177 	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
178 
179 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
180 							  sizes->surface_depth);
181 	size = mode_cmd.pitches[0] * mode_cmd.height;
182 
183 	ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
184 	if (ret) {
185 		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
186 		return ret;
187 	}
188 	bo = gem_to_mga_bo(gobj);
189 
190 	sysram = vmalloc(size);
191 	if (!sysram)
192 		return -ENOMEM;
193 
194 	info = framebuffer_alloc(0, device);
195 	if (info == NULL)
196 		return -ENOMEM;
197 
198 	info->par = mfbdev;
199 
200 	ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
201 	if (ret)
202 		return ret;
203 
204 	mfbdev->sysram = sysram;
205 	mfbdev->size = size;
206 
207 	fb = &mfbdev->mfb.base;
208 
209 	/* setup helper */
210 	mfbdev->helper.fb = fb;
211 	mfbdev->helper.fbdev = info;
212 
213 	ret = fb_alloc_cmap(&info->cmap, 256, 0);
214 	if (ret) {
215 		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
216 		ret = -ENOMEM;
217 		goto out;
218 	}
219 
220 	strcpy(info->fix.id, "mgadrmfb");
221 
222 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
223 	info->fbops = &mgag200fb_ops;
224 
225 	/* setup aperture base/size for vesafb takeover */
226 	info->apertures = alloc_apertures(1);
227 	if (!info->apertures) {
228 		ret = -ENOMEM;
229 		goto out;
230 	}
231 	info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
232 	info->apertures->ranges[0].size = mdev->mc.vram_size;
233 
234 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
235 	drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
236 			       sizes->fb_height);
237 
238 	info->screen_base = sysram;
239 	info->screen_size = size;
240 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
241 
242 	DRM_DEBUG_KMS("allocated %dx%d\n",
243 		      fb->width, fb->height);
244 	return 0;
245 out:
246 	return ret;
247 }
248 
mga_fbdev_destroy(struct drm_device * dev,struct mga_fbdev * mfbdev)249 static int mga_fbdev_destroy(struct drm_device *dev,
250 				struct mga_fbdev *mfbdev)
251 {
252 	struct fb_info *info;
253 	struct mga_framebuffer *mfb = &mfbdev->mfb;
254 
255 	if (mfbdev->helper.fbdev) {
256 		info = mfbdev->helper.fbdev;
257 
258 		unregister_framebuffer(info);
259 		if (info->cmap.len)
260 			fb_dealloc_cmap(&info->cmap);
261 		framebuffer_release(info);
262 	}
263 
264 	if (mfb->obj) {
265 		drm_gem_object_unreference_unlocked(mfb->obj);
266 		mfb->obj = NULL;
267 	}
268 	drm_fb_helper_fini(&mfbdev->helper);
269 	vfree(mfbdev->sysram);
270 	drm_framebuffer_unregister_private(&mfb->base);
271 	drm_framebuffer_cleanup(&mfb->base);
272 
273 	return 0;
274 }
275 
/* fb helper callbacks: gamma handling lives in the CRTC code,
 * framebuffer creation is handled by mgag200fb_create() above. */
static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
	.gamma_set = mga_crtc_fb_gamma_set,
	.gamma_get = mga_crtc_fb_gamma_get,
	.fb_probe = mgag200fb_create,
};
281 
mgag200_fbdev_init(struct mga_device * mdev)282 int mgag200_fbdev_init(struct mga_device *mdev)
283 {
284 	struct mga_fbdev *mfbdev;
285 	int ret;
286 	int bpp_sel = 32;
287 
288 	/* prefer 16bpp on low end gpus with limited VRAM */
289 	if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
290 		bpp_sel = 16;
291 
292 	mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
293 	if (!mfbdev)
294 		return -ENOMEM;
295 
296 	mdev->mfbdev = mfbdev;
297 	spin_lock_init(&mfbdev->dirty_lock);
298 
299 	drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
300 
301 	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
302 				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
303 	if (ret)
304 		return ret;
305 
306 	ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
307 	if (ret)
308 		goto fini;
309 
310 	/* disable all the possible outputs/crtcs before entering KMS mode */
311 	drm_helper_disable_unused_functions(mdev->dev);
312 
313 	ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
314 	if (ret)
315 		goto fini;
316 
317 	return 0;
318 
319 fini:
320 	drm_fb_helper_fini(&mfbdev->helper);
321 	return ret;
322 }
323 
mgag200_fbdev_fini(struct mga_device * mdev)324 void mgag200_fbdev_fini(struct mga_device *mdev)
325 {
326 	if (!mdev->mfbdev)
327 		return;
328 
329 	mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
330 }
331