/*
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "cirrus_drv.h"

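/*
 * Copy the dirty rectangle from the vmalloc()ed shadow buffer into the
 * VRAM-backed BO. If the BO cannot be reserved right now, the damage is
 * accumulated under dirty_lock and flushed on a later update.
 */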
static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
				int x, int y, int width, int height)
{
	int i;
	struct drm_gem_object *obj;
	struct cirrus_bo *bo;
	int src_offset, dst_offset;
	int bpp = afbdev->gfb->format->cpp[0];
	int ret = -EBUSY;
	bool unmap = false;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;

	obj = afbdev->gfb->obj[0];
	bo = gem_to_cirrus_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = cirrus_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;
		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&afbdev->dirty_lock, flags);

	if (afbdev->y1 < y)
		y = afbdev->y1;
	if (afbdev->y2 > y2)
		y2 = afbdev->y2;
	if (afbdev->x1 < x)
		x = afbdev->x1;
	if (afbdev->x2 > x2)
		x2 = afbdev->x2;

	if (store_for_later) {
		afbdev->x1 = x;
		afbdev->x2 = x2;
		afbdev->y1 = y;
		afbdev->y2 = y2;
		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
		return;
	}

	afbdev->x1 = afbdev->y1 = INT_MAX;
	afbdev->x2 = afbdev->y2 = 0;
	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);

	if (!bo->kmap.virtual) {
		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
		if (ret) {
			DRM_ERROR("failed to kmap fb updates\n");
			cirrus_bo_unreserve(bo);
			return;
		}
		unmap = true;
	}
	/* copy the merged dirty extents, not just the rectangle passed in */
	for (i = y; i <= y2; i++) {
		/* assume equal stride for now */
		src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp);
		memcpy_toio(bo->kmap.virtual + src_offset,
			    afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
	}
	if (unmap)
		ttm_bo_kunmap(&bo->kmap);

	cirrus_bo_unreserve(bo);
}

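/*
 * fbdev drawing ops: perform the operation in the shadow buffer via the
 * generic sys_* helpers, then push the touched rectangle out to VRAM.
 */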
static void cirrus_fillrect(struct fb_info *info,
			    const struct fb_fillrect *rect)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_fillrect(info, rect);
	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
			    rect->height);
}

static void cirrus_copyarea(struct fb_info *info,
			    const struct fb_copyarea *area)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_copyarea(info, area);
	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
			    area->height);
}

static void cirrus_imageblit(struct fb_info *info,
			     const struct fb_image *image)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_imageblit(info, image);
	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
			    image->height);
}

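/* fbdev ops; everything except the drawing hooks is delegated to the DRM fb helpers. */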
static struct fb_ops cirrusfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cirrus_fillrect,
	.fb_copyarea = cirrus_copyarea,
	.fb_imageblit = cirrus_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

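/*
 * Validate the requested mode against the hardware limits and allocate a
 * GEM object large enough to back the fbdev framebuffer.
 */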
static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
				  const struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_gem_object **gobj_p)
{
	struct drm_device *dev = afbdev->helper.dev;
	struct cirrus_device *cdev = dev->dev_private;
	u32 bpp;
	u32 size;
	struct drm_gem_object *gobj;
	int ret = 0;

	bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;

	if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
				      bpp, mode_cmd->pitches[0]))
		return -EINVAL;

	size = mode_cmd->pitches[0] * mode_cmd->height;
	ret = cirrus_gem_create(dev, size, true, &gobj);
	if (ret)
		return ret;

	*gobj_p = gobj;
	return ret;
}

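/*
 * .fb_probe callback: allocate the VRAM BO and a vmalloc()ed shadow buffer,
 * wrap them in a DRM framebuffer and register the fbdev emulation with the
 * shadow buffer as its screen memory.
 */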
static int cirrusfb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct cirrus_fbdev *gfbdev =
		container_of(helper, struct cirrus_fbdev, helper);
	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	void *sysram;
	struct drm_gem_object *gobj = NULL;
	struct cirrus_bo *bo = NULL;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	bo = gem_to_cirrus_bo(gobj);

	sysram = vmalloc(size);
	if (!sysram) {
		/* drop the GEM object we just created before bailing out */
		ret = -ENOMEM;
		goto err_drm_gem_object_put_unlocked;
	}

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_gem_object_put_unlocked;
	}

	info->par = gfbdev;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb) {
		ret = -ENOMEM;
		goto err_drm_gem_object_put_unlocked;
	}

	ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj);
	if (ret)
		goto err_kfree;

	gfbdev->sysram = sysram;
	gfbdev->size = size;
	gfbdev->gfb = fb;

	/* setup helper */
	gfbdev->helper.fb = fb;

	strcpy(info->fix.id, "cirrusdrmfb");

	info->fbops = &cirrusfb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
	info->apertures->ranges[0].size = cdev->mc.vram_size;

	info->fix.smem_start = cdev->dev->mode_config.fb_base;
	info->fix.smem_len = cdev->mc.vram_size;

	info->screen_base = sysram;
	info->screen_size = size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
	DRM_INFO("fb depth is %d\n", fb->format->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	return 0;

err_kfree:
	kfree(fb);
err_drm_gem_object_put_unlocked:
	drm_gem_object_put_unlocked(gobj);
	vfree(sysram);	/* vfree(NULL) is a no-op on the early error paths */
	return ret;
}

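/* Tear down the fbdev emulation and release the shadow buffer and framebuffer. */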
static int cirrus_fbdev_destroy(struct drm_device *dev,
				struct cirrus_fbdev *gfbdev)
{
	struct drm_framebuffer *gfb = gfbdev->gfb;

	drm_fb_helper_unregister_fbi(&gfbdev->helper);

	vfree(gfbdev->sysram);
	drm_fb_helper_fini(&gfbdev->helper);
	if (gfb)
		drm_framebuffer_put(gfb);

	return 0;
}

static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
	.fb_probe = cirrusfb_create,
};

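/*
 * Set up fbdev emulation for the device: allocate the cirrus_fbdev state,
 * register the fb helper and create the initial framebuffer configuration.
 */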
int cirrus_fbdev_init(struct cirrus_device *cdev)
{
	struct cirrus_fbdev *gfbdev;
	int ret;
	int bpp_sel = 24;

	/*bpp_sel = 8;*/
	gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
	if (!gfbdev)
		return -ENOMEM;

	cdev->mode_info.gfbdev = gfbdev;
	spin_lock_init(&gfbdev->dirty_lock);

	drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
			      &cirrus_fb_helper_funcs);

	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
				 CIRRUSFB_CONN_LIMIT);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
	if (ret)
		return ret;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(cdev->dev);

	return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
}

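/* Counterpart of cirrus_fbdev_init(); safe to call when fbdev was never set up. */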
void cirrus_fbdev_fini(struct cirrus_device *cdev)
{
	if (!cdev->mode_info.gfbdev)
		return;

	cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
	kfree(cdev->mode_info.gfbdev);
	cdev->mode_info.gfbdev = NULL;
}