/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
14
15 #include <drm/drmP.h>
16 #include <drm/drm_crtc.h>
17 #include <drm/drm_fb_helper.h>
18 #include <drm/drm_crtc_helper.h>
19 #include <drm/exynos_drm.h>
20
21 #include "exynos_drm_drv.h"
22 #include "exynos_drm_fb.h"
23 #include "exynos_drm_fbdev.h"
24 #include "exynos_drm_gem.h"
25 #include "exynos_drm_iommu.h"
26
27 #define MAX_CONNECTOR 4
28 #define PREFERRED_BPP 32
29
30 #define to_exynos_fbdev(x) container_of(x, struct exynos_drm_fbdev,\
31 drm_fb_helper)
32
/*
 * Per-device fbdev emulation state.
 *
 * Embeds the generic drm_fb_helper (recovered from a helper pointer via
 * to_exynos_fbdev()) and remembers the single GEM object that backs the
 * emulated framebuffer, so mmap and teardown can reach its buffer.
 */
struct exynos_drm_fbdev {
	struct drm_fb_helper drm_fb_helper;	/* generic fbdev helper core */
	struct exynos_drm_gem_obj *exynos_gem_obj;	/* backing GEM buffer */
};
37
exynos_drm_fb_mmap(struct fb_info * info,struct vm_area_struct * vma)38 static int exynos_drm_fb_mmap(struct fb_info *info,
39 struct vm_area_struct *vma)
40 {
41 struct drm_fb_helper *helper = info->par;
42 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
43 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
44 struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
45 unsigned long vm_size;
46 int ret;
47
48 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
49
50 vm_size = vma->vm_end - vma->vm_start;
51
52 if (vm_size > buffer->size)
53 return -EINVAL;
54
55 ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
56 buffer->dma_addr, buffer->size, &buffer->dma_attrs);
57 if (ret < 0) {
58 DRM_ERROR("failed to mmap.\n");
59 return ret;
60 }
61
62 return 0;
63 }
64
/*
 * fb_info operations for the emulated fbdev. Drawing uses the software
 * cfb_* helpers on the kernel mapping set up in exynos_drm_fbdev_update();
 * mode, blank, pan and cmap handling are delegated to the drm_fb_helper
 * core; mmap is driver-specific to expose the GEM-backed buffer.
 */
static struct fb_ops exynos_drm_fb_ops = {
	.owner = THIS_MODULE,
	.fb_mmap = exynos_drm_fb_mmap,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_blank = drm_fb_helper_blank,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_setcmap = drm_fb_helper_setcmap,
};
77
exynos_drm_fbdev_update(struct drm_fb_helper * helper,struct drm_framebuffer * fb)78 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
79 struct drm_framebuffer *fb)
80 {
81 struct fb_info *fbi = helper->fbdev;
82 struct drm_device *dev = helper->dev;
83 struct exynos_drm_gem_buf *buffer;
84 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
85 unsigned long offset;
86
87 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
88 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
89
90 /* RGB formats use only one buffer */
91 buffer = exynos_drm_fb_buffer(fb, 0);
92 if (!buffer) {
93 DRM_DEBUG_KMS("buffer is null.\n");
94 return -EFAULT;
95 }
96
97 /* map pages with kernel virtual space. */
98 if (!buffer->kvaddr) {
99 if (is_drm_iommu_supported(dev)) {
100 unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
101
102 buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
103 nr_pages, VM_MAP,
104 pgprot_writecombine(PAGE_KERNEL));
105 } else {
106 phys_addr_t dma_addr = buffer->dma_addr;
107 if (dma_addr)
108 buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
109 else
110 buffer->kvaddr = (void __iomem *)NULL;
111 }
112 if (!buffer->kvaddr) {
113 DRM_ERROR("failed to map pages to kernel space.\n");
114 return -EIO;
115 }
116 }
117
118 /* buffer count to framebuffer always is 1 at booting time. */
119 exynos_drm_fb_set_buf_cnt(fb, 1);
120
121 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
122 offset += fbi->var.yoffset * fb->pitches[0];
123
124 fbi->screen_base = buffer->kvaddr + offset;
125 fbi->screen_size = size;
126 fbi->fix.smem_len = size;
127
128 return 0;
129 }
130
exynos_drm_fbdev_create(struct drm_fb_helper * helper,struct drm_fb_helper_surface_size * sizes)131 static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
132 struct drm_fb_helper_surface_size *sizes)
133 {
134 struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
135 struct exynos_drm_gem_obj *exynos_gem_obj;
136 struct drm_device *dev = helper->dev;
137 struct fb_info *fbi;
138 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
139 struct platform_device *pdev = dev->platformdev;
140 unsigned long size;
141 int ret;
142
143 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
144 sizes->surface_width, sizes->surface_height,
145 sizes->surface_bpp);
146
147 mode_cmd.width = sizes->surface_width;
148 mode_cmd.height = sizes->surface_height;
149 mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
150 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
151 sizes->surface_depth);
152
153 mutex_lock(&dev->struct_mutex);
154
155 fbi = framebuffer_alloc(0, &pdev->dev);
156 if (!fbi) {
157 DRM_ERROR("failed to allocate fb info.\n");
158 ret = -ENOMEM;
159 goto out;
160 }
161
162 size = mode_cmd.pitches[0] * mode_cmd.height;
163
164 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
165 /*
166 * If physically contiguous memory allocation fails and if IOMMU is
167 * supported then try to get buffer from non physically contiguous
168 * memory area.
169 */
170 if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
171 dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
172 exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
173 size);
174 }
175
176 if (IS_ERR(exynos_gem_obj)) {
177 ret = PTR_ERR(exynos_gem_obj);
178 goto err_release_framebuffer;
179 }
180
181 exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
182
183 helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
184 &exynos_gem_obj->base);
185 if (IS_ERR(helper->fb)) {
186 DRM_ERROR("failed to create drm framebuffer.\n");
187 ret = PTR_ERR(helper->fb);
188 goto err_destroy_gem;
189 }
190
191 helper->fbdev = fbi;
192
193 fbi->par = helper;
194 fbi->flags = FBINFO_FLAG_DEFAULT;
195 fbi->fbops = &exynos_drm_fb_ops;
196
197 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
198 if (ret) {
199 DRM_ERROR("failed to allocate cmap.\n");
200 goto err_destroy_framebuffer;
201 }
202
203 ret = exynos_drm_fbdev_update(helper, helper->fb);
204 if (ret < 0)
205 goto err_dealloc_cmap;
206
207 mutex_unlock(&dev->struct_mutex);
208 return ret;
209
210 err_dealloc_cmap:
211 fb_dealloc_cmap(&fbi->cmap);
212 err_destroy_framebuffer:
213 drm_framebuffer_cleanup(helper->fb);
214 err_destroy_gem:
215 exynos_drm_gem_destroy(exynos_gem_obj);
216 err_release_framebuffer:
217 framebuffer_release(fbi);
218
219 /*
220 * if failed, all resources allocated above would be released by
221 * drm_mode_config_cleanup() when drm_load() had been called prior
222 * to any specific driver such as fimd or hdmi driver.
223 */
224 out:
225 mutex_unlock(&dev->struct_mutex);
226 return ret;
227 }
228
/* fb helper hooks; .fb_probe is called by the core to build the fbdev. */
static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe = exynos_drm_fbdev_create,
};
232
exynos_drm_fbdev_is_anything_connected(struct drm_device * dev)233 static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
234 {
235 struct drm_connector *connector;
236 bool ret = false;
237
238 mutex_lock(&dev->mode_config.mutex);
239 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
240 if (connector->status != connector_status_connected)
241 continue;
242
243 ret = true;
244 break;
245 }
246 mutex_unlock(&dev->mode_config.mutex);
247
248 return ret;
249 }
250
exynos_drm_fbdev_init(struct drm_device * dev)251 int exynos_drm_fbdev_init(struct drm_device *dev)
252 {
253 struct exynos_drm_fbdev *fbdev;
254 struct exynos_drm_private *private = dev->dev_private;
255 struct drm_fb_helper *helper;
256 unsigned int num_crtc;
257 int ret;
258
259 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
260 return 0;
261
262 if (!exynos_drm_fbdev_is_anything_connected(dev))
263 return 0;
264
265 fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
266 if (!fbdev)
267 return -ENOMEM;
268
269 private->fb_helper = helper = &fbdev->drm_fb_helper;
270
271 drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
272
273 num_crtc = dev->mode_config.num_crtc;
274
275 ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
276 if (ret < 0) {
277 DRM_ERROR("failed to initialize drm fb helper.\n");
278 goto err_init;
279 }
280
281 ret = drm_fb_helper_single_add_all_connectors(helper);
282 if (ret < 0) {
283 DRM_ERROR("failed to register drm_fb_helper_connector.\n");
284 goto err_setup;
285
286 }
287
288 /* disable all the possible outputs/crtcs before entering KMS mode */
289 drm_helper_disable_unused_functions(dev);
290
291 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
292 if (ret < 0) {
293 DRM_ERROR("failed to set up hw configuration.\n");
294 goto err_setup;
295 }
296
297 return 0;
298
299 err_setup:
300 drm_fb_helper_fini(helper);
301
302 err_init:
303 private->fb_helper = NULL;
304 kfree(fbdev);
305
306 return ret;
307 }
308
exynos_drm_fbdev_destroy(struct drm_device * dev,struct drm_fb_helper * fb_helper)309 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
310 struct drm_fb_helper *fb_helper)
311 {
312 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
313 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
314 struct drm_framebuffer *fb;
315
316 if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
317 vunmap(exynos_gem_obj->buffer->kvaddr);
318
319 /* release drm framebuffer and real buffer */
320 if (fb_helper->fb && fb_helper->fb->funcs) {
321 fb = fb_helper->fb;
322 if (fb) {
323 drm_framebuffer_unregister_private(fb);
324 drm_framebuffer_remove(fb);
325 }
326 }
327
328 /* release linux framebuffer */
329 if (fb_helper->fbdev) {
330 struct fb_info *info;
331 int ret;
332
333 info = fb_helper->fbdev;
334 ret = unregister_framebuffer(info);
335 if (ret < 0)
336 DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
337
338 if (info->cmap.len)
339 fb_dealloc_cmap(&info->cmap);
340
341 framebuffer_release(info);
342 }
343
344 drm_fb_helper_fini(fb_helper);
345 }
346
exynos_drm_fbdev_fini(struct drm_device * dev)347 void exynos_drm_fbdev_fini(struct drm_device *dev)
348 {
349 struct exynos_drm_private *private = dev->dev_private;
350 struct exynos_drm_fbdev *fbdev;
351
352 if (!private || !private->fb_helper)
353 return;
354
355 fbdev = to_exynos_fbdev(private->fb_helper);
356
357 exynos_drm_fbdev_destroy(dev, private->fb_helper);
358 kfree(fbdev);
359 private->fb_helper = NULL;
360 }
361
exynos_drm_fbdev_restore_mode(struct drm_device * dev)362 void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
363 {
364 struct exynos_drm_private *private = dev->dev_private;
365
366 if (!private || !private->fb_helper)
367 return;
368
369 drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
370 }
371