// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/gpu/drm/exynos/exynos7_drm_decon.c
 *
 * Copyright (C) 2014 Samsung Electronics Co.Ltd
 * Authors:
 *	Akshu Agarwal <akshua@gmail.com>
 *	Ajay Kumar <ajaykumar.rs@samsung.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <video/of_display_timing.h>
#include <video/of_videomode.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "regs-decon7.h"

/*
 * DECON stands for Display and Enhancement controller.
 */

#define MIN_FB_WIDTH_FOR_16WORD_BURST	128

#define WINDOWS_NR	2

struct decon_context {
	struct device			*dev;
	struct drm_device		*drm_dev;
	void				*dma_priv;
	struct exynos_drm_crtc		*crtc;
	struct exynos_drm_plane		planes[WINDOWS_NR];
	struct exynos_drm_plane_config	configs[WINDOWS_NR];
	struct clk			*pclk;
	struct clk			*aclk;
	struct clk			*eclk;
	struct clk			*vclk;
	void __iomem			*regs;
	unsigned long			irq_flags;
	bool				i80_if;
	bool				suspended;
	wait_queue_head_t		wait_vsync_queue;
	atomic_t			wait_vsync_event;

	struct drm_encoder		*encoder;
};

static const struct of_device_id decon_driver_dt_match[] = {
	{.compatible = "samsung,exynos7-decon"},
	{},
};
MODULE_DEVICE_TABLE(of, decon_driver_dt_match);

static const uint32_t decon_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_BGRA8888,
};

static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_CURSOR,
};

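/*
 * Block until decon_irq_handler() signals the next VSYNC by clearing
 * wait_vsync_event, or give up after the 50ms timeout used below.
 */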
static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;

	if (ctx->suspended)
		return;

	atomic_set(&ctx->wait_vsync_event, 1);

	/*
	 * wait for DECON to signal VSYNC interrupt or return after
	 * timeout which is set to 50ms (refresh rate of 20).
	 */
	if (!wait_event_timeout(ctx->wait_vsync_queue,
				!atomic_read(&ctx->wait_vsync_event),
				HZ/20))
		DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}

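/*
 * Disable any hardware windows that are still enabled (for instance ones
 * left scanning out by the bootloader) and wait one vsync so the disable
 * takes effect; called from decon_ctx_initialize() before the DMA/IOMMU
 * is attached.
 */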
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	unsigned int win, ch_enabled = 0;

	/* Check if any channel is enabled. */
	for (win = 0; win < WINDOWS_NR; win++) {
		u32 val = readl(ctx->regs + WINCON(win));

		if (val & WINCONx_ENWIN) {
			val &= ~WINCONx_ENWIN;
			writel(val, ctx->regs + WINCON(win));
			ch_enabled = 1;
		}
	}

	/* Wait for vsync, as disabling a channel only takes effect at the next vsync. */
	if (ch_enabled)
		decon_wait_for_vblank(ctx->crtc);
}

static int decon_ctx_initialize(struct decon_context *ctx,
				struct drm_device *drm_dev)
{
	ctx->drm_dev = drm_dev;

	decon_clear_channels(ctx->crtc);

	return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}

static void decon_ctx_remove(struct decon_context *ctx)
{
	/* detach this sub driver from iommu mapping if supported. */
	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}

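/*
 * Derive the VCLK divider needed to approximate the mode's pixel clock
 * from the current vclk rate; the hardware divider field is capped at 0xff.
 */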
static u32 decon_calc_clkdiv(struct decon_context *ctx,
			     const struct drm_display_mode *mode)
{
	unsigned long ideal_clk = mode->clock;
	u32 clkdiv;

	/* Find the clock divider value that gets us closest to ideal_clk */
	clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->vclk), ideal_clk);

	return (clkdiv < 0x100) ? clkdiv : 0xff;
}

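/*
 * Program the current adjusted_mode into the hardware: video timings
 * (RGB interface only), display size and clock divider, then trigger a
 * standalone update so the shadow registers are applied.
 */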
static void decon_commit(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
	u32 val, clkdiv;

	if (ctx->suspended)
		return;

	/* nothing to do if we haven't set the mode yet */
	if (mode->htotal == 0 || mode->vtotal == 0)
		return;

	if (!ctx->i80_if) {
		int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;

		/* setup vertical timing values. */
		vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
		vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
		vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;

		val = VIDTCON0_VBPD(vbpd - 1) | VIDTCON0_VFPD(vfpd - 1);
		writel(val, ctx->regs + VIDTCON0);

		val = VIDTCON1_VSPW(vsync_len - 1);
		writel(val, ctx->regs + VIDTCON1);

		/* setup horizontal timing values. */
		hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
		hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
		hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;

		val = VIDTCON2_HBPD(hbpd - 1) | VIDTCON2_HFPD(hfpd - 1);
		writel(val, ctx->regs + VIDTCON2);

		val = VIDTCON3_HSPW(hsync_len - 1);
		writel(val, ctx->regs + VIDTCON3);
	}

	/* setup horizontal and vertical display size. */
	val = VIDTCON4_LINEVAL(mode->vdisplay - 1) |
	      VIDTCON4_HOZVAL(mode->hdisplay - 1);
	writel(val, ctx->regs + VIDTCON4);

	writel(mode->vdisplay - 1, ctx->regs + LINECNT_OP_THRESHOLD);

	/*
	 * Register fields with the '_F' suffix are updated at vsync
	 * (at the same time DMA starts).
	 */
	val = VIDCON0_ENVID | VIDCON0_ENVID_F;
	writel(val, ctx->regs + VIDCON0);

	clkdiv = decon_calc_clkdiv(ctx, mode);
	if (clkdiv > 1) {
		val = VCLKCON1_CLKVAL_NUM_VCLK(clkdiv - 1);
		writel(val, ctx->regs + VCLKCON1);
		writel(val, ctx->regs + VCLKCON2);
	}

	val = readl(ctx->regs + DECON_UPDATE);
	val |= DECON_UPDATE_STANDALONE_F;
	writel(val, ctx->regs + DECON_UPDATE);
}

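/*
 * Enable/disable the frame (vsync) interrupt. Bit 0 of irq_flags tracks
 * whether vblank interrupts are currently requested, so the state can be
 * restored by decon_atomic_enable().
 */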
static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	u32 val;

	if (ctx->suspended)
		return -EPERM;

	if (!test_and_set_bit(0, &ctx->irq_flags)) {
		val = readl(ctx->regs + VIDINTCON0);

		val |= VIDINTCON0_INT_ENABLE;

		if (!ctx->i80_if) {
			val |= VIDINTCON0_INT_FRAME;
			val &= ~VIDINTCON0_FRAMESEL0_MASK;
			val |= VIDINTCON0_FRAMESEL0_VSYNC;
		}

		writel(val, ctx->regs + VIDINTCON0);
	}

	return 0;
}

static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	u32 val;

	if (ctx->suspended)
		return;

	if (test_and_clear_bit(0, &ctx->irq_flags)) {
		val = readl(ctx->regs + VIDINTCON0);

		val &= ~VIDINTCON0_INT_ENABLE;
		if (!ctx->i80_if)
			val &= ~VIDINTCON0_INT_FRAME;

		writel(val, ctx->regs + VIDINTCON0);
	}
}

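/*
 * Translate the framebuffer's DRM fourcc into the WINCON BPP mode and
 * alpha-blending bits, and pick a DMA burst length that is safe for the
 * framebuffer width.
 */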
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
				 struct drm_framebuffer *fb)
{
	unsigned long val;
	int padding;

	val = readl(ctx->regs + WINCON(win));
	val &= ~WINCONx_BPPMODE_MASK;

	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
		val |= WINCONx_BPPMODE_16BPP_565;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_XRGB8888:
		val |= WINCONx_BPPMODE_24BPP_xRGB;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_XBGR8888:
		val |= WINCONx_BPPMODE_24BPP_xBGR;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_RGBX8888:
		val |= WINCONx_BPPMODE_24BPP_RGBx;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_BGRX8888:
		val |= WINCONx_BPPMODE_24BPP_BGRx;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_ARGB8888:
		val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX |
			WINCONx_ALPHA_SEL;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_ABGR8888:
		val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX |
			WINCONx_ALPHA_SEL;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_RGBA8888:
		val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX |
			WINCONx_ALPHA_SEL;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_BGRA8888:
	default:
		val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
			WINCONx_ALPHA_SEL;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	}

	DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %d\n", fb->format->cpp[0]);

	/*
	 * On exynos, setting the DMA burst to 16 words causes permanent
	 * tearing for very small buffers, e.g. cursor buffers. Switching
	 * the burst mode based on plane size is not recommended, as the
	 * plane size varies a lot towards the end of the screen and rapid
	 * movement causes unstable DMA, which results in an iommu
	 * crash/tearing.
	 */
	padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width;
	if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
		val &= ~WINCONx_BURSTLEN_MASK;
		val |= WINCONx_BURSTLEN_8WORD;
	}

	writel(val, ctx->regs + WINCON(win));
}

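/* Program default color-key register values for the given hardware window. */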
static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
{
	unsigned int keycon0 = 0, keycon1 = 0;

	keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
			WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);

	keycon1 = WxKEYCON1_COLVAL(0xffffffff);

	writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
	writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}

/**
 * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
 *
 * @ctx: display and enhancement controller context
 * @win: window to protect registers for
 * @protect: 1 to protect (disable updates)
 */
static void decon_shadow_protect_win(struct decon_context *ctx,
				     unsigned int win, bool protect)
{
	u32 bits, val;

	bits = SHADOWCON_WINx_PROTECT(win);

	val = readl(ctx->regs + SHADOWCON);
	if (protect)
		val |= bits;
	else
		val &= ~bits;
	writel(val, ctx->regs + SHADOWCON);
}

static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int i;

	if (ctx->suspended)
		return;

	for (i = 0; i < WINDOWS_NR; i++)
		decon_shadow_protect_win(ctx, i, true);
}

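/*
 * Program one hardware window for the plane: buffer address, buffer and
 * crtc geometry, OSD alpha, pixel format and color key, then enable the
 * window and trigger a standalone update.
 */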
static void decon_update_plane(struct exynos_drm_crtc *crtc,
			       struct exynos_drm_plane *plane)
{
	struct exynos_drm_plane_state *state =
				to_exynos_plane_state(plane->base.state);
	struct decon_context *ctx = crtc->ctx;
	struct drm_framebuffer *fb = state->base.fb;
	int padding;
	unsigned long val, alpha;
	unsigned int last_x;
	unsigned int last_y;
	unsigned int win = plane->index;
	unsigned int cpp = fb->format->cpp[0];
	unsigned int pitch = fb->pitches[0];

	if (ctx->suspended)
		return;

	/*
	 * The SHADOWCON/PRTCON registers control when shadow registers are
	 * latched into the hardware.
	 *
	 * For example, if only the width of a window had been programmed
	 * when DMA starts, the hardware could malfunction. With window
	 * protection enabled, register fields with the '_F' suffix are not
	 * latched at vsync; they are applied once the window is unprotected
	 * again.
	 */

	/* buffer start address */
	val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
	writel(val, ctx->regs + VIDW_BUF_START(win));

	padding = (pitch / cpp) - fb->width;

	/* buffer size */
	writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
	writel(fb->height, ctx->regs + VIDW_WHOLE_Y(win));

	/* offset from the start of the buffer to read */
	writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
	writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));

	DRM_DEV_DEBUG_KMS(ctx->dev, "start addr = 0x%lx\n",
			  (unsigned long)val);
	DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
			  state->crtc.w, state->crtc.h);

	val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
		VIDOSDxA_TOPLEFT_Y(state->crtc.y);
	writel(val, ctx->regs + VIDOSD_A(win));

	last_x = state->crtc.x + state->crtc.w;
	if (last_x)
		last_x--;
	last_y = state->crtc.y + state->crtc.h;
	if (last_y)
		last_y--;

	val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y);

	writel(val, ctx->regs + VIDOSD_B(win));

	DRM_DEV_DEBUG_KMS(ctx->dev, "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
			  state->crtc.x, state->crtc.y, last_x, last_y);

	/* OSD alpha */
	alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
		VIDOSDxC_ALPHA0_G_F(0x0) |
		VIDOSDxC_ALPHA0_B_F(0x0);

	writel(alpha, ctx->regs + VIDOSD_C(win));

	alpha = VIDOSDxD_ALPHA1_R_F(0xff) |
		VIDOSDxD_ALPHA1_G_F(0xff) |
		VIDOSDxD_ALPHA1_B_F(0xff);

	writel(alpha, ctx->regs + VIDOSD_D(win));

	decon_win_set_pixfmt(ctx, win, fb);

	/* hardware window 0 doesn't support color key. */
	if (win != 0)
		decon_win_set_colkey(ctx, win);

	/* wincon */
	val = readl(ctx->regs + WINCON(win));
	val |= WINCONx_TRIPLE_BUF_MODE;
	val |= WINCONx_ENWIN;
	writel(val, ctx->regs + WINCON(win));

	/* Enable DMA channel and unprotect windows */
	decon_shadow_protect_win(ctx, win, false);

	val = readl(ctx->regs + DECON_UPDATE);
	val |= DECON_UPDATE_STANDALONE_F;
	writel(val, ctx->regs + DECON_UPDATE);
}

static void decon_disable_plane(struct exynos_drm_crtc *crtc,
				struct exynos_drm_plane *plane)
{
	struct decon_context *ctx = crtc->ctx;
	unsigned int win = plane->index;
	u32 val;

	if (ctx->suspended)
		return;

	/* protect windows */
	decon_shadow_protect_win(ctx, win, true);

	/* wincon */
	val = readl(ctx->regs + WINCON(win));
	val &= ~WINCONx_ENWIN;
	writel(val, ctx->regs + WINCON(win));

	val = readl(ctx->regs + DECON_UPDATE);
	val |= DECON_UPDATE_STANDALONE_F;
	writel(val, ctx->regs + DECON_UPDATE);
}

static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int i;

	if (ctx->suspended)
		return;

	for (i = 0; i < WINDOWS_NR; i++)
		decon_shadow_protect_win(ctx, i, false);

	exynos_crtc_handle_event(crtc);
}

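/*
 * Basic controller setup after power-up: software reset, selection of the
 * display interface (i80 or RGB) and video clock control configuration.
 */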
static void decon_init(struct decon_context *ctx)
{
	u32 val;

	writel(VIDCON0_SWRESET, ctx->regs + VIDCON0);

	val = VIDOUTCON0_DISP_IF_0_ON;
	if (!ctx->i80_if)
		val |= VIDOUTCON0_RGBIF;
	writel(val, ctx->regs + VIDOUTCON0);

	writel(VCLKCON0_CLKVALUP | VCLKCON0_VCLKFREE, ctx->regs + VCLKCON0);

	if (!ctx->i80_if)
		writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
}

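/*
 * CRTC power on: resume the device via runtime PM, reinitialize the
 * controller and restore the vblank and mode state that was active
 * before the last disable.
 */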
static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int ret;

	if (!ctx->suspended)
		return;

	ret = pm_runtime_resume_and_get(ctx->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
		return;
	}

	decon_init(ctx);

	/* if vblank was enabled before suspend, enable it again. */
	if (test_and_clear_bit(0, &ctx->irq_flags))
		decon_enable_vblank(ctx->crtc);

	decon_commit(ctx->crtc);

	ctx->suspended = false;
}

static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int i;

	if (ctx->suspended)
		return;

	/*
	 * We need to make sure that all windows are disabled before we
	 * suspend the connector. Otherwise we might try to scan from
	 * a destroyed buffer later.
	 */
	for (i = 0; i < WINDOWS_NR; i++)
		decon_disable_plane(crtc, &ctx->planes[i]);

	pm_runtime_put_sync(ctx->dev);

	ctx->suspended = true;
}

static const struct exynos_drm_crtc_ops decon_crtc_ops = {
	.atomic_enable = decon_atomic_enable,
	.atomic_disable = decon_atomic_disable,
	.enable_vblank = decon_enable_vblank,
	.disable_vblank = decon_disable_vblank,
	.atomic_begin = decon_atomic_begin,
	.update_plane = decon_update_plane,
	.disable_plane = decon_disable_plane,
	.atomic_flush = decon_atomic_flush,
};

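/*
 * VSYNC (or i80 frame-done) interrupt handler: acknowledge the interrupt,
 * forward the vblank event to DRM and wake anyone sleeping in
 * decon_wait_for_vblank().
 */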
static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
	struct decon_context *ctx = (struct decon_context *)dev_id;
	u32 val, clear_bit;

	val = readl(ctx->regs + VIDINTCON1);

	clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
	if (val & clear_bit)
		writel(clear_bit, ctx->regs + VIDINTCON1);

	/* check whether the crtc has already been detached from the encoder */
	if (!ctx->drm_dev)
		goto out;

	if (!ctx->i80_if) {
		drm_crtc_handle_vblank(&ctx->crtc->base);

		/* set wait vsync event to zero and wake up queue. */
		if (atomic_read(&ctx->wait_vsync_event)) {
			atomic_set(&ctx->wait_vsync_event, 0);
			wake_up(&ctx->wait_vsync_queue);
		}
	}
out:
	return IRQ_HANDLED;
}

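/*
 * Component bind callback: attach to the DRM device, register the two
 * hardware windows as planes, create the CRTC, and bind the DPI encoder
 * if one was found at probe time.
 */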
static int decon_bind(struct device *dev, struct device *master, void *data)
{
	struct decon_context *ctx = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_plane *exynos_plane;
	unsigned int i;
	int ret;

	ret = decon_ctx_initialize(ctx, drm_dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "decon_ctx_initialize failed.\n");
		return ret;
	}

	for (i = 0; i < WINDOWS_NR; i++) {
		ctx->configs[i].pixel_formats = decon_formats;
		ctx->configs[i].num_pixel_formats = ARRAY_SIZE(decon_formats);
		ctx->configs[i].zpos = i;
		ctx->configs[i].type = decon_win_types[i];

		ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
					&ctx->configs[i]);
		if (ret)
			return ret;
	}

	exynos_plane = &ctx->planes[DEFAULT_WIN];
	ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
			EXYNOS_DISPLAY_TYPE_LCD, &decon_crtc_ops, ctx);
	if (IS_ERR(ctx->crtc)) {
		decon_ctx_remove(ctx);
		return PTR_ERR(ctx->crtc);
	}

	if (ctx->encoder)
		exynos_dpi_bind(drm_dev, ctx->encoder);

	return 0;
}

static void decon_unbind(struct device *dev, struct device *master,
			 void *data)
{
	struct decon_context *ctx = dev_get_drvdata(dev);

	decon_atomic_disable(ctx->crtc);

	if (ctx->encoder)
		exynos_dpi_remove(ctx->encoder);

	decon_ctx_remove(ctx);
}

static const struct component_ops decon_component_ops = {
	.bind	= decon_bind,
	.unbind	= decon_unbind,
};

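/*
 * Platform probe: parse the device tree node (an optional i80-if-timings
 * child selects i80 mode), map the registers, look up the clocks and the
 * vsync or lcd_sys interrupt, then register as a DRM component.
 */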
static int decon_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct decon_context *ctx;
	struct device_node *i80_if_timings;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->suspended = true;

	i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
	if (i80_if_timings)
		ctx->i80_if = true;
	of_node_put(i80_if_timings);

	ctx->regs = of_iomap(dev->of_node, 0);
	if (!ctx->regs)
		return -ENOMEM;

	ctx->pclk = devm_clk_get(dev, "pclk_decon0");
	if (IS_ERR(ctx->pclk)) {
		dev_err(dev, "failed to get bus clock pclk\n");
		ret = PTR_ERR(ctx->pclk);
		goto err_iounmap;
	}

	ctx->aclk = devm_clk_get(dev, "aclk_decon0");
	if (IS_ERR(ctx->aclk)) {
		dev_err(dev, "failed to get bus clock aclk\n");
		ret = PTR_ERR(ctx->aclk);
		goto err_iounmap;
	}

	ctx->eclk = devm_clk_get(dev, "decon0_eclk");
	if (IS_ERR(ctx->eclk)) {
		dev_err(dev, "failed to get eclock\n");
		ret = PTR_ERR(ctx->eclk);
		goto err_iounmap;
	}

	ctx->vclk = devm_clk_get(dev, "decon0_vclk");
	if (IS_ERR(ctx->vclk)) {
		dev_err(dev, "failed to get vclock\n");
		ret = PTR_ERR(ctx->vclk);
		goto err_iounmap;
	}

	ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
	if (ret < 0)
		goto err_iounmap;

	ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx);
	if (ret) {
		dev_err(dev, "irq request failed.\n");
		goto err_iounmap;
	}

	init_waitqueue_head(&ctx->wait_vsync_queue);
	atomic_set(&ctx->wait_vsync_event, 0);

	platform_set_drvdata(pdev, ctx);

	ctx->encoder = exynos_dpi_probe(dev);
	if (IS_ERR(ctx->encoder)) {
		ret = PTR_ERR(ctx->encoder);
		goto err_iounmap;
	}

	pm_runtime_enable(dev);

	ret = component_add(dev, &decon_component_ops);
	if (ret)
		goto err_disable_pm_runtime;

	return ret;

err_disable_pm_runtime:
	pm_runtime_disable(dev);

err_iounmap:
	iounmap(ctx->regs);

	return ret;
}

static int decon_remove(struct platform_device *pdev)
{
	struct decon_context *ctx = dev_get_drvdata(&pdev->dev);

	pm_runtime_disable(&pdev->dev);

	iounmap(ctx->regs);

	component_del(&pdev->dev, &decon_component_ops);

	return 0;
}

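/*
 * Runtime PM callbacks: gate the four DECON clocks on suspend and
 * re-enable them (with rollback on failure) on resume.
 */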
static int exynos7_decon_suspend(struct device *dev)
{
	struct decon_context *ctx = dev_get_drvdata(dev);

	clk_disable_unprepare(ctx->vclk);
	clk_disable_unprepare(ctx->eclk);
	clk_disable_unprepare(ctx->aclk);
	clk_disable_unprepare(ctx->pclk);

	return 0;
}

static int exynos7_decon_resume(struct device *dev)
{
	struct decon_context *ctx = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(ctx->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
			      ret);
		goto err_pclk_enable;
	}

	ret = clk_prepare_enable(ctx->aclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
			      ret);
		goto err_aclk_enable;
	}

	ret = clk_prepare_enable(ctx->eclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
			      ret);
		goto err_eclk_enable;
	}

	ret = clk_prepare_enable(ctx->vclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
			      ret);
		goto err_vclk_enable;
	}

	return 0;

err_vclk_enable:
	clk_disable_unprepare(ctx->eclk);
err_eclk_enable:
	clk_disable_unprepare(ctx->aclk);
err_aclk_enable:
	clk_disable_unprepare(ctx->pclk);
err_pclk_enable:
	return ret;
}

static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
				 exynos7_decon_resume, NULL);

struct platform_driver decon_driver = {
	.probe		= decon_probe,
	.remove		= decon_remove,
	.driver		= {
		.name	= "exynos-decon",
		.pm	= pm_ptr(&exynos7_decon_pm_ops),
		.of_match_table = decon_driver_dt_match,
	},
};