// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Free Electrons
 * Copyright (C) 2015 NextThing Co
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/component.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>

#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"

struct sun4i_backend_quirks {
	/* backend <-> TCON muxing selection done in backend */
	bool needs_output_muxing;

	/* alpha at the lowest z position is not always supported */
	bool supports_lowest_plane_alpha;
};

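/*
 * Output color-correction matrix for RGB -> YUV, most likely the BT.601
 * limited-range coefficients: each group of four values looks like one
 * matrix row of (R, G, B, offset), with the coefficients expressed in
 * 1/1024 steps and negative values wrapped into the register field.
 */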
static const u32 sunxi_rgb2yuv_coef[12] = {
	0x00000107, 0x00000204, 0x00000064, 0x00000108,
	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};

static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
	int i;

	DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");

	/* Set color correction */
	regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
		     SUN4I_BACKEND_OCCTL_ENABLE);

	for (i = 0; i < 12; i++)
		regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
			     sunxi_rgb2yuv_coef[i]);
}

static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Disabling color correction\n");

	/* Disable color correction */
	regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
}

static void sun4i_backend_commit(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

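	/*
	 * Setting LOADCTL (with autoloading kept disabled) tells the
	 * hardware to pick up the newly written register settings; the
	 * bit is cleared again once the load is done, which is what
	 * sun4i_backend_atomic_begin() waits for before the next update
	 * starts writing registers.
	 */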
	regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}

void sun4i_backend_layer_enable(struct sun4i_backend *backend,
				int layer, bool enable)
{
	u32 val;

	DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
			 layer);

	if (enable)
		val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
	else
		val = 0;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}

static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
		break;

	case DRM_FORMAT_ARGB4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
		break;

	case DRM_FORMAT_ARGB1555:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
		break;

	case DRM_FORMAT_RGBA5551:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
		break;

	case DRM_FORMAT_RGBA4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
		break;

	case DRM_FORMAT_XRGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
		break;

	case DRM_FORMAT_RGB888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
		break;

	case DRM_FORMAT_RGB565:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static const uint32_t sun4i_backend_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
};

bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
	unsigned int i;

	if (modifier != DRM_FORMAT_MOD_LINEAR)
		return false;

	for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
		if (sun4i_backend_formats[i] == fmt)
			return true;

	return false;
}

int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
				     int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;

	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
		DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
				 state->crtc_w, state->crtc_h);
		regmap_write(backend->engine.regs, SUN4I_BACKEND_DISSIZE_REG,
			     SUN4I_BACKEND_DISSIZE(state->crtc_w,
						   state->crtc_h));
	}

	/* Set height and width */
	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
			 state->crtc_w, state->crtc_h);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
					   state->crtc_h));

	/* Set base coordinates */
	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
			 state->crtc_x, state->crtc_y);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
					   state->crtc_y));

	return 0;
}

static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
					   int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	const uint32_t fmt = format->format;
	u32 val = SUN4I_BACKEND_IYUVCTL_EN;
	int i;

	for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
		regmap_write(backend->engine.regs,
			     SUN4I_BACKEND_YGCOEF_REG(i),
			     sunxi_bt601_yuv2rgb_coef[i]);

	/*
	 * We should do that only for a single plane, but the
	 * framebuffer's atomic_check has our back on this.
	 */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

	/* TODO: Add support for the multi-planar YUV formats */
	if (drm_format_info_is_yuv_packed(format) &&
	    drm_format_info_is_yuv_sampling_422(format))
		val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
	else
		DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

	/*
	 * Allwinner seems to list the pixel sequence from right to left, while
	 * DRM lists it from left to right.
	 */
	switch (fmt) {
	case DRM_FORMAT_YUYV:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
		break;
	case DRM_FORMAT_YVYU:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
		break;
	case DRM_FORMAT_UYVY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
		break;
	case DRM_FORMAT_VYUY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
				 fmt);
	}

	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

	return 0;
}

int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
				       int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	bool interlaced = false;
	u32 val;
	int ret;

	/* Clear the YUV mode */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

	if (plane->state->crtc)
		interlaced = plane->state->crtc->state->adjusted_mode.flags
			& DRM_MODE_FLAG_INTERLACE;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
			 interlaced ? "on" : "off");

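	/*
	 * DRM stores the plane alpha as a 16-bit value (opaque is 0xffff),
	 * while the backend's per-layer global alpha field is 8 bits wide,
	 * hence the >> 8 below.
	 */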
	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
			   val);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_format(backend, layer, plane);

	ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
					int layer, uint32_t fmt)
{
	u32 val;
	int ret;

	ret = sun4i_backend_drm_format_to_layer(fmt, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
					   struct drm_framebuffer *fb,
					   dma_addr_t paddr)
{
	/* TODO: Add support for the multi-planar YUV formats */
	DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
		     fb->pitches[0] * 8);

	return 0;
}

int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
				      int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 lo_paddr, hi_paddr;
	dma_addr_t paddr;

	/* Set the line width */
	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
		     fb->pitches[0] * 8);

	/* Get the start of the displayed memory */
	paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_buffer(backend, fb, paddr);

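	/*
	 * The layer framebuffer address registers take the address
	 * expressed in bits: the low register holds the low 32 bits of
	 * (paddr << 3), and the high register holds the remaining upper
	 * bits, i.e. paddr >> 29 == (paddr << 3) >> 32.
	 */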
	/* Write the 32 lower bits of the address (in bits) */
	lo_paddr = paddr << 3;
	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
		     lo_paddr);

	/* And the upper bits */
	hi_paddr = paddr >> 29;
	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

	return 0;
}

int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
				    struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
	unsigned int priority = state->normalized_zpos;
	unsigned int pipe = p_state->pipe;

	DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
			 layer, priority, pipe);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));

	return 0;
}

void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
				 int layer)
{
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}

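/*
 * drm_plane_state stores src_w/src_h as 16.16 fixed point, so only the
 * integer part is compared against the CRTC (output) size to detect
 * scaling.
 */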
static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
	u16 src_h = state->src_h >> 16;
	u16 src_w = state->src_w >> 16;

	DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
			 src_w, src_h, state->crtc_w, state->crtc_h);

	if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
		return true;

	return false;
}

static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
	struct sun4i_backend *backend = layer->backend;
	uint32_t format = state->fb->format->format;
	uint64_t modifier = state->fb->modifier;

	if (IS_ERR(backend->frontend))
		return false;

	if (!sun4i_frontend_format_is_supported(format, modifier))
		return false;

	if (!sun4i_backend_format_is_supported(format, modifier))
		return true;

	/*
	 * TODO: The backend alone allows 2x and 4x integer scaling, including
	 * support for an alpha component (which the frontend doesn't support).
	 * Use the backend directly instead of the frontend in this case, with
	 * another test to return false.
	 */

	if (sun4i_backend_plane_uses_scaler(state))
		return true;

	/*
	 * Here the format is supported by both the frontend and the backend
	 * and no frontend scaling is required, so use the backend directly.
	 */
	return false;
}

static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
					     bool *uses_frontend)
{
	if (sun4i_backend_plane_uses_frontend(state)) {
		*uses_frontend = true;
		return true;
	}

	*uses_frontend = false;

	/* Scaling is not supported without the frontend. */
	if (sun4i_backend_plane_uses_scaler(state))
		return false;

	return true;
}

static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
				       struct drm_crtc_state *old_state)
{
	u32 val;

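	/*
	 * Wait for the hardware to clear LOADCTL, i.e. for the register
	 * load triggered by the previous commit to finish, before the new
	 * state starts writing to the shadow registers. The poll runs
	 * every 100us with a 50ms timeout.
	 */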
	WARN_ON(regmap_read_poll_timeout(engine->regs,
					 SUN4I_BACKEND_REGBUFFCTL_REG,
					 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
					 100, 50000));
}

static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
				      struct drm_crtc_state *crtc_state)
{
	struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_device *drm = state->dev;
	struct drm_plane *plane;
	unsigned int num_planes = 0;
	unsigned int num_alpha_planes = 0;
	unsigned int num_frontend_planes = 0;
	unsigned int num_alpha_planes_max = 1;
	unsigned int num_yuv_planes = 0;
	unsigned int current_pipe = 0;
	unsigned int i;

	DRM_DEBUG_DRIVER("Starting checking our planes\n");

	if (!crtc_state->planes_changed)
		return 0;

	drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);
		struct sun4i_layer_state *layer_state =
			state_to_sun4i_layer_state(plane_state);
		struct drm_framebuffer *fb = plane_state->fb;
		struct drm_format_name_buf format_name;

		if (!sun4i_backend_plane_is_supported(plane_state,
						      &layer_state->uses_frontend))
			return -EINVAL;

		if (layer_state->uses_frontend) {
			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
					 plane->index);
			num_frontend_planes++;
		} else {
			if (fb->format->is_yuv) {
				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
				num_yuv_planes++;
			}
		}

		DRM_DEBUG_DRIVER("Plane FB format is %s\n",
				 drm_get_format_name(fb->format->format,
						     &format_name));
		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			num_alpha_planes++;

		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
				 plane_state->normalized_zpos);

		/* Sort our planes by Zpos */
		plane_states[plane_state->normalized_zpos] = plane_state;

		num_planes++;
	}

	/* All our planes were disabled, bail out */
	if (!num_planes)
		return 0;

	/*
	 * The hardware is a bit unusual here.
	 *
	 * Even though it supports 4 layers, it does the composition
	 * in two separate steps.
	 *
	 * The first one is assigning a layer to one of its two
	 * pipes. If more than one layer is assigned to the same pipe
	 * and pixels overlap, the pipe will take the pixel from the
	 * layer with the highest priority.
	 *
	 * The second step is the actual alpha blending, which takes
	 * the two pipes as input, and uses the potential alpha
	 * component to do the transparency between the two.
	 *
	 * This two-step scenario makes us unable to guarantee a
	 * robust alpha blending between the 4 layers in all
	 * situations, since this means that we need to have one layer
	 * with alpha at the lowest position of one of our two pipes.
	 *
	 * However, we cannot even do that on every platform, since
	 * the hardware has a bug where the lowest plane of the lowest
	 * pipe (pipe 0, priority 0), if it has any alpha, will
	 * discard the pixel data entirely and just display the pixels
	 * in the background color (black by default).
	 *
	 * This means that on the affected platforms, we effectively
	 * have only three valid configurations with alpha, all of
	 * them with the alpha plane at the lowest position of pipe 1,
	 * which can be 1, 2 or 3 depending on the number of planes and
	 * their zpos.
	 */

	/* For platforms that are not affected by the issue described above. */
	if (backend->quirks->supports_lowest_plane_alpha)
		num_alpha_planes_max++;

	if (num_alpha_planes > num_alpha_planes_max) {
		DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
		return -EINVAL;
	}

	/* We can't have an alpha plane at the lowest position */
	if (!backend->quirks->supports_lowest_plane_alpha &&
	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
		return -EINVAL;

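	/*
	 * Assign planes to pipes, walking up from the second-lowest
	 * plane: planes stay on pipe 0 until the first one with alpha
	 * (per-pixel or global) is found, and that plane plus everything
	 * above it goes to pipe 1, so the alpha blending happens between
	 * the two pipes.
	 */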
	for (i = 1; i < num_planes; i++) {
		struct drm_plane_state *p_state = plane_states[i];
		struct drm_framebuffer *fb = p_state->fb;
		struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

		/*
		 * The only alpha position is the lowest plane of the
		 * second pipe.
		 */
		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			current_pipe++;

		s_state->pipe = current_pipe;
	}

	/* We can only have a single YUV plane at a time */
	if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
		DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
		return -EINVAL;
	}

	if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
		DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
			 num_planes, num_alpha_planes, num_frontend_planes,
			 num_yuv_planes);

	return 0;
}

static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
{
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct sun4i_frontend *frontend = backend->frontend;

	if (!frontend)
		return;

	/*
	 * In a teardown scenario with the frontend involved, we have
	 * to keep the frontend enabled until the next vblank, and
	 * only then disable it.
	 *
	 * This is due to the fact that the backend will not take into
	 * account the new configuration (with the plane that used to
	 * be fed by the frontend now disabled) until we write to the
	 * commit bit and the hardware fetches the new configuration
	 * during the next vblank.
	 *
	 * So we keep the frontend around in order to prevent any
	 * visual artifacts.
	 */
	spin_lock(&backend->frontend_lock);
	if (backend->frontend_teardown) {
		sun4i_frontend_exit(frontend);
		backend->frontend_teardown = false;
	}
	spin_unlock(&backend->frontend_lock);
}

static int sun4i_backend_init_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);
	int ret;

	backend->sat_reset = devm_reset_control_get(dev, "sat");
	if (IS_ERR(backend->sat_reset)) {
		dev_err(dev, "Couldn't get the SAT reset line\n");
		return PTR_ERR(backend->sat_reset);
	}

	ret = reset_control_deassert(backend->sat_reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert the SAT reset line\n");
		return ret;
	}

	backend->sat_clk = devm_clk_get(dev, "sat");
	if (IS_ERR(backend->sat_clk)) {
		dev_err(dev, "Couldn't get our SAT clock\n");
		ret = PTR_ERR(backend->sat_clk);
		goto err_assert_reset;
	}

	ret = clk_prepare_enable(backend->sat_clk);
	if (ret) {
		dev_err(dev, "Couldn't enable the SAT clock\n");
		goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	reset_control_assert(backend->sat_reset);
	return ret;
}

static int sun4i_backend_free_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	clk_disable_unprepare(backend->sat_clk);
	reset_control_assert(backend->sat_reset);

	return 0;
}

/*
 * The display backend can take video output from the display frontend, or
 * the display enhancement unit on the A80, as input for one of its layers.
 * This relationship within the display pipeline is encoded in the device
 * tree with of_graph, and we use it here to figure out which backend, if
 * there are 2 or more, we are currently probing. The number would be in
 * the "reg" property of the upstream output port endpoint.
 */
static int sun4i_backend_of_get_id(struct device_node *node)
{
	struct device_node *ep, *remote;
	struct of_endpoint of_ep;

	/* Input port is 0, and we want the first endpoint. */
	ep = of_graph_get_endpoint_by_regs(node, 0, -1);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_endpoint(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	of_graph_parse_endpoint(remote, &of_ep);
	of_node_put(remote);
	return of_ep.id;
}

/* TODO: This needs to take multiple pipelines into account */
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
							   struct device_node *node)
{
	struct device_node *port, *ep, *remote;
	struct sun4i_frontend *frontend;

	port = of_graph_get_port_by_id(node, 0);
	if (!port)
		return ERR_PTR(-EINVAL);

	for_each_available_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;
		of_node_put(remote);

		/* does this node match any registered engines? */
		list_for_each_entry(frontend, &drv->frontend_list, list) {
			if (remote == frontend->node) {
				of_node_put(port);
				of_node_put(ep);
				return frontend;
			}
		}
	}
	of_node_put(port);
	return ERR_PTR(-EINVAL);
}

static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
	.atomic_begin = sun4i_backend_atomic_begin,
	.atomic_check = sun4i_backend_atomic_check,
	.commit = sun4i_backend_commit,
	.layers_init = sun4i_layers_init,
	.apply_color_correction = sun4i_backend_apply_color_correction,
	.disable_color_correction = sun4i_backend_disable_color_correction,
	.vblank_quirk = sun4i_backend_vblank_quirk,
};

static const struct regmap_config sun4i_backend_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x5800,
};

static int sun4i_backend_bind(struct device *dev, struct device *master,
			      void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct sun4i_drv *drv = drm->dev_private;
	struct sun4i_backend *backend;
	const struct sun4i_backend_quirks *quirks;
	struct resource *res;
	void __iomem *regs;
	int i, ret;

	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
	if (!backend)
		return -ENOMEM;
	dev_set_drvdata(dev, backend);
	spin_lock_init(&backend->frontend_lock);

	if (of_find_property(dev->of_node, "interconnects", NULL)) {
		/*
		 * This assumes we have the same DMA constraints for all the
		 * devices in our pipeline (all the backends, but also the
		 * frontends). This sounds bad, but it has always been the case
		 * for us, and DRM doesn't do per-device allocation either, so
		 * we would need to fix DRM first...
		 */
		ret = of_dma_configure(drm->dev, dev->of_node, true);
		if (ret)
			return ret;
	} else {
		/*
		 * If we don't have the interconnect property, most likely
		 * because of an old DT, we need to set the DMA offset by hand
		 * on our device since the RAM mapping is at 0 for the DMA bus,
		 * unlike the CPU.
		 *
		 * XXX(hch): this has no business in a driver and needs to move
		 * to the device tree.
		 *
		 * The second of two subsequent calls to dma_direct_set_offset()
		 * returns -EINVAL. Unfortunately, this happens when we have two
		 * backends in the system, and would result in the driver
		 * reporting an error while it has been set up properly before.
		 * Ignore EINVAL, but it should really be removed eventually.
		 */
		ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
		if (ret && ret != -EINVAL)
			return ret;
	}

	backend->engine.node = dev->of_node;
	backend->engine.ops = &sun4i_backend_engine_ops;
	backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
	if (backend->engine.id < 0)
		return backend->engine.id;

	backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
	if (IS_ERR(backend->frontend))
		dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	backend->reset = devm_reset_control_get(dev, NULL);
	if (IS_ERR(backend->reset)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(backend->reset);
	}

	ret = reset_control_deassert(backend->reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert our reset line\n");
		return ret;
	}

	backend->bus_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(backend->bus_clk)) {
		dev_err(dev, "Couldn't get the backend bus clock\n");
		ret = PTR_ERR(backend->bus_clk);
		goto err_assert_reset;
	}
	clk_prepare_enable(backend->bus_clk);

	backend->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(backend->mod_clk)) {
		dev_err(dev, "Couldn't get the backend module clock\n");
		ret = PTR_ERR(backend->mod_clk);
		goto err_disable_bus_clk;
	}

	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		goto err_disable_bus_clk;
	}

	clk_prepare_enable(backend->mod_clk);

	backend->ram_clk = devm_clk_get(dev, "ram");
	if (IS_ERR(backend->ram_clk)) {
		dev_err(dev, "Couldn't get the backend RAM clock\n");
		ret = PTR_ERR(backend->ram_clk);
		goto err_disable_mod_clk;
	}
	clk_prepare_enable(backend->ram_clk);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend")) {
		ret = sun4i_backend_init_sat(dev);
		if (ret) {
			dev_err(dev, "Couldn't init SAT resources\n");
			goto err_disable_ram_clk;
		}
	}

	backend->engine.regs = devm_regmap_init_mmio(dev, regs,
						     &sun4i_backend_regmap_config);
	if (IS_ERR(backend->engine.regs)) {
		dev_err(dev, "Couldn't create the backend regmap\n");
		return PTR_ERR(backend->engine.regs);
	}

	list_add_tail(&backend->engine.list, &drv->engine_list);

	/*
	 * Many of the backend's layer configuration registers have
	 * undefined default values. This poses a risk as we use
	 * regmap_update_bits in some places, and don't overwrite
	 * the whole register.
	 *
	 * Clear the registers here to have something predictable.
	 */
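	/*
	 * The 0x800-0xffc range appears to cover the per-layer and input
	 * (framebuffer, YUV) configuration registers touched by the
	 * helpers above.
	 */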
	for (i = 0x800; i < 0x1000; i += 4)
		regmap_write(backend->engine.regs, i, 0);

	/* Disable registers autoloading */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);

	/* Enable the backend */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
		     SUN4I_BACKEND_MODCTL_DEBE_EN |
		     SUN4I_BACKEND_MODCTL_START_CTL);

	/* Set output selection if needed */
	quirks = of_device_get_match_data(dev);
	if (quirks->needs_output_muxing) {
		/*
		 * We assume there is no dynamic muxing of backends
		 * and TCONs, so we select the output with the same ID
		 * as the backend.
		 *
		 * While dynamic selection might be interesting, since
		 * the CRTC is tied to the TCON, while the layers are
		 * tied to the backends, this means we would need to
		 * switch between groups of layers. There might not be
		 * a way to represent this constraint in DRM.
		 */
		regmap_update_bits(backend->engine.regs,
				   SUN4I_BACKEND_MODCTL_REG,
				   SUN4I_BACKEND_MODCTL_OUT_SEL,
				   (backend->engine.id
				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
	}

	backend->quirks = quirks;

	return 0;

err_disable_ram_clk:
	clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
	clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
	reset_control_assert(backend->reset);
	return ret;
}

static void sun4i_backend_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	list_del(&backend->engine.list);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend"))
		sun4i_backend_free_sat(dev);

	clk_disable_unprepare(backend->ram_clk);
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
	clk_disable_unprepare(backend->bus_clk);
	reset_control_assert(backend->reset);
}

static const struct component_ops sun4i_backend_ops = {
	.bind = sun4i_backend_bind,
	.unbind = sun4i_backend_unbind,
};

static int sun4i_backend_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &sun4i_backend_ops);
}

static int sun4i_backend_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sun4i_backend_ops);

	return 0;
}

static const struct sun4i_backend_quirks sun4i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun7i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
	.supports_lowest_plane_alpha = true,
};

static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};

static const struct of_device_id sun4i_backend_of_table[] = {
	{
		.compatible = "allwinner,sun4i-a10-display-backend",
		.data = &sun4i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun5i-a13-display-backend",
		.data = &sun5i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun6i-a31-display-backend",
		.data = &sun6i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun7i-a20-display-backend",
		.data = &sun7i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a23-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a33-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun9i-a80-display-backend",
		.data = &sun9i_backend_quirks,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);

static struct platform_driver sun4i_backend_platform_driver = {
	.probe = sun4i_backend_probe,
	.remove = sun4i_backend_remove,
	.driver = {
		.name = "sun4i-backend",
		.of_match_table = sun4i_backend_of_table,
	},
};
module_platform_driver(sun4i_backend_platform_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");