// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>

#include "drm.h"
#include "dc.h"
#include "plane.h"

static const u32 tegra_shared_plane_formats[] = {
        DRM_FORMAT_ARGB1555,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_RGBA5551,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        /* new on Tegra114 */
        DRM_FORMAT_ABGR4444,
        DRM_FORMAT_ABGR1555,
        DRM_FORMAT_BGRA5551,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_RGBX5551,
        DRM_FORMAT_XBGR1555,
        DRM_FORMAT_BGRX5551,
        DRM_FORMAT_BGR565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        /* planar formats */
        DRM_FORMAT_UYVY,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YUV420,
        DRM_FORMAT_YUV422,
};

static const u64 tegra_shared_plane_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
        DRM_FORMAT_MOD_INVALID
};

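/*
 * Registers for the shared windows live in a per-window aperture of the
 * display controller's register space. Translate the global register offsets
 * used below into offsets relative to the aperture base recorded in
 * plane->offset; offsets outside the known ranges trigger a warning and are
 * passed through unmodified.
 */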
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
                                              unsigned int offset)
{
        if (offset >= 0x500 && offset <= 0x581) {
                offset = 0x000 + (offset - 0x500);
                return plane->offset + offset;
        }

        if (offset >= 0x700 && offset <= 0x73c) {
                offset = 0x180 + (offset - 0x700);
                return plane->offset + offset;
        }

        if (offset >= 0x800 && offset <= 0x83e) {
                offset = 0x1c0 + (offset - 0x800);
                return plane->offset + offset;
        }

        dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);

        return plane->offset + offset;
}

static inline u32 tegra_plane_readl(struct tegra_plane *plane,
                                    unsigned int offset)
{
        return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}

static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
                                      unsigned int offset)
{
        tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}

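/*
 * Window groups are shared between display heads, so enabling and disabling
 * them is reference counted: the first user resumes the parent host1x client
 * and releases the window group reset, and the last user asserts the reset
 * again and suspends the client.
 */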
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
        int err = 0;

        mutex_lock(&wgrp->lock);

        if (wgrp->usecount == 0) {
                err = host1x_client_resume(wgrp->parent);
                if (err < 0) {
                        dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
                        goto unlock;
                }

                reset_control_deassert(wgrp->rst);
        }

        wgrp->usecount++;

unlock:
        mutex_unlock(&wgrp->lock);
        return err;
}

static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
{
        int err;

        mutex_lock(&wgrp->lock);

        if (wgrp->usecount == 1) {
                err = reset_control_assert(wgrp->rst);
                if (err < 0) {
                        pr_err("failed to assert reset for window group %u\n",
                               wgrp->index);
                }

                host1x_client_suspend(wgrp->parent);
        }

        wgrp->usecount--;
        mutex_unlock(&wgrp->lock);
}

int tegra_display_hub_prepare(struct tegra_display_hub *hub)
{
        unsigned int i;

        /*
         * XXX Enabling/disabling windowgroups needs to happen when the owner
         * display controller is disabled. There's currently no good point at
         * which this could be executed, so unconditionally enable all window
         * groups for now.
         */
        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];

                /* Skip orphaned window group whose parent DC is disabled */
                if (wgrp->parent)
                        tegra_windowgroup_enable(wgrp);
        }

        return 0;
}

void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
{
        unsigned int i;

        /*
         * XXX Remove this once window groups can be enabled and disabled at
         * a finer granularity.
         */
        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];

                /* Skip orphaned window group whose parent DC is disabled */
                if (wgrp->parent)
                        tegra_windowgroup_disable(wgrp);
        }
}

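/*
 * tegra_shared_plane_update() requests that the window's shadowed register
 * state be latched, and tegra_shared_plane_activate() requests that the
 * latched state be applied. Both poll DC_CMD_STATE_CONTROL for up to one
 * second until the hardware acknowledges the request by clearing the bits.
 */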
static void tegra_shared_plane_update(struct tegra_plane *plane)
{
        struct tegra_dc *dc = plane->dc;
        unsigned long timeout;
        u32 mask, value;

        mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
        tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

        timeout = jiffies + msecs_to_jiffies(1000);

        while (time_before(jiffies, timeout)) {
                value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
                if ((value & mask) == 0)
                        break;

                usleep_range(100, 400);
        }
}

static void tegra_shared_plane_activate(struct tegra_plane *plane)
{
        struct tegra_dc *dc = plane->dc;
        unsigned long timeout;
        u32 mask, value;

        mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
        tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

        timeout = jiffies + msecs_to_jiffies(1000);

        while (time_before(jiffies, timeout)) {
                value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
                if ((value & mask) == 0)
                        break;

                usleep_range(100, 400);
        }
}

static unsigned int
tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
{
        unsigned int offset =
                tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);

        return tegra_dc_readl(dc, offset) & OWNER_MASK;
}

static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
                                       struct tegra_plane *plane)
{
        struct device *dev = dc->dev;

        if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
                if (plane->dc == dc)
                        return true;

                dev_WARN(dev, "head %u owns window %u but is not attached\n",
                         dc->pipe, plane->index);
        }

        return false;
}

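/*
 * Assign the window to the head given by @new, or detach it from its current
 * owner if @new is NULL. Ownership is tracked in the OWNER field of
 * DC_WIN_CORE_WINDOWGROUP_SET_CONTROL; a window that is currently owned by a
 * different head cannot be reassigned and yields -EBUSY.
 */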
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
                                        struct tegra_dc *new)
{
        unsigned int offset =
                tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
        struct tegra_dc *old = plane->dc, *dc = new ? new : old;
        struct device *dev = new ? new->dev : old->dev;
        unsigned int owner, index = plane->index;
        u32 value;

        value = tegra_dc_readl(dc, offset);
        owner = value & OWNER_MASK;

        if (new && (owner != OWNER_MASK && owner != new->pipe)) {
                dev_WARN(dev, "window %u owned by head %u\n", index, owner);
                return -EBUSY;
        }

        /*
         * This seems to happen whenever the head has been disabled with one
         * or more windows being active. This is harmless because we'll just
         * reassign the window to the new head anyway.
         */
        if (old && owner == OWNER_MASK)
                dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
                        old->pipe, owner);

        value &= ~OWNER_MASK;

        if (new)
                value |= OWNER(new->pipe);
        else
                value |= OWNER_MASK;

        tegra_dc_writel(dc, value, offset);

        plane->dc = new;

        return 0;
}

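/*
 * Take ownership of the window for the given display controller and program
 * a default configuration: line buffering, fetch metering, latency control,
 * pipe metering, mempool allocation and thread grouping. The new settings
 * are then latched and activated.
 */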
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
                                         struct tegra_plane *plane)
{
        u32 value;
        int err;

        if (!tegra_dc_owns_shared_plane(dc, plane)) {
                err = tegra_shared_plane_set_owner(plane, dc);
                if (err < 0)
                        return;
        }

        value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
        value |= MODE_FOUR_LINES;
        tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

        value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
        value = SLOTS(1);
        tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

        /* disable watermark */
        value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
        value &= ~LATENCY_CTL_MODE_ENABLE;
        tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

        value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
        value |= WATERMARK_MASK;
        tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

        /* pipe meter */
        value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
        value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
        tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

        /* mempool entries */
        value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
        value = MEMPOOL_ENTRIES(0x331);
        tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

        value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
        value &= ~THREAD_NUM_MASK;
        value |= THREAD_NUM(plane->base.index);
        value |= THREAD_GROUP_ENABLE;
        tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

        tegra_shared_plane_update(plane);
        tegra_shared_plane_activate(plane);
}

static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
                                         struct tegra_plane *plane)
{
        tegra_shared_plane_set_owner(plane, NULL);
}

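/*
 * Validate the requested plane state: the pixel format must be supported,
 * block-linear framebuffers are only accepted on hardware that supports
 * them, and the U and V planes of multi-planar framebuffers must use the
 * same pitch.
 */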
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
                                           struct drm_plane_state *state)
{
        struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
        struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
        struct tegra_bo_tiling *tiling = &plane_state->tiling;
        struct tegra_dc *dc = to_tegra_dc(state->crtc);
        int err;

        /* no need for further checks if the plane is being disabled */
        if (!state->crtc || !state->fb)
                return 0;

        err = tegra_plane_format(state->fb->format->format,
                                 &plane_state->format,
                                 &plane_state->swap);
        if (err < 0)
                return err;

        err = tegra_fb_get_tiling(state->fb, tiling);
        if (err < 0)
                return err;

        if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
            !dc->soc->supports_block_linear) {
                DRM_ERROR("hardware doesn't support block linear mode\n");
                return -EINVAL;
        }

        /*
         * Tegra doesn't support different strides for U and V planes so we
         * error out if the user tries to display a framebuffer with such a
         * configuration.
         */
        if (state->fb->format->num_planes > 2) {
                if (state->fb->pitches[2] != state->fb->pitches[1]) {
                        DRM_ERROR("unsupported UV-plane configuration\n");
                        return -EINVAL;
                }
        }

        /* XXX scaling is not yet supported, add a check here */

        err = tegra_plane_state_add(&tegra->base, state);
        if (err < 0)
                return err;

        return 0;
}

static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
                                              struct drm_plane_state *old_state)
{
        struct tegra_plane *p = to_tegra_plane(plane);
        struct tegra_dc *dc;
        u32 value;
        int err;

        /* nothing to do if the plane was not attached to a CRTC */
        if (!old_state || !old_state->crtc)
                return;

        dc = to_tegra_dc(old_state->crtc);

        err = host1x_client_resume(&dc->client);
        if (err < 0) {
                dev_err(dc->dev, "failed to resume: %d\n", err);
                return;
        }

        /*
         * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
         * on planes that are already disabled. Make sure we fall back to the
         * head for this particular state instead of crashing.
         */
        if (WARN_ON(p->dc == NULL))
                p->dc = dc;

        value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
        value &= ~WIN_ENABLE;
        tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

        tegra_dc_remove_shared_plane(dc, p);

        host1x_client_suspend(&dc->client);
}

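/*
 * Program the window for the new plane state: assign it to the CRTC's
 * display controller, set up blending, bypass scaling and compression, and
 * write the framebuffer address, geometry and surface layout.
 */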
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
                                             struct drm_plane_state *old_state)
{
        struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
        struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
        unsigned int zpos = plane->state->normalized_zpos;
        struct drm_framebuffer *fb = plane->state->fb;
        struct tegra_plane *p = to_tegra_plane(plane);
        dma_addr_t base;
        u32 value;
        int err;

        /* nothing to do if the plane is being disabled */
        if (!plane->state->crtc || !plane->state->fb)
                return;

        if (!plane->state->visible) {
                tegra_shared_plane_atomic_disable(plane, old_state);
                return;
        }

        err = host1x_client_resume(&dc->client);
        if (err < 0) {
                dev_err(dc->dev, "failed to resume: %d\n", err);
                return;
        }

        tegra_dc_assign_shared_plane(dc, p);

        tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

        /* blending */
        value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
                BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
                BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
        tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

        value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
                BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
                BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
        tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

        value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
        tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

        /* bypass scaling */
        value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
        tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

        value = INPUT_SCALER_VBYPASS | INPUT_SCALER_HBYPASS;
        tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

        /* disable compression */
        tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

        base = state->iova[0] + fb->offsets[0];

        tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
        tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

        value = V_POSITION(plane->state->crtc_y) |
                H_POSITION(plane->state->crtc_x);
        tegra_plane_writel(p, value, DC_WIN_POSITION);

        value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
        tegra_plane_writel(p, value, DC_WIN_SIZE);

        value = WIN_ENABLE | COLOR_EXPAND;
        tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

        value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
        tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

        tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
        tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

        value = PITCH(fb->pitches[0]);
        tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

        value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL;
        tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

        value = OFFSET_X(plane->state->src_y >> 16) |
                OFFSET_Y(plane->state->src_x >> 16);
        tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

        if (dc->soc->supports_block_linear) {
                unsigned long height = state->tiling.value;

                /* XXX */
                switch (state->tiling.mode) {
                case TEGRA_BO_TILING_MODE_PITCH:
                        value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
                                DC_WINBUF_SURFACE_KIND_PITCH;
                        break;

                /* XXX not supported on Tegra186 and later */
                case TEGRA_BO_TILING_MODE_TILED:
                        value = DC_WINBUF_SURFACE_KIND_TILED;
                        break;

                case TEGRA_BO_TILING_MODE_BLOCK:
                        value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
                                DC_WINBUF_SURFACE_KIND_BLOCK;
                        break;
                }

                tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
        }

        /* disable gamut CSC */
        value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
        value &= ~CONTROL_CSC_ENABLE;
        tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

        host1x_client_suspend(&dc->client);
}

static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
        .prepare_fb = tegra_plane_prepare_fb,
        .cleanup_fb = tegra_plane_cleanup_fb,
        .atomic_check = tegra_shared_plane_atomic_check,
        .atomic_update = tegra_shared_plane_atomic_update,
        .atomic_disable = tegra_shared_plane_atomic_disable,
};

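/*
 * Create a shared overlay plane backed by window @index of window group
 * @wgrp. The register apertures of the shared windows start at 0x0a00 and
 * are spaced 0x0300 apart; the resulting base offset is recorded for use by
 * tegra_plane_offset().
 */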
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
                                            struct tegra_dc *dc,
                                            unsigned int wgrp,
                                            unsigned int index)
{
        enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_display_hub *hub = tegra->hub;
        /* planes can be assigned to arbitrary CRTCs */
        unsigned int possible_crtcs = 0x7;
        struct tegra_shared_plane *plane;
        unsigned int num_formats;
        const u64 *modifiers;
        struct drm_plane *p;
        const u32 *formats;
        int err;

        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
        if (!plane)
                return ERR_PTR(-ENOMEM);

        plane->base.offset = 0x0a00 + 0x0300 * index;
        plane->base.index = index;

        plane->wgrp = &hub->wgrps[wgrp];
        plane->wgrp->parent = &dc->client;

        p = &plane->base.base;

        num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
        formats = tegra_shared_plane_formats;
        modifiers = tegra_shared_plane_modifiers;

        err = drm_universal_plane_init(drm, p, possible_crtcs,
                                       &tegra_plane_funcs, formats,
                                       num_formats, modifiers, type, NULL);
        if (err < 0) {
                kfree(plane);
                return ERR_PTR(err);
        }

        drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
        drm_plane_create_zpos_property(p, 0, 0, 255);

        return p;
}

static struct drm_private_state *
tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
{
        struct tegra_display_hub_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

        return &state->base;
}

static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
                                            struct drm_private_state *state)
{
        struct tegra_display_hub_state *hub_state =
                to_tegra_display_hub_state(state);

        kfree(hub_state);
}

static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
        .atomic_duplicate_state = tegra_display_hub_duplicate_state,
        .atomic_destroy_state = tegra_display_hub_destroy_state,
};

static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
                            struct drm_atomic_state *state)
{
        struct drm_private_state *priv;

        priv = drm_atomic_get_private_obj_state(state, &hub->base);
        if (IS_ERR(priv))
                return ERR_CAST(priv);

        return to_tegra_display_hub_state(priv);
}

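/*
 * Track which display controller requires the highest pixel clock in the new
 * atomic state so that the hub display clock can be reparented to it in
 * tegra_display_hub_atomic_commit().
 */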
int tegra_display_hub_atomic_check(struct drm_device *drm,
                                   struct drm_atomic_state *state)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_display_hub_state *hub_state;
        struct drm_crtc_state *old, *new;
        struct drm_crtc *crtc;
        unsigned int i;

        if (!tegra->hub)
                return 0;

        hub_state = tegra_display_hub_get_state(tegra->hub, state);
        if (IS_ERR(hub_state))
                return PTR_ERR(hub_state);

        /*
         * The display hub display clock needs to be fed by the display clock
         * with the highest frequency to ensure proper functioning of all the
         * displays.
         *
         * Note that this isn't used before Tegra186, but it doesn't hurt and
         * conditionalizing it would make the code less clean.
         */
        for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
                struct tegra_dc_state *dc = to_dc_state(new);

                if (new->active) {
                        if (!hub_state->clk || dc->pclk > hub_state->rate) {
                                hub_state->dc = to_tegra_dc(dc->base.crtc);
                                hub_state->clk = hub_state->dc->clk;
                                hub_state->rate = dc->pclk;
                        }
                }
        }

        return 0;
}

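/*
 * Program the common IHUB settings (clear the latency event and set up fetch
 * metering) on the display controller selected during the atomic check and
 * latch them using a COMMON_UPDATE/COMMON_ACTREQ sequence.
 */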
static void tegra_display_hub_update(struct tegra_dc *dc)
{
        u32 value;
        int err;

        err = host1x_client_resume(&dc->client);
        if (err < 0) {
                dev_err(dc->dev, "failed to resume: %d\n", err);
                return;
        }

        value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
        value &= ~LATENCY_EVENT;
        tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

        value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
        value = CURS_SLOTS(1) | WGRP_SLOTS(1);
        tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

        tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
        tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
        tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
        tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

        host1x_client_suspend(&dc->client);
}

void tegra_display_hub_atomic_commit(struct drm_device *drm,
                                     struct drm_atomic_state *state)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_display_hub *hub = tegra->hub;
        struct tegra_display_hub_state *hub_state;
        struct device *dev = hub->client.dev;
        int err;

        hub_state = to_tegra_display_hub_state(hub->base.state);

        if (hub_state->clk) {
                err = clk_set_rate(hub_state->clk, hub_state->rate);
                if (err < 0)
                        dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
                                hub_state->clk, hub_state->rate);

                err = clk_set_parent(hub->clk_disp, hub_state->clk);
                if (err < 0)
                        dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
                                hub->clk_disp, hub_state->clk, err);
        }

        if (hub_state->dc)
                tegra_display_hub_update(hub_state->dc);
}

static int tegra_display_hub_init(struct host1x_client *client)
{
        struct tegra_display_hub *hub = to_tegra_display_hub(client);
        struct drm_device *drm = dev_get_drvdata(client->host);
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_display_hub_state *state;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        drm_atomic_private_obj_init(drm, &hub->base, &state->base,
                                    &tegra_display_hub_state_funcs);

        tegra->hub = hub;

        return 0;
}

static int tegra_display_hub_exit(struct host1x_client *client)
{
        struct drm_device *drm = dev_get_drvdata(client->host);
        struct tegra_drm *tegra = drm->dev_private;

        drm_atomic_private_obj_fini(&tegra->hub->base);
        tegra->hub = NULL;

        return 0;
}

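/*
 * Runtime PM helpers for the display hub: suspend asserts the hub reset and
 * gates the per-head, hub, DSC and display clocks; resume reverses that
 * sequence and unwinds partially enabled clocks on failure.
 */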
static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
        struct tegra_display_hub *hub = to_tegra_display_hub(client);
        struct device *dev = client->dev;
        unsigned int i = hub->num_heads;
        int err;

        err = reset_control_assert(hub->rst);
        if (err < 0)
                return err;

        while (i--)
                clk_disable_unprepare(hub->clk_heads[i]);

        clk_disable_unprepare(hub->clk_hub);
        clk_disable_unprepare(hub->clk_dsc);
        clk_disable_unprepare(hub->clk_disp);

        pm_runtime_put_sync(dev);

        return 0;
}

static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
        struct tegra_display_hub *hub = to_tegra_display_hub(client);
        struct device *dev = client->dev;
        unsigned int i;
        int err;

        err = pm_runtime_resume_and_get(dev);
        if (err < 0) {
                dev_err(dev, "failed to get runtime PM: %d\n", err);
                return err;
        }

        err = clk_prepare_enable(hub->clk_disp);
        if (err < 0)
                goto put_rpm;

        err = clk_prepare_enable(hub->clk_dsc);
        if (err < 0)
                goto disable_disp;

        err = clk_prepare_enable(hub->clk_hub);
        if (err < 0)
                goto disable_dsc;

        for (i = 0; i < hub->num_heads; i++) {
                err = clk_prepare_enable(hub->clk_heads[i]);
                if (err < 0)
                        goto disable_heads;
        }

        err = reset_control_deassert(hub->rst);
        if (err < 0)
                goto disable_heads;

        return 0;

disable_heads:
        while (i--)
                clk_disable_unprepare(hub->clk_heads[i]);

        clk_disable_unprepare(hub->clk_hub);
disable_dsc:
        clk_disable_unprepare(hub->clk_dsc);
disable_disp:
        clk_disable_unprepare(hub->clk_disp);
put_rpm:
        pm_runtime_put_sync(dev);
        return err;
}

static const struct host1x_client_ops tegra_display_hub_ops = {
        .init = tegra_display_hub_init,
        .exit = tegra_display_hub_exit,
        .suspend = tegra_display_hub_runtime_suspend,
        .resume = tegra_display_hub_runtime_resume,
};

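/*
 * Probe acquires the hub clocks and resets (one reset per window group and
 * one "dc" clock per head child node), registers the hub as a host1x client
 * and populates the child display controller devices.
 */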
static int tegra_display_hub_probe(struct platform_device *pdev)
{
        struct device_node *child = NULL;
        struct tegra_display_hub *hub;
        struct clk *clk;
        unsigned int i;
        int err;

        hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
        if (!hub)
                return -ENOMEM;

        hub->soc = of_device_get_match_data(&pdev->dev);

        hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
        if (IS_ERR(hub->clk_disp)) {
                err = PTR_ERR(hub->clk_disp);
                return err;
        }

        if (hub->soc->supports_dsc) {
                hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
                if (IS_ERR(hub->clk_dsc)) {
                        err = PTR_ERR(hub->clk_dsc);
                        return err;
                }
        }

        hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
        if (IS_ERR(hub->clk_hub)) {
                err = PTR_ERR(hub->clk_hub);
                return err;
        }

        hub->rst = devm_reset_control_get(&pdev->dev, "misc");
        if (IS_ERR(hub->rst)) {
                err = PTR_ERR(hub->rst);
                return err;
        }

        hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
                                  sizeof(*hub->wgrps), GFP_KERNEL);
        if (!hub->wgrps)
                return -ENOMEM;

        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];
                char id[8];

                snprintf(id, sizeof(id), "wgrp%u", i);
                mutex_init(&wgrp->lock);
                wgrp->usecount = 0;
                wgrp->index = i;

                wgrp->rst = devm_reset_control_get(&pdev->dev, id);
                if (IS_ERR(wgrp->rst))
                        return PTR_ERR(wgrp->rst);

                err = reset_control_assert(wgrp->rst);
                if (err < 0)
                        return err;
        }

        hub->num_heads = of_get_child_count(pdev->dev.of_node);

        hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
                                      GFP_KERNEL);
        if (!hub->clk_heads)
                return -ENOMEM;

        for (i = 0; i < hub->num_heads; i++) {
                child = of_get_next_child(pdev->dev.of_node, child);
                if (!child) {
                        dev_err(&pdev->dev, "failed to find node for head %u\n",
                                i);
                        return -ENODEV;
                }

                clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
                if (IS_ERR(clk)) {
                        dev_err(&pdev->dev, "failed to get clock for head %u\n",
                                i);
                        of_node_put(child);
                        return PTR_ERR(clk);
                }

                hub->clk_heads[i] = clk;
        }

        of_node_put(child);

        /* XXX: enable clock across reset? */
        err = reset_control_assert(hub->rst);
        if (err < 0)
                return err;

        platform_set_drvdata(pdev, hub);
        pm_runtime_enable(&pdev->dev);

        INIT_LIST_HEAD(&hub->client.list);
        hub->client.ops = &tegra_display_hub_ops;
        hub->client.dev = &pdev->dev;

        err = host1x_client_register(&hub->client);
        if (err < 0)
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
                        err);

        err = devm_of_platform_populate(&pdev->dev);
        if (err < 0)
                goto unregister;

        return err;

unregister:
        host1x_client_unregister(&hub->client);
        pm_runtime_disable(&pdev->dev);
        return err;
}

static int tegra_display_hub_remove(struct platform_device *pdev)
{
        struct tegra_display_hub *hub = platform_get_drvdata(pdev);
        unsigned int i;
        int err;

        err = host1x_client_unregister(&hub->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
                        err);
        }

        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];

                mutex_destroy(&wgrp->lock);
        }

        pm_runtime_disable(&pdev->dev);

        return err;
}

static const struct tegra_display_hub_soc tegra186_display_hub = {
        .num_wgrps = 6,
        .supports_dsc = true,
};

static const struct tegra_display_hub_soc tegra194_display_hub = {
        .num_wgrps = 6,
        .supports_dsc = false,
};

static const struct of_device_id tegra_display_hub_of_match[] = {
        {
                .compatible = "nvidia,tegra194-display",
                .data = &tegra194_display_hub
        }, {
                .compatible = "nvidia,tegra186-display",
                .data = &tegra186_display_hub
        }, {
                /* sentinel */
        }
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);

struct platform_driver tegra_display_hub_driver = {
        .driver = {
                .name = "tegra-display-hub",
                .of_match_table = tegra_display_hub_of_match,
        },
        .probe = tegra_display_hub_probe,
        .remove = tegra_display_hub_remove,
};