• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/delay.h>
8 #include <linux/host1x.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/of_graph.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/reset.h>
16 
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_fourcc.h>
20 #include <drm/drm_probe_helper.h>
21 
22 #include "drm.h"
23 #include "dc.h"
24 #include "plane.h"
25 
26 #define NFB 24
27 
/*
 * Pixel formats supported by shared (window group) planes; exposed to
 * userspace via drm_universal_plane_init() in tegra_shared_plane_create().
 */
static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* planar formats */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};
51 
/*
 * Framebuffer modifiers supported by shared planes: linear plus the NVIDIA
 * 16Bx2 block-linear layouts (block heights 1..32), each optionally combined
 * with the dGPU sector layout. Terminated by DRM_FORMAT_MOD_INVALID.
 */
static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	/*
	 * The GPU sector layout is only supported on Tegra194, but these will
	 * be filtered out later on by ->format_mod_supported() on SoCs where
	 * it isn't supported.
	 */
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	/* sentinel */
	DRM_FORMAT_MOD_INVALID
};
74 
tegra_plane_offset(struct tegra_plane * plane,unsigned int offset)75 static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
76 					      unsigned int offset)
77 {
78 	if (offset >= 0x500 && offset <= 0x581) {
79 		offset = 0x000 + (offset - 0x500);
80 		return plane->offset + offset;
81 	}
82 
83 	if (offset >= 0x700 && offset <= 0x73c) {
84 		offset = 0x180 + (offset - 0x700);
85 		return plane->offset + offset;
86 	}
87 
88 	if (offset >= 0x800 && offset <= 0x83e) {
89 		offset = 0x1c0 + (offset - 0x800);
90 		return plane->offset + offset;
91 	}
92 
93 	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
94 
95 	return plane->offset + offset;
96 }
97 
tegra_plane_readl(struct tegra_plane * plane,unsigned int offset)98 static inline u32 tegra_plane_readl(struct tegra_plane *plane,
99 				    unsigned int offset)
100 {
101 	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
102 }
103 
/* Write a shared plane register, translating the offset for this plane. */
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	unsigned int reg = tegra_plane_offset(plane, offset);

	tegra_dc_writel(plane->dc, value, reg);
}
109 
tegra_windowgroup_enable(struct tegra_windowgroup * wgrp)110 static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
111 {
112 	int err = 0;
113 
114 	mutex_lock(&wgrp->lock);
115 
116 	if (wgrp->usecount == 0) {
117 		err = host1x_client_resume(wgrp->parent);
118 		if (err < 0) {
119 			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
120 			goto unlock;
121 		}
122 
123 		reset_control_deassert(wgrp->rst);
124 	}
125 
126 	wgrp->usecount++;
127 
128 unlock:
129 	mutex_unlock(&wgrp->lock);
130 	return err;
131 }
132 
tegra_windowgroup_disable(struct tegra_windowgroup * wgrp)133 static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
134 {
135 	int err;
136 
137 	mutex_lock(&wgrp->lock);
138 
139 	if (wgrp->usecount == 1) {
140 		err = reset_control_assert(wgrp->rst);
141 		if (err < 0) {
142 			pr_err("failed to assert reset for window group %u\n",
143 			       wgrp->index);
144 		}
145 
146 		host1x_client_suspend(wgrp->parent);
147 	}
148 
149 	wgrp->usecount--;
150 	mutex_unlock(&wgrp->lock);
151 }
152 
tegra_display_hub_prepare(struct tegra_display_hub * hub)153 int tegra_display_hub_prepare(struct tegra_display_hub *hub)
154 {
155 	unsigned int i;
156 
157 	/*
158 	 * XXX Enabling/disabling windowgroups needs to happen when the owner
159 	 * display controller is disabled. There's currently no good point at
160 	 * which this could be executed, so unconditionally enable all window
161 	 * groups for now.
162 	 */
163 	for (i = 0; i < hub->soc->num_wgrps; i++) {
164 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
165 
166 		/* Skip orphaned window group whose parent DC is disabled */
167 		if (wgrp->parent)
168 			tegra_windowgroup_enable(wgrp);
169 	}
170 
171 	return 0;
172 }
173 
tegra_display_hub_cleanup(struct tegra_display_hub * hub)174 void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
175 {
176 	unsigned int i;
177 
178 	/*
179 	 * XXX Remove this once window groups can be more fine-grainedly
180 	 * enabled and disabled.
181 	 */
182 	for (i = 0; i < hub->soc->num_wgrps; i++) {
183 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
184 
185 		/* Skip orphaned window group whose parent DC is disabled */
186 		if (wgrp->parent)
187 			tegra_windowgroup_disable(wgrp);
188 	}
189 }
190 
tegra_shared_plane_update(struct tegra_plane * plane)191 static void tegra_shared_plane_update(struct tegra_plane *plane)
192 {
193 	struct tegra_dc *dc = plane->dc;
194 	unsigned long timeout;
195 	u32 mask, value;
196 
197 	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
198 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
199 
200 	timeout = jiffies + msecs_to_jiffies(1000);
201 
202 	while (time_before(jiffies, timeout)) {
203 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
204 		if ((value & mask) == 0)
205 			break;
206 
207 		usleep_range(100, 400);
208 	}
209 }
210 
tegra_shared_plane_activate(struct tegra_plane * plane)211 static void tegra_shared_plane_activate(struct tegra_plane *plane)
212 {
213 	struct tegra_dc *dc = plane->dc;
214 	unsigned long timeout;
215 	u32 mask, value;
216 
217 	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
218 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
219 
220 	timeout = jiffies + msecs_to_jiffies(1000);
221 
222 	while (time_before(jiffies, timeout)) {
223 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
224 		if ((value & mask) == 0)
225 			break;
226 
227 		usleep_range(100, 400);
228 	}
229 }
230 
231 static unsigned int
tegra_shared_plane_get_owner(struct tegra_plane * plane,struct tegra_dc * dc)232 tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
233 {
234 	unsigned int offset =
235 		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
236 
237 	return tegra_dc_readl(dc, offset) & OWNER_MASK;
238 }
239 
tegra_dc_owns_shared_plane(struct tegra_dc * dc,struct tegra_plane * plane)240 static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
241 				       struct tegra_plane *plane)
242 {
243 	struct device *dev = dc->dev;
244 
245 	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
246 		if (plane->dc == dc)
247 			return true;
248 
249 		dev_WARN(dev, "head %u owns window %u but is not attached\n",
250 			 dc->pipe, plane->index);
251 	}
252 
253 	return false;
254 }
255 
/*
 * Assign ownership of this plane's window to head @new, or release it when
 * @new is NULL. Returns 0 on success or -EBUSY if a different head already
 * owns the window.
 */
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	/* refuse to steal a window that another head currently owns */
	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	/* an all-ones owner field marks the window as unowned */
	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}
296 
/*
 * Program the input scaler coefficient RAM for a shared plane.
 *
 * The table holds filter coefficients for three tap ratios (presumably the
 * 5-tap/2-tap horizontal/vertical configurations selected elsewhere — not
 * documented here; values come from NVIDIA). Each coefficient is written
 * through the indexed DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF register,
 * with the index packed as ratio(2 bits) << 6 | row(4 bits) << 2 | column.
 */
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
	static const unsigned int coeffs[192] = {
		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
	};
	unsigned int ratio, row, column;

	/* 3 ratios x 16 rows x 4 columns = 192 coefficients */
	for (ratio = 0; ratio <= 2; ratio++) {
		for (row = 0; row <= 15; row++) {
			for (column = 0; column <= 3; column++) {
				unsigned int index = (ratio << 6) + (row << 2) + column;
				u32 value;

				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
				tegra_plane_writel(plane, value,
						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
			}
		}
	}
}
364 
/*
 * Attach a shared plane to @dc: take ownership of the window if necessary,
 * program the window group's IHUB/precomp defaults, load the scaler
 * coefficients, then latch and activate the new state.
 */
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	/*
	 * NOTE(review): here and in the pipe-meter/mempool writes below, the
	 * register is read but the result immediately overwritten rather than
	 * modified — presumably the full field is meant to be replaced (or the
	 * read has a side effect); confirm against the TRM before "fixing".
	 */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	/* one thread group per window, numbered by the global plane index */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_setup_scaler(plane);

	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}
415 
/* Detach a shared plane from @dc by marking its window unowned. */
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}
421 
/*
 * Atomic check for shared planes: validate pixel format, tiling mode and
 * UV-plane pitches against the SoC's capabilities, then register the plane
 * with the CRTC state. Returns 0 on success or a negative error code.
 */
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	/* resolve the DRM fourcc into the hardware format and byte swap */
	err = tegra_plane_format(new_plane_state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
	    !dc->soc->supports_sector_layout) {
		DRM_ERROR("hardware doesn't support GPU sector layout\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (new_plane_state->fb->format->num_planes > 2) {
		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	err = tegra_plane_state_add(&tegra->base, new_plane_state);
	if (err < 0)
		return err;

	return 0;
}
479 
/*
 * Atomic disable for shared planes: clear the window enable bit and release
 * ownership of the window back to the pool.
 */
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* nothing to do if the plane was not attached to a CRTC */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}
518 
compute_phase_incr(fixed20_12 in,unsigned int out)519 static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
520 {
521 	u64 tmp, tmp1, tmp2;
522 
523 	tmp = (u64)dfixed_trunc(in);
524 	tmp2 = (u64)out;
525 	tmp1 = (tmp << NFB) + (tmp2 >> 1);
526 	do_div(tmp1, tmp2);
527 
528 	return lower_32_bits(tmp1);
529 }
530 
/*
 * Atomic update for shared planes: resume the head, take ownership of the
 * window, then program blending, scaling, framebuffer addresses, cropping
 * and surface kind for the new state.
 */
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	u32 value, min_width, bypass = 0;
	dma_addr_t base, addr_flag = 0;
	unsigned int bpc;
	bool yuv, planar;
	int err;

	/* nothing to do if the plane has no CRTC or framebuffer */
	if (!new_state->crtc || !new_state->fb)
		return;

	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planar, &bpc);

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	/* higher zpos maps to a lower hardware depth value */
	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* scaling */
	min_width = min(new_state->src_w >> 16, new_state->crtc_w);

	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);

	if (min_width < MAX_PIXELS_5TAP444(value)) {
		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	} else {
		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);

		if (min_width < MAX_PIXELS_2TAP444(value))
			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
		else
			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
	}

	/*
	 * NOTE(review): the tap selection computed above is discarded here —
	 * 5-tap is always programmed. Looks intentional (or a leftover); keep
	 * as-is unless the TRM/width limits say otherwise.
	 */
	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	if (new_state->src_w != new_state->crtc_w << 16) {
		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_HBYPASS;
	}

	if (new_state->src_h != new_state->crtc_h << 16) {
		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_VBYPASS;
	}

	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		addr_flag = BIT_ULL(39);
#endif

	base = tegra_plane_state->iova[0] + fb->offsets[0];
	base |= addr_flag;

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	if (yuv && planar) {
		/* program U and V plane base addresses (atomic_check
		 * guarantees pitches[1] == pitches[2]) */
		base = tegra_plane_state->iova[1] + fb->offsets[1];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);

		base = tegra_plane_state->iova[2] + fb->offsets[2];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);

		value = PITCH_U(fb->pitches[2]) | PITCH_V(fb->pitches[2]);
		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
	} else {
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
	}

	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;

	if (yuv) {
		if (bpc < 12)
			value |= DEGAMMA_YUV8_10;
		else
			value |= DEGAMMA_YUV12;

		/* XXX parameterize */
		value |= COLOR_SPACE_YUV_2020;
	} else {
		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
			value |= DEGAMMA_SRGB;
	}

	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	/*
	 * NOTE(review): OFFSET_X is fed src_y and OFFSET_Y src_x — looks
	 * swapped but may match the macro definitions; verify against dc.h.
	 */
	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}
738 
/* Atomic plane helpers for shared (window group) planes. */
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};
746 
tegra_shared_plane_create(struct drm_device * drm,struct tegra_dc * dc,unsigned int wgrp,unsigned int index)747 struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
748 					    struct tegra_dc *dc,
749 					    unsigned int wgrp,
750 					    unsigned int index)
751 {
752 	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
753 	struct tegra_drm *tegra = drm->dev_private;
754 	struct tegra_display_hub *hub = tegra->hub;
755 	struct tegra_shared_plane *plane;
756 	unsigned int possible_crtcs;
757 	unsigned int num_formats;
758 	const u64 *modifiers;
759 	struct drm_plane *p;
760 	const u32 *formats;
761 	int err;
762 
763 	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
764 	if (!plane)
765 		return ERR_PTR(-ENOMEM);
766 
767 	plane->base.offset = 0x0a00 + 0x0300 * index;
768 	plane->base.index = index;
769 
770 	plane->wgrp = &hub->wgrps[wgrp];
771 	plane->wgrp->parent = &dc->client;
772 
773 	p = &plane->base.base;
774 
775 	/* planes can be assigned to arbitrary CRTCs */
776 	possible_crtcs = BIT(tegra->num_crtcs) - 1;
777 
778 	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
779 	formats = tegra_shared_plane_formats;
780 	modifiers = tegra_shared_plane_modifiers;
781 
782 	err = drm_universal_plane_init(drm, p, possible_crtcs,
783 				       &tegra_plane_funcs, formats,
784 				       num_formats, modifiers, type, NULL);
785 	if (err < 0) {
786 		kfree(plane);
787 		return ERR_PTR(err);
788 	}
789 
790 	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
791 	drm_plane_create_zpos_property(p, 0, 0, 255);
792 
793 	return p;
794 }
795 
796 static struct drm_private_state *
tegra_display_hub_duplicate_state(struct drm_private_obj * obj)797 tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
798 {
799 	struct tegra_display_hub_state *state;
800 
801 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
802 	if (!state)
803 		return NULL;
804 
805 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
806 
807 	return &state->base;
808 }
809 
/* Free a hub private state previously duplicated for a commit. */
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	kfree(to_tegra_display_hub_state(state));
}
818 
/* Private-object state management callbacks for the display hub. */
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};
823 
824 static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub * hub,struct drm_atomic_state * state)825 tegra_display_hub_get_state(struct tegra_display_hub *hub,
826 			    struct drm_atomic_state *state)
827 {
828 	struct drm_private_state *priv;
829 
830 	priv = drm_atomic_get_private_obj_state(state, &hub->base);
831 	if (IS_ERR(priv))
832 		return ERR_CAST(priv);
833 
834 	return to_tegra_display_hub_state(priv);
835 }
836 
tegra_display_hub_atomic_check(struct drm_device * drm,struct drm_atomic_state * state)837 int tegra_display_hub_atomic_check(struct drm_device *drm,
838 				   struct drm_atomic_state *state)
839 {
840 	struct tegra_drm *tegra = drm->dev_private;
841 	struct tegra_display_hub_state *hub_state;
842 	struct drm_crtc_state *old, *new;
843 	struct drm_crtc *crtc;
844 	unsigned int i;
845 
846 	if (!tegra->hub)
847 		return 0;
848 
849 	hub_state = tegra_display_hub_get_state(tegra->hub, state);
850 	if (IS_ERR(hub_state))
851 		return PTR_ERR(hub_state);
852 
853 	/*
854 	 * The display hub display clock needs to be fed by the display clock
855 	 * with the highest frequency to ensure proper functioning of all the
856 	 * displays.
857 	 *
858 	 * Note that this isn't used before Tegra186, but it doesn't hurt and
859 	 * conditionalizing it would make the code less clean.
860 	 */
861 	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
862 		struct tegra_dc_state *dc = to_dc_state(new);
863 
864 		if (new->active) {
865 			if (!hub_state->clk || dc->pclk > hub_state->rate) {
866 				hub_state->dc = to_tegra_dc(dc->base.crtc);
867 				hub_state->clk = hub_state->dc->clk;
868 				hub_state->rate = dc->pclk;
869 			}
870 		}
871 	}
872 
873 	return 0;
874 }
875 
/*
 * Program the hub's common IHUB settings on @dc and latch/activate the
 * common state.
 */
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	/* NOTE(review): read result is discarded and the register replaced
	 * wholesale — presumably intentional; confirm against the TRM. */
	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	/* latch then activate the common state; the read-backs presumably
	 * flush the posted writes — TODO confirm */
	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}
902 
tegra_display_hub_atomic_commit(struct drm_device * drm,struct drm_atomic_state * state)903 void tegra_display_hub_atomic_commit(struct drm_device *drm,
904 				     struct drm_atomic_state *state)
905 {
906 	struct tegra_drm *tegra = drm->dev_private;
907 	struct tegra_display_hub *hub = tegra->hub;
908 	struct tegra_display_hub_state *hub_state;
909 	struct device *dev = hub->client.dev;
910 	int err;
911 
912 	hub_state = to_tegra_display_hub_state(hub->base.state);
913 
914 	if (hub_state->clk) {
915 		err = clk_set_rate(hub_state->clk, hub_state->rate);
916 		if (err < 0)
917 			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
918 				hub_state->clk, hub_state->rate);
919 
920 		err = clk_set_parent(hub->clk_disp, hub_state->clk);
921 		if (err < 0)
922 			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
923 				hub->clk_disp, hub_state->clk, err);
924 	}
925 
926 	if (hub_state->dc)
927 		tegra_display_hub_update(hub_state->dc);
928 }
929 
tegra_display_hub_init(struct host1x_client * client)930 static int tegra_display_hub_init(struct host1x_client *client)
931 {
932 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
933 	struct drm_device *drm = dev_get_drvdata(client->host);
934 	struct tegra_drm *tegra = drm->dev_private;
935 	struct tegra_display_hub_state *state;
936 
937 	state = kzalloc(sizeof(*state), GFP_KERNEL);
938 	if (!state)
939 		return -ENOMEM;
940 
941 	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
942 				    &tegra_display_hub_state_funcs);
943 
944 	tegra->hub = hub;
945 
946 	return 0;
947 }
948 
tegra_display_hub_exit(struct host1x_client * client)949 static int tegra_display_hub_exit(struct host1x_client *client)
950 {
951 	struct drm_device *drm = dev_get_drvdata(client->host);
952 	struct tegra_drm *tegra = drm->dev_private;
953 
954 	drm_atomic_private_obj_fini(&tegra->hub->base);
955 	tegra->hub = NULL;
956 
957 	return 0;
958 }
959 
tegra_display_hub_runtime_suspend(struct host1x_client * client)960 static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
961 {
962 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
963 	struct device *dev = client->dev;
964 	unsigned int i = hub->num_heads;
965 	int err;
966 
967 	err = reset_control_assert(hub->rst);
968 	if (err < 0)
969 		return err;
970 
971 	while (i--)
972 		clk_disable_unprepare(hub->clk_heads[i]);
973 
974 	clk_disable_unprepare(hub->clk_hub);
975 	clk_disable_unprepare(hub->clk_dsc);
976 	clk_disable_unprepare(hub->clk_disp);
977 
978 	pm_runtime_put_sync(dev);
979 
980 	return 0;
981 }
982 
/*
 * Power up the hub: take a runtime PM reference, ungate all clocks (display,
 * DSC, hub and one clock per head) and release the reset. On any failure the
 * goto chain unwinds exactly the resources acquired so far, in reverse order.
 */
static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(hub->clk_disp);
	if (err < 0)
		goto put_rpm;

	err = clk_prepare_enable(hub->clk_dsc);
	if (err < 0)
		goto disable_disp;

	err = clk_prepare_enable(hub->clk_hub);
	if (err < 0)
		goto disable_dsc;

	for (i = 0; i < hub->num_heads; i++) {
		err = clk_prepare_enable(hub->clk_heads[i]);
		if (err < 0)
			goto disable_heads;
	}

	err = reset_control_deassert(hub->rst);
	if (err < 0)
		goto disable_heads;

	return 0;

disable_heads:
	/*
	 * i is either the index of the head clock that failed to enable or
	 * num_heads (if the reset deassert failed), so this disables exactly
	 * the head clocks that were enabled, in reverse order.
	 */
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
disable_dsc:
	clk_disable_unprepare(hub->clk_dsc);
disable_disp:
	clk_disable_unprepare(hub->clk_disp);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}
1033 
/* host1x client callbacks for the display hub. */
static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};
1040 
tegra_display_hub_probe(struct platform_device * pdev)1041 static int tegra_display_hub_probe(struct platform_device *pdev)
1042 {
1043 	u64 dma_mask = dma_get_mask(pdev->dev.parent);
1044 	struct device_node *child = NULL;
1045 	struct tegra_display_hub *hub;
1046 	struct clk *clk;
1047 	unsigned int i;
1048 	int err;
1049 
1050 	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1051 	if (err < 0) {
1052 		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1053 		return err;
1054 	}
1055 
1056 	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1057 	if (!hub)
1058 		return -ENOMEM;
1059 
1060 	hub->soc = of_device_get_match_data(&pdev->dev);
1061 
1062 	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1063 	if (IS_ERR(hub->clk_disp)) {
1064 		err = PTR_ERR(hub->clk_disp);
1065 		return err;
1066 	}
1067 
1068 	if (hub->soc->supports_dsc) {
1069 		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1070 		if (IS_ERR(hub->clk_dsc)) {
1071 			err = PTR_ERR(hub->clk_dsc);
1072 			return err;
1073 		}
1074 	}
1075 
1076 	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1077 	if (IS_ERR(hub->clk_hub)) {
1078 		err = PTR_ERR(hub->clk_hub);
1079 		return err;
1080 	}
1081 
1082 	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1083 	if (IS_ERR(hub->rst)) {
1084 		err = PTR_ERR(hub->rst);
1085 		return err;
1086 	}
1087 
1088 	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1089 				  sizeof(*hub->wgrps), GFP_KERNEL);
1090 	if (!hub->wgrps)
1091 		return -ENOMEM;
1092 
1093 	for (i = 0; i < hub->soc->num_wgrps; i++) {
1094 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1095 		char id[8];
1096 
1097 		snprintf(id, sizeof(id), "wgrp%u", i);
1098 		mutex_init(&wgrp->lock);
1099 		wgrp->usecount = 0;
1100 		wgrp->index = i;
1101 
1102 		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1103 		if (IS_ERR(wgrp->rst))
1104 			return PTR_ERR(wgrp->rst);
1105 
1106 		err = reset_control_assert(wgrp->rst);
1107 		if (err < 0)
1108 			return err;
1109 	}
1110 
1111 	hub->num_heads = of_get_child_count(pdev->dev.of_node);
1112 
1113 	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1114 				      GFP_KERNEL);
1115 	if (!hub->clk_heads)
1116 		return -ENOMEM;
1117 
1118 	for (i = 0; i < hub->num_heads; i++) {
1119 		child = of_get_next_child(pdev->dev.of_node, child);
1120 		if (!child) {
1121 			dev_err(&pdev->dev, "failed to find node for head %u\n",
1122 				i);
1123 			return -ENODEV;
1124 		}
1125 
1126 		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1127 		if (IS_ERR(clk)) {
1128 			dev_err(&pdev->dev, "failed to get clock for head %u\n",
1129 				i);
1130 			of_node_put(child);
1131 			return PTR_ERR(clk);
1132 		}
1133 
1134 		hub->clk_heads[i] = clk;
1135 	}
1136 
1137 	of_node_put(child);
1138 
1139 	/* XXX: enable clock across reset? */
1140 	err = reset_control_assert(hub->rst);
1141 	if (err < 0)
1142 		return err;
1143 
1144 	platform_set_drvdata(pdev, hub);
1145 	pm_runtime_enable(&pdev->dev);
1146 
1147 	INIT_LIST_HEAD(&hub->client.list);
1148 	hub->client.ops = &tegra_display_hub_ops;
1149 	hub->client.dev = &pdev->dev;
1150 
1151 	err = host1x_client_register(&hub->client);
1152 	if (err < 0)
1153 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1154 			err);
1155 
1156 	err = devm_of_platform_populate(&pdev->dev);
1157 	if (err < 0)
1158 		goto unregister;
1159 
1160 	return err;
1161 
1162 unregister:
1163 	host1x_client_unregister(&hub->client);
1164 	pm_runtime_disable(&pdev->dev);
1165 	return err;
1166 }
1167 
tegra_display_hub_remove(struct platform_device * pdev)1168 static int tegra_display_hub_remove(struct platform_device *pdev)
1169 {
1170 	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1171 	unsigned int i;
1172 	int err;
1173 
1174 	err = host1x_client_unregister(&hub->client);
1175 	if (err < 0) {
1176 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1177 			err);
1178 	}
1179 
1180 	for (i = 0; i < hub->soc->num_wgrps; i++) {
1181 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1182 
1183 		mutex_destroy(&wgrp->lock);
1184 	}
1185 
1186 	pm_runtime_disable(&pdev->dev);
1187 
1188 	return err;
1189 }
1190 
/* Tegra186: six window groups, DSC clock present. */
static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};
1195 
/* Tegra194: six window groups, no DSC clock. */
static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};
1200 
/* Device tree match table; .data selects the per-SoC configuration. */
static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1213 
/* Platform driver, registered by the Tegra DRM core (see drm.c). */
struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove = tegra_display_hub_remove,
};
1222