// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <sound/hdmi-codec.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"

struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
		.data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

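/*
 * The GRF clock must be running for GRF register writes to take effect,
 * so every write wraps the regmap access in a clk_prepare_enable() /
 * clk_disable_unprepare() pair.
 */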
static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

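/*
 * Bring up the controller: enable the APB and core clocks, grab the power
 * domain via runtime PM, pulse the core/dptx/apb resets, and then tell the
 * firmware interface what rate the core clock is running at.
 */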
static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

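/*
 * Number of lanes available for DP on a Type-C port: if extcon reports that
 * USB SuperSpeed is also active, the connector is shared with USB and only
 * two lanes are usable for DP; otherwise all four lanes are. Zero means the
 * port is not carrying DP at all.
 */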
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read sink count, retry in case the sink may not be ready.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret)
			drm_connector_update_edid_property(connector,
							   edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}

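/*
 * The bandwidth check below compares two values in Mbit/s: "requested" is
 * pixel clock times bits per pixel, while "actual" is the raw link bandwidth
 * for the negotiated rate and lane count, derated to roughly 80% to account
 * for 8b/10b channel coding and packing overhead.
 */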
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
				       struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};

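/*
 * The firmware image starts with a cdn_firmware_header describing the sizes
 * of the IRAM and DRAM sections that follow it. After both sections are
 * loaded into the controller, the embedded microcontroller is activated and
 * event reporting is configured.
 */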
static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

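/*
 * Enable one Type-C port for DP: power on its PHY, route the HPD signal to
 * the DP controller through the GRF, make sure HPD is actually asserted,
 * then program the host capabilities from the lane count and the Type-C
 * plug orientation reported by extcon. On failure the PHY is powered back
 * off and the HPD routing is reverted.
 */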
static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->max_lanes = 0;
	dp->max_rate = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

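/*
 * Power up the controller (clocks, runtime PM, firmware) and then walk the
 * ports, starting from the first one that reports a connection, until a PHY
 * can be enabled and the sink's capabilities are read successfully.
 */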
static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* Only enable a port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	memcpy(&dp->mode, adjusted, sizeof(*mode));
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->max_rate || !dp->max_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* If link re-training is requested, it should always be performed */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

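/*
 * GRF_SOC_CON9 selects which VOP drives the DP controller; the upper 16
 * bits of the value are the write-enable mask for the bits being changed,
 * following the usual Rockchip GRF convention. After muxing the VOP, the
 * link is (re)trained if needed and the video path is configured.
 */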
static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following two cases the event_work must be scheduled to
	 * re-enable the DP:
	 * 1. More than one device is connected and one of them is removed
	 *    from its port. The DP is disabled here, so run the event_work
	 *    to re-open it for the remaining port.
	 * 2. Re-training or re-configuration failed and the DP was disabled
	 *    here; run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
				    bool enable, int direction)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.mute_stream = cdn_dp_audio_mute_stream,
	.get_eld = cdn_dp_audio_get_eld,
	.no_capture_mute = 1,
};

static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

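/*
 * request_firmware() is retried with an exponentially growing sleep for up
 * to CDN_FW_TIMEOUT_MS, since the firmware file may not be available yet,
 * typically because the rootfs has not been mounted. dp->lock is dropped
 * while waiting so other paths are not blocked behind the firmware load.
 */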
static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

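/*
 * Worker that reconciles the extcon state with the controller state: it
 * loads the firmware if necessary, enables or disables the block, re-trains
 * the link when required, and finally reports any connector status change
 * to userspace as a hotplug event.
 */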
static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;
	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->max_rate;
		unsigned int lanes = dp->max_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If the training result changed, update the video config */
		if (mode->clock &&
		    (rate != dp->max_rate || lanes != dp->max_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

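/*
 * Component bind: create the encoder and connector, register an extcon
 * notifier for every port so Type-C plug events reach cdn_dp_pd_event(),
 * and kick the event worker once to pick up the initial state.
 */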
static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_simple_encoder_init(drm_dev, encoder,
				      DRM_MODE_ENCODER_TMDS);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

static int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

static __maybe_unused int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

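/*
 * Probe: allocate one cdn_dp_port for every extcon/PHY pair described in
 * the DT (up to max_phy), deferring if either provider has not probed yet.
 * At least one usable port is required; the DRM pieces are set up later in
 * cdn_dp_bind() through the component framework.
 */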
static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	cdn_dp_audio_codec_init(dp, dev);

	return component_add(dev, &cdn_dp_component_ops);
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};