// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Chris Zhong <zyw@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <sound/hdmi-codec.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"

#define connector_to_dp(c) \
		container_of(c, struct cdn_dp_device, connector)

#define encoder_to_dp(c) \
		container_of(c, struct cdn_dp_device, encoder)

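/*
 * Note on the GRF writes below: like other Rockchip GRF registers, the
 * RK3399 GRF_SOC_CON registers treat the upper 16 bits of a write as a
 * write-enable mask for the lower 16 bits, which is why every
 * cdn_dp_grf_write() call pairs a value with its shifted mask
 * (e.g. DPTX_HPD_SEL_MASK | DPTX_HPD_SEL).
 */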
#define GRF_SOC_CON9		0x6224
#define DP_SEL_VOP_LIT		BIT(12)
#define GRF_SOC_CON26		0x6268
#define DPTX_HPD_SEL		(3 << 12)
#define DPTX_HPD_DEL		(2 << 12)
#define DPTX_HPD_SEL_MASK	(3 << 28)

#define CDN_FW_TIMEOUT_MS	(64 * 1000)
#define CDN_DPCD_TIMEOUT_MS	5000
#define CDN_DP_FIRMWARE		"rockchip/dptx.bin"
MODULE_FIRMWARE(CDN_DP_FIRMWARE);

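/*
 * Per-SoC match data: max_phy is the number of Type-C PHY/extcon pairs that
 * can feed the DP controller (two on RK3399, one per USB Type-C port).
 */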
struct cdn_dp_data {
	u8 max_phy;
};

struct cdn_dp_data rk3399_cdn_dp = {
	.max_phy = 2,
};

static const struct of_device_id cdn_dp_dt_ids[] = {
	{ .compatible = "rockchip,rk3399-cdn-dp",
	  .data = (void *)&rk3399_cdn_dp },
	{}
};

MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids);

static int cdn_dp_grf_write(struct cdn_dp_device *dp,
			    unsigned int reg, unsigned int val)
{
	int ret;

	ret = clk_prepare_enable(dp->grf_clk);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n");
		return ret;
	}

	ret = regmap_write(dp->grf, reg, val);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
		clk_disable_unprepare(dp->grf_clk);
		return ret;
	}

	clk_disable_unprepare(dp->grf_clk);

	return 0;
}

static int cdn_dp_clk_enable(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long rate;

	ret = clk_prepare_enable(dp->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret);
		goto err_pclk;
	}

	ret = clk_prepare_enable(dp->core_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret);
		goto err_core_clk;
	}

	ret = pm_runtime_get_sync(dp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret);
		goto err_pm_runtime_get;
	}

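	/*
	 * Pulse (assert then deassert) the core, DPTX and APB resets before
	 * configuring the firmware clock, so the controller starts from a
	 * known state.
	 */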
	reset_control_assert(dp->core_rst);
	reset_control_assert(dp->dptx_rst);
	reset_control_assert(dp->apb_rst);
	reset_control_deassert(dp->core_rst);
	reset_control_deassert(dp->dptx_rst);
	reset_control_deassert(dp->apb_rst);

	rate = clk_get_rate(dp->core_clk);
	if (!rate) {
		DRM_DEV_ERROR(dp->dev, "get clk rate failed\n");
		ret = -EINVAL;
		goto err_set_rate;
	}

	cdn_dp_set_fw_clk(dp, rate);
	cdn_dp_clock_reset(dp);

	return 0;

err_set_rate:
	pm_runtime_put(dp->dev);
err_pm_runtime_get:
	clk_disable_unprepare(dp->core_clk);
err_core_clk:
	clk_disable_unprepare(dp->pclk);
err_pclk:
	return ret;
}

static void cdn_dp_clk_disable(struct cdn_dp_device *dp)
{
	pm_runtime_put_sync(dp->dev);
	clk_disable_unprepare(dp->pclk);
	clk_disable_unprepare(dp->core_clk);
}

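/*
 * On a USB Type-C port the DP alternate mode shares the four SuperSpeed
 * lanes with USB: when EXTCON_PROP_USB_SS is set, USB keeps two lanes and
 * DP gets the other two; otherwise all four lanes are available to DP.
 * A port with no DP cable connected reports zero lanes.
 */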
static int cdn_dp_get_port_lanes(struct cdn_dp_port *port)
{
	struct extcon_dev *edev = port->extcon;
	union extcon_property_value property;
	int dptx;
	u8 lanes;

	dptx = extcon_get_state(edev, EXTCON_DISP_DP);
	if (dptx > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_USB_SS, &property);
		if (property.intval)
			lanes = 2;
		else
			lanes = 4;
	} else {
		lanes = 0;
	}

	return lanes;
}

static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
{
	int ret;
	u8 value;

	*sink_count = 0;
	ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
	if (ret)
		return ret;

	*sink_count = DP_GET_SINK_COUNT(value);
	return 0;
}

static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp)
{
	struct cdn_dp_port *port;
	int i, lanes;

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes)
			return port;
	}
	return NULL;
}

static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS);
	struct cdn_dp_port *port;
	u8 sink_count = 0;

	if (dp->active_port < 0 || dp->active_port >= dp->ports) {
		DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n");
		return false;
	}

	port = dp->port[dp->active_port];

	/*
	 * Attempt to read the sink count; retry because the sink may not be
	 * ready yet.
	 *
	 * Sinks are *supposed* to come up within 1ms from an off state, but
	 * some docks need more time to power up.
	 */
	while (time_before(jiffies, timeout)) {
		if (!extcon_get_state(port->extcon, EXTCON_DISP_DP))
			return false;

		if (!cdn_dp_get_sink_count(dp, &sink_count))
			return sink_count ? true : false;

		usleep_range(5000, 10000);
	}

	DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n");
	return false;
}

static enum drm_connector_status
cdn_dp_connector_detect(struct drm_connector *connector, bool force)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	enum drm_connector_status status = connector_status_disconnected;

	mutex_lock(&dp->lock);
	if (dp->connected)
		status = connector_status_connected;
	mutex_unlock(&dp->lock);

	return status;
}

static void cdn_dp_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
	.detect = cdn_dp_connector_detect,
	.destroy = cdn_dp_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int cdn_dp_connector_get_modes(struct drm_connector *connector)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct edid *edid;
	int ret = 0;

	mutex_lock(&dp->lock);
	edid = dp->edid;
	if (edid) {
		DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n",
				  edid->width_cm, edid->height_cm);

		dp->sink_has_audio = drm_detect_monitor_audio(edid);
		ret = drm_add_edid_modes(connector, edid);
		if (ret)
			drm_connector_update_edid_property(connector,
							   edid);
	}
	mutex_unlock(&dp->lock);

	return ret;
}

static enum drm_mode_status
cdn_dp_connector_mode_valid(struct drm_connector *connector,
			    struct drm_display_mode *mode)
{
	struct cdn_dp_device *dp = connector_to_dp(connector);
	struct drm_display_info *display_info = &dp->connector.display_info;
	u32 requested, actual, rate, sink_max, source_max = 0;
	u8 lanes, bpc;

	/* If DP is disconnected, every mode is invalid */
	if (!dp->connected)
		return MODE_BAD;

	switch (display_info->bpc) {
	case 10:
		bpc = 10;
		break;
	case 6:
		bpc = 6;
		break;
	default:
		bpc = 8;
		break;
	}

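	/*
	 * Rough bandwidth check, with all figures in Mbit/s: the mode needs
	 * clock (kHz) * 3 colour components * bpc / 1000, while the link
	 * provides link_rate (returned in kHz, i.e. symbols/s, by the DP
	 * helpers) * 10 bits per symbol * lanes / 1000, of which roughly
	 * 80% is usable payload after 8b/10b coding.
	 */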
	requested = mode->clock * bpc * 3 / 1000;

	source_max = dp->lanes;
	sink_max = drm_dp_max_lane_count(dp->dpcd);
	lanes = min(source_max, sink_max);

	source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
	sink_max = drm_dp_max_link_rate(dp->dpcd);
	rate = min(source_max, sink_max);

	actual = rate * lanes / 100;

	/* efficiency is about 0.8 */
	actual = actual * 8 / 10;

	if (requested > actual) {
		DRM_DEV_DEBUG_KMS(dp->dev,
				  "requested=%d, actual=%d, clock=%d\n",
				  requested, actual, mode->clock);
		return MODE_CLOCK_HIGH;
	}

	return MODE_OK;
}

static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
	.get_modes = cdn_dp_connector_get_modes,
	.mode_valid = cdn_dp_connector_mode_valid,
};

static int cdn_dp_firmware_init(struct cdn_dp_device *dp)
{
	int ret;
	const u32 *iram_data, *dram_data;
	const struct firmware *fw = dp->fw;
	const struct cdn_firmware_header *hdr;

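	/*
	 * The firmware blob starts with a cdn_firmware_header giving the
	 * total size plus the IRAM and DRAM section sizes; the IRAM payload
	 * follows the header and the DRAM payload follows the IRAM payload.
	 */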
	hdr = (struct cdn_firmware_header *)fw->data;
	if (fw->size != le32_to_cpu(hdr->size_bytes)) {
		DRM_DEV_ERROR(dp->dev, "firmware is invalid\n");
		return -EINVAL;
	}

	iram_data = (const u32 *)(fw->data + hdr->header_size);
	dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size);

	ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size,
				   dram_data, hdr->dram_size);
	if (ret)
		return ret;

	ret = cdn_dp_set_firmware_active(dp, true);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret);
		return ret;
	}

	return cdn_dp_event_config(dp);
}

static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
{
	int ret;

	if (!cdn_dp_check_sink_connection(dp))
		return -ENODEV;

	ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
		return ret;
	}

	kfree(dp->edid);
	dp->edid = drm_do_get_edid(&dp->connector,
				   cdn_dp_get_edid_block, dp);
	return 0;
}

static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
{
	union extcon_property_value property;
	int ret;

	if (!port->phy_enabled) {
		ret = phy_power_on(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n",
				      ret);
			goto err_phy;
		}
		port->phy_enabled = true;
	}

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_SEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret);
		goto err_power_on;
	}

	ret = cdn_dp_get_hpd_status(dp);
	if (ret <= 0) {
		if (!ret)
			DRM_DEV_ERROR(dp->dev, "hpd does not exist\n");
		goto err_power_on;
	}

	ret = extcon_get_property(port->extcon, EXTCON_DISP_DP,
				  EXTCON_PROP_USB_TYPEC_POLARITY, &property);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "get property failed\n");
		goto err_power_on;
	}

	port->lanes = cdn_dp_get_port_lanes(port);
	ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n",
			      ret);
		goto err_power_on;
	}

	dp->active_port = port->id;
	return 0;

err_power_on:
	if (phy_power_off(port->phy))
		DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
	else
		port->phy_enabled = false;

err_phy:
	cdn_dp_grf_write(dp, GRF_SOC_CON26,
			 DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	return ret;
}

static int cdn_dp_disable_phy(struct cdn_dp_device *dp,
			      struct cdn_dp_port *port)
{
	int ret;

	if (port->phy_enabled) {
		ret = phy_power_off(port->phy);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret);
			return ret;
		}
	}

	port->phy_enabled = false;
	port->lanes = 0;
	dp->active_port = -1;
	return 0;
}

static int cdn_dp_disable(struct cdn_dp_device *dp)
{
	int ret, i;

	if (!dp->active)
		return 0;

	for (i = 0; i < dp->ports; i++)
		cdn_dp_disable_phy(dp, dp->port[i]);

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
			       DPTX_HPD_SEL_MASK | DPTX_HPD_DEL);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n",
			      ret);
		return ret;
	}

	cdn_dp_set_firmware_active(dp, false);
	cdn_dp_clk_disable(dp);
	dp->active = false;
	dp->max_lanes = 0;
	dp->max_rate = 0;
	if (!dp->connected) {
		kfree(dp->edid);
		dp->edid = NULL;
	}

	return 0;
}

static int cdn_dp_enable(struct cdn_dp_device *dp)
{
	int ret, i, lanes;
	struct cdn_dp_port *port;

	port = cdn_dp_connected_port(dp);
	if (!port) {
		DRM_DEV_ERROR(dp->dev,
			      "Can't enable without connection\n");
		return -ENODEV;
	}

	if (dp->active)
		return 0;

	ret = cdn_dp_clk_enable(dp);
	if (ret)
		return ret;

	ret = cdn_dp_firmware_init(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret);
		goto err_clk_disable;
	}

	/* Only enable the port that is connected to a downstream device */
	for (i = port->id; i < dp->ports; i++) {
		port = dp->port[i];
		lanes = cdn_dp_get_port_lanes(port);
		if (lanes) {
			ret = cdn_dp_enable_phy(dp, port);
			if (ret)
				continue;

			ret = cdn_dp_get_sink_capability(dp);
			if (ret) {
				cdn_dp_disable_phy(dp, port);
			} else {
				dp->active = true;
				dp->lanes = port->lanes;
				return 0;
			}
		}
	}

err_clk_disable:
	cdn_dp_clk_disable(dp);
	return ret;
}

static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	struct drm_display_info *display_info = &dp->connector.display_info;
	struct video_info *video = &dp->video_info;

	switch (display_info->bpc) {
	case 10:
		video->color_depth = 10;
		break;
	case 6:
		video->color_depth = 6;
		break;
	default:
		video->color_depth = 8;
		break;
	}

	video->color_fmt = PXL_RGB;
	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);

	drm_mode_copy(&dp->mode, adjusted);
}

static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct cdn_dp_port *port = cdn_dp_connected_port(dp);
	u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);

	if (!port || !dp->max_rate || !dp->max_lanes)
		return false;

	if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
			     DP_LINK_STATUS_SIZE)) {
		DRM_ERROR("Failed to get link status\n");
		return false;
	}

	/* if link training is requested we should perform it always */
	return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes));
}

static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret, val;

	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
	if (ret < 0) {
		DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret);
		return;
	}

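	/*
	 * The active endpoint id tells us which VOP is scanning out: 0 is
	 * the "big" VOP, non-zero the "little" one. GRF_SOC_CON9 bit 12
	 * routes the DP controller accordingly, with the upper half-word
	 * acting as the write-enable mask for that bit.
	 */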
	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
			  (ret) ? "LIT" : "BIG");
	if (ret)
		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
	else
		val = DP_SEL_VOP_LIT << 16;

	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
	if (ret)
		return;

	mutex_lock(&dp->lock);

	ret = cdn_dp_enable(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n",
			      ret);
		goto out;
	}
	if (!cdn_dp_check_link_status(dp)) {
		ret = cdn_dp_train_link(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret);
			goto out;
		}
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_config_video(dp);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret);
		goto out;
	}

	ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
	if (ret) {
		DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&dp->lock);
}

static void cdn_dp_encoder_disable(struct drm_encoder *encoder)
{
	struct cdn_dp_device *dp = encoder_to_dp(encoder);
	int ret;

	mutex_lock(&dp->lock);
	if (dp->active) {
		ret = cdn_dp_disable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n",
				      ret);
		}
	}
	mutex_unlock(&dp->lock);

	/*
	 * In the following two cases the event_work needs to run to
	 * re-enable DP:
	 * 1. More than one port has a device connected and one of them is
	 *    removed: DP is disabled here, so run the event_work to re-open
	 *    DP for the remaining port.
	 * 2. Re-training or re-configuration failed and DP was disabled
	 *    here: run the event_work to re-connect it.
	 */
	if (!dp->connected && cdn_dp_connected_port(dp))
		schedule_work(&dp->event_work);
}

static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);

	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
	s->output_type = DRM_MODE_CONNECTOR_DisplayPort;

	return 0;
}

static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
	.mode_set = cdn_dp_encoder_mode_set,
	.enable = cdn_dp_encoder_enable,
	.disable = cdn_dp_encoder_disable,
	.atomic_check = cdn_dp_encoder_atomic_check,
};

static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
	struct device *dev = dp->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
	if (IS_ERR(dp->grf)) {
		DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n");
		return PTR_ERR(dp->grf);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(dp->regs)) {
		DRM_DEV_ERROR(dev, "ioremap reg failed\n");
		return PTR_ERR(dp->regs);
	}

	dp->core_clk = devm_clk_get(dev, "core-clk");
	if (IS_ERR(dp->core_clk)) {
		DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n");
		return PTR_ERR(dp->core_clk);
	}

	dp->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(dp->pclk)) {
		DRM_DEV_ERROR(dev, "cannot get pclk\n");
		return PTR_ERR(dp->pclk);
	}

	dp->spdif_clk = devm_clk_get(dev, "spdif");
	if (IS_ERR(dp->spdif_clk)) {
		DRM_DEV_ERROR(dev, "cannot get spdif_clk\n");
		return PTR_ERR(dp->spdif_clk);
	}

	dp->grf_clk = devm_clk_get(dev, "grf");
	if (IS_ERR(dp->grf_clk)) {
		DRM_DEV_ERROR(dev, "cannot get grf clk\n");
		return PTR_ERR(dp->grf_clk);
	}

	dp->spdif_rst = devm_reset_control_get(dev, "spdif");
	if (IS_ERR(dp->spdif_rst)) {
		DRM_DEV_ERROR(dev, "no spdif reset control found\n");
		return PTR_ERR(dp->spdif_rst);
	}

	dp->dptx_rst = devm_reset_control_get(dev, "dptx");
	if (IS_ERR(dp->dptx_rst)) {
		DRM_DEV_ERROR(dev, "no uphy reset control found\n");
		return PTR_ERR(dp->dptx_rst);
	}

	dp->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(dp->core_rst)) {
		DRM_DEV_ERROR(dev, "no core reset control found\n");
		return PTR_ERR(dp->core_rst);
	}

	dp->apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(dp->apb_rst)) {
		DRM_DEV_ERROR(dev, "no apb reset control found\n");
		return PTR_ERR(dp->apb_rst);
	}

	return 0;
}

static int cdn_dp_audio_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *daifmt,
				  struct hdmi_codec_params *params)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct audio_info audio = {
		.sample_width = params->sample_width,
		.sample_rate = params->sample_rate,
		.channels = params->channels,
	};
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	switch (daifmt->fmt) {
	case HDMI_I2S:
		audio.format = AFMT_I2S;
		break;
	case HDMI_SPDIF:
		audio.format = AFMT_SPDIF;
		break;
	default:
		DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt);
		ret = -EINVAL;
		goto out;
	}

	ret = cdn_dp_audio_config(dp, &audio);
	if (!ret)
		dp->audio_info = audio;

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static void cdn_dp_audio_shutdown(struct device *dev, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active)
		goto out;

	ret = cdn_dp_audio_stop(dp, &dp->audio_info);
	if (!ret)
		dp->audio_info.format = AFMT_UNUSED;
out:
	mutex_unlock(&dp->lock);
}

static int cdn_dp_audio_mute_stream(struct device *dev, void *data,
				    bool enable, int direction)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret;

	mutex_lock(&dp->lock);
	if (!dp->active) {
		ret = -ENODEV;
		goto out;
	}

	ret = cdn_dp_audio_mute(dp, enable);

out:
	mutex_unlock(&dp->lock);
	return ret;
}

static int cdn_dp_audio_get_eld(struct device *dev, void *data,
				u8 *buf, size_t len)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len));

	return 0;
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = cdn_dp_audio_hw_params,
	.audio_shutdown = cdn_dp_audio_shutdown,
	.mute_stream = cdn_dp_audio_mute_stream,
	.get_eld = cdn_dp_audio_get_eld,
	.no_capture_mute = 1,
};

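/*
 * Register a "hdmi-audio-codec" platform device so the DP audio path is
 * exposed to ALSA; both I2S and S/PDIF inputs, with up to eight I2S
 * channels, are advertised to the codec driver.
 */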
static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
				   struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 1,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
	};

	dp->audio_pdev = platform_device_register_data(
			 dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
			 &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dp->audio_pdev);
}

static int cdn_dp_request_firmware(struct cdn_dp_device *dp)
{
	int ret;
	unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS);
	unsigned long sleep = 1000;

	WARN_ON(!mutex_is_locked(&dp->lock));

	if (dp->fw_loaded)
		return 0;

	/* Drop the lock before getting the firmware to avoid blocking boot */
	mutex_unlock(&dp->lock);

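	/*
	 * -ENOENT is retried with exponential back-off since the firmware
	 * file may simply not be reachable yet (e.g. the rootfs holding it
	 * is not mounted at this point of boot); any other error is fatal.
	 */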
	while (time_before(jiffies, timeout)) {
		ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev);
		if (ret == -ENOENT) {
			msleep(sleep);
			sleep *= 2;
			continue;
		} else if (ret) {
			DRM_DEV_ERROR(dp->dev,
				      "failed to request firmware: %d\n", ret);
			goto out;
		}

		dp->fw_loaded = true;
		ret = 0;
		goto out;
	}

	DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n");
	ret = -ETIMEDOUT;
out:
	mutex_lock(&dp->lock);
	return ret;
}

static void cdn_dp_pd_event_work(struct work_struct *work)
{
	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
						event_work);
	struct drm_connector *connector = &dp->connector;
	enum drm_connector_status old_status;

	int ret;

	mutex_lock(&dp->lock);

	if (dp->suspended)
		goto out;

	ret = cdn_dp_request_firmware(dp);
	if (ret)
		goto out;

	dp->connected = true;

	/* Not connected, notify userspace to disable the block */
	if (!cdn_dp_connected_port(dp)) {
		DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n");
		dp->connected = false;

	/* Connected but not enabled, enable the block */
	} else if (!dp->active) {
		DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n");
		ret = cdn_dp_enable(dp);
		if (ret) {
			DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret);
			dp->connected = false;
		}

	/* Enabled and connected to a dongle without a sink, notify userspace */
	} else if (!cdn_dp_check_sink_connection(dp)) {
		DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n");
		dp->connected = false;

	/* Enabled and connected with a sink, re-train if requested */
	} else if (!cdn_dp_check_link_status(dp)) {
		unsigned int rate = dp->max_rate;
		unsigned int lanes = dp->max_lanes;
		struct drm_display_mode *mode = &dp->mode;

		DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
		ret = cdn_dp_train_link(dp);
		if (ret) {
			dp->connected = false;
			DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret);
			goto out;
		}

		/* If training result is changed, update the video config */
		if (mode->clock &&
		    (rate != dp->max_rate || lanes != dp->max_lanes)) {
			ret = cdn_dp_config_video(dp);
			if (ret) {
				dp->connected = false;
				DRM_DEV_ERROR(dp->dev,
					      "Failed to config video %d\n",
					      ret);
			}
		}
	}

out:
	mutex_unlock(&dp->lock);

	old_status = connector->status;
	connector->status = connector->funcs->detect(connector, false);
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(dp->drm_dev);
}

static int cdn_dp_pd_event(struct notifier_block *nb,
			   unsigned long event, void *priv)
{
	struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port,
						event_nb);
	struct cdn_dp_device *dp = port->dp;

	/*
	 * It would be nice to be able to just do the work inline right here.
	 * However, we need to make a bunch of calls that might sleep in order
	 * to turn on the block/phy, so use a worker instead.
	 */
	schedule_work(&dp->event_work);

	return NOTIFY_DONE;
}

static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct cdn_dp_port *port;
	struct drm_device *drm_dev = data;
	int ret, i;

	ret = cdn_dp_parse_dt(dp);
	if (ret < 0)
		return ret;

	dp->drm_dev = drm_dev;
	dp->connected = false;
	dp->active = false;
	dp->active_port = -1;
	dp->fw_loaded = false;

	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);

	encoder = &dp->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
							     dev->of_node);
	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	ret = drm_simple_encoder_init(drm_dev, encoder,
				      DRM_MODE_ENCODER_TMDS);
	if (ret) {
		DRM_ERROR("failed to initialize encoder with drm\n");
		return ret;
	}

	drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs);

	connector = &dp->connector;
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->dpms = DRM_MODE_DPMS_OFF;

	ret = drm_connector_init(drm_dev, connector,
				 &cdn_dp_atomic_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("failed to initialize connector with drm\n");
		goto err_free_encoder;
	}

	drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("failed to attach connector and encoder\n");
		goto err_free_connector;
	}

	for (i = 0; i < dp->ports; i++) {
		port = dp->port[i];

		port->event_nb.notifier_call = cdn_dp_pd_event;
		ret = devm_extcon_register_notifier(dp->dev, port->extcon,
						    EXTCON_DISP_DP,
						    &port->event_nb);
		if (ret) {
			DRM_DEV_ERROR(dev,
				      "register EXTCON_DISP_DP notifier err\n");
			goto err_free_connector;
		}
	}

	pm_runtime_enable(dev);

	schedule_work(&dp->event_work);

	return 0;

err_free_connector:
	drm_connector_cleanup(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
	return ret;
}

static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	struct drm_encoder *encoder = &dp->encoder;
	struct drm_connector *connector = &dp->connector;

	cancel_work_sync(&dp->event_work);
	cdn_dp_encoder_disable(encoder);
	encoder->funcs->destroy(encoder);
	connector->funcs->destroy(connector);

	pm_runtime_disable(dev);
	if (dp->fw_loaded)
		release_firmware(dp->fw);
	kfree(dp->edid);
	dp->edid = NULL;
}

static const struct component_ops cdn_dp_component_ops = {
	.bind = cdn_dp_bind,
	.unbind = cdn_dp_unbind,
};

static int cdn_dp_suspend(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&dp->lock);
	if (dp->active)
		ret = cdn_dp_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

static __maybe_unused int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

	mutex_lock(&dp->lock);
	dp->suspended = false;
	if (dp->fw_loaded)
		schedule_work(&dp->event_work);
	mutex_unlock(&dp->lock);

	return 0;
}

static int cdn_dp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct cdn_dp_data *dp_data;
	struct cdn_dp_port *port;
	struct cdn_dp_device *dp;
	struct extcon_dev *extcon;
	struct phy *phy;
	int ret;
	int i;

	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return -ENOMEM;
	dp->dev = dev;

	match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node);
	dp_data = (struct cdn_dp_data *)match->data;

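	/*
	 * Collect one cdn_dp_port per Type-C PHY/extcon pair. Probing is
	 * deferred while either provider is still pending; a pair that is
	 * simply absent is skipped, so only populated ports get registered.
	 */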
	for (i = 0; i < dp_data->max_phy; i++) {
		extcon = extcon_get_edev_by_phandle(dev, i);
		phy = devm_of_phy_get_by_index(dev, dev->of_node, i);

		if (PTR_ERR(extcon) == -EPROBE_DEFER ||
		    PTR_ERR(phy) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(extcon) || IS_ERR(phy))
			continue;

		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->extcon = extcon;
		port->phy = phy;
		port->dp = dp;
		port->id = i;
		dp->port[dp->ports++] = port;
	}

	if (!dp->ports) {
		DRM_DEV_ERROR(dev, "missing extcon or phy\n");
		return -EINVAL;
	}

	mutex_init(&dp->lock);
	dev_set_drvdata(dev, dp);

	ret = cdn_dp_audio_codec_init(dp, dev);
	if (ret)
		return ret;

	ret = component_add(dev, &cdn_dp_component_ops);
	if (ret)
		goto err_audio_deinit;

	return 0;

err_audio_deinit:
	platform_device_unregister(dp->audio_pdev);
	return ret;
}

static int cdn_dp_remove(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	platform_device_unregister(dp->audio_pdev);
	cdn_dp_suspend(dp->dev);
	component_del(&pdev->dev, &cdn_dp_component_ops);

	return 0;
}

static void cdn_dp_shutdown(struct platform_device *pdev)
{
	struct cdn_dp_device *dp = platform_get_drvdata(pdev);

	cdn_dp_suspend(dp->dev);
}

static const struct dev_pm_ops cdn_dp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend,
				cdn_dp_resume)
};

struct platform_driver cdn_dp_driver = {
	.probe = cdn_dp_probe,
	.remove = cdn_dp_remove,
	.shutdown = cdn_dp_shutdown,
	.driver = {
		   .name = "cdn-dp",
		   .owner = THIS_MODULE,
		   .of_match_table = of_match_ptr(cdn_dp_dt_ids),
		   .pm = &cdn_dp_pm_ops,
	},
};