• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_atomic.h"
9 #include "intel_cx0_phy_regs.h"
10 #include "intel_ddi.h"
11 #include "intel_de.h"
12 #include "intel_display.h"
13 #include "intel_display_driver.h"
14 #include "intel_display_power_map.h"
15 #include "intel_display_types.h"
16 #include "intel_dkl_phy_regs.h"
17 #include "intel_dp.h"
18 #include "intel_dp_mst.h"
19 #include "intel_mg_phy_regs.h"
20 #include "intel_modeset_lock.h"
21 #include "intel_tc.h"
22 
/*
 * USB Type-C pin assignment values as read back from the FIA/TCSS pin
 * assignment fields; used below to derive the max lane count
 * (D -> 2 lanes, C/E -> 4 lanes).
 */
#define DP_PIN_ASSIGNMENT_C	0x3
#define DP_PIN_ASSIGNMENT_D	0x4
#define DP_PIN_ASSIGNMENT_E	0x5
26 
/* TypeC PHY connection modes; see the power-domain overview comment below. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,	/* nothing connected / PHY not in use */
	TC_PORT_TBT_ALT,	/* Thunderbolt alt mode: TBT subsystem owns the PHY */
	TC_PORT_DP_ALT,		/* DP alternate mode: display takes PHY ownership */
	TC_PORT_LEGACY,		/* legacy (non-TypeC) sink wired to the TC port */
};
33 
34 struct intel_tc_port;
35 
/* Per-platform (ICL/TGL/ADLP/XELPDP) TC PHY handlers. */
struct intel_tc_phy_ops {
	/* Power domain whose reference blocks TC-cold for this PHY/mode. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of BIT(TC_PORT_*) modes with a live HPD indication. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	/* PHY status complete: display may take PHY ownership. */
	bool (*is_ready)(struct intel_tc_port *tc);
	/* Whether display currently owns the PHY. */
	bool (*is_owned)(struct intel_tc_port *tc);
	/* Read out the HW mode/pin configuration into @tc. */
	void (*get_hw_state)(struct intel_tc_port *tc);
	/* Connect flow: block TC-cold, take ownership, verify lanes. */
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	/* Disconnect flow: release ownership, unblock TC-cold. */
	void (*disconnect)(struct intel_tc_port *tc);
	/* One-time setup, e.g. loading the FIA parameters. */
	void (*init)(struct intel_tc_port *tc);
};
46 
struct intel_tc_port {
	struct intel_digital_port *dig_port;

	/* Platform specific PHY handlers, selected at port init. */
	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock;	/* protects the TypeC port mode */
	/* Wakeref for the TC-cold blocking domain, held while connected. */
	intel_wakeref_t lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Debug: domain lock_wakeref was taken on, cross-checked on release. */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	/* NOTE(review): link_refcount management not visible in this chunk. */
	int link_refcount;
	/* VBT legacy-port flag; may be fixed up from live HPD status. */
	bool legacy_port:1;
	const char *port_name;
	enum tc_port_mode mode;
	/* presumably the mode read out at driver init — set outside this chunk */
	enum tc_port_mode init_mode;
	enum phy_fia phy_fia;	/* FIA instance housing this port */
	u8 phy_fia_idx;		/* port index within the FIA instance */
	u8 max_lane_count;	/* cached by read_pin_configuration() */
};
68 
69 static enum intel_display_power_domain
70 tc_phy_cold_off_domain(struct intel_tc_port *);
71 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
72 static bool tc_phy_is_ready(struct intel_tc_port *tc);
73 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
74 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
75 
tc_port_mode_name(enum tc_port_mode mode)76 static const char *tc_port_mode_name(enum tc_port_mode mode)
77 {
78 	static const char * const names[] = {
79 		[TC_PORT_DISCONNECTED] = "disconnected",
80 		[TC_PORT_TBT_ALT] = "tbt-alt",
81 		[TC_PORT_DP_ALT] = "dp-alt",
82 		[TC_PORT_LEGACY] = "legacy",
83 	};
84 
85 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
86 		mode = TC_PORT_DISCONNECTED;
87 
88 	return names[mode];
89 }
90 
/*
 * Return the TC port state of @dig_port. Only meaningful for TypeC ports;
 * callers gate on intel_encoder_is_tc() before dereferencing the result.
 */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
95 
/* Return the i915 device the TC port belongs to. */
static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
{
	return to_i915(tc->dig_port->base.base.dev);
}
100 
intel_tc_port_in_mode(struct intel_digital_port * dig_port,enum tc_port_mode mode)101 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
102 				  enum tc_port_mode mode)
103 {
104 	struct intel_tc_port *tc = to_tc_port(dig_port);
105 
106 	return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
107 }
108 
/* Return whether @dig_port is a TC port currently in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
113 
/* Return whether @dig_port is a TC port currently in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
118 
/* Return whether @dig_port is a TC port currently in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
123 
intel_tc_port_handles_hpd_glitches(struct intel_digital_port * dig_port)124 bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
125 {
126 	struct intel_tc_port *tc = to_tc_port(dig_port);
127 
128 	return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
129 }
130 
131 /*
132  * The display power domains used for TC ports depending on the
133  * platform and TC mode (legacy, DP-alt, TBT):
134  *
135  * POWER_DOMAIN_DISPLAY_CORE:
136  * --------------------------
137  * ADLP/all modes:
138  *   - TCSS/IOM access for PHY ready state.
139  * ADLP+/all modes:
140  *   - DE/north-,south-HPD ISR access for HPD live state.
141  *
142  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
143  * -----------------------------------
144  * ICL+/all modes:
145  *   - DE/DDI_BUF access for port enabled state.
146  * ADLP/all modes:
147  *   - DE/DDI_BUF access for PHY owned state.
148  *
149  * POWER_DOMAIN_AUX_USBC<TC port index>:
150  * -------------------------------------
151  * ICL/legacy mode:
152  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
153  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
154  *     main lanes.
155  * ADLP/legacy, DP-alt modes:
156  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
157  *     main lanes.
158  *
159  * POWER_DOMAIN_TC_COLD_OFF:
160  * -------------------------
161  * ICL/DP-alt, TBT mode:
162  *   - TCSS/TBT: block TC-cold power state for using the (direct or
163  *     TBT DP-IN) AUX and main lanes.
164  *
165  * TGL/all modes:
166  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
167  *   - TCSS/PHY: block TC-cold power state for using the (direct or
168  *     TBT DP-IN) AUX and main lanes.
169  *
170  * ADLP/TBT mode:
171  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
172  *     AUX and main lanes.
173  *
174  * XELPDP+/all modes:
175  *   - TCSS/IOM,FIA access for PHY ready, owned state
176  *   - TCSS/PHY: block TC-cold power state for using the (direct or
177  *     TBT DP-IN) AUX and main lanes.
178  */
intel_tc_cold_requires_aux_pw(struct intel_digital_port * dig_port)179 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
180 {
181 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
182 	struct intel_tc_port *tc = to_tc_port(dig_port);
183 
184 	return tc_phy_cold_off_domain(tc) ==
185 	       intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
186 }
187 
188 static intel_wakeref_t
__tc_cold_block(struct intel_tc_port * tc,enum intel_display_power_domain * domain)189 __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
190 {
191 	struct drm_i915_private *i915 = tc_to_i915(tc);
192 
193 	*domain = tc_phy_cold_off_domain(tc);
194 
195 	return intel_display_power_get(i915, *domain);
196 }
197 
/*
 * Block TC-cold, recording the power domain used for later cross-checking
 * in tc_cold_unblock() on runtime-PM debug builds.
 */
static intel_wakeref_t
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}
210 
/* Drop the TC-cold blocking power domain reference taken by __tc_cold_block(). */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	intel_display_power_put(i915, domain, wakeref);
}
219 
/*
 * Unblock TC-cold. On runtime-PM debug builds warn if the domain being
 * released differs from the one recorded when TC-cold was blocked (which
 * can happen if tc->mode/legacy_port changed in between).
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}
230 
/* WARN if the display core power domain isn't enabled. */
static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
}
239 
240 static void
assert_tc_cold_blocked(struct intel_tc_port * tc)241 assert_tc_cold_blocked(struct intel_tc_port *tc)
242 {
243 	struct drm_i915_private *i915 = tc_to_i915(tc);
244 	bool enabled;
245 
246 	enabled = intel_display_power_is_enabled(i915,
247 						 tc_phy_cold_off_domain(tc));
248 	drm_WARN_ON(&i915->drm, !enabled);
249 }
250 
/* Return the DDI-lanes power domain corresponding to this TC port. */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
258 
/* WARN if the port's DDI-lanes power domain isn't enabled. */
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
}
267 
/*
 * Read the DP lane assignment mask for this port from the FIA.
 * An all-ones readback indicates the PHY is in TC-cold (register
 * inaccessible), which the caller is expected to have prevented.
 */
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 lane_mask;

	lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));

	/* 0xffffffff readback means the FIA was inaccessible (TC-cold). */
	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
282 
/*
 * Read the Type-C pin assignment for this port from the FIA.
 * Requires TC-cold to be blocked; 0xffffffff readback means it wasn't.
 */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 pin_mask;

	pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));

	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
297 
/*
 * LNL+: derive the max lane count from the pin assignment in
 * TCSS_DDI_STATUS. Unknown assignments are reported and treated as the
 * 2-lane case.
 */
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
	intel_wakeref_t wakeref;
	u32 val, pin_assignment;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));

	pin_assignment =
		REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);

	switch (pin_assignment) {
	default:
		MISSING_CASE(pin_assignment);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
322 
/*
 * MTL: derive the max lane count from the FIA pin assignment. Unknown
 * assignments are reported and treated as the 2-lane case.
 */
static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 pin_mask;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);

	switch (pin_mask) {
	default:
		MISSING_CASE(pin_mask);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
343 
/*
 * ICL/TGL/ADLP: derive the max lane count from the FIA lane assignment
 * bitmask (one bit per assigned lane). Unknown masks are reported and
 * treated as a single lane.
 */
static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
369 
/*
 * Return the max lane count usable in the current mode. Only DP-alt mode
 * is lane-limited by the pin assignment; all other modes get 4 lanes.
 * Dispatches to the platform specific readout (LNL+/MTL/older).
 */
static int get_max_lane_count(struct intel_tc_port *tc)
{
	struct intel_digital_port *dig_port = tc->dig_port;
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (tc->mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(tc);

	if (DISPLAY_VER(i915) >= 20)
		return lnl_tc_port_get_max_lane_count(dig_port);

	if (DISPLAY_VER(i915) >= 14)
		return mtl_tc_port_get_max_lane_count(dig_port);

	return intel_tc_port_get_max_lane_count(dig_port);
}
388 
/* Cache the pin-configuration derived state (currently the max lane count). */
static void read_pin_configuration(struct intel_tc_port *tc)
{
	tc->max_lane_count = get_max_lane_count(tc);
}
393 
/*
 * Return the max lane count of @dig_port; non-TC ports always support
 * 4 lanes.
 */
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_encoder_is_tc(&dig_port->base))
		return 4;

	return get_max_lane_count(tc);
}
403 
/*
 * Program the number of main lanes the FIA should connect for this port,
 * honoring lane reversal. Not applicable on DISPLAY_VER >= 14, where
 * there is no FIA to program. Lane reversal is only expected on legacy
 * ports (no connector orientation handling otherwise).
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	u32 val;

	if (DISPLAY_VER(i915) >= 14)
		return;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		/* Reversed ports use the top lane(s) instead of the bottom. */
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
443 
/*
 * Fix up the VBT legacy-port flag if it contradicts an unambiguous live
 * HPD status: a single live HPD bit that isn't valid for the current flag
 * means the flag is wrong, so flip it and trust the hardware.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 valid_hpd_mask;

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only an unambiguous (single-bit) live status can be trusted. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
471 
/*
 * Compute which FIA instance serves this port and the port's index within
 * that instance.
 */
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 * Without Modular FIA there is a single instance serving all ports.
	 */
	tc->phy_fia = modular_fia ? tc_port / 2 : FIA1;
	tc->phy_fia_idx = modular_fia ? tc_port % 2 : tc_port;
}
488 
489 /*
490  * ICL TC PHY handlers
491  * -------------------
492  */
493 static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port * tc)494 icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
495 {
496 	struct drm_i915_private *i915 = tc_to_i915(tc);
497 	struct intel_digital_port *dig_port = tc->dig_port;
498 
499 	if (tc->legacy_port)
500 		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
501 
502 	return POWER_DOMAIN_TC_COLD_OFF;
503 }
504 
/*
 * ICL: return the live HPD status as a bitmask of BIT(TC_PORT_*) modes,
 * combining the FIA live-state bits (TBT/DP-alt) with the PCH hotplug ISR
 * bit (legacy). An all-ones FIA readback means the PHY is in TC-cold, so
 * nothing can be connected.
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
537 
538 /*
539  * Return the PHY status complete flag indicating that display can acquire the
540  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
541  * is connected and it's ready to switch the ownership to display. The flag
542  * will be left cleared when a TBT-alt sink is connected, where the PHY is
543  * owned by the TBT subsystem and so switching the ownership to display is not
544  * required.
545  */
/* See the status-complete description in the comment above. */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* All-ones readback: FIA inaccessible, PHY in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
563 
/*
 * ICL: take or release PHY ownership by toggling the port's "not safe"
 * mode bit in the FIA. Returns false if the FIA was inaccessible
 * (PHY in TC-cold), in which case nothing was changed.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
589 
/*
 * ICL: return whether display owns the PHY, i.e. whether the port's
 * "not safe" mode bit is set in the FIA. An inaccessible FIA (TC-cold)
 * is reported as not owned.
 */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
607 
/*
 * ICL: read out the current PHY mode; if something is connected, keep
 * TC-cold blocked (tc->lock_wakeref) and cache the pin configuration.
 * The temporary block around the readout is dropped again at the end.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
624 
625 /*
626  * This function implements the first part of the Connect Flow described by our
627  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
628  * lanes, EDID, etc) is done as needed in the typical places.
629  *
630  * Unlike the other ports, type-C ports are not available to use as soon as we
631  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
632  * display, USB, etc. As a result, handshaking through FIA is required around
633  * connect and disconnect to cleanly transfer ownership with the controller and
634  * set the type-C power state.
635  */
/*
 * Verify that a legacy/DP-alt connection can proceed: legacy ports must
 * expose all 4 lanes; DP-alt ports must still show a live HPD (the sink
 * may have been unplugged meanwhile) and provide enough lanes.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
671 
/*
 * ICL connect flow: block TC-cold, then for legacy/DP-alt take PHY
 * ownership and verify the mode/lane count. In TBT-alt mode the TBT
 * subsystem owns the PHY, so only TC-cold blocking is needed.
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT) {
		read_pin_configuration(tc);

		return true;
	}

	/*
	 * Legacy ports are expected to always be ready/ownable: WARN in
	 * that case, but carry on with the connect instead of failing.
	 */
	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
708 
709 /*
710  * See the comment at the connect function. This implements the Disconnect
711  * Flow.
712  */
/*
 * ICL disconnect flow: release PHY ownership where display held it
 * (legacy/DP-alt), then unblock TC-cold in all connected modes.
 */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
727 
/* ICL has a single (non-modular) FIA instance. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
732 
/* ICL TC PHY handler table. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
743 
744 /*
745  * TGL TC PHY handlers
746  * -------------------
747  */
748 static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port * tc)749 tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
750 {
751 	return POWER_DOMAIN_TC_COLD_OFF;
752 }
753 
/*
 * TGL: detect whether the SOC uses Modular FIA by probing FIA1 (readback
 * of all-ones would indicate an inaccessible register) and load the FIA
 * parameters accordingly.
 */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));

	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
767 
/* TGL TC PHY handler table; reuses the ICL handlers except for the above. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
778 
779 /*
780  * ADLP TC PHY handlers
781  * --------------------
782  */
783 static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port * tc)784 adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
785 {
786 	struct drm_i915_private *i915 = tc_to_i915(tc);
787 	struct intel_digital_port *dig_port = tc->dig_port;
788 
789 	if (tc->mode != TC_PORT_TBT_ALT)
790 		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
791 
792 	return POWER_DOMAIN_TC_COLD_OFF;
793 }
794 
/*
 * ADLP: return the live HPD status as a bitmask of BIT(TC_PORT_*) modes,
 * combining the north (CPU) TC/TBT hotplug ISR bits with the south (PCH)
 * hotplug ISR bit for legacy mode.
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
822 
823 /*
824  * Return the PHY status complete flag indicating that display can acquire the
825  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
826  * the ownership to display, regardless of what sink is connected (TBT-alt,
827  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
828  * subsystem and so switching the ownership to display is not required.
829  */
/* See the status-complete description in the comment above. */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	u32 val;

	assert_display_core_power_enabled(tc);

	val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
	/* All-ones readback: register inaccessible, PHY in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
848 
/*
 * ADLP: take or release PHY ownership via the ownership bit in
 * DDI_BUF_CTL; requires the port's DDI-lanes power domain. Unlike the
 * ICL FIA based version this cannot fail.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
862 
adlp_tc_phy_is_owned(struct intel_tc_port * tc)863 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
864 {
865 	struct drm_i915_private *i915 = tc_to_i915(tc);
866 	enum port port = tc->dig_port->base.port;
867 	u32 val;
868 
869 	assert_tc_port_power_enabled(tc);
870 
871 	val = intel_de_read(i915, DDI_BUF_CTL(port));
872 	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
873 }
874 
/*
 * ADLP: read out the current PHY mode with the port's DDI-lanes power
 * domain held (needed for the DDI_BUF_CTL based ownership check); if
 * something is connected, keep TC-cold blocked and cache the pin config.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
893 
/*
 * ADLP connect flow. TBT-alt needs only TC-cold blocking. For legacy/
 * DP-alt: hold the port's DDI-lanes power domain across taking ownership
 * (DDI_BUF_CTL access) and the ready check, then block TC-cold and
 * verify the mode/lane count. Error paths unwind in reverse order.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);

		return true;
	}

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	/* Legacy ports are expected to be ownable: WARN but carry on. */
	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return false;
}
945 
/*
 * ADLP disconnect flow: unblock TC-cold, then release ownership where
 * display held it (legacy/DP-alt), with the port's DDI-lanes power
 * domain held for the DDI_BUF_CTL access.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
970 
/* One-time init of the ADL-P TC PHY state: look up its FIA instance/index. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
975 
/* TC PHY vtable selected for DISPLAY_VER() == 13 platforms. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
986 
987 /*
988  * XELPDP TC PHY handlers
989  * ----------------------
990  */
/*
 * Return a BIT(TC_PORT_*) mask of the modes with a live HPD status on the
 * port, based on the PICA interrupt status (DP-alt/TBT hotplug bits) and -
 * on legacy ports - the south display engine interrupt status.
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	/* Both ISR reads need the display core power domain to be enabled. */
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	/* The PCH HPD bit is only valid for legacy (static TypeC) ports. */
	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
1018 
1019 static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port * tc)1020 xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
1021 {
1022 	struct drm_i915_private *i915 = tc_to_i915(tc);
1023 	enum port port = tc->dig_port->base.port;
1024 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
1025 
1026 	assert_tc_cold_blocked(tc);
1027 
1028 	return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
1029 }
1030 
1031 static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port * tc,bool enabled)1032 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
1033 {
1034 	struct drm_i915_private *i915 = tc_to_i915(tc);
1035 
1036 	if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
1037 		drm_dbg_kms(&i915->drm,
1038 			    "Port %s: timeout waiting for TCSS power to get %s\n",
1039 			    enabled ? "enabled" : "disabled",
1040 			    tc->port_name);
1041 		return false;
1042 	}
1043 
1044 	return true;
1045 }
1046 
/*
 * Program the TCSS power request bit in PORT_BUF_CTL1 according to @enable,
 * without waiting for the power state to change.
 */
static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, reg) & ~XELPDP_TCSS_POWER_REQUEST;
	if (enable)
		val |= XELPDP_TCSS_POWER_REQUEST;

	intel_de_write(i915, reg, val);
}
1063 
/*
 * Request the TCSS power state for the port to be enabled/disabled and wait
 * for the request to complete. A failed enable request is reverted, except
 * on legacy ports, where only a WARN is emitted (the request is left in
 * place). Returns true on success, false otherwise.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	/* PHY readiness is a precondition only for powering up. */
	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* Nothing to revert if the failed request was a disable. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1090 
/*
 * Acquire (@take == true) or release the PHY ownership flag in the port's
 * PORT_BUF_CTL1 register.
 */
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
	u32 buf_ctl;

	assert_tc_cold_blocked(tc);

	buf_ctl = intel_de_read(i915, reg) & ~XELPDP_TC_PHY_OWNERSHIP;
	if (take)
		buf_ctl |= XELPDP_TC_PHY_OWNERSHIP;

	intel_de_write(i915, reg, buf_ctl);
}
1107 
xelpdp_tc_phy_is_owned(struct intel_tc_port * tc)1108 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1109 {
1110 	struct drm_i915_private *i915 = tc_to_i915(tc);
1111 	enum port port = tc->dig_port->base.port;
1112 	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
1113 
1114 	assert_tc_cold_blocked(tc);
1115 
1116 	return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
1117 }
1118 
/*
 * Read out the current PHY mode during driver load/system resume, keeping a
 * TC-cold blocking reference (tc->lock_wakeref) if the PHY is connected.
 * WARN if the TCSS power is found disabled in DP-alt/legacy mode, as those
 * modes require it.
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	/* Temporary TC-cold block for the duration of the readout itself. */
	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->lock_wakeref = tc_cold_block(tc);

		read_pin_configuration(tc);
	}

	drm_WARN_ON(&i915->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1140 
/*
 * Connect the Xe_LPD+ TC PHY in the mode selected in tc->mode: block
 * TC-cold in all modes; in DP-alt/legacy mode additionally power up the
 * TCSS, take the PHY ownership and verify the pin configuration for
 * @required_lanes. Unwinds everything acquired on failure.
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT) {
		read_pin_configuration(tc);

		return true;
	}

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	read_pin_configuration(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1172 
/*
 * Disconnect the Xe_LPD+ TC PHY: in DP-alt/legacy mode release the PHY
 * ownership and power down the TCSS, then unblock TC-cold in all connected
 * modes.
 */
static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
1188 
/*
 * TC PHY vtable selected for DISPLAY_VER() >= 14 platforms; reuses the
 * TGL cold-off domain hook and the ADL-P ready/init hooks.
 */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1199 
1200 /*
1201  * Generic TC PHY handlers
1202  * -----------------------
1203  */
/* Platform-independent wrapper: power domain blocking TC-cold for this PHY. */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}
1209 
/*
 * Return the BIT(TC_PORT_*) mask of modes with a live HPD status, WARNing
 * if more than one mode is reported at once.
 */
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 mask;

	mask = tc->phy_ops->hpd_live_status(tc);

	/* The sink can be connected only in a single mode. */
	drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);

	return mask;
}
1222 
/* Platform-independent wrapper: is the PHY ready for display use? */
static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}
1227 
/* Platform-independent wrapper: does display hold the PHY ownership? */
static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}
1232 
/* Platform-independent wrapper: read out the PHY HW state into tc->mode. */
static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}
1237 
/*
 * Combine the ready/owned PHY flags, WARNing about the invalid combination
 * of the PHY being owned while not ready.
 */
static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
				      bool phy_is_ready, bool phy_is_owned)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm, !phy_is_ready && phy_is_owned);

	return phy_is_owned && phy_is_ready;
}
1247 
/*
 * Check whether the PHY connected state (ready + owned vs. not) is
 * consistent with @port_pll_type: a connected PHY is expected to use the MG
 * PHY PLL, a disconnected one the default (TBT) PLL. Logs the result.
 */
static bool tc_phy_is_connected(struct intel_tc_port *tc,
				enum icl_port_dpll_id port_pll_type)
{
	struct intel_encoder *encoder = &tc->dig_port->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	bool phy_is_ready = tc_phy_is_ready(tc);
	bool phy_is_owned = tc_phy_is_owned(tc);
	bool is_connected;

	if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
	else
		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
		    tc->port_name,
		    str_yes_no(is_connected),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");

	return is_connected;
}
1272 
/*
 * Wait up to 500ms for the PHY to become ready, logging an error and
 * returning false on a timeout.
 */
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	if (wait_for(tc_phy_is_ready(tc), 500)) {
		drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
			tc->port_name);

		return false;
	}

	return true;
}
1286 
1287 static enum tc_port_mode
hpd_mask_to_tc_mode(u32 live_status_mask)1288 hpd_mask_to_tc_mode(u32 live_status_mask)
1289 {
1290 	if (live_status_mask)
1291 		return fls(live_status_mask) - 1;
1292 
1293 	return TC_PORT_DISCONNECTED;
1294 }
1295 
1296 static enum tc_port_mode
tc_phy_hpd_live_mode(struct intel_tc_port * tc)1297 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1298 {
1299 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1300 
1301 	return hpd_mask_to_tc_mode(live_status_mask);
1302 }
1303 
/*
 * Resolve the PHY mode while display owns the PHY: a DP-alt/legacy live
 * status is taken as-is; without a (valid) live status fall back to legacy
 * on legacy ports and DP-alt otherwise. A TBT-alt live status is
 * unexpected here (the caller WARNs about it) and gets the same fallback.
 */
static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
			       enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		return live_mode;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_TBT_ALT:
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_LEGACY;
		else
			return TC_PORT_DP_ALT;
	}
}
1323 
/*
 * Resolve the PHY mode while display does not own the PHY: a legacy live
 * status means disconnected (ownership would be required), DP-alt/TBT-alt
 * resolve to TBT-alt; without a live status legacy ports are disconnected
 * and others default to TBT-alt.
 */
static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
				   enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
		return TC_PORT_DISCONNECTED;
	case TC_PORT_DP_ALT:
	case TC_PORT_TBT_ALT:
		return TC_PORT_TBT_ALT;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_DISCONNECTED;
		else
			return TC_PORT_TBT_ALT;
	}
}
1344 
/*
 * Determine the current PHY mode from the live HPD status and the PHY's
 * ready/owned flags, logging the result. Used during HW state readout.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* An owned PHY with a TBT-alt live status is unexpected. */
		drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1382 
default_tc_mode(struct intel_tc_port * tc)1383 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1384 {
1385 	if (tc->legacy_port)
1386 		return TC_PORT_LEGACY;
1387 
1388 	return TC_PORT_TBT_ALT;
1389 }
1390 
1391 static enum tc_port_mode
hpd_mask_to_target_mode(struct intel_tc_port * tc,u32 live_status_mask)1392 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1393 {
1394 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1395 
1396 	if (mode != TC_PORT_DISCONNECTED)
1397 		return mode;
1398 
1399 	return default_tc_mode(tc);
1400 }
1401 
1402 static enum tc_port_mode
tc_phy_get_target_mode(struct intel_tc_port * tc)1403 tc_phy_get_target_mode(struct intel_tc_port *tc)
1404 {
1405 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1406 
1407 	return hpd_mask_to_target_mode(tc, live_status_mask);
1408 }
1409 
/*
 * Connect the PHY in the mode matching the live HPD status, retrying in the
 * port's default mode if that fails; WARN if the PHY could not be connected
 * in either mode.
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		/* Fall back to the default mode and retry once. */
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(&i915->drm, !connected);
}
1428 
tc_phy_disconnect(struct intel_tc_port * tc)1429 static void tc_phy_disconnect(struct intel_tc_port *tc)
1430 {
1431 	if (tc->mode != TC_PORT_DISCONNECTED) {
1432 		tc->phy_ops->disconnect(tc);
1433 		tc->mode = TC_PORT_DISCONNECTED;
1434 	}
1435 }
1436 
/* One-time PHY init, done under the port lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1443 
/*
 * Disconnect the PHY and - unless @force_disconnect - reconnect it in the
 * mode matching the current live status, using @required_lanes. Any pending
 * power domain work is flushed first; the AUX power state is logged for
 * ports whose TC-cold blocking doesn't depend on the AUX power domain.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_dbg_kms(&i915->drm, "Port %s: AUX powered %d\n",
			    tc->port_name, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}
1471 
/* Does the current mode differ from the one the live status calls for? */
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
{
	return tc_phy_get_target_mode(tc) != tc->mode;
}
1476 
/*
 * Reset the port mode if a forced disconnect was requested or the current
 * mode no longer matches the live status.
 */
static void intel_tc_port_update_mode(struct intel_tc_port *tc,
				      int required_lanes, bool force_disconnect)
{
	if (!force_disconnect && !intel_tc_port_needs_reset(tc))
		return;

	intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
}
1484 
/* Take a link reference, keeping the PHY connected state locked in place. */
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}
1489 
/* Drop a link reference taken by __intel_tc_port_get_link(). */
static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}
1494 
/* Check whether the port's DDI is enabled, i.e. the output is active. */
static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	assert_tc_port_power_enabled(tc);

	return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
	       DDI_BUF_CTL_ENABLE;
}
1505 
1506 /**
1507  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1508  * @dig_port: digital port
1509  *
1510  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1511  * will be locked until intel_tc_port_sanitize_mode() is called.
1512  */
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool update_mode = false;

	mutex_lock(&tc->lock);

	/* The port is expected to be in its post-alloc, pristine state. */
	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->lock_wakeref);
	drm_WARN_ON(&i915->drm, tc->link_refcount);

	tc_phy_get_hw_state(tc);
	/*
	 * Save the initial mode for the state check in
	 * intel_tc_port_sanitize_mode().
	 */
	tc->init_mode = tc->mode;

	/*
	 * The PHY needs to be connected for AUX to work during HW readout and
	 * MST topology resume, but the PHY mode can only be changed if the
	 * port is disabled.
	 *
	 * An exception is the case where BIOS leaves the PHY incorrectly
	 * disconnected on an enabled legacy port. Work around that by
	 * connecting the PHY even though the port is enabled. This doesn't
	 * cause a problem as the PHY ownership state is ignored by the
	 * IOM/TCSS firmware (only display can own the PHY in that case).
	 */
	if (!tc_port_is_enabled(tc)) {
		update_mode = true;
	} else if (tc->mode == TC_PORT_DISCONNECTED) {
		drm_WARN_ON(&i915->drm, !tc->legacy_port);
		drm_err(&i915->drm,
			"Port %s: PHY disconnected on enabled port, connecting it\n",
			tc->port_name);
		update_mode = true;
	}

	if (update_mode)
		intel_tc_port_update_mode(tc, 1, false);

	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
	__intel_tc_port_get_link(tc);

	mutex_unlock(&tc->lock);
}
1561 
/*
 * Check whether the port has any active (MST or SST) links, logging an
 * error if it does while the PHY's connected state is inconsistent with the
 * PLL type in use.
 */
static bool tc_port_has_active_links(struct intel_tc_port *tc,
				     const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_links = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_links = 1;
	}

	if (active_links && !tc_phy_is_connected(tc, pll_type))
		drm_err(&i915->drm,
			"Port %s: PHY disconnected with %d active link(s)\n",
			tc->port_name, active_links);

	return active_links;
}
1585 
1586 /**
1587  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1588  * @dig_port: digital port
1589  * @crtc_state: atomic state of CRTC connected to @dig_port
1590  *
1591  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1592  * loading and system resume:
1593  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1594  * the encoder is disabled.
1595  * If the encoder is disabled make sure the PHY is disconnected.
1596  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1597  */
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
				 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	mutex_lock(&tc->lock);

	/* Only the reference taken by intel_tc_port_init_mode() is expected. */
	drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
	if (!tc_port_has_active_links(tc, crtc_state)) {
		/*
		 * TBT-alt is the default mode in any case the PHY ownership is not
		 * held (regardless of the sink's connected live state), so
		 * we'll just switch to disconnected mode from it here without
		 * a note.
		 */
		if (tc->init_mode != TC_PORT_TBT_ALT &&
		    tc->init_mode != TC_PORT_DISCONNECTED)
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    tc->port_name,
				    tc_port_mode_name(tc->init_mode));
		tc_phy_disconnect(tc);
		/* Drop the reference taken by intel_tc_port_init_mode(). */
		__intel_tc_port_put_link(tc);
	}

	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    tc->port_name,
		    tc_port_mode_name(tc->mode));

	mutex_unlock(&tc->lock);
}
1630 
1631 /*
1632  * The type-C ports are different because even when they are connected, they may
1633  * not be available/usable by the graphics driver: see the comment on
1634  * icl_tc_phy_connect(). So in our driver instead of adding the additional
1635  * concept of "usable" and make everything check for "connected and usable" we
1636  * define a port as "connected" when it is not only connected, but also when it
1637  * is usable by the rest of the driver. That maintains the old assumption that
1638  * connected ports are usable, and avoids exposing to the users objects they
1639  * can't really use.
1640  */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 mask = ~0;

	/* tc->mode must be stable: the lock or a link reference is required. */
	drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));

	/* Once connected, only a live status in the current mode counts. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		mask = BIT(tc->mode);

	return tc_phy_hpd_live_status(tc) & mask;
}
1655 
/*
 * Check under the port lock whether an active DP-alt link needs a reset,
 * i.e. its mode no longer matches the live status.
 */
static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
{
	bool ret;

	mutex_lock(&tc->lock);

	ret = tc->link_refcount &&
	      tc->mode == TC_PORT_DP_ALT &&
	      intel_tc_port_needs_reset(tc);

	mutex_unlock(&tc->lock);

	return ret;
}
1670 
intel_tc_port_link_needs_reset(struct intel_digital_port * dig_port)1671 bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
1672 {
1673 	if (!intel_encoder_is_tc(&dig_port->base))
1674 		return false;
1675 
1676 	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
1677 }
1678 
/*
 * Build and commit an atomic state that marks all CRTCs driven by the port
 * as having changed connectors, forcing a modeset that resets the link.
 * Returns 0 if no commit was needed (no active pipes or the reset became
 * unnecessary meanwhile), a negative error code otherwise.
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->uapi.connectors_changed = true;
	}

	/* Re-check: the need for a reset may have gone away meanwhile. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1716 
/*
 * Reset the port's link via an internal atomic commit, with the usual
 * modeset-lock retry loop around the commit.
 */
static int reset_link(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(&i915->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Internal commit: not triggered by / visible to userspace. */
	state->internal = true;

	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1739 
/*
 * Delayed work resetting the link of a DP-alt port whose sink got
 * disconnected, queued by intel_tc_port_link_reset().
 */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct drm_i915_private *i915 = tc_to_i915(tc);
	int ret;

	/* Bail out if the reset became unnecessary while the work was queued. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&i915->drm.mode_config.mutex);

	drm_dbg_kms(&i915->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(&i915->drm, ret);

	mutex_unlock(&i915->drm.mode_config.mutex);
}
1760 
/*
 * Schedule a delayed link reset if the port needs one; returns true if the
 * work got queued.
 */
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
{
	if (!intel_tc_port_link_needs_reset(dig_port))
		return false;

	/* Delay the reset to let a reconnecting sink settle first. */
	queue_delayed_work(system_unbound_wq,
			   &to_tc_port(dig_port)->link_reset_work,
			   msecs_to_jiffies(2000));

	return true;
}
1772 
/* Cancel a pending link reset work; no-op on non-TC ports. */
void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_encoder_is_tc(&dig_port->base))
		return;

	cancel_delayed_work(&tc->link_reset_work);
}
1782 
/*
 * Lock the port and - unless a link reference keeps the mode fixed - update
 * its mode to match the live status, connecting the PHY with
 * @required_lanes. Any pending PHY-disconnect work is canceled first.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	mutex_lock(&tc->lock);

	cancel_delayed_work(&tc->disconnect_phy_work);

	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	/* After the update the PHY must be connected and (if not TBT) owned. */
	drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(tc));
}
1800 
/* Lock the port, connecting its PHY with at least 1 lane if needed. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(to_tc_port(dig_port), 1);
}
1805 
1806 /*
1807  * Disconnect the given digital port from its TypeC PHY (handing back the
1808  * control of the PHY to the TypeC subsystem). This will happen in a delayed
1809  * manner after each aux transactions and modeset disables.
1810  */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, disconnect_phy_work.work);

	mutex_lock(&tc->lock);

	/* A held link reference means the PHY must stay connected. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, 1, true);

	mutex_unlock(&tc->lock);
}
1823 
1824 /**
1825  * intel_tc_port_flush_work: flush the work disconnecting the PHY
1826  * @dig_port: digital port
1827  *
1828  * Flush the delayed work disconnecting an idle PHY.
1829  */
static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
}
1834 
/* Quiesce the port's delayed works before system suspend. */
void intel_tc_port_suspend(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	cancel_delayed_work_sync(&tc->link_reset_work);
	intel_tc_port_flush_work(dig_port);
}
1842 
/*
 * Unlock the port, scheduling the PHY to be disconnected after a delay if
 * it's connected and no link reference is held.
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1853 
/* Is the port mode pinned, either by the lock or a link reference? */
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return mutex_is_locked(&tc->lock) ||
	       tc->link_refcount;
}
1861 
/*
 * Connect the PHY with @required_lanes and take a link reference keeping it
 * connected until intel_tc_port_put_link() is called.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1871 
/* Drop a link reference taken via intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1888 
/*
 * Allocate and initialize the TypeC port state for @dig_port, selecting the
 * platform specific PHY ops and reading out the initial port mode.
 *
 * Returns 0 on success, -EINVAL for a non-TC port, -ENOMEM on allocation
 * failure. On failure dig_port->tc is left NULL.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	dig_port->tc = tc;
	tc->dig_port = dig_port;

	/* Select the PHY ops matching the display IP version. */
	if (DISPLAY_VER(i915) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
				  tc_port + 1);
	if (!tc->port_name) {
		/*
		 * Clear the pointer assigned above, so dig_port->tc doesn't
		 * dangle to the freed allocation on this error path.
		 */
		dig_port->tc = NULL;
		kfree(tc);
		return -ENOMEM;
	}

	mutex_init(&tc->lock);
	/* TODO: Combine the two works */
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
1936 
intel_tc_port_cleanup(struct intel_digital_port * dig_port)1937 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
1938 {
1939 	intel_tc_port_suspend(dig_port);
1940 
1941 	kfree(dig_port->tc->port_name);
1942 	kfree(dig_port->tc);
1943 	dig_port->tc = NULL;
1944 }
1945