1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
50 struct dp_link_dpll {
51 int clock;
52 struct dpll dpll;
53 };
54
55 static const struct dp_link_dpll gen4_dpll[] = {
56 { 162000,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { 270000,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60 };
61
62 static const struct dp_link_dpll pch_dpll[] = {
63 { 162000,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { 270000,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67 };
68
69 static const struct dp_link_dpll vlv_dpll[] = {
70 { 162000,
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
72 { 270000,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74 };
75
76 /*
77 * CHV supports eDP 1.4, which has more link rates.
78 * The table below provides only the fixed rates, excluding the variable rates.
79 */
80 static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires programming a fractional divider for m2.
83 * m2 is stored in fixed-point format using the formula below:
84 * (m2_int << 22) | m2_fraction
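* e.g. the 162000 entry below: m2_int = 32, m2_fraction = 1677722, so
* m2 = (32 << 22) | 1677722 = 0x819999a.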
85 */
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92 };
93
94 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
96 static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98 static const int default_rates[] = { 162000, 270000, 540000 };
99
100 /**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true; otherwise it returns false.
106 */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
119 }
120
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 {
123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 }
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
132
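/*
 * e.g. lane_count == 2: ~((1 << 2) - 1) & 0xf == ~0x3 & 0xf == 0xc,
 * i.e. lanes 2 and 3 are reported as unused.
 */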
133 static unsigned int intel_dp_unused_lane_mask(int lane_count)
134 {
135 return ~((1 << lane_count) - 1) & 0xf;
136 }
137
138 static int
139 intel_dp_max_link_bw(struct intel_dp *intel_dp)
140 {
141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
142
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
145 case DP_LINK_BW_2_7:
146 case DP_LINK_BW_5_4:
147 break;
148 default:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150 max_link_bw);
151 max_link_bw = DP_LINK_BW_1_62;
152 break;
153 }
154 return max_link_bw;
155 }
156
157 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158 {
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160 struct drm_device *dev = intel_dig_port->base.base.dev;
161 u8 source_max, sink_max;
162
163 source_max = 4;
164 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
165 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
166 source_max = 2;
167
168 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
169
170 return min(source_max, sink_max);
171 }
172
173 /*
174 * The units on the numbers in the next two are... bizarre. Examples will
175 * make it clearer; this one parallels an example in the eDP spec.
176 *
177 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
178 *
179 * 270000 * 1 * 8 / 10 == 216000
180 *
181 * The actual data capacity of that configuration is 2.16Gbit/s, so the
182 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
183 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
184 * 119000. At 18bpp that's 2142000 kilobits per second.
185 *
186 * Thus the strange-looking division by 10 in intel_dp_link_required, to
187 * get the result in decakilobits instead of kilobits.
188 */
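/*
 * Completing that example (an illustrative sketch, not driver code):
 *
 *	intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200
 *	intel_dp_max_data_rate(270000, 1)  == 270000 * 1 * 8 / 10    == 216000
 *
 * 214200 <= 216000, so 1680x1050R at 18bpp fits on a single 2.7GHz lane.
 */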
189
190 static int
191 intel_dp_link_required(int pixel_clock, int bpp)
192 {
193 return (pixel_clock * bpp + 9) / 10;
194 }
195
196 static int
197 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
198 {
199 return (max_link_clock * max_lanes * 8) / 10;
200 }
201
202 static enum drm_mode_status
203 intel_dp_mode_valid(struct drm_connector *connector,
204 struct drm_display_mode *mode)
205 {
206 struct intel_dp *intel_dp = intel_attached_dp(connector);
207 struct intel_connector *intel_connector = to_intel_connector(connector);
208 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
209 int target_clock = mode->clock;
210 int max_rate, mode_rate, max_lanes, max_link_clock;
211
212 if (is_edp(intel_dp) && fixed_mode) {
213 if (mode->hdisplay > fixed_mode->hdisplay)
214 return MODE_PANEL;
215
216 if (mode->vdisplay > fixed_mode->vdisplay)
217 return MODE_PANEL;
218
219 target_clock = fixed_mode->clock;
220 }
221
222 max_link_clock = intel_dp_max_link_rate(intel_dp);
223 max_lanes = intel_dp_max_lane_count(intel_dp);
224
225 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
226 mode_rate = intel_dp_link_required(target_clock, 18);
227
228 if (mode_rate > max_rate)
229 return MODE_CLOCK_HIGH;
230
231 if (mode->clock < 10000)
232 return MODE_CLOCK_LOW;
233
234 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
235 return MODE_H_ILLEGAL;
236
237 return MODE_OK;
238 }
239
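/*
 * The AUX data registers hold up to 4 bytes each, MSB first; e.g.
 * src = { 0x12, 0x34 } packs to 0x12340000.
 */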
240 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
241 {
242 int i;
243 uint32_t v = 0;
244
245 if (src_bytes > 4)
246 src_bytes = 4;
247 for (i = 0; i < src_bytes; i++)
248 v |= ((uint32_t) src[i]) << ((3-i) * 8);
249 return v;
250 }
251
252 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
253 {
254 int i;
255 if (dst_bytes > 4)
256 dst_bytes = 4;
257 for (i = 0; i < dst_bytes; i++)
258 dst[i] = src >> ((3-i) * 8);
259 }
260
261 static void
262 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
263 struct intel_dp *intel_dp);
264 static void
265 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
266 struct intel_dp *intel_dp);
267
268 static void pps_lock(struct intel_dp *intel_dp)
269 {
270 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
271 struct intel_encoder *encoder = &intel_dig_port->base;
272 struct drm_device *dev = encoder->base.dev;
273 struct drm_i915_private *dev_priv = dev->dev_private;
274 enum intel_display_power_domain power_domain;
275
276 /*
277 * See vlv_power_sequencer_reset() for why we need
278 * a power domain reference here.
279 */
280 power_domain = intel_display_port_aux_power_domain(encoder);
281 intel_display_power_get(dev_priv, power_domain);
282
283 mutex_lock(&dev_priv->pps_mutex);
284 }
285
286 static void pps_unlock(struct intel_dp *intel_dp)
287 {
288 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
289 struct intel_encoder *encoder = &intel_dig_port->base;
290 struct drm_device *dev = encoder->base.dev;
291 struct drm_i915_private *dev_priv = dev->dev_private;
292 enum intel_display_power_domain power_domain;
293
294 mutex_unlock(&dev_priv->pps_mutex);
295
296 power_domain = intel_display_port_aux_power_domain(encoder);
297 intel_display_power_put(dev_priv, power_domain);
298 }
299
300 static void
301 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
302 {
303 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
304 struct drm_device *dev = intel_dig_port->base.base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private;
306 enum pipe pipe = intel_dp->pps_pipe;
307 bool pll_enabled, release_cl_override = false;
308 enum dpio_phy phy = DPIO_PHY(pipe);
309 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
310 uint32_t DP;
311
312 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
313 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
314 pipe_name(pipe), port_name(intel_dig_port->port)))
315 return;
316
317 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
318 pipe_name(pipe), port_name(intel_dig_port->port));
319
320 /* Preserve the BIOS-computed detected bit. This is
321 * supposed to be read-only.
322 */
323 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
324 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
325 DP |= DP_PORT_WIDTH(1);
326 DP |= DP_LINK_TRAIN_PAT_1;
327
328 if (IS_CHERRYVIEW(dev))
329 DP |= DP_PIPE_SELECT_CHV(pipe);
330 else if (pipe == PIPE_B)
331 DP |= DP_PIPEB_SELECT;
332
333 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
334
335 /*
336 * The DPLL for the pipe must be enabled for this to work.
337 * So enable it temporarily if it's not already enabled.
338 */
339 if (!pll_enabled) {
340 release_cl_override = IS_CHERRYVIEW(dev) &&
341 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
342
343 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
344 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
345 }
346
347 /*
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power sequencer lock onto the port.
351 * Otherwise even VDD force bit won't work.
352 */
353 I915_WRITE(intel_dp->output_reg, DP);
354 POSTING_READ(intel_dp->output_reg);
355
356 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357 POSTING_READ(intel_dp->output_reg);
358
359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
361
362 if (!pll_enabled) {
363 vlv_force_pll_off(dev, pipe);
364
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
367 }
368 }
369
370 static enum pipe
371 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
372 {
373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
374 struct drm_device *dev = intel_dig_port->base.base.dev;
375 struct drm_i915_private *dev_priv = dev->dev_private;
376 struct intel_encoder *encoder;
377 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
378 enum pipe pipe;
379
380 lockdep_assert_held(&dev_priv->pps_mutex);
381
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp));
384
385 if (intel_dp->pps_pipe != INVALID_PIPE)
386 return intel_dp->pps_pipe;
387
388 /*
389 * We don't have a power sequencer currently.
390 * Pick one that's not used by other ports.
391 */
392 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
393 base.head) {
394 struct intel_dp *tmp;
395
396 if (encoder->type != INTEL_OUTPUT_EDP)
397 continue;
398
399 tmp = enc_to_intel_dp(&encoder->base);
400
401 if (tmp->pps_pipe != INVALID_PIPE)
402 pipes &= ~(1 << tmp->pps_pipe);
403 }
404
405 /*
406 * Didn't find one. This should not happen since there
407 * are two power sequencers and up to two eDP ports.
408 */
409 if (WARN_ON(pipes == 0))
410 pipe = PIPE_A;
411 else
412 pipe = ffs(pipes) - 1;
413
414 vlv_steal_power_sequencer(dev, pipe);
415 intel_dp->pps_pipe = pipe;
416
417 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
418 pipe_name(intel_dp->pps_pipe),
419 port_name(intel_dig_port->port));
420
421 /* init power sequencer on this pipe and port */
422 intel_dp_init_panel_power_sequencer(dev, intel_dp);
423 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
424
425 /*
426 * Even the VDD force bit doesn't work until we've made
427 * the power sequencer lock onto the port.
428 */
429 vlv_power_sequencer_kick(intel_dp);
430
431 return intel_dp->pps_pipe;
432 }
433
434 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
435 enum pipe pipe);
436
437 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
438 enum pipe pipe)
439 {
440 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
441 }
442
443 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
444 enum pipe pipe)
445 {
446 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
447 }
448
449 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
450 enum pipe pipe)
451 {
452 return true;
453 }
454
455 static enum pipe
456 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
457 enum port port,
458 vlv_pipe_check pipe_check)
459 {
460 enum pipe pipe;
461
462 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
463 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
464 PANEL_PORT_SELECT_MASK;
465
466 if (port_sel != PANEL_PORT_SELECT_VLV(port))
467 continue;
468
469 if (!pipe_check(dev_priv, pipe))
470 continue;
471
472 return pipe;
473 }
474
475 return INVALID_PIPE;
476 }
477
478 static void
479 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
480 {
481 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
482 struct drm_device *dev = intel_dig_port->base.base.dev;
483 struct drm_i915_private *dev_priv = dev->dev_private;
484 enum port port = intel_dig_port->port;
485
486 lockdep_assert_held(&dev_priv->pps_mutex);
487
488 /* try to find a pipe with this port selected */
489 /* first pick one where the panel is on */
490 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
491 vlv_pipe_has_pp_on);
492 /* didn't find one? pick one where vdd is on */
493 if (intel_dp->pps_pipe == INVALID_PIPE)
494 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
495 vlv_pipe_has_vdd_on);
496 /* didn't find one? pick one with just the correct port */
497 if (intel_dp->pps_pipe == INVALID_PIPE)
498 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
499 vlv_pipe_any);
500
501 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
502 if (intel_dp->pps_pipe == INVALID_PIPE) {
503 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
504 port_name(port));
505 return;
506 }
507
508 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
509 port_name(port), pipe_name(intel_dp->pps_pipe));
510
511 intel_dp_init_panel_power_sequencer(dev, intel_dp);
512 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
513 }
514
515 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
516 {
517 struct drm_device *dev = dev_priv->dev;
518 struct intel_encoder *encoder;
519
520 if (WARN_ON(!IS_VALLEYVIEW(dev)))
521 return;
522
523 /*
524 * We can't grab pps_mutex here due to deadlock with power_domain
525 * mutex when power_domain functions are called while holding pps_mutex.
526 * That also means that in order to use pps_pipe the code needs to
527 * hold both a power domain reference and pps_mutex, and the power domain
528 * reference get/put must be done while _not_ holding pps_mutex.
529 * pps_{lock,unlock}() do these steps in the correct order, so they
530 * should always be used.
531 */
532
533 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
534 struct intel_dp *intel_dp;
535
536 if (encoder->type != INTEL_OUTPUT_EDP)
537 continue;
538
539 intel_dp = enc_to_intel_dp(&encoder->base);
540 intel_dp->pps_pipe = INVALID_PIPE;
541 }
542 }
543
544 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
545 {
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547
548 if (IS_BROXTON(dev))
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
551 return PCH_PP_CONTROL;
552 else
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554 }
555
556 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
557 {
558 struct drm_device *dev = intel_dp_to_dev(intel_dp);
559
560 if (IS_BROXTON(dev))
561 return BXT_PP_STATUS(0);
562 else if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_STATUS;
564 else
565 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
566 }
567
568 /* Reboot notifier handler to shut down panel power and guarantee T12 timing.
569    This function is only applicable when the panel PM state is not to be tracked. */
570 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
571 void *unused)
572 {
573 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
574 edp_notifier);
575 struct drm_device *dev = intel_dp_to_dev(intel_dp);
576 struct drm_i915_private *dev_priv = dev->dev_private;
577
578 if (!is_edp(intel_dp) || code != SYS_RESTART)
579 return 0;
580
581 pps_lock(intel_dp);
582
583 if (IS_VALLEYVIEW(dev)) {
584 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
585 u32 pp_ctrl_reg, pp_div_reg;
586 u32 pp_div;
587
588 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
589 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
590 pp_div = I915_READ(pp_div_reg);
591 pp_div &= PP_REFERENCE_DIVIDER_MASK;
592
593 /* 0x1F write to PP_DIV_REG sets max cycle delay */
594 I915_WRITE(pp_div_reg, pp_div | 0x1F);
595 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
596 msleep(intel_dp->panel_power_cycle_delay);
597 }
598
599 pps_unlock(intel_dp);
600
601 return 0;
602 }
603
604 static bool edp_have_panel_power(struct intel_dp *intel_dp)
605 {
606 struct drm_device *dev = intel_dp_to_dev(intel_dp);
607 struct drm_i915_private *dev_priv = dev->dev_private;
608
609 lockdep_assert_held(&dev_priv->pps_mutex);
610
611 if (IS_VALLEYVIEW(dev) &&
612 intel_dp->pps_pipe == INVALID_PIPE)
613 return false;
614
615 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
616 }
617
618 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
619 {
620 struct drm_device *dev = intel_dp_to_dev(intel_dp);
621 struct drm_i915_private *dev_priv = dev->dev_private;
622
623 lockdep_assert_held(&dev_priv->pps_mutex);
624
625 if (IS_VALLEYVIEW(dev) &&
626 intel_dp->pps_pipe == INVALID_PIPE)
627 return false;
628
629 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
630 }
631
632 static void
633 intel_dp_check_edp(struct intel_dp *intel_dp)
634 {
635 struct drm_device *dev = intel_dp_to_dev(intel_dp);
636 struct drm_i915_private *dev_priv = dev->dev_private;
637
638 if (!is_edp(intel_dp))
639 return;
640
641 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
642 WARN(1, "eDP powered off while attempting aux channel communication.\n");
643 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
644 I915_READ(_pp_stat_reg(intel_dp)),
645 I915_READ(_pp_ctrl_reg(intel_dp)));
646 }
647 }
648
649 static uint32_t
650 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
651 {
652 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
653 struct drm_device *dev = intel_dig_port->base.base.dev;
654 struct drm_i915_private *dev_priv = dev->dev_private;
655 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
656 uint32_t status;
657 bool done;
658
659 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
660 if (has_aux_irq)
661 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
662 msecs_to_jiffies_timeout(10));
663 else
664 done = wait_for_atomic(C, 10) == 0;
665 if (!done)
666 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
667 has_aux_irq);
668 #undef C
669
670 return status;
671 }
672
673 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
674 {
675 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
676 struct drm_device *dev = intel_dig_port->base.base.dev;
677
678 /*
679 * The clock divider is based off the hrawclk, and would like to run at
680 * 2MHz. So, take the hrawclk value, divide by 2, and use that.
681 */
682 return index ? 0 : intel_hrawclk(dev) / 2;
683 }
684
685 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
686 {
687 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
688 struct drm_device *dev = intel_dig_port->base.base.dev;
689 struct drm_i915_private *dev_priv = dev->dev_private;
690
691 if (index)
692 return 0;
693
694 if (intel_dig_port->port == PORT_A) {
695 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
696
697 } else {
698 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
699 }
700 }
701
702 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
703 {
704 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
705 struct drm_device *dev = intel_dig_port->base.base.dev;
706 struct drm_i915_private *dev_priv = dev->dev_private;
707
708 if (intel_dig_port->port == PORT_A) {
709 if (index)
710 return 0;
711 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
712 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
713 /* Workaround for non-ULT HSW */
714 switch (index) {
715 case 0: return 63;
716 case 1: return 72;
717 default: return 0;
718 }
719 } else {
720 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
721 }
722 }
723
724 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
725 {
726 return index ? 0 : 100;
727 }
728
729 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
730 {
731 /*
732 * SKL doesn't need us to program the AUX clock divider (Hardware will
733 * derive the clock from CDCLK automatically). We still implement the
734 * get_aux_clock_divider vfunc to plug into the existing code.
735 */
736 return index ? 0 : 1;
737 }
738
739 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
740 bool has_aux_irq,
741 int send_bytes,
742 uint32_t aux_clock_divider)
743 {
744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
745 struct drm_device *dev = intel_dig_port->base.base.dev;
746 uint32_t precharge, timeout;
747
748 if (IS_GEN6(dev))
749 precharge = 3;
750 else
751 precharge = 5;
752
753 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
754 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
755 else
756 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
757
758 return DP_AUX_CH_CTL_SEND_BUSY |
759 DP_AUX_CH_CTL_DONE |
760 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
761 DP_AUX_CH_CTL_TIME_OUT_ERROR |
762 timeout |
763 DP_AUX_CH_CTL_RECEIVE_ERROR |
764 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
765 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
766 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
767 }
768
769 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
770 bool has_aux_irq,
771 int send_bytes,
772 uint32_t unused)
773 {
774 return DP_AUX_CH_CTL_SEND_BUSY |
775 DP_AUX_CH_CTL_DONE |
776 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
777 DP_AUX_CH_CTL_TIME_OUT_ERROR |
778 DP_AUX_CH_CTL_TIME_OUT_1600us |
779 DP_AUX_CH_CTL_RECEIVE_ERROR |
780 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
781 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
782 }
783
784 static int
785 intel_dp_aux_ch(struct intel_dp *intel_dp,
786 const uint8_t *send, int send_bytes,
787 uint8_t *recv, int recv_size)
788 {
789 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
790 struct drm_device *dev = intel_dig_port->base.base.dev;
791 struct drm_i915_private *dev_priv = dev->dev_private;
792 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
793 uint32_t ch_data = ch_ctl + 4;
794 uint32_t aux_clock_divider;
795 int i, ret, recv_bytes;
796 uint32_t status;
797 int try, clock = 0;
798 bool has_aux_irq = HAS_AUX_IRQ(dev);
799 bool vdd;
800
801 pps_lock(intel_dp);
802
803 /*
804 * We will be called with VDD already enabled for dpcd/edid/oui reads.
805 * In such cases we want to leave VDD enabled and it's up to the upper layers
806 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
807 * ourselves.
808 */
809 vdd = edp_panel_vdd_on(intel_dp);
810
811 /* dp aux is extremely sensitive to irq latency, hence request the
812 * lowest possible wakeup latency and so prevent the cpu from going into
813 * deep sleep states.
814 */
815 pm_qos_update_request(&dev_priv->pm_qos, 0);
816
817 intel_dp_check_edp(intel_dp);
818
819 /* Try to wait for any previous AUX channel activity */
820 for (try = 0; try < 3; try++) {
821 status = I915_READ_NOTRACE(ch_ctl);
822 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
823 break;
824 msleep(1);
825 }
826
827 if (try == 3) {
828 static u32 last_status = -1;
829 const u32 status = I915_READ(ch_ctl);
830
831 if (status != last_status) {
832 WARN(1, "dp_aux_ch not started status 0x%08x\n",
833 status);
834 last_status = status;
835 }
836
837 ret = -EBUSY;
838 goto out;
839 }
840
841 /* Only 5 data registers! */
842 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
843 ret = -E2BIG;
844 goto out;
845 }
846
847 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
848 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
849 has_aux_irq,
850 send_bytes,
851 aux_clock_divider);
852
853 /* Must try at least 3 times according to DP spec */
854 for (try = 0; try < 5; try++) {
855 /* Load the send data into the aux channel data registers */
856 for (i = 0; i < send_bytes; i += 4)
857 I915_WRITE(ch_data + i,
858 intel_dp_pack_aux(send + i,
859 send_bytes - i));
860
861 /* Send the command and wait for it to complete */
862 I915_WRITE(ch_ctl, send_ctl);
863
864 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
865
866 /* Clear done status and any errors */
867 I915_WRITE(ch_ctl,
868 status |
869 DP_AUX_CH_CTL_DONE |
870 DP_AUX_CH_CTL_TIME_OUT_ERROR |
871 DP_AUX_CH_CTL_RECEIVE_ERROR);
872
873 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
874 continue;
875
876 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
877 * A 400us delay is required after errors and timeouts.
878 * Timeout errors from the HW already meet this
879 * requirement, so skip to the next iteration.
880 */
881 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
882 usleep_range(400, 500);
883 continue;
884 }
885 if (status & DP_AUX_CH_CTL_DONE)
886 goto done;
887 }
888 }
889
890 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
891 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
892 ret = -EBUSY;
893 goto out;
894 }
895
896 done:
897 /* Check for timeout or receive error.
898 * Timeouts occur when the sink is not connected.
899 */
900 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
901 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
902 ret = -EIO;
903 goto out;
904 }
905
906 /* Timeouts occur when the device isn't connected, so they're
907 * "normal" -- don't fill the kernel log with these */
908 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
909 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
910 ret = -ETIMEDOUT;
911 goto out;
912 }
913
914 /* Unload any bytes sent back from the other side */
915 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
916 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
917 if (recv_bytes > recv_size)
918 recv_bytes = recv_size;
919
920 for (i = 0; i < recv_bytes; i += 4)
921 intel_dp_unpack_aux(I915_READ(ch_data + i),
922 recv + i, recv_bytes - i);
923
924 ret = recv_bytes;
925 out:
926 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
927
928 if (vdd)
929 edp_panel_vdd_off(intel_dp, false);
930
931 pps_unlock(intel_dp);
932
933 return ret;
934 }
935
936 #define BARE_ADDRESS_SIZE 3
937 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
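/*
 * An AUX request header is 3 address/command bytes plus one length byte for
 * transactions that carry data; e.g. a 16-byte I2C read sends the 4-byte
 * header and expects up to 16 data bytes plus 1 reply byte back.
 */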
938 static ssize_t
939 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
940 {
941 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
942 uint8_t txbuf[20], rxbuf[20];
943 size_t txsize, rxsize;
944 int ret;
945
946 txbuf[0] = (msg->request << 4) |
947 ((msg->address >> 16) & 0xf);
948 txbuf[1] = (msg->address >> 8) & 0xff;
949 txbuf[2] = msg->address & 0xff;
950 txbuf[3] = msg->size - 1;
951
952 switch (msg->request & ~DP_AUX_I2C_MOT) {
953 case DP_AUX_NATIVE_WRITE:
954 case DP_AUX_I2C_WRITE:
955 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
956 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
957 rxsize = 2; /* 0 or 1 data bytes */
958
959 if (WARN_ON(txsize > 20))
960 return -E2BIG;
961
962 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
963
964 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
965 if (ret > 0) {
966 msg->reply = rxbuf[0] >> 4;
967
968 if (ret > 1) {
969 /* Number of bytes written in a short write. */
970 ret = clamp_t(int, rxbuf[1], 0, msg->size);
971 } else {
972 /* Return payload size. */
973 ret = msg->size;
974 }
975 }
976 break;
977
978 case DP_AUX_NATIVE_READ:
979 case DP_AUX_I2C_READ:
980 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
981 rxsize = msg->size + 1;
982
983 if (WARN_ON(rxsize > 20))
984 return -E2BIG;
985
986 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
987 if (ret > 0) {
988 msg->reply = rxbuf[0] >> 4;
989 /*
990 * Assume happy day, and copy the data. The caller is
991 * expected to check msg->reply before touching it.
992 *
993 * Return payload size.
994 */
995 ret--;
996 memcpy(msg->buffer, rxbuf + 1, ret);
997 }
998 break;
999
1000 default:
1001 ret = -EINVAL;
1002 break;
1003 }
1004
1005 return ret;
1006 }
1007
1008 static void
1009 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1010 {
1011 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1012 struct drm_i915_private *dev_priv = dev->dev_private;
1013 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1014 enum port port = intel_dig_port->port;
1015 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1016 const char *name = NULL;
1017 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1018 int ret;
1019
1020 /* On SKL we don't have an AUX channel for port E, so we rely on VBT to set
1021 * a proper alternate aux channel.
1022 */
1023 if (IS_SKYLAKE(dev) && port == PORT_E) {
1024 switch (info->alternate_aux_channel) {
1025 case DP_AUX_B:
1026 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1027 break;
1028 case DP_AUX_C:
1029 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1030 break;
1031 case DP_AUX_D:
1032 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1033 break;
1034 case DP_AUX_A:
1035 default:
1036 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1037 }
1038 }
1039
1040 switch (port) {
1041 case PORT_A:
1042 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1043 name = "DPDDC-A";
1044 break;
1045 case PORT_B:
1046 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1047 name = "DPDDC-B";
1048 break;
1049 case PORT_C:
1050 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1051 name = "DPDDC-C";
1052 break;
1053 case PORT_D:
1054 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1055 name = "DPDDC-D";
1056 break;
1057 case PORT_E:
1058 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1059 name = "DPDDC-E";
1060 break;
1061 default:
1062 BUG();
1063 }
1064
1065 /*
1066 * The AUX_CTL register is usually DP_CTL + 0x10.
1067 *
1068 * On Haswell and Broadwell though:
1069 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1070 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1071 *
1072 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1073 */
1074 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1075 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1076
1077 intel_dp->aux.name = name;
1078 intel_dp->aux.dev = dev->dev;
1079 intel_dp->aux.transfer = intel_dp_aux_transfer;
1080
1081 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1082 connector->base.kdev->kobj.name);
1083
1084 ret = drm_dp_aux_register(&intel_dp->aux);
1085 if (ret < 0) {
1086 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1087 name, ret);
1088 return;
1089 }
1090
1091 ret = sysfs_create_link(&connector->base.kdev->kobj,
1092 &intel_dp->aux.ddc.dev.kobj,
1093 intel_dp->aux.ddc.dev.kobj.name);
1094 if (ret < 0) {
1095 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1096 drm_dp_aux_unregister(&intel_dp->aux);
1097 }
1098 }
1099
1100 static void
1101 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1102 {
1103 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1104
1105 if (!intel_connector->mst_port)
1106 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1107 intel_dp->aux.ddc.dev.kobj.name);
1108 intel_connector_unregister(intel_connector);
1109 }
1110
1111 static void
1112 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1113 {
1114 u32 ctrl1;
1115
1116 memset(&pipe_config->dpll_hw_state, 0,
1117 sizeof(pipe_config->dpll_hw_state));
1118
1119 pipe_config->ddi_pll_sel = SKL_DPLL0;
1120 pipe_config->dpll_hw_state.cfgcr1 = 0;
1121 pipe_config->dpll_hw_state.cfgcr2 = 0;
1122
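/*
 * Halving port_clock gives the case labels below; e.g. an HBR2 port clock
 * of 540000 lands in the 270000 case and selects DPLL_CTRL1_LINK_RATE_2700,
 * i.e. a DPLL frequency of half the 5.4 GHz bit rate.
 */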
1123 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1124 switch (pipe_config->port_clock / 2) {
1125 case 81000:
1126 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1127 SKL_DPLL0);
1128 break;
1129 case 135000:
1130 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1131 SKL_DPLL0);
1132 break;
1133 case 270000:
1134 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1135 SKL_DPLL0);
1136 break;
1137 case 162000:
1138 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1139 SKL_DPLL0);
1140 break;
1141 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1142 results in a CDCLK change. Need to handle the CDCLK change by
1143 disabling the pipes and re-enabling them */
1144 case 108000:
1145 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1146 SKL_DPLL0);
1147 break;
1148 case 216000:
1149 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1150 SKL_DPLL0);
1151 break;
1152
1153 }
1154 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1155 }
1156
1157 void
1158 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1159 {
1160 memset(&pipe_config->dpll_hw_state, 0,
1161 sizeof(pipe_config->dpll_hw_state));
1162
1163 switch (pipe_config->port_clock / 2) {
1164 case 81000:
1165 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1166 break;
1167 case 135000:
1168 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1169 break;
1170 case 270000:
1171 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1172 break;
1173 }
1174 }
1175
1176 static int
1177 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1178 {
1179 if (intel_dp->num_sink_rates) {
1180 *sink_rates = intel_dp->sink_rates;
1181 return intel_dp->num_sink_rates;
1182 }
1183
1184 *sink_rates = default_rates;
1185
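/*
 * The DPCD link BW codes (0x06, 0x0a, 0x14 for 1.62/2.7/5.4 GHz) shift
 * down to indices 0/1/2 of default_rates, so this returns how many of the
 * default rates the sink supports.
 */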
1186 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1187 }
1188
1189 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1190 {
1191 /* WaDisableHBR2:skl */
1192 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1193 return false;
1194
1195 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1196 (INTEL_INFO(dev)->gen >= 9))
1197 return true;
1198 else
1199 return false;
1200 }
1201
1202 static int
1203 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1204 {
1205 int size;
1206
1207 if (IS_BROXTON(dev)) {
1208 *source_rates = bxt_rates;
1209 size = ARRAY_SIZE(bxt_rates);
1210 } else if (IS_SKYLAKE(dev)) {
1211 *source_rates = skl_rates;
1212 size = ARRAY_SIZE(skl_rates);
1213 } else {
1214 *source_rates = default_rates;
1215 size = ARRAY_SIZE(default_rates);
1216 }
1217
1218 /* This depends on the fact that 5.4 is the last value in the array */
1219 if (!intel_dp_source_supports_hbr2(dev))
1220 size--;
1221
1222 return size;
1223 }
1224
1225 static void
1226 intel_dp_set_clock(struct intel_encoder *encoder,
1227 struct intel_crtc_state *pipe_config)
1228 {
1229 struct drm_device *dev = encoder->base.dev;
1230 const struct dp_link_dpll *divisor = NULL;
1231 int i, count = 0;
1232
1233 if (IS_G4X(dev)) {
1234 divisor = gen4_dpll;
1235 count = ARRAY_SIZE(gen4_dpll);
1236 } else if (HAS_PCH_SPLIT(dev)) {
1237 divisor = pch_dpll;
1238 count = ARRAY_SIZE(pch_dpll);
1239 } else if (IS_CHERRYVIEW(dev)) {
1240 divisor = chv_dpll;
1241 count = ARRAY_SIZE(chv_dpll);
1242 } else if (IS_VALLEYVIEW(dev)) {
1243 divisor = vlv_dpll;
1244 count = ARRAY_SIZE(vlv_dpll);
1245 }
1246
1247 if (divisor && count) {
1248 for (i = 0; i < count; i++) {
1249 if (pipe_config->port_clock == divisor[i].clock) {
1250 pipe_config->dpll = divisor[i].dpll;
1251 pipe_config->clock_set = true;
1252 break;
1253 }
1254 }
1255 }
1256 }
1257
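/*
 * Two-pointer intersection of two ascending rate lists; e.g. intersecting
 * skl_rates with default_rates yields { 162000, 270000, 540000 }.
 */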
1258 static int intersect_rates(const int *source_rates, int source_len,
1259 const int *sink_rates, int sink_len,
1260 int *common_rates)
1261 {
1262 int i = 0, j = 0, k = 0;
1263
1264 while (i < source_len && j < sink_len) {
1265 if (source_rates[i] == sink_rates[j]) {
1266 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1267 return k;
1268 common_rates[k] = source_rates[i];
1269 ++k;
1270 ++i;
1271 ++j;
1272 } else if (source_rates[i] < sink_rates[j]) {
1273 ++i;
1274 } else {
1275 ++j;
1276 }
1277 }
1278 return k;
1279 }
1280
1281 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1282 int *common_rates)
1283 {
1284 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1285 const int *source_rates, *sink_rates;
1286 int source_len, sink_len;
1287
1288 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1289 source_len = intel_dp_source_rates(dev, &source_rates);
1290
1291 return intersect_rates(source_rates, source_len,
1292 sink_rates, sink_len,
1293 common_rates);
1294 }
1295
1296 static void snprintf_int_array(char *str, size_t len,
1297 const int *array, int nelem)
1298 {
1299 int i;
1300
1301 str[0] = '\0';
1302
1303 for (i = 0; i < nelem; i++) {
1304 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1305 if (r >= len)
1306 return;
1307 str += r;
1308 len -= r;
1309 }
1310 }
1311
1312 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1313 {
1314 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1315 const int *source_rates, *sink_rates;
1316 int source_len, sink_len, common_len;
1317 int common_rates[DP_MAX_SUPPORTED_RATES];
1318 char str[128]; /* FIXME: too big for stack? */
1319
1320 if ((drm_debug & DRM_UT_KMS) == 0)
1321 return;
1322
1323 source_len = intel_dp_source_rates(dev, &source_rates);
1324 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1325 DRM_DEBUG_KMS("source rates: %s\n", str);
1326
1327 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1328 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1329 DRM_DEBUG_KMS("sink rates: %s\n", str);
1330
1331 common_len = intel_dp_common_rates(intel_dp, common_rates);
1332 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1333 DRM_DEBUG_KMS("common rates: %s\n", str);
1334 }
1335
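/*
 * Returns the index of @find in @rates, or DP_MAX_SUPPORTED_RATES if absent.
 * Calling it with find == 0 on a zero-initialized array counts the valid
 * entries, which is how intel_dp_max_link_rate() below picks the highest
 * (last) common rate.
 */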
1336 static int rate_to_index(int find, const int *rates)
1337 {
1338 int i = 0;
1339
1340 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1341 if (find == rates[i])
1342 break;
1343
1344 return i;
1345 }
1346
1347 int
1348 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1349 {
1350 int rates[DP_MAX_SUPPORTED_RATES] = {};
1351 int len;
1352
1353 len = intel_dp_common_rates(intel_dp, rates);
1354 if (WARN_ON(len <= 0))
1355 return 162000;
1356
1357 return rates[rate_to_index(0, rates) - 1];
1358 }
1359
1360 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1361 {
1362 return rate_to_index(rate, intel_dp->sink_rates);
1363 }
1364
1365 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1366 uint8_t *link_bw, uint8_t *rate_select)
1367 {
1368 if (intel_dp->num_sink_rates) {
1369 *link_bw = 0;
1370 *rate_select =
1371 intel_dp_rate_select(intel_dp, port_clock);
1372 } else {
1373 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1374 *rate_select = 0;
1375 }
1376 }
1377
1378 bool
1379 intel_dp_compute_config(struct intel_encoder *encoder,
1380 struct intel_crtc_state *pipe_config)
1381 {
1382 struct drm_device *dev = encoder->base.dev;
1383 struct drm_i915_private *dev_priv = dev->dev_private;
1384 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1385 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1386 enum port port = dp_to_dig_port(intel_dp)->port;
1387 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1388 struct intel_connector *intel_connector = intel_dp->attached_connector;
1389 int lane_count, clock;
1390 int min_lane_count = 1;
1391 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1392 /* Conveniently, the link BW constants become indices with a shift...*/
1393 int min_clock = 0;
1394 int max_clock;
1395 int bpp, mode_rate;
1396 int link_avail, link_clock;
1397 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1398 int common_len;
1399 uint8_t link_bw, rate_select;
1400
1401 common_len = intel_dp_common_rates(intel_dp, common_rates);
1402
1403 /* No common link rates between source and sink */
1404 WARN_ON(common_len <= 0);
1405
1406 max_clock = common_len - 1;
1407
1408 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1409 pipe_config->has_pch_encoder = true;
1410
1411 pipe_config->has_dp_encoder = true;
1412 pipe_config->has_drrs = false;
1413 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1414
1415 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1416 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1417 adjusted_mode);
1418
1419 if (INTEL_INFO(dev)->gen >= 9) {
1420 int ret;
1421 ret = skl_update_scaler_crtc(pipe_config);
1422 if (ret)
1423 return ret;
1424 }
1425
1426 if (!HAS_PCH_SPLIT(dev))
1427 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1428 intel_connector->panel.fitting_mode);
1429 else
1430 intel_pch_panel_fitting(intel_crtc, pipe_config,
1431 intel_connector->panel.fitting_mode);
1432 }
1433
1434 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1435 return false;
1436
1437 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1438 "max bw %d pixel clock %iKHz\n",
1439 max_lane_count, common_rates[max_clock],
1440 adjusted_mode->crtc_clock);
1441
1442 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1443 * bpc in between. */
1444 bpp = pipe_config->pipe_bpp;
1445 if (is_edp(intel_dp)) {
1446
1447 /* Get bpp from VBT only for panels that don't have bpp in EDID */
1448 if (intel_connector->base.display_info.bpc == 0 &&
1449 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1450 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1451 dev_priv->vbt.edp_bpp);
1452 bpp = dev_priv->vbt.edp_bpp;
1453 }
1454
1455 /*
1456 * Use the maximum clock and number of lanes the eDP panel
1457 * advertises being capable of. The panels are generally
1458 * designed to support only a single clock and lane
1459 * configuration, and typically these values correspond to the
1460 * native resolution of the panel.
1461 */
1462 min_lane_count = max_lane_count;
1463 min_clock = max_clock;
1464 }
1465
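/*
 * Greedy search: try the highest bpp first, then the lowest link clock and
 * lane count that still satisfy mode_rate <= link_avail.
 */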
1466 for (; bpp >= 6*3; bpp -= 2*3) {
1467 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1468 bpp);
1469
1470 for (clock = min_clock; clock <= max_clock; clock++) {
1471 for (lane_count = min_lane_count;
1472 lane_count <= max_lane_count;
1473 lane_count <<= 1) {
1474
1475 link_clock = common_rates[clock];
1476 link_avail = intel_dp_max_data_rate(link_clock,
1477 lane_count);
1478
1479 if (mode_rate <= link_avail) {
1480 goto found;
1481 }
1482 }
1483 }
1484 }
1485
1486 return false;
1487
1488 found:
1489 if (intel_dp->color_range_auto) {
1490 /*
1491 * See:
1492 * CEA-861-E - 5.1 Default Encoding Parameters
1493 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1494 */
1495 pipe_config->limited_color_range =
1496 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1497 } else {
1498 pipe_config->limited_color_range =
1499 intel_dp->limited_color_range;
1500 }
1501
1502 pipe_config->lane_count = lane_count;
1503
1504 pipe_config->pipe_bpp = bpp;
1505 pipe_config->port_clock = common_rates[clock];
1506
1507 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1508 &link_bw, &rate_select);
1509
1510 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1511 link_bw, rate_select, pipe_config->lane_count,
1512 pipe_config->port_clock, bpp);
1513 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1514 mode_rate, link_avail);
1515
1516 intel_link_compute_m_n(bpp, lane_count,
1517 adjusted_mode->crtc_clock,
1518 pipe_config->port_clock,
1519 &pipe_config->dp_m_n);
1520
1521 if (intel_connector->panel.downclock_mode != NULL &&
1522 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1523 pipe_config->has_drrs = true;
1524 intel_link_compute_m_n(bpp, lane_count,
1525 intel_connector->panel.downclock_mode->clock,
1526 pipe_config->port_clock,
1527 &pipe_config->dp_m2_n2);
1528 }
1529
1530 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1531 skl_edp_set_pll_config(pipe_config);
1532 else if (IS_BROXTON(dev))
1533 /* handled in ddi */;
1534 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1535 hsw_dp_set_ddi_pll_sel(pipe_config);
1536 else
1537 intel_dp_set_clock(encoder, pipe_config);
1538
1539 return true;
1540 }
1541
1542 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1543 {
1544 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1545 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1546 struct drm_device *dev = crtc->base.dev;
1547 struct drm_i915_private *dev_priv = dev->dev_private;
1548 u32 dpa_ctl;
1549
1550 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1551 crtc->config->port_clock);
1552 dpa_ctl = I915_READ(DP_A);
1553 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1554
1555 if (crtc->config->port_clock == 162000) {
1556 /* For a long time we've carried around an ILK-DevA w/a for the
1557 * 160MHz clock. If we're really unlucky, it's still required.
1558 */
1559 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1560 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1561 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1562 } else {
1563 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1564 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1565 }
1566
1567 I915_WRITE(DP_A, dpa_ctl);
1568
1569 POSTING_READ(DP_A);
1570 udelay(500);
1571 }
1572
1573 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1574 const struct intel_crtc_state *pipe_config)
1575 {
1576 intel_dp->link_rate = pipe_config->port_clock;
1577 intel_dp->lane_count = pipe_config->lane_count;
1578 }
1579
1580 static void intel_dp_prepare(struct intel_encoder *encoder)
1581 {
1582 struct drm_device *dev = encoder->base.dev;
1583 struct drm_i915_private *dev_priv = dev->dev_private;
1584 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1585 enum port port = dp_to_dig_port(intel_dp)->port;
1586 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1587 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1588
1589 intel_dp_set_link_params(intel_dp, crtc->config);
1590
1591 /*
1592 * There are four kinds of DP registers:
1593 *
1594 * IBX PCH
1595 * SNB CPU
1596 * IVB CPU
1597 * CPT PCH
1598 *
1599 * IBX PCH and CPU are the same for almost everything,
1600 * except that the CPU DP PLL is configured in this
1601 * register
1602 *
1603 * CPT PCH is quite different, having many bits moved
1604 * to the TRANS_DP_CTL register instead. That
1605 * configuration happens (oddly) in ironlake_pch_enable
1606 */
1607
1608 /* Preserve the BIOS-computed detected bit. This is
1609 * supposed to be read-only.
1610 */
1611 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1612
1613 /* Handle DP bits in common between all three register formats */
1614 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1615 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1616
1617 if (crtc->config->has_audio)
1618 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1619
1620 /* Split out the IBX/CPU vs CPT settings */
1621
1622 if (IS_GEN7(dev) && port == PORT_A) {
1623 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1624 intel_dp->DP |= DP_SYNC_HS_HIGH;
1625 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1626 intel_dp->DP |= DP_SYNC_VS_HIGH;
1627 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1628
1629 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1630 intel_dp->DP |= DP_ENHANCED_FRAMING;
1631
1632 intel_dp->DP |= crtc->pipe << 29;
1633 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1634 u32 trans_dp;
1635
1636 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1637
1638 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1639 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1640 trans_dp |= TRANS_DP_ENH_FRAMING;
1641 else
1642 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1643 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1644 } else {
1645 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1646 crtc->config->limited_color_range)
1647 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1648
1649 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1650 intel_dp->DP |= DP_SYNC_HS_HIGH;
1651 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1652 intel_dp->DP |= DP_SYNC_VS_HIGH;
1653 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1654
1655 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1656 intel_dp->DP |= DP_ENHANCED_FRAMING;
1657
1658 if (IS_CHERRYVIEW(dev))
1659 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1660 else if (crtc->pipe == PIPE_B)
1661 intel_dp->DP |= DP_PIPEB_SELECT;
1662 }
1663 }
1664
1665 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1666 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1667
1668 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1669 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1670
1671 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1672 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
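/*
 * Each mask/value pair is polled as (PP_STATUS & mask) == value by
 * wait_panel_status(); the "| 0" terms just keep the column layout for
 * status bits that don't matter in that state.
 */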
1673
1674 static void wait_panel_status(struct intel_dp *intel_dp,
1675 u32 mask,
1676 u32 value)
1677 {
1678 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1679 struct drm_i915_private *dev_priv = dev->dev_private;
1680 u32 pp_stat_reg, pp_ctrl_reg;
1681
1682 lockdep_assert_held(&dev_priv->pps_mutex);
1683
1684 pp_stat_reg = _pp_stat_reg(intel_dp);
1685 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1686
1687 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1688 mask, value,
1689 I915_READ(pp_stat_reg),
1690 I915_READ(pp_ctrl_reg));
1691
1692 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1693 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1694 I915_READ(pp_stat_reg),
1695 I915_READ(pp_ctrl_reg));
1696 }
1697
1698 DRM_DEBUG_KMS("Wait complete\n");
1699 }
1700
1701 static void wait_panel_on(struct intel_dp *intel_dp)
1702 {
1703 DRM_DEBUG_KMS("Wait for panel power on\n");
1704 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1705 }
1706
1707 static void wait_panel_off(struct intel_dp *intel_dp)
1708 {
1709 DRM_DEBUG_KMS("Wait for panel power off time\n");
1710 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1711 }
1712
1713 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1714 {
1715 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1716
1717 /* When we disable the VDD override bit last we have to do the manual
1718 * wait. */
1719 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1720 intel_dp->panel_power_cycle_delay);
1721
1722 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1723 }
1724
1725 static void wait_backlight_on(struct intel_dp *intel_dp)
1726 {
1727 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1728 intel_dp->backlight_on_delay);
1729 }
1730
1731 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1732 {
1733 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1734 intel_dp->backlight_off_delay);
1735 }
1736
1737 /* Read the current pp_control value, unlocking the register if it
1738 * is locked
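* (Broxton is skipped, presumably because its PPS registers have no
* write-protect key.)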
1739 */
1740
1741 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1742 {
1743 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1744 struct drm_i915_private *dev_priv = dev->dev_private;
1745 u32 control;
1746
1747 lockdep_assert_held(&dev_priv->pps_mutex);
1748
1749 control = I915_READ(_pp_ctrl_reg(intel_dp));
1750 if (!IS_BROXTON(dev)) {
1751 control &= ~PANEL_UNLOCK_MASK;
1752 control |= PANEL_UNLOCK_REGS;
1753 }
1754 return control;
1755 }
1756
1757 /*
1758 * Must be paired with edp_panel_vdd_off().
1759 * Must hold pps_mutex around the whole on/off sequence.
1760 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
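* Returns true when this call turned VDD on (i.e. it wasn't already
* requested on), in which case the caller must balance it with an off.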
1761 */
1762 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1763 {
1764 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1765 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1766 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1767 struct drm_i915_private *dev_priv = dev->dev_private;
1768 enum intel_display_power_domain power_domain;
1769 u32 pp;
1770 u32 pp_stat_reg, pp_ctrl_reg;
1771 bool need_to_disable = !intel_dp->want_panel_vdd;
1772
1773 lockdep_assert_held(&dev_priv->pps_mutex);
1774
1775 if (!is_edp(intel_dp))
1776 return false;
1777
1778 cancel_delayed_work(&intel_dp->panel_vdd_work);
1779 intel_dp->want_panel_vdd = true;
1780
1781 if (edp_have_panel_vdd(intel_dp))
1782 return need_to_disable;
1783
1784 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1785 intel_display_power_get(dev_priv, power_domain);
1786
1787 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1788 port_name(intel_dig_port->port));
1789
1790 if (!edp_have_panel_power(intel_dp))
1791 wait_panel_power_cycle(intel_dp);
1792
1793 pp = ironlake_get_pp_control(intel_dp);
1794 pp |= EDP_FORCE_VDD;
1795
1796 pp_stat_reg = _pp_stat_reg(intel_dp);
1797 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1798
1799 I915_WRITE(pp_ctrl_reg, pp);
1800 POSTING_READ(pp_ctrl_reg);
1801 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1802 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1803 /*
1804 * If the panel wasn't on, delay before accessing aux channel
1805 */
1806 if (!edp_have_panel_power(intel_dp)) {
1807 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1808 port_name(intel_dig_port->port));
1809 msleep(intel_dp->panel_power_up_delay);
1810 }
1811
1812 return need_to_disable;
1813 }
1814
1815 /*
1816 * Must be paired with intel_edp_panel_vdd_off() or
1817 * intel_edp_panel_off().
1818 * Nested calls to these functions are not allowed since
1819 * we drop the lock. Caller must use some higher level
1820 * locking to prevent nested calls from other threads.
1821 */
1822 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1823 {
1824 bool vdd;
1825
1826 if (!is_edp(intel_dp))
1827 return;
1828
1829 pps_lock(intel_dp);
1830 vdd = edp_panel_vdd_on(intel_dp);
1831 pps_unlock(intel_dp);
1832
1833 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1834 port_name(dp_to_dig_port(intel_dp)->port));
1835 }
1836
1837 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1838 {
1839 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1840 struct drm_i915_private *dev_priv = dev->dev_private;
1841 struct intel_digital_port *intel_dig_port =
1842 dp_to_dig_port(intel_dp);
1843 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1844 enum intel_display_power_domain power_domain;
1845 u32 pp;
1846 u32 pp_stat_reg, pp_ctrl_reg;
1847
1848 lockdep_assert_held(&dev_priv->pps_mutex);
1849
1850 WARN_ON(intel_dp->want_panel_vdd);
1851
1852 if (!edp_have_panel_vdd(intel_dp))
1853 return;
1854
1855 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1856 port_name(intel_dig_port->port));
1857
1858 pp = ironlake_get_pp_control(intel_dp);
1859 pp &= ~EDP_FORCE_VDD;
1860
1861 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1862 pp_stat_reg = _pp_stat_reg(intel_dp);
1863
1864 I915_WRITE(pp_ctrl_reg, pp);
1865 POSTING_READ(pp_ctrl_reg);
1866
1867 /* Make sure sequencer is idle before allowing subsequent activity */
1868 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1869 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1870
1871 if ((pp & POWER_TARGET_ON) == 0)
1872 intel_dp->last_power_cycle = jiffies;
1873
1874 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1875 intel_display_power_put(dev_priv, power_domain);
1876 }
1877
1878 static void edp_panel_vdd_work(struct work_struct *__work)
1879 {
1880 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1881 struct intel_dp, panel_vdd_work);
1882
1883 pps_lock(intel_dp);
1884 if (!intel_dp->want_panel_vdd)
1885 edp_panel_vdd_off_sync(intel_dp);
1886 pps_unlock(intel_dp);
1887 }
1888
1889 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1890 {
1891 unsigned long delay;
1892
1893 /*
1894 * Queue the timer to fire a long time from now (relative to the power
1895 * down delay) to keep the panel power up across a sequence of
1896 * operations.
1897 */
1898 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1899 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1900 }
1901
1902 /*
1903 * Must be paired with edp_panel_vdd_on().
1904 * Must hold pps_mutex around the whole on/off sequence.
1905 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1906 */
1907 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1908 {
1909 struct drm_i915_private *dev_priv =
1910 intel_dp_to_dev(intel_dp)->dev_private;
1911
1912 lockdep_assert_held(&dev_priv->pps_mutex);
1913
1914 if (!is_edp(intel_dp))
1915 return;
1916
1917 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1918 port_name(dp_to_dig_port(intel_dp)->port));
1919
1920 intel_dp->want_panel_vdd = false;
1921
1922 if (sync)
1923 edp_panel_vdd_off_sync(intel_dp);
1924 else
1925 edp_panel_vdd_schedule_off(intel_dp);
1926 }
1927
1928 static void edp_panel_on(struct intel_dp *intel_dp)
1929 {
1930 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1931 struct drm_i915_private *dev_priv = dev->dev_private;
1932 u32 pp;
1933 u32 pp_ctrl_reg;
1934
1935 lockdep_assert_held(&dev_priv->pps_mutex);
1936
1937 if (!is_edp(intel_dp))
1938 return;
1939
1940 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1941 port_name(dp_to_dig_port(intel_dp)->port));
1942
1943 if (WARN(edp_have_panel_power(intel_dp),
1944 "eDP port %c panel power already on\n",
1945 port_name(dp_to_dig_port(intel_dp)->port)))
1946 return;
1947
1948 wait_panel_power_cycle(intel_dp);
1949
1950 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1951 pp = ironlake_get_pp_control(intel_dp);
1952 if (IS_GEN5(dev)) {
1953 /* ILK workaround: disable reset around power sequence */
1954 pp &= ~PANEL_POWER_RESET;
1955 I915_WRITE(pp_ctrl_reg, pp);
1956 POSTING_READ(pp_ctrl_reg);
1957 }
1958
1959 pp |= POWER_TARGET_ON;
1960 if (!IS_GEN5(dev))
1961 pp |= PANEL_POWER_RESET;
1962
1963 I915_WRITE(pp_ctrl_reg, pp);
1964 POSTING_READ(pp_ctrl_reg);
1965
1966 wait_panel_on(intel_dp);
1967 intel_dp->last_power_on = jiffies;
1968
1969 if (IS_GEN5(dev)) {
1970 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1971 I915_WRITE(pp_ctrl_reg, pp);
1972 POSTING_READ(pp_ctrl_reg);
1973 }
1974 }
1975
1976 void intel_edp_panel_on(struct intel_dp *intel_dp)
1977 {
1978 if (!is_edp(intel_dp))
1979 return;
1980
1981 pps_lock(intel_dp);
1982 edp_panel_on(intel_dp);
1983 pps_unlock(intel_dp);
1984 }
1985
1986
1987 static void edp_panel_off(struct intel_dp *intel_dp)
1988 {
1989 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1990 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1991 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1992 struct drm_i915_private *dev_priv = dev->dev_private;
1993 enum intel_display_power_domain power_domain;
1994 u32 pp;
1995 u32 pp_ctrl_reg;
1996
1997 lockdep_assert_held(&dev_priv->pps_mutex);
1998
1999 if (!is_edp(intel_dp))
2000 return;
2001
2002 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2003 port_name(dp_to_dig_port(intel_dp)->port));
2004
2005 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2006 port_name(dp_to_dig_port(intel_dp)->port));
2007
2008 pp = ironlake_get_pp_control(intel_dp);
2009 /* We need to switch off panel power _and_ force vdd; otherwise some
2010 * panels get very unhappy and cease to work. */
2011 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2012 EDP_BLC_ENABLE);
2013
2014 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2015
2016 intel_dp->want_panel_vdd = false;
2017
2018 I915_WRITE(pp_ctrl_reg, pp);
2019 POSTING_READ(pp_ctrl_reg);
2020
2021 intel_dp->last_power_cycle = jiffies;
2022 wait_panel_off(intel_dp);
2023
2024 /* We got a reference when we enabled the VDD. */
2025 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2026 intel_display_power_put(dev_priv, power_domain);
2027 }
2028
2029 void intel_edp_panel_off(struct intel_dp *intel_dp)
2030 {
2031 if (!is_edp(intel_dp))
2032 return;
2033
2034 pps_lock(intel_dp);
2035 edp_panel_off(intel_dp);
2036 pps_unlock(intel_dp);
2037 }
2038
2039 /* Enable backlight in the panel power control. */
2040 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2041 {
2042 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2043 struct drm_device *dev = intel_dig_port->base.base.dev;
2044 struct drm_i915_private *dev_priv = dev->dev_private;
2045 u32 pp;
2046 u32 pp_ctrl_reg;
2047
2048 /*
2049 * If we enable the backlight right away following a panel power
2050 * on, we may see slight flicker as the panel syncs with the eDP
2051 * link. So delay a bit to make sure the image is solid before
2052 * allowing it to appear.
2053 */
2054 wait_backlight_on(intel_dp);
2055
2056 pps_lock(intel_dp);
2057
2058 pp = ironlake_get_pp_control(intel_dp);
2059 pp |= EDP_BLC_ENABLE;
2060
2061 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2062
2063 I915_WRITE(pp_ctrl_reg, pp);
2064 POSTING_READ(pp_ctrl_reg);
2065
2066 pps_unlock(intel_dp);
2067 }
2068
2069 /* Enable backlight PWM and backlight PP control. */
2070 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2071 {
2072 if (!is_edp(intel_dp))
2073 return;
2074
2075 DRM_DEBUG_KMS("\n");
2076
2077 intel_panel_enable_backlight(intel_dp->attached_connector);
2078 _intel_edp_backlight_on(intel_dp);
2079 }
2080
2081 /* Disable backlight in the panel power control. */
2082 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2083 {
2084 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2085 struct drm_i915_private *dev_priv = dev->dev_private;
2086 u32 pp;
2087 u32 pp_ctrl_reg;
2088
2089 if (!is_edp(intel_dp))
2090 return;
2091
2092 pps_lock(intel_dp);
2093
2094 pp = ironlake_get_pp_control(intel_dp);
2095 pp &= ~EDP_BLC_ENABLE;
2096
2097 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2098
2099 I915_WRITE(pp_ctrl_reg, pp);
2100 POSTING_READ(pp_ctrl_reg);
2101
2102 pps_unlock(intel_dp);
2103
2104 intel_dp->last_backlight_off = jiffies;
2105 edp_wait_backlight_off(intel_dp);
2106 }
2107
2108 /* Disable backlight PP control and backlight PWM. */
2109 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2110 {
2111 if (!is_edp(intel_dp))
2112 return;
2113
2114 DRM_DEBUG_KMS("\n");
2115
2116 _intel_edp_backlight_off(intel_dp);
2117 intel_panel_disable_backlight(intel_dp->attached_connector);
2118 }
2119
2120 /*
2121 * Hook for controlling the panel power control backlight through the bl_power
2122 * sysfs attribute. Take care to handle multiple calls.
2123 */
2124 static void intel_edp_backlight_power(struct intel_connector *connector,
2125 bool enable)
2126 {
2127 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2128 bool is_enabled;
2129
2130 pps_lock(intel_dp);
2131 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2132 pps_unlock(intel_dp);
2133
2134 if (is_enabled == enable)
2135 return;
2136
2137 DRM_DEBUG_KMS("panel power control backlight %s\n",
2138 enable ? "enable" : "disable");
2139
2140 if (enable)
2141 _intel_edp_backlight_on(intel_dp);
2142 else
2143 _intel_edp_backlight_off(intel_dp);
2144 }
2145
2146 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2147 {
2148 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2149 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2150 struct drm_device *dev = crtc->dev;
2151 struct drm_i915_private *dev_priv = dev->dev_private;
2152 u32 dpa_ctl;
2153
2154 assert_pipe_disabled(dev_priv,
2155 to_intel_crtc(crtc)->pipe);
2156
2157 DRM_DEBUG_KMS("\n");
2158 dpa_ctl = I915_READ(DP_A);
2159 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2160 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2161
2162 /* We don't adjust intel_dp->DP while tearing down the link, to
2163 * facilitate link retraining (e.g. after hotplug). Hence clear all
2164 * enable bits here to ensure that we don't enable too much. */
2165 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2166 intel_dp->DP |= DP_PLL_ENABLE;
2167 I915_WRITE(DP_A, intel_dp->DP);
2168 POSTING_READ(DP_A);
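/* allow the eDP PLL time to lock before enabling the port */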
2169 udelay(200);
2170 }
2171
2172 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2173 {
2174 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2175 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2176 struct drm_device *dev = crtc->dev;
2177 struct drm_i915_private *dev_priv = dev->dev_private;
2178 u32 dpa_ctl;
2179
2180 assert_pipe_disabled(dev_priv,
2181 to_intel_crtc(crtc)->pipe);
2182
2183 dpa_ctl = I915_READ(DP_A);
2184 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2185 "dp pll off, should be on\n");
2186 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2187
2188 /* We can't rely on the value tracked for the DP register in
2189 * intel_dp->DP because link_down must not change that (otherwise link
2190 * re-training will fail). */
2191 dpa_ctl &= ~DP_PLL_ENABLE;
2192 I915_WRITE(DP_A, dpa_ctl);
2193 POSTING_READ(DP_A);
2194 udelay(200);
2195 }
2196
2197 /* If the sink supports it, try to set the power state appropriately */
2198 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2199 {
2200 int ret, i;
2201
2202 /* Should have a valid DPCD by this point */
2203 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2204 return;
2205
2206 if (mode != DRM_MODE_DPMS_ON) {
2207 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2208 DP_SET_POWER_D3);
2209 } else {
2210 /*
2211 * When turning on, we need to retry for 1ms to give the sink
2212 * time to wake up.
2213 */
2214 for (i = 0; i < 3; i++) {
2215 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2216 DP_SET_POWER_D0);
2217 if (ret == 1)
2218 break;
2219 msleep(1);
2220 }
2221 }
2222
2223 if (ret != 1)
2224 DRM_DEBUG_KMS("failed to %s sink power state\n",
2225 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2226 }
2227
2228 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2229 enum pipe *pipe)
2230 {
2231 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2232 enum port port = dp_to_dig_port(intel_dp)->port;
2233 struct drm_device *dev = encoder->base.dev;
2234 struct drm_i915_private *dev_priv = dev->dev_private;
2235 enum intel_display_power_domain power_domain;
2236 u32 tmp;
2237
2238 power_domain = intel_display_port_power_domain(encoder);
2239 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2240 return false;
2241
2242 tmp = I915_READ(intel_dp->output_reg);
2243
2244 if (!(tmp & DP_PORT_EN))
2245 return false;
2246
2247 if (IS_GEN7(dev) && port == PORT_A) {
2248 *pipe = PORT_TO_PIPE_CPT(tmp);
2249 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2250 enum pipe p;
2251
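/*
 * On CPT the pipe<->port routing lives in the transcoder's
 * TRANS_DP_CTL, so scan all pipes for the one driving this port.
 */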
2252 for_each_pipe(dev_priv, p) {
2253 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2254 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2255 *pipe = p;
2256 return true;
2257 }
2258 }
2259
2260 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2261 intel_dp->output_reg);
2262 } else if (IS_CHERRYVIEW(dev)) {
2263 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2264 } else {
2265 *pipe = PORT_TO_PIPE(tmp);
2266 }
2267
2268 return true;
2269 }
2270
2271 static void intel_dp_get_config(struct intel_encoder *encoder,
2272 struct intel_crtc_state *pipe_config)
2273 {
2274 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2275 u32 tmp, flags = 0;
2276 struct drm_device *dev = encoder->base.dev;
2277 struct drm_i915_private *dev_priv = dev->dev_private;
2278 enum port port = dp_to_dig_port(intel_dp)->port;
2279 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2280 int dotclock;
2281
2282 tmp = I915_READ(intel_dp->output_reg);
2283
2284 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2285
2286 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2287 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2288
2289 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2290 flags |= DRM_MODE_FLAG_PHSYNC;
2291 else
2292 flags |= DRM_MODE_FLAG_NHSYNC;
2293
2294 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2295 flags |= DRM_MODE_FLAG_PVSYNC;
2296 else
2297 flags |= DRM_MODE_FLAG_NVSYNC;
2298 } else {
2299 if (tmp & DP_SYNC_HS_HIGH)
2300 flags |= DRM_MODE_FLAG_PHSYNC;
2301 else
2302 flags |= DRM_MODE_FLAG_NHSYNC;
2303
2304 if (tmp & DP_SYNC_VS_HIGH)
2305 flags |= DRM_MODE_FLAG_PVSYNC;
2306 else
2307 flags |= DRM_MODE_FLAG_NVSYNC;
2308 }
2309
2310 pipe_config->base.adjusted_mode.flags |= flags;
2311
2312 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2313 tmp & DP_COLOR_RANGE_16_235)
2314 pipe_config->limited_color_range = true;
2315
2316 pipe_config->has_dp_encoder = true;
2317
2318 pipe_config->lane_count =
2319 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2320
2321 intel_dp_get_m_n(crtc, pipe_config);
2322
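/*
 * Port A (CPU eDP) encodes its link rate in the DP_A PLL frequency
 * select field: 1.62 GHz or 2.7 GHz.
 */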
2323 if (port == PORT_A) {
2324 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2325 pipe_config->port_clock = 162000;
2326 else
2327 pipe_config->port_clock = 270000;
2328 }
2329
2330 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2331 &pipe_config->dp_m_n);
2332
2333 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2334 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2335
2336 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2337
2338 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2339 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2340 /*
2341 * This is a big fat ugly hack.
2342 *
2343 * Some machines in UEFI boot mode provide us a VBT that has 18
2344 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2345 * unknown we fail to light up. Yet the same BIOS boots up with
2346 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2347 * max, not what it tells us to use.
2348 *
2349 * Note: This will still be broken if the eDP panel is not lit
2350 * up by the BIOS, and thus we can't get the mode at module
2351 * load.
2352 */
2353 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2354 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2355 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2356 }
2357 }
2358
2359 static void intel_disable_dp(struct intel_encoder *encoder)
2360 {
2361 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2362 struct drm_device *dev = encoder->base.dev;
2363 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2364
2365 if (crtc->config->has_audio)
2366 intel_audio_codec_disable(encoder);
2367
2368 if (HAS_PSR(dev) && !HAS_DDI(dev))
2369 intel_psr_disable(intel_dp);
2370
2371 /* Make sure the panel is off before trying to change the mode. But also
2372 * ensure that we have vdd while we switch off the panel. */
2373 intel_edp_panel_vdd_on(intel_dp);
2374 intel_edp_backlight_off(intel_dp);
2375 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2376 intel_edp_panel_off(intel_dp);
2377
2378 /* disable the port before the pipe on g4x */
2379 if (INTEL_INFO(dev)->gen < 5)
2380 intel_dp_link_down(intel_dp);
2381 }
2382
2383 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2384 {
2385 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2386 enum port port = dp_to_dig_port(intel_dp)->port;
2387
2388 intel_dp_link_down(intel_dp);
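/* Only port A (CPU eDP) has a dedicated eDP PLL to turn off. */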
2389 if (port == PORT_A)
2390 ironlake_edp_pll_off(intel_dp);
2391 }
2392
2393 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2394 {
2395 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2396
2397 intel_dp_link_down(intel_dp);
2398 }
2399
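/*
 * Assert (reset == true) or deassert the soft reset on the CHV data
 * lanes; PCS23 is only touched when more than two lanes are in use.
 * Callers hold sb_lock around the DPIO accesses.
 */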
2400 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2401 bool reset)
2402 {
2403 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2404 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2405 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2406 enum pipe pipe = crtc->pipe;
2407 uint32_t val;
2408
2409 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2410 if (reset)
2411 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2412 else
2413 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2414 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2415
2416 if (crtc->config->lane_count > 2) {
2417 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2418 if (reset)
2419 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2420 else
2421 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2422 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2423 }
2424
2425 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2426 val |= CHV_PCS_REQ_SOFTRESET_EN;
2427 if (reset)
2428 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2429 else
2430 val |= DPIO_PCS_CLK_SOFT_RESET;
2431 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2432
2433 if (crtc->config->lane_count > 2) {
2434 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2435 val |= CHV_PCS_REQ_SOFTRESET_EN;
2436 if (reset)
2437 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2438 else
2439 val |= DPIO_PCS_CLK_SOFT_RESET;
2440 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2441 }
2442 }
2443
2444 static void chv_post_disable_dp(struct intel_encoder *encoder)
2445 {
2446 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2447 struct drm_device *dev = encoder->base.dev;
2448 struct drm_i915_private *dev_priv = dev->dev_private;
2449
2450 intel_dp_link_down(intel_dp);
2451
2452 mutex_lock(&dev_priv->sb_lock);
2453
2454 /* Assert data lane reset */
2455 chv_data_lane_soft_reset(encoder, true);
2456
2457 mutex_unlock(&dev_priv->sb_lock);
2458 }
2459
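/*
 * Program the requested training pattern. On DDI platforms this writes
 * DP_TP_CTL directly; on everything else it only updates *DP, which the
 * caller is expected to write to the port register.
 */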
2460 static void
2461 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2462 uint32_t *DP,
2463 uint8_t dp_train_pat)
2464 {
2465 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2466 struct drm_device *dev = intel_dig_port->base.base.dev;
2467 struct drm_i915_private *dev_priv = dev->dev_private;
2468 enum port port = intel_dig_port->port;
2469
2470 if (HAS_DDI(dev)) {
2471 uint32_t temp = I915_READ(DP_TP_CTL(port));
2472
2473 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2474 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2475 else
2476 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2477
2478 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2479 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2480 case DP_TRAINING_PATTERN_DISABLE:
2481 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2482
2483 break;
2484 case DP_TRAINING_PATTERN_1:
2485 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2486 break;
2487 case DP_TRAINING_PATTERN_2:
2488 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2489 break;
2490 case DP_TRAINING_PATTERN_3:
2491 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2492 break;
2493 }
2494 I915_WRITE(DP_TP_CTL(port), temp);
2495
2496 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2497 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2498 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2499
2500 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2501 case DP_TRAINING_PATTERN_DISABLE:
2502 *DP |= DP_LINK_TRAIN_OFF_CPT;
2503 break;
2504 case DP_TRAINING_PATTERN_1:
2505 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2506 break;
2507 case DP_TRAINING_PATTERN_2:
2508 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2509 break;
2510 case DP_TRAINING_PATTERN_3:
2511 DRM_ERROR("DP training pattern 3 not supported\n");
2512 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2513 break;
2514 }
2515
2516 } else {
2517 if (IS_CHERRYVIEW(dev))
2518 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2519 else
2520 *DP &= ~DP_LINK_TRAIN_MASK;
2521
2522 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2523 case DP_TRAINING_PATTERN_DISABLE:
2524 *DP |= DP_LINK_TRAIN_OFF;
2525 break;
2526 case DP_TRAINING_PATTERN_1:
2527 *DP |= DP_LINK_TRAIN_PAT_1;
2528 break;
2529 case DP_TRAINING_PATTERN_2:
2530 *DP |= DP_LINK_TRAIN_PAT_2;
2531 break;
2532 case DP_TRAINING_PATTERN_3:
2533 if (IS_CHERRYVIEW(dev)) {
2534 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2535 } else {
2536 DRM_ERROR("DP training pattern 3 not supported\n");
2537 *DP |= DP_LINK_TRAIN_PAT_2;
2538 }
2539 break;
2540 }
2541 }
2542 }
2543
2544 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2545 {
2546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2547 struct drm_i915_private *dev_priv = dev->dev_private;
2548
2549 /* enable with pattern 1 (as per spec) */
2550 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2551 DP_TRAINING_PATTERN_1);
2552
2553 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2554 POSTING_READ(intel_dp->output_reg);
2555
2556 /*
2557 * Magic for VLV/CHV. We _must_ first set up the register
2558 * without actually enabling the port, and then do another
2559 * write to enable the port. Otherwise link training will
2560 * fail when the power sequencer is freshly used for this port.
2561 */
2562 intel_dp->DP |= DP_PORT_EN;
2563
2564 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2565 POSTING_READ(intel_dp->output_reg);
2566 }
2567
2568 static void intel_enable_dp(struct intel_encoder *encoder)
2569 {
2570 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2571 struct drm_device *dev = encoder->base.dev;
2572 struct drm_i915_private *dev_priv = dev->dev_private;
2573 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2574 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2575
2576 if (WARN_ON(dp_reg & DP_PORT_EN))
2577 return;
2578
2579 pps_lock(intel_dp);
2580
2581 if (IS_VALLEYVIEW(dev))
2582 vlv_init_panel_power_sequencer(intel_dp);
2583
2584 intel_dp_enable_port(intel_dp);
2585
2586 edp_panel_vdd_on(intel_dp);
2587 edp_panel_on(intel_dp);
2588 edp_panel_vdd_off(intel_dp, true);
2589
2590 pps_unlock(intel_dp);
2591
2592 if (IS_VALLEYVIEW(dev)) {
2593 unsigned int lane_mask = 0x0;
2594
2595 if (IS_CHERRYVIEW(dev))
2596 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2597
2598 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2599 lane_mask);
2600 }
2601
2602 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2603 intel_dp_start_link_train(intel_dp);
2604 intel_dp_stop_link_train(intel_dp);
2605
2606 if (crtc->config->has_audio) {
2607 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2608 pipe_name(crtc->pipe));
2609 intel_audio_codec_enable(encoder);
2610 }
2611 }
2612
2613 static void g4x_enable_dp(struct intel_encoder *encoder)
2614 {
2615 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2616
2617 intel_enable_dp(encoder);
2618 intel_edp_backlight_on(intel_dp);
2619 }
2620
2621 static void vlv_enable_dp(struct intel_encoder *encoder)
2622 {
2623 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2624
2625 intel_edp_backlight_on(intel_dp);
2626 intel_psr_enable(intel_dp);
2627 }
2628
2629 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2630 {
2631 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2632 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2633
2634 intel_dp_prepare(encoder);
2635
2636 /* Only ilk+ has port A */
2637 if (dport->port == PORT_A) {
2638 ironlake_set_pll_cpu_edp(intel_dp);
2639 ironlake_edp_pll_on(intel_dp);
2640 }
2641 }
2642
2643 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2644 {
2645 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2646 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2647 enum pipe pipe = intel_dp->pps_pipe;
2648 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2649
2650 edp_panel_vdd_off_sync(intel_dp);
2651
2652 /*
2653 * VLV seems to get confused when multiple power sequencers
2654 * have the same port selected (even if only one has power/vdd
2655 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2656 * CHV, on the other hand, doesn't seem to mind having the same port
2657 * selected in multiple power sequencers, but let's always clear the
2658 * port select when logically disconnecting a power sequencer
2659 * from a port.
2660 */
2661 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2662 pipe_name(pipe), port_name(intel_dig_port->port));
2663 I915_WRITE(pp_on_reg, 0);
2664 POSTING_READ(pp_on_reg);
2665
2666 intel_dp->pps_pipe = INVALID_PIPE;
2667 }
2668
2669 static void vlv_steal_power_sequencer(struct drm_device *dev,
2670 enum pipe pipe)
2671 {
2672 struct drm_i915_private *dev_priv = dev->dev_private;
2673 struct intel_encoder *encoder;
2674
2675 lockdep_assert_held(&dev_priv->pps_mutex);
2676
2677 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2678 return;
2679
2680 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2681 base.head) {
2682 struct intel_dp *intel_dp;
2683 enum port port;
2684
2685 if (encoder->type != INTEL_OUTPUT_EDP)
2686 continue;
2687
2688 intel_dp = enc_to_intel_dp(&encoder->base);
2689 port = dp_to_dig_port(intel_dp)->port;
2690
2691 if (intel_dp->pps_pipe != pipe)
2692 continue;
2693
2694 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2695 pipe_name(pipe), port_name(port));
2696
2697 WARN(encoder->base.crtc,
2698 "stealing pipe %c power sequencer from active eDP port %c\n",
2699 pipe_name(pipe), port_name(port));
2700
2701 /* make sure vdd is off before we steal it */
2702 vlv_detach_power_sequencer(intel_dp);
2703 }
2704 }
2705
2706 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2707 {
2708 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2709 struct intel_encoder *encoder = &intel_dig_port->base;
2710 struct drm_device *dev = encoder->base.dev;
2711 struct drm_i915_private *dev_priv = dev->dev_private;
2712 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2713
2714 lockdep_assert_held(&dev_priv->pps_mutex);
2715
2716 if (!is_edp(intel_dp))
2717 return;
2718
2719 if (intel_dp->pps_pipe == crtc->pipe)
2720 return;
2721
2722 /*
2723 * If another power sequencer was being used on this
2724 * port previously, make sure to turn off vdd there while
2725 * we still have control of it.
2726 */
2727 if (intel_dp->pps_pipe != INVALID_PIPE)
2728 vlv_detach_power_sequencer(intel_dp);
2729
2730 /*
2731 * We may be stealing the power
2732 * sequencer from another port.
2733 */
2734 vlv_steal_power_sequencer(dev, crtc->pipe);
2735
2736 /* now it's all ours */
2737 intel_dp->pps_pipe = crtc->pipe;
2738
2739 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2740 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2741
2742 /* init power sequencer on this pipe and port */
2743 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2744 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2745 }
2746
2747 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2748 {
2749 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2750 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2751 struct drm_device *dev = encoder->base.dev;
2752 struct drm_i915_private *dev_priv = dev->dev_private;
2753 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2754 enum dpio_channel port = vlv_dport_to_channel(dport);
2755 int pipe = intel_crtc->pipe;
2756 u32 val;
2757
2758 mutex_lock(&dev_priv->sb_lock);
2759
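/*
 * Enable the clock channels for this port; bit 21 selects the pipe.
 * The remaining magic values follow the VLV programming sequence (the
 * initial read is presumably just a dummy wake-up access, since val is
 * immediately overwritten).
 */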
2760 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2761 val = 0;
2762 if (pipe)
2763 val |= (1<<21);
2764 else
2765 val &= ~(1<<21);
2766 val |= 0x001000c4;
2767 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2768 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2769 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2770
2771 mutex_unlock(&dev_priv->sb_lock);
2772
2773 intel_enable_dp(encoder);
2774 }
2775
2776 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2777 {
2778 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2779 struct drm_device *dev = encoder->base.dev;
2780 struct drm_i915_private *dev_priv = dev->dev_private;
2781 struct intel_crtc *intel_crtc =
2782 to_intel_crtc(encoder->base.crtc);
2783 enum dpio_channel port = vlv_dport_to_channel(dport);
2784 int pipe = intel_crtc->pipe;
2785
2786 intel_dp_prepare(encoder);
2787
2788 /* Program Tx lane resets to default */
2789 mutex_lock(&dev_priv->sb_lock);
2790 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2791 DPIO_PCS_TX_LANE2_RESET |
2792 DPIO_PCS_TX_LANE1_RESET);
2793 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2794 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2795 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2796 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2797 DPIO_PCS_CLK_SOFT_RESET);
2798
2799 /* Fix up inter-pair skew failure */
2800 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2801 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2802 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2803 mutex_unlock(&dev_priv->sb_lock);
2804 }
2805
2806 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2807 {
2808 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2809 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2810 struct drm_device *dev = encoder->base.dev;
2811 struct drm_i915_private *dev_priv = dev->dev_private;
2812 struct intel_crtc *intel_crtc =
2813 to_intel_crtc(encoder->base.crtc);
2814 enum dpio_channel ch = vlv_dport_to_channel(dport);
2815 int pipe = intel_crtc->pipe;
2816 int data, i, stagger;
2817 u32 val;
2818
2819 mutex_lock(&dev_priv->sb_lock);
2820
2821 /* allow hardware to manage TX FIFO reset source */
2822 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2823 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2824 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2825
2826 if (intel_crtc->config->lane_count > 2) {
2827 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2828 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2829 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2830 }
2831
2832 /* Program the optimal Tx lane latency setting */
2833 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2834 /* Set the upar bit */
2835 if (intel_crtc->config->lane_count == 1)
2836 data = 0x0;
2837 else
2838 data = (i == 1) ? 0x0 : 0x1;
2839 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2840 data << DPIO_UPAR_SHIFT);
2841 }
2842
2843 /* Data lane stagger programming */
2844 if (intel_crtc->config->port_clock > 270000)
2845 stagger = 0x18;
2846 else if (intel_crtc->config->port_clock > 135000)
2847 stagger = 0xd;
2848 else if (intel_crtc->config->port_clock > 67500)
2849 stagger = 0x7;
2850 else if (intel_crtc->config->port_clock > 33750)
2851 stagger = 0x4;
2852 else
2853 stagger = 0x2;
2854
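/*
 * Note the thresholds above halve with each step down in port clock,
 * i.e. faster links get a larger stagger value.
 */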
2855 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2856 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2857 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2858
2859 if (intel_crtc->config->lane_count > 2) {
2860 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2861 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2862 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2863 }
2864
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2866 DPIO_LANESTAGGER_STRAP(stagger) |
2867 DPIO_LANESTAGGER_STRAP_OVRD |
2868 DPIO_TX1_STAGGER_MASK(0x1f) |
2869 DPIO_TX1_STAGGER_MULT(6) |
2870 DPIO_TX2_STAGGER_MULT(0));
2871
2872 if (intel_crtc->config->lane_count > 2) {
2873 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2874 DPIO_LANESTAGGER_STRAP(stagger) |
2875 DPIO_LANESTAGGER_STRAP_OVRD |
2876 DPIO_TX1_STAGGER_MASK(0x1f) |
2877 DPIO_TX1_STAGGER_MULT(7) |
2878 DPIO_TX2_STAGGER_MULT(5));
2879 }
2880
2881 /* Deassert data lane reset */
2882 chv_data_lane_soft_reset(encoder, false);
2883
2884 mutex_unlock(&dev_priv->sb_lock);
2885
2886 intel_enable_dp(encoder);
2887
2888 /* Second common lane will stay alive on its own now */
2889 if (dport->release_cl2_override) {
2890 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2891 dport->release_cl2_override = false;
2892 }
2893 }
2894
2895 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2896 {
2897 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2898 struct drm_device *dev = encoder->base.dev;
2899 struct drm_i915_private *dev_priv = dev->dev_private;
2900 struct intel_crtc *intel_crtc =
2901 to_intel_crtc(encoder->base.crtc);
2902 enum dpio_channel ch = vlv_dport_to_channel(dport);
2903 enum pipe pipe = intel_crtc->pipe;
2904 unsigned int lane_mask =
2905 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2906 u32 val;
2907
2908 intel_dp_prepare(encoder);
2909
2910 /*
2911 * Must trick the second common lane into life.
2912 * Otherwise we can't even access the PLL.
2913 */
2914 if (ch == DPIO_CH0 && pipe == PIPE_B)
2915 dport->release_cl2_override =
2916 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2917
2918 chv_phy_powergate_lanes(encoder, true, lane_mask);
2919
2920 mutex_lock(&dev_priv->sb_lock);
2921
2922 /* Assert data lane reset */
2923 chv_data_lane_soft_reset(encoder, true);
2924
2925 /* program left/right clock distribution */
2926 if (pipe != PIPE_B) {
2927 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2928 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2929 if (ch == DPIO_CH0)
2930 val |= CHV_BUFLEFTENA1_FORCE;
2931 if (ch == DPIO_CH1)
2932 val |= CHV_BUFRIGHTENA1_FORCE;
2933 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2934 } else {
2935 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2936 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2937 if (ch == DPIO_CH0)
2938 val |= CHV_BUFLEFTENA2_FORCE;
2939 if (ch == DPIO_CH1)
2940 val |= CHV_BUFRIGHTENA2_FORCE;
2941 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2942 }
2943
2944 /* program clock channel usage */
2945 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2946 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2947 if (pipe != PIPE_B)
2948 val &= ~CHV_PCS_USEDCLKCHANNEL;
2949 else
2950 val |= CHV_PCS_USEDCLKCHANNEL;
2951 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2952
2953 if (intel_crtc->config->lane_count > 2) {
2954 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2955 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2956 if (pipe != PIPE_B)
2957 val &= ~CHV_PCS_USEDCLKCHANNEL;
2958 else
2959 val |= CHV_PCS_USEDCLKCHANNEL;
2960 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2961 }
2962
2963 /*
2964 * This is a bit weird since generally the CL
2965 * matches the pipe, but here we need to
2966 * pick the CL based on the port.
2967 */
2968 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2969 if (pipe != PIPE_B)
2970 val &= ~CHV_CMN_USEDCLKCHANNEL;
2971 else
2972 val |= CHV_CMN_USEDCLKCHANNEL;
2973 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2974
2975 mutex_unlock(&dev_priv->sb_lock);
2976 }
2977
2978 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2979 {
2980 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2981 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2982 u32 val;
2983
2984 mutex_lock(&dev_priv->sb_lock);
2985
2986 /* disable left/right clock distribution */
2987 if (pipe != PIPE_B) {
2988 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2989 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2990 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2991 } else {
2992 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2993 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2994 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2995 }
2996
2997 mutex_unlock(&dev_priv->sb_lock);
2998
2999 /*
3000 * Leave the power down bit cleared for at least one
3001 * lane so that chv_powergate_phy_ch() will power
3002 * on something when the channel is otherwise unused.
3003 * When the port is off and the override is removed
3004 * the lanes power down anyway, so it doesn't
3005 * really matter what state the power down bits
3006 * are left in after this.
3007 */
3008 chv_phy_powergate_lanes(encoder, false, 0x0);
3009 }
3010
3011 /*
3012 * Native read with retry for link status and receiver capability reads for
3013 * cases where the sink may still be asleep.
3014 *
3015 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3016 * supposed to retry 3 times per the spec.
3017 */
3018 static ssize_t
3019 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3020 void *buffer, size_t size)
3021 {
3022 ssize_t ret;
3023 int i;
3024
3025 /*
3026 * Sometimes we just get the same incorrect byte repeated
3027 * over the entire buffer. Doing just one throw-away read
3028 * initially seems to "solve" it.
3029 */
3030 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3031
3032 for (i = 0; i < 3; i++) {
3033 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3034 if (ret == size)
3035 return ret;
3036 msleep(1);
3037 }
3038
3039 return ret;
3040 }
3041
3042 /*
3043 * Fetch AUX CH registers 0x202 - 0x207 which contain
3044 * link status information
3045 */
3046 static bool
3047 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3048 {
3049 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3050 DP_LANE0_1_STATUS,
3051 link_status,
3052 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3053 }
3054
3055 /* These are source-specific values. */
3056 static uint8_t
3057 intel_dp_voltage_max(struct intel_dp *intel_dp)
3058 {
3059 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3060 struct drm_i915_private *dev_priv = dev->dev_private;
3061 enum port port = dp_to_dig_port(intel_dp)->port;
3062
3063 if (IS_BROXTON(dev))
3064 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3065 else if (INTEL_INFO(dev)->gen >= 9) {
3066 if (dev_priv->edp_low_vswing && port == PORT_A)
3067 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3068 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3069 } else if (IS_VALLEYVIEW(dev))
3070 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3071 else if (IS_GEN7(dev) && port == PORT_A)
3072 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3073 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3074 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3075 else
3076 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3077 }
3078
3079 static uint8_t
3080 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3081 {
3082 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3083 enum port port = dp_to_dig_port(intel_dp)->port;
3084
3085 if (INTEL_INFO(dev)->gen >= 9) {
3086 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3088 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3089 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3090 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3092 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3093 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3094 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3095 default:
3096 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3097 }
3098 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3099 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3100 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3101 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3102 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3103 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3105 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3107 default:
3108 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3109 }
3110 } else if (IS_VALLEYVIEW(dev)) {
3111 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3112 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3113 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3115 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3116 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3117 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3119 default:
3120 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3121 }
3122 } else if (IS_GEN7(dev) && port == PORT_A) {
3123 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3125 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3128 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3129 default:
3130 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3131 }
3132 } else {
3133 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3135 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3137 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3139 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3140 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3141 default:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3143 }
3144 }
3145 }
3146
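/*
 * Program the VLV DPIO TX/PCS registers for the requested voltage swing
 * and pre-emphasis. The magic values presumably come from the hardware
 * programming tables; unsupported combinations bail out early without
 * touching the hardware.
 */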
3147 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3148 {
3149 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3150 struct drm_i915_private *dev_priv = dev->dev_private;
3151 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3152 struct intel_crtc *intel_crtc =
3153 to_intel_crtc(dport->base.base.crtc);
3154 unsigned long demph_reg_value, preemph_reg_value,
3155 uniqtranscale_reg_value;
3156 uint8_t train_set = intel_dp->train_set[0];
3157 enum dpio_channel port = vlv_dport_to_channel(dport);
3158 int pipe = intel_crtc->pipe;
3159
3160 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3161 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3162 preemph_reg_value = 0x0004000;
3163 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3164 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3165 demph_reg_value = 0x2B405555;
3166 uniqtranscale_reg_value = 0x552AB83A;
3167 break;
3168 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3169 demph_reg_value = 0x2B404040;
3170 uniqtranscale_reg_value = 0x5548B83A;
3171 break;
3172 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3173 demph_reg_value = 0x2B245555;
3174 uniqtranscale_reg_value = 0x5560B83A;
3175 break;
3176 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3177 demph_reg_value = 0x2B405555;
3178 uniqtranscale_reg_value = 0x5598DA3A;
3179 break;
3180 default:
3181 return 0;
3182 }
3183 break;
3184 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3185 preemph_reg_value = 0x0002000;
3186 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3187 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3188 demph_reg_value = 0x2B404040;
3189 uniqtranscale_reg_value = 0x5552B83A;
3190 break;
3191 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3192 demph_reg_value = 0x2B404848;
3193 uniqtranscale_reg_value = 0x5580B83A;
3194 break;
3195 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3196 demph_reg_value = 0x2B404040;
3197 uniqtranscale_reg_value = 0x55ADDA3A;
3198 break;
3199 default:
3200 return 0;
3201 }
3202 break;
3203 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3204 preemph_reg_value = 0x0000000;
3205 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3206 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3207 demph_reg_value = 0x2B305555;
3208 uniqtranscale_reg_value = 0x5570B83A;
3209 break;
3210 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3211 demph_reg_value = 0x2B2B4040;
3212 uniqtranscale_reg_value = 0x55ADDA3A;
3213 break;
3214 default:
3215 return 0;
3216 }
3217 break;
3218 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3219 preemph_reg_value = 0x0006000;
3220 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3221 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3222 demph_reg_value = 0x1B405555;
3223 uniqtranscale_reg_value = 0x55ADDA3A;
3224 break;
3225 default:
3226 return 0;
3227 }
3228 break;
3229 default:
3230 return 0;
3231 }
3232
3233 mutex_lock(&dev_priv->sb_lock);
3234 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3235 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3236 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3237 uniqtranscale_reg_value);
3238 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3239 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3240 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3241 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3242 mutex_unlock(&dev_priv->sb_lock);
3243
3244 return 0;
3245 }
3246
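/*
 * The unique transition scale is only needed for the maximum voltage
 * swing / no pre-emphasis combination (the 1200mV case flagged by the
 * FIXME in chv_signal_levels() below).
 */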
3247 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3248 {
3249 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3250 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3251 }
3252
3253 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3254 {
3255 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3256 struct drm_i915_private *dev_priv = dev->dev_private;
3257 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3258 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3259 u32 deemph_reg_value, margin_reg_value, val;
3260 uint8_t train_set = intel_dp->train_set[0];
3261 enum dpio_channel ch = vlv_dport_to_channel(dport);
3262 enum pipe pipe = intel_crtc->pipe;
3263 int i;
3264
3265 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3266 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3267 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3269 deemph_reg_value = 128;
3270 margin_reg_value = 52;
3271 break;
3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3273 deemph_reg_value = 128;
3274 margin_reg_value = 77;
3275 break;
3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3277 deemph_reg_value = 128;
3278 margin_reg_value = 102;
3279 break;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3281 deemph_reg_value = 128;
3282 margin_reg_value = 154;
3283 /* FIXME extra to set for 1200 */
3284 break;
3285 default:
3286 return 0;
3287 }
3288 break;
3289 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3290 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3292 deemph_reg_value = 85;
3293 margin_reg_value = 78;
3294 break;
3295 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3296 deemph_reg_value = 85;
3297 margin_reg_value = 116;
3298 break;
3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3300 deemph_reg_value = 85;
3301 margin_reg_value = 154;
3302 break;
3303 default:
3304 return 0;
3305 }
3306 break;
3307 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3308 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3310 deemph_reg_value = 64;
3311 margin_reg_value = 104;
3312 break;
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3314 deemph_reg_value = 64;
3315 margin_reg_value = 154;
3316 break;
3317 default:
3318 return 0;
3319 }
3320 break;
3321 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3322 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3323 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3324 deemph_reg_value = 43;
3325 margin_reg_value = 154;
3326 break;
3327 default:
3328 return 0;
3329 }
3330 break;
3331 default:
3332 return 0;
3333 }
3334
3335 mutex_lock(&dev_priv->sb_lock);
3336
3337 /* Clear calc init */
3338 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3339 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3340 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3341 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3342 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3343
3344 if (intel_crtc->config->lane_count > 2) {
3345 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3346 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3347 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3348 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3349 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3350 }
3351
3352 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3353 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3354 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3355 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3356
3357 if (intel_crtc->config->lane_count > 2) {
3358 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3359 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3360 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3361 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3362 }
3363
3364 /* Program swing deemph */
3365 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3366 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3367 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3368 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3369 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3370 }
3371
3372 /* Program swing margin */
3373 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3374 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3375
3376 val &= ~DPIO_SWING_MARGIN000_MASK;
3377 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3378
3379 /*
3380 * Supposedly this value shouldn't matter when unique transition
3381 * scale is disabled, but in fact it does matter. Let's just
3382 * always program the same value and hope it's OK.
3383 */
3384 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3385 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3386
3387 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3388 }
3389
3390 /*
3391 * The documentation says to set bit 27 for ch0 and bit 26
3392 * for ch1; that might be a typo in the doc.
3393 * For now, for this unique transition scale selection, set bit
3394 * 27 for both ch0 and ch1.
3395 */
3396 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3397 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3398 if (chv_need_uniq_trans_scale(train_set))
3399 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3400 else
3401 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3402 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3403 }
3404
3405 /* Start swing calculation */
3406 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3407 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3408 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3409
3410 if (intel_crtc->config->lane_count > 2) {
3411 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3412 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3413 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3414 }
3415
3416 mutex_unlock(&dev_priv->sb_lock);
3417
3418 return 0;
3419 }
3420
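/*
 * Annotation (not in the original source): during link training the sink
 * requests a new drive setting per lane via the ADJUST_REQUEST bytes in
 * the link status block. The helper below takes the maximum requested
 * voltage swing and pre-emphasis across all active lanes, clamps each to
 * the source's platform maximum, and sets the MAX_*_REACHED flags so the
 * sink knows not to ask for more. All four train_set entries are filled
 * in even when fewer lanes are active; the extras are simply unused.
 */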
3421 static void
3422 intel_get_adjust_train(struct intel_dp *intel_dp,
3423 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3424 {
3425 uint8_t v = 0;
3426 uint8_t p = 0;
3427 int lane;
3428 uint8_t voltage_max;
3429 uint8_t preemph_max;
3430
3431 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3432 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3433 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3434
3435 if (this_v > v)
3436 v = this_v;
3437 if (this_p > p)
3438 p = this_p;
3439 }
3440
3441 voltage_max = intel_dp_voltage_max(intel_dp);
3442 if (v >= voltage_max)
3443 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3444
3445 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3446 if (p >= preemph_max)
3447 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3448
3449 for (lane = 0; lane < 4; lane++)
3450 intel_dp->train_set[lane] = v | p;
3451 }
3452
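/*
 * Annotation (not in the original source): a worked example of the
 * mapping below, using the standard DPCD TRAINING_LANEx_SET encoding
 * (bits 1:0 = voltage swing, bits 4:3 = pre-emphasis):
 *
 *   train_set = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1
 *   => signal_levels = DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5
 *
 * i.e. 0.8 V swing with 3.5 dB pre-emphasis in the gen4 port register.
 */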
3453 static uint32_t
3454 gen4_signal_levels(uint8_t train_set)
3455 {
3456 uint32_t signal_levels = 0;
3457
3458 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3460 default:
3461 signal_levels |= DP_VOLTAGE_0_4;
3462 break;
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3464 signal_levels |= DP_VOLTAGE_0_6;
3465 break;
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3467 signal_levels |= DP_VOLTAGE_0_8;
3468 break;
3469 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3470 signal_levels |= DP_VOLTAGE_1_2;
3471 break;
3472 }
3473 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3474 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3475 default:
3476 signal_levels |= DP_PRE_EMPHASIS_0;
3477 break;
3478 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3479 signal_levels |= DP_PRE_EMPHASIS_3_5;
3480 break;
3481 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3482 signal_levels |= DP_PRE_EMPHASIS_6;
3483 break;
3484 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3485 signal_levels |= DP_PRE_EMPHASIS_9_5;
3486 break;
3487 }
3488 return signal_levels;
3489 }
3490
3491 /* Gen6's DP voltage swing and pre-emphasis control */
3492 static uint32_t
3493 gen6_edp_signal_levels(uint8_t train_set)
3494 {
3495 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3496 DP_TRAIN_PRE_EMPHASIS_MASK);
3497 switch (signal_levels) {
3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3499 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3500 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3502 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3503 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3504 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3505 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3506 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3507 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3508 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3509 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3510 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3511 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3512 default:
3513 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3514 "0x%x\n", signal_levels);
3515 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3516 }
3517 }
3518
3519 /* Gen7's DP voltage swing and pre-emphasis control */
3520 static uint32_t
3521 gen7_edp_signal_levels(uint8_t train_set)
3522 {
3523 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3524 DP_TRAIN_PRE_EMPHASIS_MASK);
3525 switch (signal_levels) {
3526 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3527 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3528 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3529 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3530 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3531 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3532
3533 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3534 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3535 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3536 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3537
3538 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3539 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3540 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3541 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3542
3543 default:
3544 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3545 "0x%x\n", signal_levels);
3546 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3547 }
3548 }
3549
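/*
 * Annotation (not in the original source): each platform encodes the
 * vswing/pre-emphasis selection differently, so the helper below picks
 * the right translation for the current hardware and merges it into the
 * port register with a read-modify-write:
 *
 *   *DP = (*DP & ~mask) | signal_levels;
 *
 * Broxton and VLV/CHV program the levels through other registers, so
 * they leave mask at 0 and the merge is a no-op.
 */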
3550 /* Properly updates "DP" with the correct signal levels. */
3551 static void
3552 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3553 {
3554 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3555 enum port port = intel_dig_port->port;
3556 struct drm_device *dev = intel_dig_port->base.base.dev;
3557 uint32_t signal_levels, mask = 0;
3558 uint8_t train_set = intel_dp->train_set[0];
3559
3560 if (HAS_DDI(dev)) {
3561 signal_levels = ddi_signal_levels(intel_dp);
3562
3563 if (IS_BROXTON(dev))
3564 signal_levels = 0;
3565 else
3566 mask = DDI_BUF_EMP_MASK;
3567 } else if (IS_CHERRYVIEW(dev)) {
3568 signal_levels = chv_signal_levels(intel_dp);
3569 } else if (IS_VALLEYVIEW(dev)) {
3570 signal_levels = vlv_signal_levels(intel_dp);
3571 } else if (IS_GEN7(dev) && port == PORT_A) {
3572 signal_levels = gen7_edp_signal_levels(train_set);
3573 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3574 } else if (IS_GEN6(dev) && port == PORT_A) {
3575 signal_levels = gen6_edp_signal_levels(train_set);
3576 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3577 } else {
3578 signal_levels = gen4_signal_levels(train_set);
3579 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3580 }
3581
3582 if (mask)
3583 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3584
3585 DRM_DEBUG_KMS("Using vswing level %d\n",
3586 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3587 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3588 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3589 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3590
3591 *DP = (*DP & ~mask) | signal_levels;
3592 }
3593
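/*
 * Annotation (not in the original source): DP_TRAINING_PATTERN_SET
 * (DPCD 0x102) and DP_TRAINING_LANE0_SET..LANE3_SET (0x103..0x106) are
 * contiguous, so the pattern and the per-lane drive settings can be
 * pushed to the sink in a single AUX write:
 *
 *   buf[0]             = dp_train_pat;
 *   buf[1..lane_count] = intel_dp->train_set[0..lane_count - 1];
 *
 * On DP_TRAINING_PATTERN_DISABLE only buf[0] is sent, since lane
 * settings are meaningless once training stops.
 */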
3594 static bool
3595 intel_dp_set_link_train(struct intel_dp *intel_dp,
3596 uint32_t *DP,
3597 uint8_t dp_train_pat)
3598 {
3599 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3600 struct drm_i915_private *dev_priv =
3601 to_i915(intel_dig_port->base.base.dev);
3602 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3603 int ret, len;
3604
3605 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3606
3607 I915_WRITE(intel_dp->output_reg, *DP);
3608 POSTING_READ(intel_dp->output_reg);
3609
3610 buf[0] = dp_train_pat;
3611 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3612 DP_TRAINING_PATTERN_DISABLE) {
3613 /* don't write DP_TRAINING_LANEx_SET on disable */
3614 len = 1;
3615 } else {
3616 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3617 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3618 len = intel_dp->lane_count + 1;
3619 }
3620
3621 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3622 buf, len);
3623
3624 return ret == len;
3625 }
3626
3627 static bool
3628 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3629 uint8_t dp_train_pat)
3630 {
3631 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3632 intel_dp_set_signal_levels(intel_dp, DP);
3633 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3634 }
3635
3636 static bool
3637 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3638 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3639 {
3640 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3641 struct drm_i915_private *dev_priv =
3642 to_i915(intel_dig_port->base.base.dev);
3643 int ret;
3644
3645 intel_get_adjust_train(intel_dp, link_status);
3646 intel_dp_set_signal_levels(intel_dp, DP);
3647
3648 I915_WRITE(intel_dp->output_reg, *DP);
3649 POSTING_READ(intel_dp->output_reg);
3650
3651 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3652 intel_dp->train_set, intel_dp->lane_count);
3653
3654 return ret == intel_dp->lane_count;
3655 }
3656
3657 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3658 {
3659 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3660 struct drm_device *dev = intel_dig_port->base.base.dev;
3661 struct drm_i915_private *dev_priv = dev->dev_private;
3662 enum port port = intel_dig_port->port;
3663 uint32_t val;
3664
3665 if (!HAS_DDI(dev))
3666 return;
3667
3668 val = I915_READ(DP_TP_CTL(port));
3669 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3670 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3671 I915_WRITE(DP_TP_CTL(port), val);
3672
3673 /*
3674 * On PORT_A we can have only eDP in SST mode. There the only reason
3675 * we need to set idle transmission mode is to work around a HW issue
3676 * where we enable the pipe while not in idle link-training mode.
3677 * In this case there is a requirement to wait for a minimum number of
3678 * idle patterns to be sent.
3679 */
3680 if (port == PORT_A)
3681 return;
3682
3683 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3684 1))
3685 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3686 }
3687
3688 /* Enable corresponding port and start training pattern 1 */
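/*
 * Annotation (not in the original source), clock recovery in outline:
 *   1. write link rate, lane count and downspread config to the sink
 *   2. enable the port and transmit training pattern 1, scrambling off
 *   3. poll the sink's lane status; while clock recovery fails, apply
 *      the sink's requested vswing/pre-emphasis adjustments and retry
 * The loop gives up after 5 tries at the same voltage, or after 5 full
 * restarts once every lane reports DP_TRAIN_MAX_SWING_REACHED.
 */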
3689 static void
3690 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
3691 {
3692 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3693 struct drm_device *dev = encoder->dev;
3694 int i;
3695 uint8_t voltage;
3696 int voltage_tries, loop_tries;
3697 uint32_t DP = intel_dp->DP;
3698 uint8_t link_config[2];
3699 uint8_t link_bw, rate_select;
3700
3701 if (HAS_DDI(dev))
3702 intel_ddi_prepare_link_retrain(encoder);
3703
3704 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3705 &link_bw, &rate_select);
3706
3707 /* Write the link configuration data */
3708 link_config[0] = link_bw;
3709 link_config[1] = intel_dp->lane_count;
3710 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3711 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3712 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3713 if (intel_dp->num_sink_rates)
3714 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3715 &rate_select, 1);
3716
3717 link_config[0] = 0;
3718 link_config[1] = DP_SET_ANSI_8B10B;
3719 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3720
3721 DP |= DP_PORT_EN;
3722
3723 /* clock recovery */
3724 if (!intel_dp_reset_link_train(intel_dp, &DP,
3725 DP_TRAINING_PATTERN_1 |
3726 DP_LINK_SCRAMBLING_DISABLE)) {
3727 DRM_ERROR("failed to enable link training\n");
3728 return;
3729 }
3730
3731 voltage = 0xff;
3732 voltage_tries = 0;
3733 loop_tries = 0;
3734 for (;;) {
3735 uint8_t link_status[DP_LINK_STATUS_SIZE];
3736
3737 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3738 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3739 DRM_ERROR("failed to get link status\n");
3740 break;
3741 }
3742
3743 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3744 DRM_DEBUG_KMS("clock recovery OK\n");
3745 break;
3746 }
3747
3748
3749 /* Check to see if we've tried the max voltage */
3750 for (i = 0; i < intel_dp->lane_count; i++)
3751 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3752 break;
3753 if (i == intel_dp->lane_count) {
3754 ++loop_tries;
3755 if (loop_tries == 5) {
3756 DRM_ERROR("too many full retries, give up\n");
3757 break;
3758 }
3759 intel_dp_reset_link_train(intel_dp, &DP,
3760 DP_TRAINING_PATTERN_1 |
3761 DP_LINK_SCRAMBLING_DISABLE);
3762 voltage_tries = 0;
3763 continue;
3764 }
3765
3766 /* Check to see if we've tried the same voltage 5 times */
3767 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3768 ++voltage_tries;
3769 if (voltage_tries == 5) {
3770 DRM_ERROR("too many voltage retries, give up\n");
3771 break;
3772 }
3773 } else
3774 voltage_tries = 0;
3775 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3776
3777 /* Update training set as requested by target */
3778 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3779 DRM_ERROR("failed to update link training\n");
3780 break;
3781 }
3782 }
3783
3784 intel_dp->DP = DP;
3785 }
3786
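/*
 * Annotation (not in the original source): channel equalization runs
 * with training pattern 2, or pattern 3 when both source and sink
 * support HBR2/TPS3. Each pass first re-checks clock recovery (falling
 * back to a full retrain if it was lost), then polls for EQ lock; after
 * more than 5 equalization attempts it retries clock recovery, and it
 * aborts entirely once more than 5 clock-recovery cycles have been used.
 */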
3787 static void
3788 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
3789 {
3790 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3791 struct drm_device *dev = dig_port->base.base.dev;
3792 bool channel_eq = false;
3793 int tries, cr_tries;
3794 uint32_t DP = intel_dp->DP;
3795 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3796
3797 /*
3798 * Training Pattern 3 for HBR2 or DP 1.2 devices that support it.
3799 *
3800 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3801 * also mandatory for downstream devices that support HBR2.
3802 *
3803 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
3804 * supported but still not enabled.
3805 */
3806 if (intel_dp_source_supports_hbr2(dev) &&
3807 drm_dp_tps3_supported(intel_dp->dpcd))
3808 training_pattern = DP_TRAINING_PATTERN_3;
3809 else if (intel_dp->link_rate == 540000)
3810 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
3811
3812 /* channel equalization */
3813 if (!intel_dp_set_link_train(intel_dp, &DP,
3814 training_pattern |
3815 DP_LINK_SCRAMBLING_DISABLE)) {
3816 DRM_ERROR("failed to start channel equalization\n");
3817 return;
3818 }
3819
3820 tries = 0;
3821 cr_tries = 0;
3822 channel_eq = false;
3823 for (;;) {
3824 uint8_t link_status[DP_LINK_STATUS_SIZE];
3825
3826 if (cr_tries > 5) {
3827 DRM_ERROR("failed to train DP, aborting\n");
3828 break;
3829 }
3830
3831 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3832 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3833 DRM_ERROR("failed to get link status\n");
3834 break;
3835 }
3836
3837 /* Make sure clock is still ok */
3838 if (!drm_dp_clock_recovery_ok(link_status,
3839 intel_dp->lane_count)) {
3840 intel_dp_link_training_clock_recovery(intel_dp);
3841 intel_dp_set_link_train(intel_dp, &DP,
3842 training_pattern |
3843 DP_LINK_SCRAMBLING_DISABLE);
3844 cr_tries++;
3845 continue;
3846 }
3847
3848 if (drm_dp_channel_eq_ok(link_status,
3849 intel_dp->lane_count)) {
3850 channel_eq = true;
3851 break;
3852 }
3853
3854 /* Try 5 times, then try clock recovery if that fails */
3855 if (tries > 5) {
3856 intel_dp_link_training_clock_recovery(intel_dp);
3857 intel_dp_set_link_train(intel_dp, &DP,
3858 training_pattern |
3859 DP_LINK_SCRAMBLING_DISABLE);
3860 tries = 0;
3861 cr_tries++;
3862 continue;
3863 }
3864
3865 /* Update training set as requested by target */
3866 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3867 DRM_ERROR("failed to update link training\n");
3868 break;
3869 }
3870 ++tries;
3871 }
3872
3873 intel_dp_set_idle_link_train(intel_dp);
3874
3875 intel_dp->DP = DP;
3876
3877 if (channel_eq)
3878 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3879 }
3880
3881 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3882 {
3883 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3884 DP_TRAINING_PATTERN_DISABLE);
3885 }
3886
3887 void
3888 intel_dp_start_link_train(struct intel_dp *intel_dp)
3889 {
3890 intel_dp_link_training_clock_recovery(intel_dp);
3891 intel_dp_link_training_channel_equalization(intel_dp);
3892 }
3893
3894 static void
3895 intel_dp_link_down(struct intel_dp *intel_dp)
3896 {
3897 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3898 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3899 enum port port = intel_dig_port->port;
3900 struct drm_device *dev = intel_dig_port->base.base.dev;
3901 struct drm_i915_private *dev_priv = dev->dev_private;
3902 uint32_t DP = intel_dp->DP;
3903
3904 if (WARN_ON(HAS_DDI(dev)))
3905 return;
3906
3907 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3908 return;
3909
3910 DRM_DEBUG_KMS("\n");
3911
3912 if ((IS_GEN7(dev) && port == PORT_A) ||
3913 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3914 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3915 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3916 } else {
3917 if (IS_CHERRYVIEW(dev))
3918 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3919 else
3920 DP &= ~DP_LINK_TRAIN_MASK;
3921 DP |= DP_LINK_TRAIN_PAT_IDLE;
3922 }
3923 I915_WRITE(intel_dp->output_reg, DP);
3924 POSTING_READ(intel_dp->output_reg);
3925
3926 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3927 I915_WRITE(intel_dp->output_reg, DP);
3928 POSTING_READ(intel_dp->output_reg);
3929
3930 /*
3931 * HW workaround for IBX, we need to move the port
3932 * to transcoder A after disabling it to allow the
3933 * matching HDMI port to be enabled on transcoder A.
3934 */
3935 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3936 /* always enable with pattern 1 (as per spec) */
3937 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3938 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3939 I915_WRITE(intel_dp->output_reg, DP);
3940 POSTING_READ(intel_dp->output_reg);
3941
3942 DP &= ~DP_PORT_EN;
3943 I915_WRITE(intel_dp->output_reg, DP);
3944 POSTING_READ(intel_dp->output_reg);
3945 }
3946
3947 msleep(intel_dp->panel_power_down_delay);
3948 }
3949
3950 static bool
3951 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3952 {
3953 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3954 struct drm_device *dev = dig_port->base.base.dev;
3955 struct drm_i915_private *dev_priv = dev->dev_private;
3956 uint8_t rev;
3957
3958 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3959 sizeof(intel_dp->dpcd)) < 0)
3960 return false; /* aux transfer failed */
3961
3962 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3963
3964 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3965 return false; /* DPCD not present */
3966
3967 /* Check if the panel supports PSR */
3968 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3969 if (is_edp(intel_dp)) {
3970 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3971 intel_dp->psr_dpcd,
3972 sizeof(intel_dp->psr_dpcd));
3973 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3974 dev_priv->psr.sink_support = true;
3975 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3976 }
3977
3978 if (INTEL_INFO(dev)->gen >= 9 &&
3979 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3980 uint8_t frame_sync_cap;
3981
3982 dev_priv->psr.sink_support = true;
3983 intel_dp_dpcd_read_wake(&intel_dp->aux,
3984 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3985 &frame_sync_cap, 1);
3986 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3987 /* PSR2 needs frame sync as well */
3988 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3989 DRM_DEBUG_KMS("PSR2 %s on sink",
3990 dev_priv->psr.psr2_support ? "supported" : "not supported");
3991 }
3992 }
3993
3994 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3995 yesno(intel_dp_source_supports_hbr2(dev)),
3996 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3997
3998 /* Intermediate frequency support */
3999 if (is_edp(intel_dp) &&
4000 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4001 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4002 (rev >= 0x03)) { /* eDP v1.4 or higher */
4003 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4004 int i;
4005
4006 intel_dp_dpcd_read_wake(&intel_dp->aux,
4007 DP_SUPPORTED_LINK_RATES,
4008 sink_rates,
4009 sizeof(sink_rates));
4010
4011 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4012 int val = le16_to_cpu(sink_rates[i]);
4013
4014 if (val == 0)
4015 break;
4016
4017 /* Value read is in kHz while drm clock is saved in deca-kHz */
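/*
 * Annotation (not in the original source): DP_SUPPORTED_LINK_RATES
 * entries are in units of 200 kHz, while the driver stores rates in
 * units of 10 kHz. Worked example: val = 8100 -> 8100 * 200 kHz =
 * 1.62 GHz, stored as (8100 * 200) / 10 = 162000.
 */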
4018 intel_dp->sink_rates[i] = (val * 200) / 10;
4019 }
4020 intel_dp->num_sink_rates = i;
4021 }
4022
4023 intel_dp_print_rates(intel_dp);
4024
4025 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4026 DP_DWN_STRM_PORT_PRESENT))
4027 return true; /* native DP sink */
4028
4029 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4030 return true; /* no per-port downstream info */
4031
4032 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4033 intel_dp->downstream_ports,
4034 DP_MAX_DOWNSTREAM_PORTS) < 0)
4035 return false; /* downstream port status fetch failed */
4036
4037 return true;
4038 }
4039
4040 static void
4041 intel_dp_probe_oui(struct intel_dp *intel_dp)
4042 {
4043 u8 buf[3];
4044
4045 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4046 return;
4047
4048 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4049 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4050 buf[0], buf[1], buf[2]);
4051
4052 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4053 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4054 buf[0], buf[1], buf[2]);
4055 }
4056
4057 static bool
4058 intel_dp_probe_mst(struct intel_dp *intel_dp)
4059 {
4060 u8 buf[1];
4061
4062 if (!intel_dp->can_mst)
4063 return false;
4064
4065 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4066 return false;
4067
4068 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4069 if (buf[0] & DP_MST_CAP) {
4070 DRM_DEBUG_KMS("Sink is MST capable\n");
4071 intel_dp->is_mst = true;
4072 } else {
4073 DRM_DEBUG_KMS("Sink is not MST capable\n");
4074 intel_dp->is_mst = false;
4075 }
4076 }
4077
4078 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4079 return intel_dp->is_mst;
4080 }
4081
4082 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4083 {
4084 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4085 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4086 u8 buf;
4087 int ret = 0;
4088
4089 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4090 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4091 ret = -EIO;
4092 goto out;
4093 }
4094
4095 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4096 buf & ~DP_TEST_SINK_START) < 0) {
4097 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4098 ret = -EIO;
4099 goto out;
4100 }
4101
4102 intel_dp->sink_crc.started = false;
4103 out:
4104 hsw_enable_ips(intel_crtc);
4105 return ret;
4106 }
4107
4108 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4109 {
4110 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4111 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4112 u8 buf;
4113 int ret;
4114
4115 if (intel_dp->sink_crc.started) {
4116 ret = intel_dp_sink_crc_stop(intel_dp);
4117 if (ret)
4118 return ret;
4119 }
4120
4121 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4122 return -EIO;
4123
4124 if (!(buf & DP_TEST_CRC_SUPPORTED))
4125 return -ENOTTY;
4126
4127 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4128
4129 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4130 return -EIO;
4131
4132 hsw_disable_ips(intel_crtc);
4133
4134 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4135 buf | DP_TEST_SINK_START) < 0) {
4136 hsw_enable_ips(intel_crtc);
4137 return -EIO;
4138 }
4139
4140 intel_dp->sink_crc.started = true;
4141 return 0;
4142 }
4143
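/*
 * Annotation (not in the original source): the sink computes a CRC over
 * each transmitted frame, exposing the result at DP_TEST_CRC_R_CR and a
 * frame counter in DP_TEST_SINK_MISC. The loop below waits a vblank at
 * a time until the counter has advanced and the CRC differs from the
 * previous readout, giving up after 6 vblanks and reporting either an
 * unreliable counter or a timeout.
 */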
4144 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4145 {
4146 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4147 struct drm_device *dev = dig_port->base.base.dev;
4148 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4149 u8 buf;
4150 int count, ret;
4151 int attempts = 6;
4152 bool old_equal_new;
4153
4154 ret = intel_dp_sink_crc_start(intel_dp);
4155 if (ret)
4156 return ret;
4157
4158 do {
4159 intel_wait_for_vblank(dev, intel_crtc->pipe);
4160
4161 if (drm_dp_dpcd_readb(&intel_dp->aux,
4162 DP_TEST_SINK_MISC, &buf) < 0) {
4163 ret = -EIO;
4164 goto stop;
4165 }
4166 count = buf & DP_TEST_COUNT_MASK;
4167
4168 /*
4169 * Count might be reset during the loop. In this case
4170 * last known count needs to be reset as well.
4171 */
4172 if (count == 0)
4173 intel_dp->sink_crc.last_count = 0;
4174
4175 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4176 ret = -EIO;
4177 goto stop;
4178 }
4179
4180 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4181 !memcmp(intel_dp->sink_crc.last_crc, crc,
4182 6 * sizeof(u8)));
4183
4184 } while (--attempts && (count == 0 || old_equal_new));
4185
4186 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4187 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4188
4189 if (attempts == 0) {
4190 if (old_equal_new) {
4191 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4192 } else {
4193 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4194 ret = -ETIMEDOUT;
4195 goto stop;
4196 }
4197 }
4198
4199 stop:
4200 intel_dp_sink_crc_stop(intel_dp);
4201 return ret;
4202 }
4203
4204 static bool
4205 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4206 {
4207 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4208 DP_DEVICE_SERVICE_IRQ_VECTOR,
4209 sink_irq_vector, 1) == 1;
4210 }
4211
4212 static bool
4213 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4214 {
4215 int ret;
4216
4217 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4218 DP_SINK_COUNT_ESI,
4219 sink_irq_vector, 14);
4220 if (ret != 14)
4221 return false;
4222
4223 return true;
4224 }
4225
4226 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4227 {
4228 uint8_t test_result = DP_TEST_ACK;
4229 return test_result;
4230 }
4231
4232 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4233 {
4234 uint8_t test_result = DP_TEST_NAK;
4235 return test_result;
4236 }
4237
4238 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4239 {
4240 uint8_t test_result = DP_TEST_NAK;
4241 struct intel_connector *intel_connector = intel_dp->attached_connector;
4242 struct drm_connector *connector = &intel_connector->base;
4243
4244 if (intel_connector->detect_edid == NULL ||
4245 connector->edid_corrupt ||
4246 intel_dp->aux.i2c_defer_count > 6) {
4247 /* Check EDID read for NACKs, DEFERs and corruption
4248 * (DP CTS 1.2 Core r1.1)
4249 * 4.2.2.4 : Failed EDID read, I2C_NAK
4250 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4251 * 4.2.2.6 : EDID corruption detected
4252 * Use failsafe mode for all cases
4253 */
4254 if (intel_dp->aux.i2c_nack_count > 0 ||
4255 intel_dp->aux.i2c_defer_count > 0)
4256 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4257 intel_dp->aux.i2c_nack_count,
4258 intel_dp->aux.i2c_defer_count);
4259 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4260 } else {
4261 struct edid *block = intel_connector->detect_edid;
4262
4263 /* We have to write the checksum
4264 * of the last block read
4265 */
4266 block += intel_connector->detect_edid->extensions;
4267
4268 if (!drm_dp_dpcd_write(&intel_dp->aux,
4269 DP_TEST_EDID_CHECKSUM,
4270 &block->checksum,
4271 1))
4272 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4273
4274 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4275 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4276 }
4277
4278 /* Set test active flag here so userspace doesn't interrupt things */
4279 intel_dp->compliance_test_active = 1;
4280
4281 return test_result;
4282 }
4283
4284 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4285 {
4286 uint8_t test_result = DP_TEST_NAK;
4287 return test_result;
4288 }
4289
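/*
 * Annotation (not in the original source): automated compliance testing
 * is a DPCD handshake: the sink raises DP_AUTOMATED_TEST_REQUEST in its
 * IRQ vector, the source reads the requested test from DP_TEST_REQUEST,
 * runs it, and writes DP_TEST_ACK or DP_TEST_NAK to DP_TEST_RESPONSE.
 * Only the EDID-read test is really implemented here; link training is
 * acked unconditionally and the pattern tests are nacked.
 */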
4290 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4291 {
4292 uint8_t response = DP_TEST_NAK;
4293 uint8_t rxdata = 0;
4294 int status = 0;
4295
4296 intel_dp->compliance_test_active = 0;
4297 intel_dp->compliance_test_type = 0;
4298 intel_dp->compliance_test_data = 0;
4299
4300 intel_dp->aux.i2c_nack_count = 0;
4301 intel_dp->aux.i2c_defer_count = 0;
4302
4303 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4304 if (status <= 0) {
4305 DRM_DEBUG_KMS("Could not read test request from sink\n");
4306 goto update_status;
4307 }
4308
4309 switch (rxdata) {
4310 case DP_TEST_LINK_TRAINING:
4311 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4312 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4313 response = intel_dp_autotest_link_training(intel_dp);
4314 break;
4315 case DP_TEST_LINK_VIDEO_PATTERN:
4316 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4317 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4318 response = intel_dp_autotest_video_pattern(intel_dp);
4319 break;
4320 case DP_TEST_LINK_EDID_READ:
4321 DRM_DEBUG_KMS("EDID test requested\n");
4322 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4323 response = intel_dp_autotest_edid(intel_dp);
4324 break;
4325 case DP_TEST_LINK_PHY_TEST_PATTERN:
4326 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4327 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4328 response = intel_dp_autotest_phy_pattern(intel_dp);
4329 break;
4330 default:
4331 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4332 break;
4333 }
4334
4335 update_status:
4336 status = drm_dp_dpcd_write(&intel_dp->aux,
4337 DP_TEST_RESPONSE,
4338 &response, 1);
4339 if (status <= 0)
4340 DRM_DEBUG_KMS("Could not write test response to sink\n");
4341 }
4342
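/*
 * Annotation (not in the original source): on MST the sink reports
 * events through the DP_SINK_COUNT_ESI block (DPCD 0x2002 onwards)
 * instead of the legacy IRQ vector. Lane status sits at 0x200c, i.e.
 * esi[10], hence the drm_dp_channel_eq_ok(&esi[10], ...) check below;
 * everything else is forwarded to the topology manager and the handled
 * event bits are written back to acknowledge them.
 */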
4343 static int
4344 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4345 {
4346 bool bret;
4347
4348 if (intel_dp->is_mst) {
4349 u8 esi[16] = { 0 };
4350 int ret = 0;
4351 int retry;
4352 bool handled;
4353 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4354 go_again:
4355 if (bret == true) {
4356
4357 /* check link status - esi[10] = 0x200c */
4358 if (intel_dp->active_mst_links &&
4359 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4360 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4361 intel_dp_start_link_train(intel_dp);
4362 intel_dp_stop_link_train(intel_dp);
4363 }
4364
4365 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4366 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4367
4368 if (handled) {
4369 for (retry = 0; retry < 3; retry++) {
4370 int wret;
4371 wret = drm_dp_dpcd_write(&intel_dp->aux,
4372 DP_SINK_COUNT_ESI+1,
4373 &esi[1], 3);
4374 if (wret == 3) {
4375 break;
4376 }
4377 }
4378
4379 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4380 if (bret == true) {
4381 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4382 goto go_again;
4383 }
4384 } else
4385 ret = 0;
4386
4387 return ret;
4388 } else {
4389 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4390 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4391 intel_dp->is_mst = false;
4392 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4393 /* send a hotplug event */
4394 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4395 }
4396 }
4397 return -EINVAL;
4398 }
4399
4400 /*
4401 * According to DP spec
4402 * 5.1.2:
4403 * 1. Read DPCD
4404 * 2. Configure link according to Receiver Capabilities
4405 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4406 * 4. Check link status on receipt of hot-plug interrupt
4407 */
4408 static void
4409 intel_dp_check_link_status(struct intel_dp *intel_dp)
4410 {
4411 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4412 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4413 u8 sink_irq_vector;
4414 u8 link_status[DP_LINK_STATUS_SIZE];
4415
4416 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4417
4418 if (!intel_encoder->base.crtc)
4419 return;
4420
4421 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4422 return;
4423
4424 /* Try to read receiver status if the link appears to be up */
4425 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4426 return;
4427 }
4428
4429 /* Now read the DPCD to see if it's actually running */
4430 if (!intel_dp_get_dpcd(intel_dp)) {
4431 return;
4432 }
4433
4434 /* Try to read the source of the interrupt */
4435 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4436 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4437 /* Clear interrupt source */
4438 drm_dp_dpcd_writeb(&intel_dp->aux,
4439 DP_DEVICE_SERVICE_IRQ_VECTOR,
4440 sink_irq_vector);
4441
4442 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4443 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4444 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4445 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4446 }
4447
4448 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4449 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4450 intel_encoder->base.name);
4451 intel_dp_start_link_train(intel_dp);
4452 intel_dp_stop_link_train(intel_dp);
4453 }
4454 }
4455
4456 /* XXX this is probably wrong for multiple downstream ports */
4457 static enum drm_connector_status
4458 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4459 {
4460 uint8_t *dpcd = intel_dp->dpcd;
4461 uint8_t type;
4462
4463 if (!intel_dp_get_dpcd(intel_dp))
4464 return connector_status_disconnected;
4465
4466 /* if there's no downstream port, we're done */
4467 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4468 return connector_status_connected;
4469
4470 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4471 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4472 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4473 uint8_t reg;
4474
4475 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4476 &reg, 1) < 0)
4477 return connector_status_unknown;
4478
4479 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4480 : connector_status_disconnected;
4481 }
4482
4483 /* If no HPD, poke DDC gently */
4484 if (drm_probe_ddc(&intel_dp->aux.ddc))
4485 return connector_status_connected;
4486
4487 /* Well we tried, say unknown for unreliable port types */
4488 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4489 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4490 if (type == DP_DS_PORT_TYPE_VGA ||
4491 type == DP_DS_PORT_TYPE_NON_EDID)
4492 return connector_status_unknown;
4493 } else {
4494 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4495 DP_DWN_STRM_PORT_TYPE_MASK;
4496 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4497 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4498 return connector_status_unknown;
4499 }
4500
4501 /* Anything else is out of spec, warn and ignore */
4502 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4503 return connector_status_disconnected;
4504 }
4505
4506 static enum drm_connector_status
4507 edp_detect(struct intel_dp *intel_dp)
4508 {
4509 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4510 enum drm_connector_status status;
4511
4512 status = intel_panel_detect(dev);
4513 if (status == connector_status_unknown)
4514 status = connector_status_connected;
4515
4516 return status;
4517 }
4518
4519 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4520 struct intel_digital_port *port)
4521 {
4522 u32 bit;
4523
4524 switch (port->port) {
4525 case PORT_A:
4526 return true;
4527 case PORT_B:
4528 bit = SDE_PORTB_HOTPLUG;
4529 break;
4530 case PORT_C:
4531 bit = SDE_PORTC_HOTPLUG;
4532 break;
4533 case PORT_D:
4534 bit = SDE_PORTD_HOTPLUG;
4535 break;
4536 default:
4537 MISSING_CASE(port->port);
4538 return false;
4539 }
4540
4541 return I915_READ(SDEISR) & bit;
4542 }
4543
4544 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4545 struct intel_digital_port *port)
4546 {
4547 u32 bit;
4548
4549 switch (port->port) {
4550 case PORT_A:
4551 return true;
4552 case PORT_B:
4553 bit = SDE_PORTB_HOTPLUG_CPT;
4554 break;
4555 case PORT_C:
4556 bit = SDE_PORTC_HOTPLUG_CPT;
4557 break;
4558 case PORT_D:
4559 bit = SDE_PORTD_HOTPLUG_CPT;
4560 break;
4561 case PORT_E:
4562 bit = SDE_PORTE_HOTPLUG_SPT;
4563 break;
4564 default:
4565 MISSING_CASE(port->port);
4566 return false;
4567 }
4568
4569 return I915_READ(SDEISR) & bit;
4570 }
4571
4572 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4573 struct intel_digital_port *port)
4574 {
4575 u32 bit;
4576
4577 switch (port->port) {
4578 case PORT_B:
4579 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4580 break;
4581 case PORT_C:
4582 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4583 break;
4584 case PORT_D:
4585 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4586 break;
4587 default:
4588 MISSING_CASE(port->port);
4589 return false;
4590 }
4591
4592 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4593 }
4594
4595 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4596 struct intel_digital_port *port)
4597 {
4598 u32 bit;
4599
4600 switch (port->port) {
4601 case PORT_B:
4602 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4603 break;
4604 case PORT_C:
4605 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4606 break;
4607 case PORT_D:
4608 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4609 break;
4610 default:
4611 MISSING_CASE(port->port);
4612 return false;
4613 }
4614
4615 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4616 }
4617
4618 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4619 struct intel_digital_port *intel_dig_port)
4620 {
4621 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4622 enum port port;
4623 u32 bit;
4624
4625 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4626 switch (port) {
4627 case PORT_A:
4628 bit = BXT_DE_PORT_HP_DDIA;
4629 break;
4630 case PORT_B:
4631 bit = BXT_DE_PORT_HP_DDIB;
4632 break;
4633 case PORT_C:
4634 bit = BXT_DE_PORT_HP_DDIC;
4635 break;
4636 default:
4637 MISSING_CASE(port);
4638 return false;
4639 }
4640
4641 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4642 }
4643
4644 /*
4645 * intel_digital_port_connected - is the specified port connected?
4646 * @dev_priv: i915 private structure
4647 * @port: the port to test
4648 *
4649 * Return %true if @port is connected, %false otherwise.
4650 */
4651 static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4652 struct intel_digital_port *port)
4653 {
4654 if (HAS_PCH_IBX(dev_priv))
4655 return ibx_digital_port_connected(dev_priv, port);
4656 if (HAS_PCH_SPLIT(dev_priv))
4657 return cpt_digital_port_connected(dev_priv, port);
4658 else if (IS_BROXTON(dev_priv))
4659 return bxt_digital_port_connected(dev_priv, port);
4660 else if (IS_GM45(dev_priv))
4661 return gm45_digital_port_connected(dev_priv, port);
4662 else
4663 return g4x_digital_port_connected(dev_priv, port);
4664 }
4665
4666 static enum drm_connector_status
4667 ironlake_dp_detect(struct intel_dp *intel_dp)
4668 {
4669 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4670 struct drm_i915_private *dev_priv = dev->dev_private;
4671 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4672
4673 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4674 return connector_status_disconnected;
4675
4676 return intel_dp_detect_dpcd(intel_dp);
4677 }
4678
4679 static enum drm_connector_status
4680 g4x_dp_detect(struct intel_dp *intel_dp)
4681 {
4682 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4683 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4684
4685 /* Can't disconnect eDP, but you can close the lid... */
4686 if (is_edp(intel_dp)) {
4687 enum drm_connector_status status;
4688
4689 status = intel_panel_detect(dev);
4690 if (status == connector_status_unknown)
4691 status = connector_status_connected;
4692 return status;
4693 }
4694
4695 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4696 return connector_status_disconnected;
4697
4698 return intel_dp_detect_dpcd(intel_dp);
4699 }
4700
4701 static struct edid *
4702 intel_dp_get_edid(struct intel_dp *intel_dp)
4703 {
4704 struct intel_connector *intel_connector = intel_dp->attached_connector;
4705
4706 /* use cached edid if we have one */
4707 if (intel_connector->edid) {
4708 /* invalid edid */
4709 if (IS_ERR(intel_connector->edid))
4710 return NULL;
4711
4712 return drm_edid_duplicate(intel_connector->edid);
4713 } else
4714 return drm_get_edid(&intel_connector->base,
4715 &intel_dp->aux.ddc);
4716 }
4717
4718 static void
4719 intel_dp_set_edid(struct intel_dp *intel_dp)
4720 {
4721 struct intel_connector *intel_connector = intel_dp->attached_connector;
4722 struct edid *edid;
4723
4724 edid = intel_dp_get_edid(intel_dp);
4725 intel_connector->detect_edid = edid;
4726
4727 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4728 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4729 else
4730 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4731 }
4732
4733 static void
4734 intel_dp_unset_edid(struct intel_dp *intel_dp)
4735 {
4736 struct intel_connector *intel_connector = intel_dp->attached_connector;
4737
4738 kfree(intel_connector->detect_edid);
4739 intel_connector->detect_edid = NULL;
4740
4741 intel_dp->has_audio = false;
4742 }
4743
4744 static enum drm_connector_status
4745 intel_dp_detect(struct drm_connector *connector, bool force)
4746 {
4747 struct intel_dp *intel_dp = intel_attached_dp(connector);
4748 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4749 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4750 struct drm_device *dev = connector->dev;
4751 enum drm_connector_status status;
4752 enum intel_display_power_domain power_domain;
4753 bool ret;
4754 u8 sink_irq_vector;
4755
4756 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4757 connector->base.id, connector->name);
4758 intel_dp_unset_edid(intel_dp);
4759
4760 if (intel_dp->is_mst) {
4761 /* MST devices are disconnected from a monitor POV */
4762 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4763 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4764 return connector_status_disconnected;
4765 }
4766
4767 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4768 intel_display_power_get(to_i915(dev), power_domain);
4769
4770 /* Can't disconnect eDP, but you can close the lid... */
4771 if (is_edp(intel_dp))
4772 status = edp_detect(intel_dp);
4773 else if (HAS_PCH_SPLIT(dev))
4774 status = ironlake_dp_detect(intel_dp);
4775 else
4776 status = g4x_dp_detect(intel_dp);
4777 if (status != connector_status_connected)
4778 goto out;
4779
4780 intel_dp_probe_oui(intel_dp);
4781
4782 ret = intel_dp_probe_mst(intel_dp);
4783 if (ret) {
4784 /* if we are in MST mode then this connector
4785 won't appear connected or have anything with EDID on it */
4786 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4787 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4788 status = connector_status_disconnected;
4789 goto out;
4790 }
4791
4792 intel_dp_set_edid(intel_dp);
4793
4794 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4795 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4796 status = connector_status_connected;
4797
4798 /* Try to read the source of the interrupt */
4799 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4800 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4801 /* Clear interrupt source */
4802 drm_dp_dpcd_writeb(&intel_dp->aux,
4803 DP_DEVICE_SERVICE_IRQ_VECTOR,
4804 sink_irq_vector);
4805
4806 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4807 intel_dp_handle_test_request(intel_dp);
4808 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4809 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4810 }
4811
4812 out:
4813 intel_display_power_put(to_i915(dev), power_domain);
4814 return status;
4815 }
4816
4817 static void
4818 intel_dp_force(struct drm_connector *connector)
4819 {
4820 struct intel_dp *intel_dp = intel_attached_dp(connector);
4821 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4822 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4823 enum intel_display_power_domain power_domain;
4824
4825 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4826 connector->base.id, connector->name);
4827 intel_dp_unset_edid(intel_dp);
4828
4829 if (connector->status != connector_status_connected)
4830 return;
4831
4832 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4833 intel_display_power_get(dev_priv, power_domain);
4834
4835 intel_dp_set_edid(intel_dp);
4836
4837 intel_display_power_put(dev_priv, power_domain);
4838
4839 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4840 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4841 }
4842
4843 static int intel_dp_get_modes(struct drm_connector *connector)
4844 {
4845 struct intel_connector *intel_connector = to_intel_connector(connector);
4846 struct edid *edid;
4847
4848 edid = intel_connector->detect_edid;
4849 if (edid) {
4850 int ret = intel_connector_update_modes(connector, edid);
4851 if (ret)
4852 return ret;
4853 }
4854
4855 /* if eDP has no EDID, fall back to fixed mode */
4856 if (is_edp(intel_attached_dp(connector)) &&
4857 intel_connector->panel.fixed_mode) {
4858 struct drm_display_mode *mode;
4859
4860 mode = drm_mode_duplicate(connector->dev,
4861 intel_connector->panel.fixed_mode);
4862 if (mode) {
4863 drm_mode_probed_add(connector, mode);
4864 return 1;
4865 }
4866 }
4867
4868 return 0;
4869 }
4870
4871 static bool
4872 intel_dp_detect_audio(struct drm_connector *connector)
4873 {
4874 bool has_audio = false;
4875 struct edid *edid;
4876
4877 edid = to_intel_connector(connector)->detect_edid;
4878 if (edid)
4879 has_audio = drm_detect_monitor_audio(edid);
4880
4881 return has_audio;
4882 }
4883
4884 static int
4885 intel_dp_set_property(struct drm_connector *connector,
4886 struct drm_property *property,
4887 uint64_t val)
4888 {
4889 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4890 struct intel_connector *intel_connector = to_intel_connector(connector);
4891 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4892 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4893 int ret;
4894
4895 ret = drm_object_property_set_value(&connector->base, property, val);
4896 if (ret)
4897 return ret;
4898
4899 if (property == dev_priv->force_audio_property) {
4900 int i = val;
4901 bool has_audio;
4902
4903 if (i == intel_dp->force_audio)
4904 return 0;
4905
4906 intel_dp->force_audio = i;
4907
4908 if (i == HDMI_AUDIO_AUTO)
4909 has_audio = intel_dp_detect_audio(connector);
4910 else
4911 has_audio = (i == HDMI_AUDIO_ON);
4912
4913 if (has_audio == intel_dp->has_audio)
4914 return 0;
4915
4916 intel_dp->has_audio = has_audio;
4917 goto done;
4918 }
4919
4920 if (property == dev_priv->broadcast_rgb_property) {
4921 bool old_auto = intel_dp->color_range_auto;
4922 bool old_range = intel_dp->limited_color_range;
4923
4924 switch (val) {
4925 case INTEL_BROADCAST_RGB_AUTO:
4926 intel_dp->color_range_auto = true;
4927 break;
4928 case INTEL_BROADCAST_RGB_FULL:
4929 intel_dp->color_range_auto = false;
4930 intel_dp->limited_color_range = false;
4931 break;
4932 case INTEL_BROADCAST_RGB_LIMITED:
4933 intel_dp->color_range_auto = false;
4934 intel_dp->limited_color_range = true;
4935 break;
4936 default:
4937 return -EINVAL;
4938 }
4939
4940 if (old_auto == intel_dp->color_range_auto &&
4941 old_range == intel_dp->limited_color_range)
4942 return 0;
4943
4944 goto done;
4945 }
4946
4947 if (is_edp(intel_dp) &&
4948 property == connector->dev->mode_config.scaling_mode_property) {
4949 if (val == DRM_MODE_SCALE_NONE) {
4950 DRM_DEBUG_KMS("no scaling not supported\n");
4951 return -EINVAL;
4952 }
4953
4954 if (intel_connector->panel.fitting_mode == val) {
4955 /* the eDP scaling property is not changed */
4956 return 0;
4957 }
4958 intel_connector->panel.fitting_mode = val;
4959
4960 goto done;
4961 }
4962
4963 return -EINVAL;
4964
4965 done:
4966 if (intel_encoder->base.crtc)
4967 intel_crtc_restore_mode(intel_encoder->base.crtc);
4968
4969 return 0;
4970 }
4971
4972 static void
4973 intel_dp_connector_destroy(struct drm_connector *connector)
4974 {
4975 struct intel_connector *intel_connector = to_intel_connector(connector);
4976
4977 kfree(intel_connector->detect_edid);
4978
4979 if (!IS_ERR_OR_NULL(intel_connector->edid))
4980 kfree(intel_connector->edid);
4981
4982 /* Can't call is_edp() since the encoder may have been destroyed
4983 * already. */
4984 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4985 intel_panel_fini(&intel_connector->panel);
4986
4987 drm_connector_cleanup(connector);
4988 kfree(connector);
4989 }
4990
4991 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4992 {
4993 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4994 struct intel_dp *intel_dp = &intel_dig_port->dp;
4995
4996 drm_dp_aux_unregister(&intel_dp->aux);
4997 intel_dp_mst_encoder_cleanup(intel_dig_port);
4998 if (is_edp(intel_dp)) {
4999 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5000 /*
5001 * vdd might still be enabled due to the delayed vdd off.
5002 * Make sure vdd is actually turned off here.
5003 */
5004 pps_lock(intel_dp);
5005 edp_panel_vdd_off_sync(intel_dp);
5006 pps_unlock(intel_dp);
5007
5008 if (intel_dp->edp_notifier.notifier_call) {
5009 unregister_reboot_notifier(&intel_dp->edp_notifier);
5010 intel_dp->edp_notifier.notifier_call = NULL;
5011 }
5012 }
5013 drm_encoder_cleanup(encoder);
5014 kfree(intel_dig_port);
5015 }
5016
5017 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5018 {
5019 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5020
5021 if (!is_edp(intel_dp))
5022 return;
5023
5024 /*
5025 * vdd might still be enabled due to the delayed vdd off.
5026 * Make sure vdd is actually turned off here.
5027 */
5028 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5029 pps_lock(intel_dp);
5030 edp_panel_vdd_off_sync(intel_dp);
5031 pps_unlock(intel_dp);
5032 }
5033
5034 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5035 {
5036 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5037 struct drm_device *dev = intel_dig_port->base.base.dev;
5038 struct drm_i915_private *dev_priv = dev->dev_private;
5039 enum intel_display_power_domain power_domain;
5040
5041 lockdep_assert_held(&dev_priv->pps_mutex);
5042
5043 if (!edp_have_panel_vdd(intel_dp))
5044 return;
5045
5046 /*
5047 * The VDD bit needs a power domain reference, so if the bit is
5048 * already enabled when we boot or resume, grab this reference and
5049 * schedule a vdd off, so we don't hold on to the reference
5050 * indefinitely.
5051 */
5052 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5053 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
5054 intel_display_power_get(dev_priv, power_domain);
5055
5056 edp_panel_vdd_schedule_off(intel_dp);
5057 }
5058
5059 void intel_dp_encoder_reset(struct drm_encoder *encoder)
5060 {
5061 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
5062 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5063
5064 if (!HAS_DDI(dev_priv))
5065 intel_dp->DP = I915_READ(intel_dp->output_reg);
5066
5067 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5068 return;
5069
5070 pps_lock(intel_dp);
5071
5072 /*
5073 * Read out the current power sequencer assignment,
5074 * in case the BIOS did something with it.
5075 */
5076 if (IS_VALLEYVIEW(encoder->dev))
5077 vlv_initial_power_sequencer_setup(intel_dp);
5078
5079 intel_edp_panel_vdd_sanitize(intel_dp);
5080
5081 pps_unlock(intel_dp);
5082 }
5083
5084 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5085 .dpms = drm_atomic_helper_connector_dpms,
5086 .detect = intel_dp_detect,
5087 .force = intel_dp_force,
5088 .fill_modes = drm_helper_probe_single_connector_modes,
5089 .set_property = intel_dp_set_property,
5090 .atomic_get_property = intel_connector_atomic_get_property,
5091 .destroy = intel_dp_connector_destroy,
5092 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5093 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5094 };
5095
5096 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5097 .get_modes = intel_dp_get_modes,
5098 .mode_valid = intel_dp_mode_valid,
5099 .best_encoder = intel_best_encoder,
5100 };
5101
5102 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5103 .reset = intel_dp_encoder_reset,
5104 .destroy = intel_dp_encoder_destroy,
5105 };
5106
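/*
 * Annotation (not in the original source): a long HPD pulse means the
 * device may have been plugged or unplugged, so the full DPCD/MST probe
 * is redone; a short pulse is the sink requesting attention, so only the
 * link status and the MST event queue are checked. Either way the AUX
 * power domain is held for the duration of the DPCD traffic.
 */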
5107 enum irqreturn
5108 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5109 {
5110 struct intel_dp *intel_dp = &intel_dig_port->dp;
5111 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5112 struct drm_device *dev = intel_dig_port->base.base.dev;
5113 struct drm_i915_private *dev_priv = dev->dev_private;
5114 enum intel_display_power_domain power_domain;
5115 enum irqreturn ret = IRQ_NONE;
5116
5117 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5118 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5119 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5120
5121 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5122 /*
5123 * vdd off can generate a long pulse on eDP which
5124 * would require vdd on to handle it, and thus we
5125 * would end up in an endless cycle of
5126 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5127 */
5128 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5129 port_name(intel_dig_port->port));
5130 return IRQ_HANDLED;
5131 }
5132
5133 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5134 port_name(intel_dig_port->port),
5135 long_hpd ? "long" : "short");
5136
5137 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5138 intel_display_power_get(dev_priv, power_domain);
5139
5140 if (long_hpd) {
5141 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5142 goto mst_fail;
5143
5144 if (!intel_dp_get_dpcd(intel_dp)) {
5145 goto mst_fail;
5146 }
5147
5148 intel_dp_probe_oui(intel_dp);
5149
5150 if (!intel_dp_probe_mst(intel_dp)) {
5151 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5152 intel_dp_check_link_status(intel_dp);
5153 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5154 goto mst_fail;
5155 }
5156 } else {
5157 if (intel_dp->is_mst) {
5158 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5159 goto mst_fail;
5160 }
5161
5162 if (!intel_dp->is_mst) {
5163 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5164 intel_dp_check_link_status(intel_dp);
5165 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5166 }
5167 }
5168
5169 ret = IRQ_HANDLED;
5170
5171 goto put_power;
5172 mst_fail:
5173 	/* if we were in MST mode, and the device is not there, get out of MST mode */
5174 if (intel_dp->is_mst) {
5175 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5176 intel_dp->is_mst = false;
5177 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5178 }
5179 put_power:
5180 intel_display_power_put(dev_priv, power_domain);
5181
5182 return ret;
5183 }
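
/*
 * Dispatch sketch (illustrative, not part of this file): this function is
 * installed as the digital port's ->hpd_pulse hook in intel_dp_init()
 * below, and the hotplug handling code invokes it roughly as
 *
 *	struct intel_digital_port *dig_port =
 *		dev_priv->hotplug.irq_port[port];
 *
 *	if (dig_port && dig_port->hpd_pulse)
 *		ret = dig_port->hpd_pulse(dig_port, long_hpd);
 *
 * with long_hpd decoded from the pulse width reported by the hardware.
 */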
5184
5185 /* Return which DP Port should be selected for Transcoder DP control */
5186 int
5187 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5188 {
5189 struct drm_device *dev = crtc->dev;
5190 struct intel_encoder *intel_encoder;
5191 struct intel_dp *intel_dp;
5192
5193 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5194 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5195
5196 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5197 intel_encoder->type == INTEL_OUTPUT_EDP)
5198 return intel_dp->output_reg;
5199 }
5200
5201 return -1;
5202 }
5203
5204 /* check the VBT to see whether the eDP is on another port */
5205 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5206 {
5207 struct drm_i915_private *dev_priv = dev->dev_private;
5208 union child_device_config *p_child;
5209 int i;
5210 static const short port_mapping[] = {
5211 [PORT_B] = DVO_PORT_DPB,
5212 [PORT_C] = DVO_PORT_DPC,
5213 [PORT_D] = DVO_PORT_DPD,
5214 [PORT_E] = DVO_PORT_DPE,
5215 };
5216
5217 /*
5218 	 * eDP is not supported on g4x, so bail out early just
5219 	 * for a bit of extra safety in case the VBT is bonkers.
5220 */
5221 if (INTEL_INFO(dev)->gen < 5)
5222 return false;
5223
5224 if (port == PORT_A)
5225 return true;
5226
5227 if (!dev_priv->vbt.child_dev_num)
5228 return false;
5229
5230 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5231 p_child = dev_priv->vbt.child_dev + i;
5232
5233 if (p_child->common.dvo_port == port_mapping[port] &&
5234 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5235 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5236 return true;
5237 }
5238 return false;
5239 }
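
/*
 * Example (hypothetical VBT contents): a child device whose
 * common.dvo_port is DVO_PORT_DPC and whose common.device_type has the
 * eDP bits set makes the loop above report PORT_C as eDP, even though
 * eDP most commonly lives on PORT_A.
 */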
5240
5241 void
5242 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5243 {
5244 struct intel_connector *intel_connector = to_intel_connector(connector);
5245
5246 intel_attach_force_audio_property(connector);
5247 intel_attach_broadcast_rgb_property(connector);
5248 intel_dp->color_range_auto = true;
5249
5250 if (is_edp(intel_dp)) {
5251 drm_mode_create_scaling_mode_property(connector->dev);
5252 drm_object_attach_property(
5253 &connector->base,
5254 connector->dev->mode_config.scaling_mode_property,
5255 DRM_MODE_SCALE_ASPECT);
5256 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5257 }
5258 }
5259
5260 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5261 {
5262 intel_dp->last_power_cycle = jiffies;
5263 intel_dp->last_power_on = jiffies;
5264 intel_dp->last_backlight_off = jiffies;
5265 }
5266
5267 static void
5268 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5269 struct intel_dp *intel_dp)
5270 {
5271 struct drm_i915_private *dev_priv = dev->dev_private;
5272 struct edp_power_seq cur, vbt, spec,
5273 *final = &intel_dp->pps_delays;
5274 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5275 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5276
5277 lockdep_assert_held(&dev_priv->pps_mutex);
5278
5279 /* already initialized? */
5280 if (final->t11_t12 != 0)
5281 return;
5282
5283 if (IS_BROXTON(dev)) {
5284 /*
5285 * TODO: BXT has 2 sets of PPS registers.
5286 		 * The correct register set for Broxton needs to be
5287 		 * identified using the VBT; hardcoding set 0 for now.
5288 */
5289 pp_ctrl_reg = BXT_PP_CONTROL(0);
5290 pp_on_reg = BXT_PP_ON_DELAYS(0);
5291 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5292 } else if (HAS_PCH_SPLIT(dev)) {
5293 pp_ctrl_reg = PCH_PP_CONTROL;
5294 pp_on_reg = PCH_PP_ON_DELAYS;
5295 pp_off_reg = PCH_PP_OFF_DELAYS;
5296 pp_div_reg = PCH_PP_DIVISOR;
5297 } else {
5298 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5299
5300 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5301 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5302 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5303 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5304 }
5305
5306 /* Workaround: Need to write PP_CONTROL with the unlock key as
5307 * the very first thing. */
5308 pp_ctl = ironlake_get_pp_control(intel_dp);
5309
5310 pp_on = I915_READ(pp_on_reg);
5311 pp_off = I915_READ(pp_off_reg);
5312 if (!IS_BROXTON(dev)) {
5313 I915_WRITE(pp_ctrl_reg, pp_ctl);
5314 pp_div = I915_READ(pp_div_reg);
5315 }
5316
5317 /* Pull timing values out of registers */
5318 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5319 PANEL_POWER_UP_DELAY_SHIFT;
5320
5321 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5322 PANEL_LIGHT_ON_DELAY_SHIFT;
5323
5324 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5325 PANEL_LIGHT_OFF_DELAY_SHIFT;
5326
5327 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5328 PANEL_POWER_DOWN_DELAY_SHIFT;
5329
5330 if (IS_BROXTON(dev)) {
5331 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5332 BXT_POWER_CYCLE_DELAY_SHIFT;
5333 if (tmp > 0)
5334 cur.t11_t12 = (tmp - 1) * 1000;
5335 else
5336 cur.t11_t12 = 0;
5337 } else {
5338 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5339 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5340 }
5341
5342 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5343 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5344
5345 vbt = dev_priv->vbt.edp_pps;
5346
5347 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5348 * our hw here, which are all in 100usec. */
5349 spec.t1_t3 = 210 * 10;
5350 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5351 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5352 spec.t10 = 500 * 10;
5353 /* This one is special and actually in units of 100ms, but zero
5354 * based in the hw (so we need to add 100 ms). But the sw vbt
5355 	 * table multiplies it by 1000 to convert it to units of 100usec,
5356 * too. */
5357 spec.t11_t12 = (510 + 100) * 10;
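
	/*
	 * Worked example: the limit above decodes as (510 + 100) * 10 =
	 * 6100 hw units, i.e. 6100 * 100 us = 610 ms for the power cycle.
	 */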
5358
5359 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5360 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5361
5362 /* Use the max of the register settings and vbt. If both are
5363 * unset, fall back to the spec limits. */
5364 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5365 spec.field : \
5366 max(cur.field, vbt.field))
5367 assign_final(t1_t3);
5368 assign_final(t8);
5369 assign_final(t9);
5370 assign_final(t10);
5371 assign_final(t11_t12);
5372 #undef assign_final
5373
5374 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5375 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5376 intel_dp->backlight_on_delay = get_delay(t8);
5377 intel_dp->backlight_off_delay = get_delay(t9);
5378 intel_dp->panel_power_down_delay = get_delay(t10);
5379 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5380 #undef get_delay
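
	/*
	 * Worked example (illustrative numbers): with cur.t1_t3 == 0 and
	 * vbt.t1_t3 == 2100 (210 ms in 100 us units), assign_final() picks
	 * 2100, and get_delay() yields DIV_ROUND_UP(2100, 10) == 210, so
	 * panel_power_up_delay ends up in ms.
	 */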
5381
5382 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5383 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5384 intel_dp->panel_power_cycle_delay);
5385
5386 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5387 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5388 }
5389
5390 static void
5391 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5392 struct intel_dp *intel_dp)
5393 {
5394 struct drm_i915_private *dev_priv = dev->dev_private;
5395 u32 pp_on, pp_off, pp_div, port_sel = 0;
5396 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5397 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5398 enum port port = dp_to_dig_port(intel_dp)->port;
5399 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5400
5401 lockdep_assert_held(&dev_priv->pps_mutex);
5402
5403 if (IS_BROXTON(dev)) {
5404 /*
5405 * TODO: BXT has 2 sets of PPS registers.
5406 		 * The correct register set for Broxton needs to be
5407 		 * identified using the VBT; hardcoding set 0 for now.
5408 */
5409 pp_ctrl_reg = BXT_PP_CONTROL(0);
5410 pp_on_reg = BXT_PP_ON_DELAYS(0);
5411 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5412
5413 } else if (HAS_PCH_SPLIT(dev)) {
5414 pp_on_reg = PCH_PP_ON_DELAYS;
5415 pp_off_reg = PCH_PP_OFF_DELAYS;
5416 pp_div_reg = PCH_PP_DIVISOR;
5417 } else {
5418 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5419
5420 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5421 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5422 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5423 }
5424
5425 /*
5426 * And finally store the new values in the power sequencer. The
5427 * backlight delays are set to 1 because we do manual waits on them. For
5428 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5429 * we'll end up waiting for the backlight off delay twice: once when we
5430 * do the manual sleep, and once when we disable the panel and wait for
5431 * the PP_STATUS bit to become zero.
5432 */
5433 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5434 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5435 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5436 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5437 /* Compute the divisor for the pp clock, simply match the Bspec
5438 * formula. */
5439 if (IS_BROXTON(dev)) {
5440 pp_div = I915_READ(pp_ctrl_reg);
5441 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5442 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5443 << BXT_POWER_CYCLE_DELAY_SHIFT);
5444 } else {
5445 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5446 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5447 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5448 }
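
	/*
	 * Round-trip example (illustrative): seq->t11_t12 == 5000 (500 ms)
	 * encodes as DIV_ROUND_UP(5001, 1000) == 6 on BXT, which the readout
	 * in intel_dp_init_panel_power_sequencer() decodes back to
	 * (6 - 1) * 1000 == 5000.
	 */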
5449
5450 /* Haswell doesn't have any port selection bits for the panel
5451 * power sequencer any more. */
5452 if (IS_VALLEYVIEW(dev)) {
5453 port_sel = PANEL_PORT_SELECT_VLV(port);
5454 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5455 if (port == PORT_A)
5456 port_sel = PANEL_PORT_SELECT_DPA;
5457 else
5458 port_sel = PANEL_PORT_SELECT_DPD;
5459 }
5460
5461 pp_on |= port_sel;
5462
5463 I915_WRITE(pp_on_reg, pp_on);
5464 I915_WRITE(pp_off_reg, pp_off);
5465 if (IS_BROXTON(dev))
5466 I915_WRITE(pp_ctrl_reg, pp_div);
5467 else
5468 I915_WRITE(pp_div_reg, pp_div);
5469
5470 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5471 I915_READ(pp_on_reg),
5472 I915_READ(pp_off_reg),
5473 IS_BROXTON(dev) ?
5474 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5475 I915_READ(pp_div_reg));
5476 }
5477
5478 /**
5479 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5480 * @dev: DRM device
5481 * @refresh_rate: RR to be programmed
5482 *
5483 * This function gets called when refresh rate (RR) has to be changed from
5484 * one frequency to another. Switches can be between high and low RR
5485 * supported by the panel or to any other RR based on media playback (in
5486 * this case, RR value needs to be passed from user space).
5487 *
5488 * The caller of this function needs to take a lock on dev_priv->drrs.
5489 */
5490 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5491 {
5492 struct drm_i915_private *dev_priv = dev->dev_private;
5493 struct intel_encoder *encoder;
5494 struct intel_digital_port *dig_port = NULL;
5495 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5496 struct intel_crtc_state *config = NULL;
5497 struct intel_crtc *intel_crtc = NULL;
5498 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5499
5500 if (refresh_rate <= 0) {
5501 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5502 return;
5503 }
5504
5505 if (intel_dp == NULL) {
5506 DRM_DEBUG_KMS("DRRS not supported.\n");
5507 return;
5508 }
5509
5510 /*
5511 * FIXME: This needs proper synchronization with psr state for some
5512 * platforms that cannot have PSR and DRRS enabled at the same time.
5513 */
5514
5515 dig_port = dp_to_dig_port(intel_dp);
5516 encoder = &dig_port->base;
5517 intel_crtc = to_intel_crtc(encoder->base.crtc);
5518
5519 if (!intel_crtc) {
5520 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5521 return;
5522 }
5523
5524 config = intel_crtc->config;
5525
5526 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5527 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5528 return;
5529 }
5530
5531 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5532 refresh_rate)
5533 index = DRRS_LOW_RR;
5534
5535 if (index == dev_priv->drrs.refresh_rate_type) {
5536 DRM_DEBUG_KMS(
5537 "DRRS requested for previously set RR...ignoring\n");
5538 return;
5539 }
5540
5541 if (!intel_crtc->active) {
5542 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5543 return;
5544 }
5545
5546 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5547 switch (index) {
5548 case DRRS_HIGH_RR:
5549 intel_dp_set_m_n(intel_crtc, M1_N1);
5550 break;
5551 case DRRS_LOW_RR:
5552 intel_dp_set_m_n(intel_crtc, M2_N2);
5553 break;
5554 case DRRS_MAX_RR:
5555 default:
5556 			DRM_ERROR("Unsupported refresh rate type\n");
5557 }
5558 } else if (INTEL_INFO(dev)->gen > 6) {
5559 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5560 u32 val;
5561
5562 val = I915_READ(reg);
5563 if (index > DRRS_HIGH_RR) {
5564 if (IS_VALLEYVIEW(dev))
5565 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5566 else
5567 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5568 } else {
5569 if (IS_VALLEYVIEW(dev))
5570 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5571 else
5572 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5573 }
5574 I915_WRITE(reg, val);
5575 }
5576
5577 dev_priv->drrs.refresh_rate_type = index;
5578
5579 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5580 }
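
/*
 * Usage sketch (hedged): per the kernel-doc above, callers serialize the
 * switch against the delayed downclock work via the DRRS mutex, e.g.
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv->dev, refresh_rate);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */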
5581
5582 /**
5583 * intel_edp_drrs_enable - init drrs struct if supported
5584 * @intel_dp: DP struct
5585 *
5586 * Initializes frontbuffer_bits and drrs.dp
5587 */
5588 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5589 {
5590 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5591 struct drm_i915_private *dev_priv = dev->dev_private;
5592 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5593 struct drm_crtc *crtc = dig_port->base.base.crtc;
5594 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5595
5596 if (!intel_crtc->config->has_drrs) {
5597 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5598 return;
5599 }
5600
5601 mutex_lock(&dev_priv->drrs.mutex);
5602 if (WARN_ON(dev_priv->drrs.dp)) {
5603 DRM_ERROR("DRRS already enabled\n");
5604 goto unlock;
5605 }
5606
5607 dev_priv->drrs.busy_frontbuffer_bits = 0;
5608
5609 dev_priv->drrs.dp = intel_dp;
5610
5611 unlock:
5612 mutex_unlock(&dev_priv->drrs.mutex);
5613 }
5614
5615 /**
5616 * intel_edp_drrs_disable - Disable DRRS
5617 * @intel_dp: DP struct
5618 *
5619 */
5620 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5621 {
5622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5623 struct drm_i915_private *dev_priv = dev->dev_private;
5624 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5625 struct drm_crtc *crtc = dig_port->base.base.crtc;
5626 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5627
5628 if (!intel_crtc->config->has_drrs)
5629 return;
5630
5631 mutex_lock(&dev_priv->drrs.mutex);
5632 if (!dev_priv->drrs.dp) {
5633 mutex_unlock(&dev_priv->drrs.mutex);
5634 return;
5635 }
5636
5637 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5638 intel_dp_set_drrs_state(dev_priv->dev,
5639 intel_dp->attached_connector->panel.
5640 fixed_mode->vrefresh);
5641
5642 dev_priv->drrs.dp = NULL;
5643 mutex_unlock(&dev_priv->drrs.mutex);
5644
5645 cancel_delayed_work_sync(&dev_priv->drrs.work);
5646 }
5647
5648 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5649 {
5650 struct drm_i915_private *dev_priv =
5651 container_of(work, typeof(*dev_priv), drrs.work.work);
5652 struct intel_dp *intel_dp;
5653
5654 mutex_lock(&dev_priv->drrs.mutex);
5655
5656 intel_dp = dev_priv->drrs.dp;
5657
5658 if (!intel_dp)
5659 goto unlock;
5660
5661 /*
5662 	 * The delayed work can race with an invalidate, hence we need to
5663 * recheck.
5664 */
5665
5666 if (dev_priv->drrs.busy_frontbuffer_bits)
5667 goto unlock;
5668
5669 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5670 intel_dp_set_drrs_state(dev_priv->dev,
5671 intel_dp->attached_connector->panel.
5672 downclock_mode->vrefresh);
5673
5674 unlock:
5675 mutex_unlock(&dev_priv->drrs.mutex);
5676 }
5677
5678 /**
5679 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5680 * @dev: DRM device
5681 * @frontbuffer_bits: frontbuffer plane tracking bits
5682 *
5683 * This function gets called every time rendering on the given planes starts.
5684 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
5685 *
5686 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5687 */
5688 void intel_edp_drrs_invalidate(struct drm_device *dev,
5689 unsigned frontbuffer_bits)
5690 {
5691 struct drm_i915_private *dev_priv = dev->dev_private;
5692 struct drm_crtc *crtc;
5693 enum pipe pipe;
5694
5695 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5696 return;
5697
5698 cancel_delayed_work(&dev_priv->drrs.work);
5699
5700 mutex_lock(&dev_priv->drrs.mutex);
5701 if (!dev_priv->drrs.dp) {
5702 mutex_unlock(&dev_priv->drrs.mutex);
5703 return;
5704 }
5705
5706 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5707 pipe = to_intel_crtc(crtc)->pipe;
5708
5709 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5710 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5711
5712 /* invalidate means busy screen hence upclock */
5713 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5714 intel_dp_set_drrs_state(dev_priv->dev,
5715 dev_priv->drrs.dp->attached_connector->panel.
5716 fixed_mode->vrefresh);
5717
5718 mutex_unlock(&dev_priv->drrs.mutex);
5719 }
5720
5721 /**
5722 * intel_edp_drrs_flush - Restart Idleness DRRS
5723 * @dev: DRM device
5724 * @frontbuffer_bits: frontbuffer plane tracking bits
5725 *
5726 * This function gets called every time rendering on the given planes has
5727 * completed or a flip on a crtc is completed. So DRRS should be upclocked
5728 * (LOW_RR -> HIGH_RR), and idleness detection should be restarted
5729 * if no other planes are dirty.
5730 *
5731 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5732 */
5733 void intel_edp_drrs_flush(struct drm_device *dev,
5734 unsigned frontbuffer_bits)
5735 {
5736 struct drm_i915_private *dev_priv = dev->dev_private;
5737 struct drm_crtc *crtc;
5738 enum pipe pipe;
5739
5740 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5741 return;
5742
5743 cancel_delayed_work(&dev_priv->drrs.work);
5744
5745 mutex_lock(&dev_priv->drrs.mutex);
5746 if (!dev_priv->drrs.dp) {
5747 mutex_unlock(&dev_priv->drrs.mutex);
5748 return;
5749 }
5750
5751 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5752 pipe = to_intel_crtc(crtc)->pipe;
5753
5754 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5755 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5756
5757 /* flush means busy screen hence upclock */
5758 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5759 intel_dp_set_drrs_state(dev_priv->dev,
5760 dev_priv->drrs.dp->attached_connector->panel.
5761 fixed_mode->vrefresh);
5762
5763 /*
5764 * flush also means no more activity hence schedule downclock, if all
5765 * other fbs are quiescent too
5766 */
5767 if (!dev_priv->drrs.busy_frontbuffer_bits)
5768 schedule_delayed_work(&dev_priv->drrs.work,
5769 msecs_to_jiffies(1000));
5770 mutex_unlock(&dev_priv->drrs.mutex);
5771 }
5772
5773 /**
5774 * DOC: Display Refresh Rate Switching (DRRS)
5775 *
5776 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5777 * which enables switching between low and high refresh rates,
5778 * dynamically, based on the usage scenario. This feature is applicable
5779 * for internal panels.
5780 *
5781 * Indication that the panel supports DRRS is given by the panel EDID, which
5782 * would list multiple refresh rates for one resolution.
5783 *
5784 * DRRS is of 2 types - static and seamless.
5785 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5786 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5787 * Seamless DRRS involves changing RR without any visual effect to the user
5788 * and can be used during normal system usage. This is done by programming
5789 * certain registers.
5790 *
5791 * Support for static/seamless DRRS may be indicated in the VBT based on
5792 * inputs from the panel spec.
5793 *
5794 * DRRS saves power by switching to low RR based on usage scenarios.
5795 *
5796 * eDP DRRS:
5797 * The implementation is based on frontbuffer tracking implementation.
5798 * When there is a disturbance on the screen triggered by user activity or a
5799 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5800 * When there is no movement on screen, after a timeout of 1 second, a switch
5801 * to low RR is made.
5802 * For integration with frontbuffer tracking code,
5803 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5804 *
5805 * DRRS can be further extended to support other internal panels and also
5806 * the scenario of video playback wherein RR is set based on the rate
5807 * requested by userspace.
5808 */
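
/*
 * Integration sketch (hedged; the real call sites live in the frontbuffer
 * tracking code): a rendering path brackets frontbuffer activity roughly as
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... CPU/GPU rendering to the frontbuffer ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *
 * so the panel runs at high RR while busy and, one second after the last
 * flush leaves no busy bits, is downclocked again by the delayed work.
 */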
5809
5810 /**
5811 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5812 * @intel_connector: eDP connector
5813 * @fixed_mode: preferred mode of panel
5814 *
5815 * This function is called only once at driver load to initialize the basic
5816 * DRRS state (delayed work and mutex).
5817 *
5818 * Returns:
5819 * Downclock mode if panel supports it, else return NULL.
5820 * DRRS support is determined by the presence of downclock mode (apart
5821 * from VBT setting).
5822 */
5823 static struct drm_display_mode *
5824 intel_dp_drrs_init(struct intel_connector *intel_connector,
5825 struct drm_display_mode *fixed_mode)
5826 {
5827 struct drm_connector *connector = &intel_connector->base;
5828 struct drm_device *dev = connector->dev;
5829 struct drm_i915_private *dev_priv = dev->dev_private;
5830 struct drm_display_mode *downclock_mode = NULL;
5831
5832 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5833 mutex_init(&dev_priv->drrs.mutex);
5834
5835 if (INTEL_INFO(dev)->gen <= 6) {
5836 		DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
5837 return NULL;
5838 }
5839
5840 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5841 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5842 return NULL;
5843 }
5844
5845 	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5846 						    connector);
5847
5848 if (!downclock_mode) {
5849 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5850 return NULL;
5851 }
5852
5853 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5854
5855 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5856 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5857 return downclock_mode;
5858 }
5859
5860 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5861 struct intel_connector *intel_connector)
5862 {
5863 struct drm_connector *connector = &intel_connector->base;
5864 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5865 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5866 struct drm_device *dev = intel_encoder->base.dev;
5867 struct drm_i915_private *dev_priv = dev->dev_private;
5868 struct drm_display_mode *fixed_mode = NULL;
5869 struct drm_display_mode *downclock_mode = NULL;
5870 bool has_dpcd;
5871 struct drm_display_mode *scan;
5872 struct edid *edid;
5873 enum pipe pipe = INVALID_PIPE;
5874
5875 if (!is_edp(intel_dp))
5876 return true;
5877
5878 pps_lock(intel_dp);
5879 intel_edp_panel_vdd_sanitize(intel_dp);
5880 pps_unlock(intel_dp);
5881
5882 /* Cache DPCD and EDID for edp. */
5883 has_dpcd = intel_dp_get_dpcd(intel_dp);
5884
5885 if (has_dpcd) {
5886 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5887 dev_priv->no_aux_handshake =
5888 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5889 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5890 } else {
5891 /* if this fails, presume the device is a ghost */
5892 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5893 return false;
5894 }
5895
5896 /* We now know it's not a ghost, init power sequence regs. */
5897 pps_lock(intel_dp);
5898 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5899 pps_unlock(intel_dp);
5900
5901 mutex_lock(&dev->mode_config.mutex);
5902 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5903 if (edid) {
5904 if (drm_add_edid_modes(connector, edid)) {
5905 drm_mode_connector_update_edid_property(connector,
5906 edid);
5907 drm_edid_to_eld(connector, edid);
5908 } else {
5909 kfree(edid);
5910 edid = ERR_PTR(-EINVAL);
5911 }
5912 } else {
5913 edid = ERR_PTR(-ENOENT);
5914 }
5915 intel_connector->edid = edid;
5916
5917 /* prefer fixed mode from EDID if available */
5918 list_for_each_entry(scan, &connector->probed_modes, head) {
5919 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5920 fixed_mode = drm_mode_duplicate(dev, scan);
5921 downclock_mode = intel_dp_drrs_init(
5922 intel_connector, fixed_mode);
5923 break;
5924 }
5925 }
5926
5927 /* fallback to VBT if available for eDP */
5928 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5929 fixed_mode = drm_mode_duplicate(dev,
5930 dev_priv->vbt.lfp_lvds_vbt_mode);
5931 if (fixed_mode)
5932 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5933 }
5934 mutex_unlock(&dev->mode_config.mutex);
5935
5936 if (IS_VALLEYVIEW(dev)) {
5937 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5938 register_reboot_notifier(&intel_dp->edp_notifier);
5939
5940 /*
5941 * Figure out the current pipe for the initial backlight setup.
5942 * If the current pipe isn't valid, try the PPS pipe, and if that
5943 * fails just assume pipe A.
5944 */
5945 if (IS_CHERRYVIEW(dev))
5946 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5947 else
5948 pipe = PORT_TO_PIPE(intel_dp->DP);
5949
5950 if (pipe != PIPE_A && pipe != PIPE_B)
5951 pipe = intel_dp->pps_pipe;
5952
5953 if (pipe != PIPE_A && pipe != PIPE_B)
5954 pipe = PIPE_A;
5955
5956 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5957 pipe_name(pipe));
5958 }
5959
5960 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5961 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5962 intel_panel_setup_backlight(connector, pipe);
5963
5964 return true;
5965 }
5966
5967 bool
5968 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5969 struct intel_connector *intel_connector)
5970 {
5971 struct drm_connector *connector = &intel_connector->base;
5972 struct intel_dp *intel_dp = &intel_dig_port->dp;
5973 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5974 struct drm_device *dev = intel_encoder->base.dev;
5975 struct drm_i915_private *dev_priv = dev->dev_private;
5976 enum port port = intel_dig_port->port;
5977 int type;
5978
5979 intel_dp->pps_pipe = INVALID_PIPE;
5980
5981 /* intel_dp vfuncs */
5982 if (INTEL_INFO(dev)->gen >= 9)
5983 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5984 else if (IS_VALLEYVIEW(dev))
5985 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5986 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5987 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5988 else if (HAS_PCH_SPLIT(dev))
5989 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5990 else
5991 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5992
5993 if (INTEL_INFO(dev)->gen >= 9)
5994 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5995 else
5996 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5997
5998 /* Preserve the current hw state. */
5999 intel_dp->DP = I915_READ(intel_dp->output_reg);
6000 intel_dp->attached_connector = intel_connector;
6001
6002 if (intel_dp_is_edp(dev, port))
6003 type = DRM_MODE_CONNECTOR_eDP;
6004 else
6005 type = DRM_MODE_CONNECTOR_DisplayPort;
6006
6007 /*
6008 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6009 * for DP the encoder type can be set by the caller to
6010 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6011 */
6012 if (type == DRM_MODE_CONNECTOR_eDP)
6013 intel_encoder->type = INTEL_OUTPUT_EDP;
6014
6015 /* eDP only on port B and/or C on vlv/chv */
6016 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6017 port != PORT_B && port != PORT_C))
6018 return false;
6019
6020 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6021 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6022 port_name(port));
6023
6024 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6025 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6026
6027 connector->interlace_allowed = true;
6028 	connector->doublescan_allowed = false;
6029
6030 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6031 edp_panel_vdd_work);
6032
6033 intel_connector_attach_encoder(intel_connector, intel_encoder);
6034 drm_connector_register(connector);
6035
6036 if (HAS_DDI(dev))
6037 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6038 else
6039 intel_connector->get_hw_state = intel_connector_get_hw_state;
6040 intel_connector->unregister = intel_dp_connector_unregister;
6041
6042 /* Set up the hotplug pin. */
6043 switch (port) {
6044 case PORT_A:
6045 intel_encoder->hpd_pin = HPD_PORT_A;
6046 break;
6047 case PORT_B:
6048 intel_encoder->hpd_pin = HPD_PORT_B;
6049 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6050 intel_encoder->hpd_pin = HPD_PORT_A;
6051 break;
6052 case PORT_C:
6053 intel_encoder->hpd_pin = HPD_PORT_C;
6054 break;
6055 case PORT_D:
6056 intel_encoder->hpd_pin = HPD_PORT_D;
6057 break;
6058 case PORT_E:
6059 intel_encoder->hpd_pin = HPD_PORT_E;
6060 break;
6061 default:
6062 BUG();
6063 }
6064
6065 if (is_edp(intel_dp)) {
6066 pps_lock(intel_dp);
6067 intel_dp_init_panel_power_timestamps(intel_dp);
6068 if (IS_VALLEYVIEW(dev))
6069 vlv_initial_power_sequencer_setup(intel_dp);
6070 else
6071 intel_dp_init_panel_power_sequencer(dev, intel_dp);
6072 pps_unlock(intel_dp);
6073 }
6074
6075 intel_dp_aux_init(intel_dp, intel_connector);
6076
6077 /* init MST on ports that can support it */
6078 if (HAS_DP_MST(dev) &&
6079 (port == PORT_B || port == PORT_C || port == PORT_D))
6080 intel_dp_mst_encoder_init(intel_dig_port,
6081 intel_connector->base.base.id);
6082
6083 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6084 drm_dp_aux_unregister(&intel_dp->aux);
6085 if (is_edp(intel_dp)) {
6086 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6087 /*
6088 			 * vdd might still be enabled due to the delayed vdd off.
6089 * Make sure vdd is actually turned off here.
6090 */
6091 pps_lock(intel_dp);
6092 edp_panel_vdd_off_sync(intel_dp);
6093 pps_unlock(intel_dp);
6094 }
6095 drm_connector_unregister(connector);
6096 drm_connector_cleanup(connector);
6097 return false;
6098 }
6099
6100 intel_dp_add_properties(intel_dp, connector);
6101
6102 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6103 * 0xd. Failure to do so will result in spurious interrupts being
6104 * generated on the port when a cable is not attached.
6105 */
6106 if (IS_G4X(dev) && !IS_GM45(dev)) {
6107 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6108 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6109 }
6110
6111 i915_debugfs_connector_add(connector);
6112
6113 return true;
6114 }
6115
6116 bool intel_dp_init(struct drm_device *dev,
6117 int output_reg,
6118 enum port port)
6119 {
6120 struct drm_i915_private *dev_priv = dev->dev_private;
6121 struct intel_digital_port *intel_dig_port;
6122 struct intel_encoder *intel_encoder;
6123 struct drm_encoder *encoder;
6124 struct intel_connector *intel_connector;
6125
6126 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6127 if (!intel_dig_port)
6128 return false;
6129
6130 intel_connector = intel_connector_alloc();
6131 if (!intel_connector)
6132 goto err_connector_alloc;
6133
6134 intel_encoder = &intel_dig_port->base;
6135 encoder = &intel_encoder->base;
6136
6137 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6138 DRM_MODE_ENCODER_TMDS);
6139
6140 intel_encoder->compute_config = intel_dp_compute_config;
6141 intel_encoder->disable = intel_disable_dp;
6142 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6143 intel_encoder->get_config = intel_dp_get_config;
6144 intel_encoder->suspend = intel_dp_encoder_suspend;
6145 if (IS_CHERRYVIEW(dev)) {
6146 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6147 intel_encoder->pre_enable = chv_pre_enable_dp;
6148 intel_encoder->enable = vlv_enable_dp;
6149 intel_encoder->post_disable = chv_post_disable_dp;
6150 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6151 } else if (IS_VALLEYVIEW(dev)) {
6152 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6153 intel_encoder->pre_enable = vlv_pre_enable_dp;
6154 intel_encoder->enable = vlv_enable_dp;
6155 intel_encoder->post_disable = vlv_post_disable_dp;
6156 } else {
6157 intel_encoder->pre_enable = g4x_pre_enable_dp;
6158 intel_encoder->enable = g4x_enable_dp;
6159 if (INTEL_INFO(dev)->gen >= 5)
6160 intel_encoder->post_disable = ilk_post_disable_dp;
6161 }
6162
6163 intel_dig_port->port = port;
6164 intel_dig_port->dp.output_reg = output_reg;
6165
6166 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6167 if (IS_CHERRYVIEW(dev)) {
6168 if (port == PORT_D)
6169 intel_encoder->crtc_mask = 1 << 2;
6170 else
6171 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6172 } else {
6173 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6174 }
6175 intel_encoder->cloneable = 0;
6176
6177 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6178 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6179
6180 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6181 goto err_init_connector;
6182
6183 return true;
6184
6185 err_init_connector:
6186 drm_encoder_cleanup(encoder);
6187 kfree(intel_connector);
6188 err_connector_alloc:
6189 kfree(intel_dig_port);
6190 return false;
6191 }
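
/*
 * Usage sketch (hedged; the actual call sites live in the platform output
 * setup code): a DP port is typically registered during display init as
 *
 *	if (I915_READ(DP_B) & DP_DETECTED)
 *		intel_dp_init(dev, DP_B, PORT_B);
 */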
6192
6193 void intel_dp_mst_suspend(struct drm_device *dev)
6194 {
6195 struct drm_i915_private *dev_priv = dev->dev_private;
6196 int i;
6197
6198 /* disable MST */
6199 for (i = 0; i < I915_MAX_PORTS; i++) {
6200 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6201 if (!intel_dig_port)
6202 continue;
6203
6204 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6205 if (!intel_dig_port->dp.can_mst)
6206 continue;
6207 if (intel_dig_port->dp.is_mst)
6208 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6209 }
6210 }
6211 }
6212
6213 void intel_dp_mst_resume(struct drm_device *dev)
6214 {
6215 struct drm_i915_private *dev_priv = dev->dev_private;
6216 int i;
6217
6218 for (i = 0; i < I915_MAX_PORTS; i++) {
6219 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6220 if (!intel_dig_port)
6221 continue;
6222 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6223 int ret;
6224
6225 if (!intel_dig_port->dp.can_mst)
6226 continue;
6227
6228 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6229 if (ret != 0) {
6230 intel_dp_check_mst_status(&intel_dig_port->dp);
6231 }
6232 }
6233 }
6234 }
6235