/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT (10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED  (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD   (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE   (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
        int clock;
        struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * The table below provides only the fixed rates, excluding the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming fractional division for m2.
         * m2 is stored in fixed-point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { 540000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
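
/*
 * Sanity check of the fixed-point encoding above: for the 162000 entry,
 * (m2_int << 22) | m2_fraction = (32 << 22) | 1677722
 *                              = 0x8000000 | 0x19999a = 0x819999a,
 * which matches the m2 value in the table.
 */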

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
                                 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
                                 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
        case DP_LINK_BW_5_4:
                break;
        default:
                WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
                     max_link_bw);
                max_link_bw = DP_LINK_BW_1_62;
                break;
        }
        return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        u8 source_max, sink_max;

        source_max = intel_dig_port->max_lanes;
        sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

        return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 * 270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        return (pixel_clock * bpp + 9) / 10;
}
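
/*
 * Worked example, mirroring the units comment above: 1680x1050R has a
 * ->clock of 119000 kHz; at 18bpp, intel_dp_link_required(119000, 18)
 * returns (119000 * 18 + 9) / 10 == 214200 decakilobits, i.e. ~2.142 Gbit/s.
 */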

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        return (max_link_clock * max_lanes * 8) / 10;
}
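
/*
 * Worked example: one 2.7 GHz lane gives (270000 * 1 * 8) / 10 == 216000,
 * just enough for the 214200 decakilobits required above; four HBR2 lanes
 * give (540000 * 4 * 8) / 10 == 1728000.
 */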

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int max_dotclk = dev_priv->max_dotclk_freq;
        int ds_max_dotclk;

        int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

        if (type != DP_DS_PORT_TYPE_VGA)
                return max_dotclk;

        ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
                                                    intel_dp->downstream_ports);

        if (ds_max_dotclk != 0)
                max_dotclk = min(max_dotclk, ds_max_dotclk);

        return max_dotclk;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;

        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate || target_clock > max_dotclk)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        int i;
        uint32_t v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((uint32_t)src[i]) << ((3 - i) * 8);
        return v;
}
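
/*
 * Example: intel_dp_pack_aux((const uint8_t []){ 0x12, 0x34, 0x56 }, 3)
 * returns 0x12345600 -- bytes are packed MSB-first into the 32-bit AUX
 * data register, and intel_dp_unpack_aux() below performs the inverse.
 */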

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;

        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp,
                                              bool force_disable_vdd);
static void
intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() for why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_aux_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_aux_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        uint32_t DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power sequencer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev))
                DP |= DP_PIPE_SELECT_CHV(pipe);
        else if (pipe == PIPE_B)
                DP |= DP_PIPEB_SELECT;

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable it temporarily if it's not already enabled.
         */
        if (!pll_enabled) {
                release_cl_override = IS_CHERRYVIEW(dev) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
                                     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
                        DRM_ERROR("Failed to force on pll for pipe %c!\n",
                                  pipe_name(pipe));
                        return;
                }
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even the VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        if (!pll_enabled) {
                vlv_force_pll_off(dev, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have a power sequencer currently.
         * Pick one that's not used by another port.
         */
        for_each_intel_encoder(dev, encoder) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);

        /*
         * Even the VDD force bit doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        /*
         * TODO: BXT has 2 PPS instances. The correct port->PPS instance
         * mapping needs to be retrieved from VBT; for now just hard-code
         * instance #0 always.
         */
        if (!intel_dp->pps_reset)
                return 0;

        intel_dp->pps_reset = false;

        /*
         * Only the HW needs to be reprogrammed; the SW state is fixed and
         * has been set up during connector init.
         */
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);

        return 0;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
                    !IS_BROXTON(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        for_each_intel_encoder(dev, encoder) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                if (IS_BROXTON(dev))
                        intel_dp->pps_reset = true;
                else
                        intel_dp->pps_pipe = INVALID_PIPE;
        }
}

struct pps_registers {
        i915_reg_t pp_ctrl;
        i915_reg_t pp_stat;
        i915_reg_t pp_on;
        i915_reg_t pp_off;
        i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
                                    struct intel_dp *intel_dp,
                                    struct pps_registers *regs)
{
        int pps_idx = 0;

        memset(regs, 0, sizeof(*regs));

        if (IS_BROXTON(dev_priv))
                pps_idx = bxt_power_sequencer_idx(intel_dp);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                pps_idx = vlv_power_sequencer_pipe(intel_dp);

        regs->pp_ctrl = PP_CONTROL(pps_idx);
        regs->pp_stat = PP_STATUS(pps_idx);
        regs->pp_on = PP_ON_DELAYS(pps_idx);
        regs->pp_off = PP_OFF_DELAYS(pps_idx);
        if (!IS_BROXTON(dev_priv))
                regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
                                &regs);

        return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
                                &regs);

        return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power, to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not to be
 * tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                i915_reg_t pp_ctrl_reg, pp_div_reg;
                u32 pp_div;

                pp_ctrl_reg = PP_CONTROL(pipe);
                pp_div_reg = PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}

static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

        if (index)
                return 0;

        /*
         * The clock divider is based off the hrawclk, and would like to run
         * at 2MHz. So, take the hrawclk value, divide by 2000, and use that.
         */
        return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

        if (index)
                return 0;

        /*
         * The clock divider is based off the cdclk or PCH rawclk, and would
         * like to run at 2MHz. So, take the cdclk or PCH rawclk value, divide
         * by 2000, and use that.
         */
        if (intel_dig_port->port == PORT_A)
                return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
        else
                return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

        if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        }

        return ilk_get_aux_clock_divider(intel_dp, index);
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug into the existing code.
         */
        return index ? 0 : 1;
}

static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
                                     bool has_aux_irq,
                                     int send_bytes,
                                     uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                     bool has_aux_irq,
                                     int send_bytes,
                                     uint32_t unused)
{
        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               DP_AUX_CH_CTL_TIME_OUT_1600us |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;

        pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                static u32 last_status = -1;
                const u32 status = I915_READ(ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
                                           intel_dp_pack_aux(send + i,
                                                             send_bytes - i));

                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
                                   status |
                                   DP_AUX_CH_CTL_DONE |
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);

                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
                         * a 400us delay is required for errors and timeouts.
                         * Timeout errors from the HW already meet this
                         * requirement, so skip to the next iteration.
                         */
                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected.
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

        /*
         * By BSpec: "Message sizes of 0 or >20 are not allowed."
         * We have no idea what happened, so we return -EBUSY so the
         * drm layer takes care of the necessary retries.
         */
        if (recv_bytes == 0 || recv_bytes > 20) {
                DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
                              recv_bytes);
                /*
                 * FIXME: This patch was created on top of a series that
                 * organizes the retries at the drm level. There, EBUSY should
                 * also take care of the 1 ms wait before retrying.
                 * That aux retry reorganization is still needed, and once it
                 * is merged we can remove this sleep from here.
                 */
                usleep_range(1000, 1500);
                ret = -EBUSY;
                goto out;
        }

        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);

        return ret;
}

#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        txbuf[0] = (msg->request << 4) |
                   ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
        case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                WARN_ON(!msg->buffer != !msg->size);

                if (msg->buffer)
                        memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;

                        if (ret > 1) {
                                /* Number of bytes written in a short write. */
                                ret = clamp_t(int, rxbuf[1], 0, msg->size);
                        } else {
                                /* Return payload size. */
                                ret = msg->size;
                        }
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                rxsize = msg->size + 1;

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
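
/*
 * Example header encoding, derived from the code above (AUX request codes
 * per drm_dp_helper.h): a native AUX read of one byte from DPCD address
 * 0x000 (msg->request == DP_AUX_NATIVE_READ, msg->address == 0x000,
 * msg->size == 1) produces txbuf[] = { 0x90, 0x00, 0x00, 0x00 } -- the
 * request code in the high nibble of byte 0, the 20-bit address spread
 * across bytes 0-2, and size - 1 in byte 3.
 */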

static enum port intel_aux_port(struct drm_i915_private *dev_priv,
                                enum port port)
{
        const struct ddi_vbt_port_info *info =
                &dev_priv->vbt.ddi_port_info[port];
        enum port aux_port;

        if (!info->alternate_aux_channel) {
                DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
                              port_name(port), port_name(port));
                return port;
        }

        switch (info->alternate_aux_channel) {
        case DP_AUX_A:
                aux_port = PORT_A;
                break;
        case DP_AUX_B:
                aux_port = PORT_B;
                break;
        case DP_AUX_C:
                aux_port = PORT_C;
                break;
        case DP_AUX_D:
                aux_port = PORT_D;
                break;
        default:
                MISSING_CASE(info->alternate_aux_channel);
                aux_port = PORT_A;
                break;
        }

        DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
                      port_name(aux_port), port_name(port));

        return aux_port;
}

static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
                                  enum port port)
{
        switch (port) {
        case PORT_B:
        case PORT_C:
        case PORT_D:
                return DP_AUX_CH_CTL(port);
        default:
                MISSING_CASE(port);
                return DP_AUX_CH_CTL(PORT_B);
        }
}

static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
                                   enum port port, int index)
{
        switch (port) {
        case PORT_B:
        case PORT_C:
        case PORT_D:
                return DP_AUX_CH_DATA(port, index);
        default:
                MISSING_CASE(port);
                return DP_AUX_CH_DATA(PORT_B, index);
        }
}

static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
                                  enum port port)
{
        switch (port) {
        case PORT_A:
                return DP_AUX_CH_CTL(port);
        case PORT_B:
        case PORT_C:
        case PORT_D:
                return PCH_DP_AUX_CH_CTL(port);
        default:
                MISSING_CASE(port);
                return DP_AUX_CH_CTL(PORT_A);
        }
}

static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
                                   enum port port, int index)
{
        switch (port) {
        case PORT_A:
                return DP_AUX_CH_DATA(port, index);
        case PORT_B:
        case PORT_C:
        case PORT_D:
                return PCH_DP_AUX_CH_DATA(port, index);
        default:
                MISSING_CASE(port);
                return DP_AUX_CH_DATA(PORT_A, index);
        }
}

static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
                                  enum port port)
{
        switch (port) {
        case PORT_A:
        case PORT_B:
        case PORT_C:
        case PORT_D:
                return DP_AUX_CH_CTL(port);
        default:
                MISSING_CASE(port);
                return DP_AUX_CH_CTL(PORT_A);
        }
}

static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
                                   enum port port, int index)
{
        switch (port) {
        case PORT_A:
        case PORT_B:
        case PORT_C:
        case PORT_D:
                return DP_AUX_CH_DATA(port, index);
        default:
                MISSING_CASE(port);
                return DP_AUX_CH_DATA(PORT_A, index);
        }
}

static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
                                    enum port port)
{
        if (INTEL_INFO(dev_priv)->gen >= 9)
                return skl_aux_ctl_reg(dev_priv, port);
        else if (HAS_PCH_SPLIT(dev_priv))
                return ilk_aux_ctl_reg(dev_priv, port);
        else
                return g4x_aux_ctl_reg(dev_priv, port);
}

static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
                                     enum port port, int index)
{
        if (INTEL_INFO(dev_priv)->gen >= 9)
                return skl_aux_data_reg(dev_priv, port, index);
        else if (HAS_PCH_SPLIT(dev_priv))
                return ilk_aux_data_reg(dev_priv, port, index);
        else
                return g4x_aux_data_reg(dev_priv, port, index);
}

static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        enum port port = intel_aux_port(dev_priv,
                                        dp_to_dig_port(intel_dp)->port);
        int i;

        intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
        for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
                intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
        kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;

        intel_aux_reg_init(intel_dp);
        drm_dp_aux_init(&intel_dp->aux);

        /* Failure to allocate our preferred name is not critical */
        intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
        intel_dp->aux.transfer = intel_dp_aux_transfer;
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
        if (intel_dp->num_sink_rates) {
                *sink_rates = intel_dp->sink_rates;
                return intel_dp->num_sink_rates;
        }

        *sink_rates = default_rates;

        return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
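
/*
 * The (max_link_bw >> 3) + 1 above maps the DPCD link-bw codes (per
 * drm_dp_helper.h) onto a count of entries in default_rates[]:
 * DP_LINK_BW_1_62 (0x06) -> 1, DP_LINK_BW_2_7 (0x0a) -> 2,
 * DP_LINK_BW_5_4 (0x14) -> 3.
 */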

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;

        /* WaDisableHBR2:skl */
        if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
                return false;

        if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
            (INTEL_INFO(dev)->gen >= 9))
                return true;
        else
                return false;
}

static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        int size;

        if (IS_BROXTON(dev)) {
                *source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
        } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
                *source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
        } else {
                *source_rates = default_rates;
                size = ARRAY_SIZE(default_rates);
        }

        /* This depends on the fact that 5.4 is the last value in the array */
        if (!intel_dp_source_supports_hbr2(intel_dp))
                size--;

        return size;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = encoder->base.dev;
        const struct dp_link_dpll *divisor = NULL;
        int i, count = 0;

        if (IS_G4X(dev)) {
                divisor = gen4_dpll;
                count = ARRAY_SIZE(gen4_dpll);
        } else if (HAS_PCH_SPLIT(dev)) {
                divisor = pch_dpll;
                count = ARRAY_SIZE(pch_dpll);
        } else if (IS_CHERRYVIEW(dev)) {
                divisor = chv_dpll;
                count = ARRAY_SIZE(chv_dpll);
        } else if (IS_VALLEYVIEW(dev)) {
                divisor = vlv_dpll;
                count = ARRAY_SIZE(vlv_dpll);
        }

        if (divisor && count) {
                for (i = 0; i < count; i++) {
                        if (pipe_config->port_clock == divisor[i].clock) {
                                pipe_config->dpll = divisor[i].dpll;
                                pipe_config->clock_set = true;
                                break;
                        }
                }
        }
}

static int intersect_rates(const int *source_rates, int source_len,
                           const int *sink_rates, int sink_len,
                           int *common_rates)
{
        int i = 0, j = 0, k = 0;

        while (i < source_len && j < sink_len) {
                if (source_rates[i] == sink_rates[j]) {
                        if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
                                return k;
                        common_rates[k] = source_rates[i];
                        ++k;
                        ++i;
                        ++j;
                } else if (source_rates[i] < sink_rates[j]) {
                        ++i;
                } else {
                        ++j;
                }
        }
        return k;
}
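
/*
 * Example: intersecting source { 162000, 270000, 540000 } with sink
 * { 162000, 270000 } writes { 162000, 270000 } into common_rates and
 * returns 2. Both input arrays must be sorted ascending for this
 * merge-style walk to find every common rate.
 */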

static int intel_dp_common_rates(struct intel_dp *intel_dp,
                                 int *common_rates)
{
        const int *source_rates, *sink_rates;
        int source_len, sink_len;

        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
        source_len = intel_dp_source_rates(intel_dp, &source_rates);

        return intersect_rates(source_rates, source_len,
                               sink_rates, sink_len,
                               common_rates);
}

static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
                if (r >= len)
                        return;
                str += r;
                len -= r;
        }
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
        const int *source_rates, *sink_rates;
        int source_len, sink_len, common_len;
        int common_rates[DP_MAX_SUPPORTED_RATES];
        char str[128]; /* FIXME: too big for stack? */

        if ((drm_debug & DRM_UT_KMS) == 0)
                return;

        source_len = intel_dp_source_rates(intel_dp, &source_rates);
        snprintf_int_array(str, sizeof(str), source_rates, source_len);
        DRM_DEBUG_KMS("source rates: %s\n", str);

        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
        snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
        DRM_DEBUG_KMS("sink rates: %s\n", str);

        common_len = intel_dp_common_rates(intel_dp, common_rates);
        snprintf_int_array(str, sizeof(str), common_rates, common_len);
        DRM_DEBUG_KMS("common rates: %s\n", str);
}

static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
{
        uint8_t rev;
        int len;

        if ((drm_debug & DRM_UT_KMS) == 0)
                return;

        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return;

        len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
        if (len < 0)
                return;

        DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
}

static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
{
        uint8_t rev[2];
        int len;

        if ((drm_debug & DRM_UT_KMS) == 0)
                return;

        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return;

        len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
        if (len < 0)
                return;

        DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
}

static int rate_to_index(int find, const int *rates)
{
        int i = 0;

        for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
                if (find == rates[i])
                        break;

        return i;
}
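
/*
 * Example: rate_to_index(270000, default_rates) returns 1. If the rate is
 * not present, the loop falls through and DP_MAX_SUPPORTED_RATES is
 * returned, so callers are expected to pass a rate known to be in the
 * table.
 */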

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
        int rates[DP_MAX_SUPPORTED_RATES] = {};
        int len;

        len = intel_dp_common_rates(intel_dp, rates);
        if (WARN_ON(len <= 0))
                return 162000;

        return rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
        return rate_to_index(rate, intel_dp->sink_rates);
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
                           uint8_t *link_bw, uint8_t *rate_select)
{
        if (intel_dp->num_sink_rates) {
                *link_bw = 0;
                *rate_select =
                        intel_dp_rate_select(intel_dp, port_clock);
        } else {
                *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
                *rate_select = 0;
        }
}
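
/*
 * Example: a sink that reports an explicit rate table gets link_bw == 0
 * and rate_select set to the table index (eDP 1.4 style), while a legacy
 * sink with port_clock == 270000 gets link_bw == 0x0a (DP_LINK_BW_2_7,
 * i.e. 270000 / 27000 per drm_dp_link_rate_to_bw_code()) and
 * rate_select == 0.
 */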

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
                                struct intel_crtc_state *pipe_config)
{
        int bpp, bpc;

        bpp = pipe_config->pipe_bpp;
        bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

        if (bpc > 0)
                bpp = min(bpp, 3*bpc);

        return bpp;
}
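
/*
 * Example: if a downstream device (e.g. a DP-to-VGA dongle) reports a max
 * of 6 bpc, a pipe_bpp of 24 is clamped to 3 * 6 == 18 bpp here.
 */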

bool
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config,
                        struct drm_connector_state *conn_state)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int min_lane_count = 1;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        /* Conveniently, the link BW constants become indices with a shift... */
        int min_clock = 0;
        int max_clock;
        int bpp, mode_rate;
        int link_avail, link_clock;
        int common_rates[DP_MAX_SUPPORTED_RATES] = {};
        int common_len;
        uint8_t link_bw, rate_select;

        common_len = intel_dp_common_rates(intel_dp, common_rates);

        /* No common link rates between source and sink */
        WARN_ON(common_len <= 0);

        max_clock = common_len - 1;

        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
                pipe_config->has_pch_encoder = true;

        pipe_config->has_drrs = false;
        pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);

                if (INTEL_INFO(dev)->gen >= 9) {
                        int ret;

                        ret = skl_update_scaler_crtc(pipe_config);
                        if (ret)
                                return ret;
                }

                if (HAS_GMCH_DISPLAY(dev))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 intel_connector->panel.fitting_mode);
                else
                        intel_pch_panel_fitting(intel_crtc, pipe_config,
                                                intel_connector->panel.fitting_mode);
        }

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return false;

        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %d pixel clock %iKHz\n",
                      max_lane_count, common_rates[max_clock],
                      adjusted_mode->crtc_clock);

        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
        if (is_edp(intel_dp)) {

                /* Get bpp from VBT only for panels that don't have bpp in EDID */
                if (intel_connector->base.display_info.bpc == 0 &&
                    (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
                        DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
                                      dev_priv->vbt.edp.bpp);
                        bpp = dev_priv->vbt.edp.bpp;
                }

                /*
                 * Use the maximum clock and number of lanes the eDP panel
                 * advertises being capable of. The panels are generally
                 * designed to support only a single clock and lane
                 * configuration, and typically these values correspond to the
                 * native resolution of the panel.
                 */
                min_lane_count = max_lane_count;
                min_clock = max_clock;
        }

        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);

                for (clock = min_clock; clock <= max_clock; clock++) {
                        for (lane_count = min_lane_count;
                             lane_count <= max_lane_count;
                             lane_count <<= 1) {

                                link_clock = common_rates[clock];
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);

                                if (mode_rate <= link_avail) {
                                        goto found;
                                }
                        }
                }
        }

        return false;

found:
        if (intel_dp->color_range_auto) {
                /*
                 * See:
                 * CEA-861-E - 5.1 Default Encoding Parameters
                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
                 */
                pipe_config->limited_color_range =
                        bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
        } else {
                pipe_config->limited_color_range =
                        intel_dp->limited_color_range;
        }

        pipe_config->lane_count = lane_count;

        pipe_config->pipe_bpp = bpp;
        pipe_config->port_clock = common_rates[clock];

        intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
                              &link_bw, &rate_select);

        DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
                      link_bw, rate_select, pipe_config->lane_count,
                      pipe_config->port_clock, bpp);
        DRM_DEBUG_KMS("DP link bw required %i available %i\n",
                      mode_rate, link_avail);

        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n);

        if (intel_connector->panel.downclock_mode != NULL &&
            dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
                pipe_config->has_drrs = true;
                intel_link_compute_m_n(bpp, lane_count,
                                       intel_connector->panel.downclock_mode->clock,
                                       pipe_config->port_clock,
                                       &pipe_config->dp_m2_n2);
        }

        /*
         * DPLL0 VCO may need to be adjusted to get the correct
         * clock for eDP. This will affect cdclk as well.
         */
        if (is_edp(intel_dp) &&
            (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
                int vco;

                switch (pipe_config->port_clock / 2) {
                case 108000:
                case 216000:
                        vco = 8640000;
                        break;
                default:
                        vco = 8100000;
                        break;
                }

                to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
        }

        if (!HAS_DDI(dev))
                intel_dp_set_clock(encoder, pipe_config);

        return true;
}
1729
intel_dp_set_link_params(struct intel_dp * intel_dp,int link_rate,uint8_t lane_count,bool link_mst)1730 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1731 int link_rate, uint8_t lane_count,
1732 bool link_mst)
1733 {
1734 intel_dp->link_rate = link_rate;
1735 intel_dp->lane_count = lane_count;
1736 intel_dp->link_mst = link_mst;
1737 }
1738
intel_dp_prepare(struct intel_encoder * encoder,struct intel_crtc_state * pipe_config)1739 static void intel_dp_prepare(struct intel_encoder *encoder,
1740 struct intel_crtc_state *pipe_config)
1741 {
1742 struct drm_device *dev = encoder->base.dev;
1743 struct drm_i915_private *dev_priv = to_i915(dev);
1744 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1745 enum port port = dp_to_dig_port(intel_dp)->port;
1746 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1747 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1748
1749 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
1750 pipe_config->lane_count,
1751 intel_crtc_has_type(pipe_config,
1752 INTEL_OUTPUT_DP_MST));
1753
1754 /*
1755 * There are four kinds of DP registers:
1756 *
1757 * IBX PCH
1758 * SNB CPU
1759 * IVB CPU
1760 * CPT PCH
1761 *
1762 * IBX PCH and CPU are the same for almost everything,
1763 * except that the CPU DP PLL is configured in this
1764 * register
1765 *
1766 * CPT PCH is quite different, having many bits moved
1767 * to the TRANS_DP_CTL register instead. That
1768 * configuration happens (oddly) in ironlake_pch_enable
1769 */
1770
1771 /* Preserve the BIOS-computed detected bit. This is
1772 * supposed to be read-only.
1773 */
1774 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1775
1776 /* Handle DP bits in common between all three register formats */
1777 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1778 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
1779
1780 /* Split out the IBX/CPU vs CPT settings */
1781
1782 if (IS_GEN7(dev) && port == PORT_A) {
1783 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1784 intel_dp->DP |= DP_SYNC_HS_HIGH;
1785 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1786 intel_dp->DP |= DP_SYNC_VS_HIGH;
1787 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1788
1789 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1790 intel_dp->DP |= DP_ENHANCED_FRAMING;
1791
1792 intel_dp->DP |= crtc->pipe << 29;
1793 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1794 u32 trans_dp;
1795
1796 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1797
1798 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1799 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1800 trans_dp |= TRANS_DP_ENH_FRAMING;
1801 else
1802 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1803 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1804 } else {
1805 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1806 !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
1807 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1808
1809 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1810 intel_dp->DP |= DP_SYNC_HS_HIGH;
1811 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1812 intel_dp->DP |= DP_SYNC_VS_HIGH;
1813 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1814
1815 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1816 intel_dp->DP |= DP_ENHANCED_FRAMING;
1817
1818 if (IS_CHERRYVIEW(dev))
1819 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1820 else if (crtc->pipe == PIPE_B)
1821 intel_dp->DP |= DP_PIPEB_SELECT;
1822 }
1823 }
1824
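/*
 * PP_STATUS mask/value pairs describing the panel power sequencer
 * states that wait_panel_status() below polls for.
 */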
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
				   struct intel_dp *intel_dp);

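/*
 * Poll PP_STATUS until (status & mask) == value, logging an error if
 * the sequencer does not reach the expected state within the timeout.
 */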
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(dev_priv, intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (intel_wait_for_register(dev_priv,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* Take the difference of the current time and the panel power off
	 * time, then make the panel wait out t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
			intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	if (WARN_ON(!HAS_DDI(dev_priv) &&
		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
}

static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}

void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}


static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}

/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}

/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}

/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}

/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	bool is_enabled;

	pps_lock(intel_dp);
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	pps_unlock(intel_dp);

	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}

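/* Assert that the DP port enable bit matches the expected state. */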
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)

static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)

static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
		intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}

	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}

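/*
 * Read out whether the DP port is enabled and, if so, which pipe it is
 * attached to, using the platform-specific pipe select encoding.
 */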
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

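/*
 * Read the current pipe configuration (sync polarities, color range,
 * lane count, m/n values and port clock) back from the hardware.
 */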
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}

static void intel_disable_dp(struct intel_encoder *encoder,
			     struct intel_crtc_state *old_crtc_state,
			     struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_GEN(dev_priv) < 5)
		intel_dp_link_down(intel_dp);
}

static void ilk_post_disable_dp(struct intel_encoder *encoder,
				struct intel_crtc_state *old_crtc_state,
				struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}

static void vlv_post_disable_dp(struct intel_encoder *encoder,
				struct intel_crtc_state *old_crtc_state,
				struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}

static void chv_post_disable_dp(struct intel_encoder *encoder,
				struct intel_crtc_state *old_crtc_state,
				struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}

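/*
 * Program the requested link training pattern, handling the three
 * register layouts: DDI (DP_TP_CTL), IVB port A/CPT, and the
 * g4x/VLV/CHV port register.
 */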
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & DP_TRAINING_PATTERN_MASK);

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}

static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}

static void intel_enable_dp(struct intel_encoder *encoder,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp, pipe_config);

	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}

static void g4x_enable_dp(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config,
			  struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder, pipe_config);
	intel_edp_backlight_on(intel_dp);
}

static void vlv_enable_dp(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config,
			  struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}

static void g4x_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp, pipe_config);
}

static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV, on the other hand, doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's always clear the
	 * port select when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}

static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}

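/*
 * Bind the power sequencer of our pipe to this port, first detaching it
 * from any previous owner so VDD is never left enabled behind our back.
 */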
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}

static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder);

	intel_enable_dp(encoder, pipe_config);
}

static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder);
}

static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder);

	intel_enable_dp(encoder, pipe_config);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}

static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder);
}

static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    struct intel_crtc_state *pipe_config,
				    struct drm_connector_state *conn_state)
{
	chv_phy_post_pll_disable(encoder);
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}

/* These are source-specific values. */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}

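/*
 * Translate the requested vswing/pre-emphasis from train_set[0] into
 * the VLV PHY register values and program them.
 */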
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}

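/* Same translation for the CHV PHY de-emphasis/margin registers. */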
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}

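/* Gen4's DP voltage swing and pre-emphasis control */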
static uint32_t
gen4_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

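/*
 * Pick the platform-specific signal level encoding for the current
 * train_set and write it to the port register.
 */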
3346 void
intel_dp_set_signal_levels(struct intel_dp * intel_dp)3347 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3348 {
3349 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3350 enum port port = intel_dig_port->port;
3351 struct drm_device *dev = intel_dig_port->base.base.dev;
3352 struct drm_i915_private *dev_priv = to_i915(dev);
3353 uint32_t signal_levels, mask = 0;
3354 uint8_t train_set = intel_dp->train_set[0];
3355
3356 if (HAS_DDI(dev)) {
3357 signal_levels = ddi_signal_levels(intel_dp);
3358
3359 if (IS_BROXTON(dev))
3360 signal_levels = 0;
3361 else
3362 mask = DDI_BUF_EMP_MASK;
3363 } else if (IS_CHERRYVIEW(dev)) {
3364 signal_levels = chv_signal_levels(intel_dp);
3365 } else if (IS_VALLEYVIEW(dev)) {
3366 signal_levels = vlv_signal_levels(intel_dp);
3367 } else if (IS_GEN7(dev) && port == PORT_A) {
3368 signal_levels = gen7_edp_signal_levels(train_set);
3369 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3370 } else if (IS_GEN6(dev) && port == PORT_A) {
3371 signal_levels = gen6_edp_signal_levels(train_set);
3372 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3373 } else {
3374 signal_levels = gen4_signal_levels(train_set);
3375 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3376 }
3377
3378 if (mask)
3379 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3380
3381 DRM_DEBUG_KMS("Using vswing level %d\n",
3382 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3383 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3384 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3385 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3386
3387 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3388
3389 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3390 POSTING_READ(intel_dp->output_reg);
3391 }
3392
3393 void
intel_dp_program_link_training_pattern(struct intel_dp * intel_dp,uint8_t dp_train_pat)3394 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3395 uint8_t dp_train_pat)
3396 {
3397 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3398 struct drm_i915_private *dev_priv =
3399 to_i915(intel_dig_port->base.base.dev);
3400
3401 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3402
3403 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3404 POSTING_READ(intel_dp->output_reg);
3405 }
3406
intel_dp_set_idle_link_train(struct intel_dp * intel_dp)3407 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3408 {
3409 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3410 struct drm_device *dev = intel_dig_port->base.base.dev;
3411 struct drm_i915_private *dev_priv = to_i915(dev);
3412 enum port port = intel_dig_port->port;
3413 uint32_t val;
3414
3415 if (!HAS_DDI(dev))
3416 return;
3417
3418 val = I915_READ(DP_TP_CTL(port));
3419 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3420 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3421 I915_WRITE(DP_TP_CTL(port), val);
3422
3423 /*
3424 * On PORT_A we can have only eDP in SST mode. There the only reason
3425 * we need to set idle transmission mode is to work around a HW issue
3426 * where we enable the pipe while not in idle link-training mode.
3427 	 * In this case there is a requirement to wait for a minimum number of
3428 * idle patterns to be sent.
3429 */
3430 if (port == PORT_A)
3431 return;
3432
3433 	if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
3434 DP_TP_STATUS_IDLE_DONE,
3435 DP_TP_STATUS_IDLE_DONE,
3436 1))
3437 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3438 }
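/*
 * For reference, a minimal open-coded equivalent of the
 * intel_wait_for_register() call above (sketch only; the real helper
 * also chooses between busy-waiting and sleeping):
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(1);
 *
 *	do {
 *		if (I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE)
 *			return;	// enough idle patterns were sent
 *	} while (time_before(jiffies, timeout));
 *
 *	DRM_ERROR("Timed out waiting for DP idle patterns\n");
 */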
3439
3440 static void
3441 intel_dp_link_down(struct intel_dp *intel_dp)
3442 {
3443 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3444 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3445 enum port port = intel_dig_port->port;
3446 struct drm_device *dev = intel_dig_port->base.base.dev;
3447 struct drm_i915_private *dev_priv = to_i915(dev);
3448 uint32_t DP = intel_dp->DP;
3449
3450 if (WARN_ON(HAS_DDI(dev)))
3451 return;
3452
3453 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3454 return;
3455
3456 DRM_DEBUG_KMS("\n");
3457
3458 if ((IS_GEN7(dev) && port == PORT_A) ||
3459 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3460 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3461 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3462 } else {
3463 if (IS_CHERRYVIEW(dev))
3464 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3465 else
3466 DP &= ~DP_LINK_TRAIN_MASK;
3467 DP |= DP_LINK_TRAIN_PAT_IDLE;
3468 }
3469 I915_WRITE(intel_dp->output_reg, DP);
3470 POSTING_READ(intel_dp->output_reg);
3471
3472 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3473 I915_WRITE(intel_dp->output_reg, DP);
3474 POSTING_READ(intel_dp->output_reg);
3475
3476 /*
3477 * HW workaround for IBX, we need to move the port
3478 * to transcoder A after disabling it to allow the
3479 * matching HDMI port to be enabled on transcoder A.
3480 */
3481 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3482 /*
3483 * We get CPU/PCH FIFO underruns on the other pipe when
3484 * doing the workaround. Sweep them under the rug.
3485 */
3486 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3487 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3488
3489 /* always enable with pattern 1 (as per spec) */
3490 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3491 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3492 I915_WRITE(intel_dp->output_reg, DP);
3493 POSTING_READ(intel_dp->output_reg);
3494
3495 DP &= ~DP_PORT_EN;
3496 I915_WRITE(intel_dp->output_reg, DP);
3497 POSTING_READ(intel_dp->output_reg);
3498
3499 intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
3500 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3501 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3502 }
3503
3504 msleep(intel_dp->panel_power_down_delay);
3505
3506 intel_dp->DP = DP;
3507 }
3508
3509 static bool
3510 intel_dp_read_dpcd(struct intel_dp *intel_dp)
3511 {
3512 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3513 sizeof(intel_dp->dpcd)) < 0)
3514 return false; /* aux transfer failed */
3515
3516 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3517
3518 return intel_dp->dpcd[DP_DPCD_REV] != 0;
3519 }
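/*
 * Note on the return-value convention used above: drm_dp_dpcd_read()
 * returns the number of bytes transferred on success or a negative
 * errno, so callers may test either "< 0" (any AUX failure), as here,
 * or "!= size" to also reject short transfers. A sketch of the
 * stricter form:
 *
 *	u8 rev;
 *
 *	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DPCD_REV, &rev, 1) != 1)
 *		return false;	// AUX failed or short read
 */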
3520
3521 static bool
3522 intel_edp_init_dpcd(struct intel_dp *intel_dp)
3523 {
3524 struct drm_i915_private *dev_priv =
3525 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3526
3527 /* this function is meant to be called only once */
3528 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
3529
3530 if (!intel_dp_read_dpcd(intel_dp))
3531 return false;
3532
3533 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3534 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3535 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3536
3537 /* Check if the panel supports PSR */
3538 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
3539 intel_dp->psr_dpcd,
3540 sizeof(intel_dp->psr_dpcd));
3541 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3542 dev_priv->psr.sink_support = true;
3543 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3544 }
3545
3546 if (INTEL_GEN(dev_priv) >= 9 &&
3547 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3548 uint8_t frame_sync_cap;
3549
3550 dev_priv->psr.sink_support = true;
3551 drm_dp_dpcd_read(&intel_dp->aux,
3552 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3553 &frame_sync_cap, 1);
3554 		dev_priv->psr.aux_frame_sync = frame_sync_cap != 0;
3555 /* PSR2 needs frame sync as well */
3556 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3557 		DRM_DEBUG_KMS("PSR2 %s on sink\n",
3558 dev_priv->psr.psr2_support ? "supported" : "not supported");
3559 }
3560
3561 /*
3562 * Read the eDP display control registers.
3563 *
3564 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
3565 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
3566 * set, but require eDP 1.4+ detection (e.g. for supported link rates
3567 * method). The display control registers should read zero if they're
3568 * not supported anyway.
3569 */
3570 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3571 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3572 sizeof(intel_dp->edp_dpcd))
3573 		DRM_DEBUG_KMS("EDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3574 intel_dp->edp_dpcd);
3575
3576 /* Intermediate frequency support */
3577 	if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDP 1.4 or higher */
3578 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3579 int i;
3580
3581 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3582 sink_rates, sizeof(sink_rates));
3583
3584 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3585 int val = le16_to_cpu(sink_rates[i]);
3586
3587 if (val == 0)
3588 break;
3589
3590 			/* Value read is in 200 kHz units; the driver stores clocks in 10 kHz units */
3591 intel_dp->sink_rates[i] = (val * 200) / 10;
3592 }
3593 intel_dp->num_sink_rates = i;
3594 }
3595
3596 return true;
3597 }
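/*
 * Worked example for the rate conversion above (illustrative numbers):
 * DP_SUPPORTED_LINK_RATES entries are little-endian 16-bit values in
 * units of 200 kHz, while this driver stores link clocks in 10 kHz
 * units. A sink advertising the 1.62 GHz RBR rate reports 8100:
 *
 *	8100 * 200 = 1620000 kHz
 *	(8100 * 200) / 10 = 162000	// the driver's 10 kHz unit value
 */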
3598
3599
3600 static bool
3601 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3602 {
3603 if (!intel_dp_read_dpcd(intel_dp))
3604 return false;
3605
3606 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
3607 &intel_dp->sink_count, 1) < 0)
3608 return false;
3609
3610 /*
3611 	 * Sink count can change between short pulse HPD interrupts,
3612 	 * hence a member variable in intel_dp tracks any changes
3613 	 * between them.
3614 */
3615 intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
3616
3617 /*
3618 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
3619 	 * a dongle is present but no display. Unless we need to know
3620 	 * whether a dongle is present, we don't have to update the
3621 	 * downstream port information, so an early return here saves
3622 	 * us from performing work that isn't required.
3623 */
3624 if (!is_edp(intel_dp) && !intel_dp->sink_count)
3625 return false;
3626
3627 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3628 DP_DWN_STRM_PORT_PRESENT))
3629 return true; /* native DP sink */
3630
3631 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3632 return true; /* no per-port downstream info */
3633
3634 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3635 intel_dp->downstream_ports,
3636 DP_MAX_DOWNSTREAM_PORTS) < 0)
3637 return false; /* downstream port status fetch failed */
3638
3639 return true;
3640 }
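/*
 * DP_GET_SINK_COUNT() is needed above because DP_SINK_COUNT splits the
 * count across bits 5:0 and bit 7 (bit 6 is CP_READY). Assuming the
 * drm_dp_helper.h definition (((x) & 0x80) >> 1) | ((x) & 0x3f),
 * worked examples:
 *
 *	raw 0x01 -> count 1	(single sink)
 *	raw 0x81 -> count 65	(bit 7 supplies the 0x40 place)
 */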
3641
3642 static void
3643 intel_dp_probe_oui(struct intel_dp *intel_dp)
3644 {
3645 u8 buf[3];
3646
3647 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3648 return;
3649
3650 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3651 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3652 buf[0], buf[1], buf[2]);
3653
3654 if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3655 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3656 buf[0], buf[1], buf[2]);
3657 }
3658
3659 static bool
3660 intel_dp_can_mst(struct intel_dp *intel_dp)
3661 {
3662 u8 buf[1];
3663
3664 if (!i915.enable_dp_mst)
3665 return false;
3666
3667 if (!intel_dp->can_mst)
3668 return false;
3669
3670 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3671 return false;
3672
3673 if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
3674 return false;
3675
3676 return buf[0] & DP_MST_CAP;
3677 }
3678
3679 static void
3680 intel_dp_configure_mst(struct intel_dp *intel_dp)
3681 {
3682 if (!i915.enable_dp_mst)
3683 return;
3684
3685 if (!intel_dp->can_mst)
3686 return;
3687
3688 intel_dp->is_mst = intel_dp_can_mst(intel_dp);
3689
3690 if (intel_dp->is_mst)
3691 DRM_DEBUG_KMS("Sink is MST capable\n");
3692 else
3693 DRM_DEBUG_KMS("Sink is not MST capable\n");
3694
3695 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3696 intel_dp->is_mst);
3697 }
3698
3699 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3700 {
3701 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3702 struct drm_device *dev = dig_port->base.base.dev;
3703 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3704 u8 buf;
3705 int ret = 0;
3706 int count = 0;
3707 int attempts = 10;
3708
3709 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3710 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3711 ret = -EIO;
3712 goto out;
3713 }
3714
3715 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3716 buf & ~DP_TEST_SINK_START) < 0) {
3717 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3718 ret = -EIO;
3719 goto out;
3720 }
3721
3722 do {
3723 intel_wait_for_vblank(dev, intel_crtc->pipe);
3724
3725 if (drm_dp_dpcd_readb(&intel_dp->aux,
3726 DP_TEST_SINK_MISC, &buf) < 0) {
3727 ret = -EIO;
3728 goto out;
3729 }
3730 count = buf & DP_TEST_COUNT_MASK;
3731 } while (--attempts && count);
3732
3733 if (attempts == 0) {
3734 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
3735 ret = -ETIMEDOUT;
3736 }
3737
3738 out:
3739 hsw_enable_ips(intel_crtc);
3740 return ret;
3741 }
3742
3743 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3744 {
3745 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3746 struct drm_device *dev = dig_port->base.base.dev;
3747 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3748 u8 buf;
3749 int ret;
3750
3751 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3752 return -EIO;
3753
3754 if (!(buf & DP_TEST_CRC_SUPPORTED))
3755 return -ENOTTY;
3756
3757 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3758 return -EIO;
3759
3760 if (buf & DP_TEST_SINK_START) {
3761 ret = intel_dp_sink_crc_stop(intel_dp);
3762 if (ret)
3763 return ret;
3764 }
3765
3766 hsw_disable_ips(intel_crtc);
3767
3768 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3769 buf | DP_TEST_SINK_START) < 0) {
3770 hsw_enable_ips(intel_crtc);
3771 return -EIO;
3772 }
3773
3774 intel_wait_for_vblank(dev, intel_crtc->pipe);
3775 return 0;
3776 }
3777
3778 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3779 {
3780 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3781 struct drm_device *dev = dig_port->base.base.dev;
3782 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3783 u8 buf;
3784 int count, ret;
3785 int attempts = 6;
3786
3787 ret = intel_dp_sink_crc_start(intel_dp);
3788 if (ret)
3789 return ret;
3790
3791 do {
3792 intel_wait_for_vblank(dev, intel_crtc->pipe);
3793
3794 if (drm_dp_dpcd_readb(&intel_dp->aux,
3795 DP_TEST_SINK_MISC, &buf) < 0) {
3796 ret = -EIO;
3797 goto stop;
3798 }
3799 count = buf & DP_TEST_COUNT_MASK;
3800
3801 } while (--attempts && count == 0);
3802
3803 if (attempts == 0) {
3804 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3805 ret = -ETIMEDOUT;
3806 goto stop;
3807 }
3808
3809 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3810 ret = -EIO;
3811 goto stop;
3812 }
3813
3814 stop:
3815 intel_dp_sink_crc_stop(intel_dp);
3816 return ret;
3817 }
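/*
 * Typical caller pattern (sketch, assuming a hypothetical test-code
 * context): the six CRC bytes cover the R/Cr, G/Y and B/Cb components
 * and are usually compared against a reference frame.
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC: %6phN\n", crc);
 */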
3818
3819 static bool
3820 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3821 {
3822 return drm_dp_dpcd_read(&intel_dp->aux,
3823 DP_DEVICE_SERVICE_IRQ_VECTOR,
3824 sink_irq_vector, 1) == 1;
3825 }
3826
3827 static bool
3828 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3829 {
3830 int ret;
3831
3832 ret = drm_dp_dpcd_read(&intel_dp->aux,
3833 DP_SINK_COUNT_ESI,
3834 sink_irq_vector, 14);
3835 if (ret != 14)
3836 return false;
3837
3838 return true;
3839 }
3840
3841 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3842 {
3843 uint8_t test_result = DP_TEST_ACK;
3844 return test_result;
3845 }
3846
3847 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3848 {
3849 uint8_t test_result = DP_TEST_NAK;
3850 return test_result;
3851 }
3852
3853 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
3854 {
3855 uint8_t test_result = DP_TEST_NAK;
3856 struct intel_connector *intel_connector = intel_dp->attached_connector;
3857 struct drm_connector *connector = &intel_connector->base;
3858
3859 if (intel_connector->detect_edid == NULL ||
3860 connector->edid_corrupt ||
3861 intel_dp->aux.i2c_defer_count > 6) {
3862 /* Check EDID read for NACKs, DEFERs and corruption
3863 * (DP CTS 1.2 Core r1.1)
3864 * 4.2.2.4 : Failed EDID read, I2C_NAK
3865 * 4.2.2.5 : Failed EDID read, I2C_DEFER
3866 * 4.2.2.6 : EDID corruption detected
3867 * Use failsafe mode for all cases
3868 */
3869 if (intel_dp->aux.i2c_nack_count > 0 ||
3870 intel_dp->aux.i2c_defer_count > 0)
3871 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
3872 intel_dp->aux.i2c_nack_count,
3873 intel_dp->aux.i2c_defer_count);
3874 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
3875 } else {
3876 struct edid *block = intel_connector->detect_edid;
3877
3878 /* We have to write the checksum
3879 * of the last block read
3880 */
3881 block += intel_connector->detect_edid->extensions;
3882
3883 if (!drm_dp_dpcd_write(&intel_dp->aux,
3884 DP_TEST_EDID_CHECKSUM,
3885 &block->checksum,
3886 1))
3887 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
3888
3889 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
3890 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
3891 }
3892
3893 /* Set test active flag here so userspace doesn't interrupt things */
3894 intel_dp->compliance_test_active = 1;
3895
3896 return test_result;
3897 }
3898
3899 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
3900 {
3901 uint8_t test_result = DP_TEST_NAK;
3902 return test_result;
3903 }
3904
3905 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
3906 {
3907 uint8_t response = DP_TEST_NAK;
3908 uint8_t rxdata = 0;
3909 int status = 0;
3910
3911 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
3912 if (status <= 0) {
3913 DRM_DEBUG_KMS("Could not read test request from sink\n");
3914 goto update_status;
3915 }
3916
3917 switch (rxdata) {
3918 case DP_TEST_LINK_TRAINING:
3919 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
3920 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
3921 response = intel_dp_autotest_link_training(intel_dp);
3922 break;
3923 case DP_TEST_LINK_VIDEO_PATTERN:
3924 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
3925 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
3926 response = intel_dp_autotest_video_pattern(intel_dp);
3927 break;
3928 case DP_TEST_LINK_EDID_READ:
3929 DRM_DEBUG_KMS("EDID test requested\n");
3930 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
3931 response = intel_dp_autotest_edid(intel_dp);
3932 break;
3933 case DP_TEST_LINK_PHY_TEST_PATTERN:
3934 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
3935 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
3936 response = intel_dp_autotest_phy_pattern(intel_dp);
3937 break;
3938 default:
3939 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
3940 break;
3941 }
3942
3943 update_status:
3944 status = drm_dp_dpcd_write(&intel_dp->aux,
3945 DP_TEST_RESPONSE,
3946 &response, 1);
3947 if (status <= 0)
3948 DRM_DEBUG_KMS("Could not write test response to sink\n");
3949 }
3950
3951 static int
3952 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3953 {
3954 bool bret;
3955
3956 if (intel_dp->is_mst) {
3957 u8 esi[16] = { 0 };
3958 int ret = 0;
3959 int retry;
3960 bool handled;
3961 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3962 go_again:
3963 		if (bret) {
3964
3965 /* check link status - esi[10] = 0x200c */
3966 if (intel_dp->active_mst_links &&
3967 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3968 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3969 intel_dp_start_link_train(intel_dp);
3970 intel_dp_stop_link_train(intel_dp);
3971 }
3972
3973 DRM_DEBUG_KMS("got esi %3ph\n", esi);
3974 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3975
3976 if (handled) {
3977 for (retry = 0; retry < 3; retry++) {
3978 int wret;
3979 wret = drm_dp_dpcd_write(&intel_dp->aux,
3980 DP_SINK_COUNT_ESI+1,
3981 &esi[1], 3);
3982 if (wret == 3) {
3983 break;
3984 }
3985 }
3986
3987 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3988 			if (bret) {
3989 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
3990 goto go_again;
3991 }
3992 } else
3993 ret = 0;
3994
3995 return ret;
3996 } else {
3997 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3998 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3999 intel_dp->is_mst = false;
4000 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4001 /* send a hotplug event */
4002 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4003 }
4004 }
4005 return -EINVAL;
4006 }
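/*
 * Layout note for the ESI reads above (informal): the 14 bytes starting
 * at DP_SINK_COUNT_ESI (DPCD 0x2002) cover the sink count, the device
 * service IRQ vectors and the ESI link/lane status fields, so esi[10]
 * corresponds to DPCD 0x200c - the address mentioned in the comment
 * above and the block that drm_dp_channel_eq_ok() inspects.
 */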
4007
4008 static void
4009 intel_dp_check_link_status(struct intel_dp *intel_dp)
4010 {
4011 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4012 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4013 u8 link_status[DP_LINK_STATUS_SIZE];
4014
4015 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4016
4017 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4018 DRM_ERROR("Failed to get link status\n");
4019 return;
4020 }
4021
4022 if (!intel_encoder->base.crtc)
4023 return;
4024
4025 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4026 return;
4027
4028 /* FIXME: we need to synchronize this sort of stuff with hardware
4029 * readout. Currently fast link training doesn't work on boot-up. */
4030 if (!intel_dp->lane_count)
4031 return;
4032
4033 	/* if link training is requested we should always perform it */
4034 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4035 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4036 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4037 intel_encoder->base.name);
4038 intel_dp_start_link_train(intel_dp);
4039 intel_dp_stop_link_train(intel_dp);
4040 }
4041 }
4042
4043 /*
4044 * According to DP spec
4045 * 5.1.2:
4046 * 1. Read DPCD
4047 * 2. Configure link according to Receiver Capabilities
4048 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4049 * 4. Check link status on receipt of hot-plug interrupt
4050 *
4051 * intel_dp_short_pulse - handles short pulse interrupts
4052 * when full detection is not required.
4053  * Returns %true if the short pulse was handled and full detection
4054  * is not required, %false otherwise.
4055 */
4056 static bool
4057 intel_dp_short_pulse(struct intel_dp *intel_dp)
4058 {
4059 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4060 u8 sink_irq_vector = 0;
4061 u8 old_sink_count = intel_dp->sink_count;
4062 bool ret;
4063
4064 /*
4065 * Clearing compliance test variables to allow capturing
4066 * of values for next automated test request.
4067 */
4068 intel_dp->compliance_test_active = 0;
4069 intel_dp->compliance_test_type = 0;
4070 intel_dp->compliance_test_data = 0;
4071
4072 /*
4073 	 * Now read the DPCD to see if it's actually running.
4074 	 * If the current value of the sink count doesn't match the
4075 	 * value that was stored earlier, or the DPCD read failed,
4076 	 * we need to do a full detection.
4077 */
4078 ret = intel_dp_get_dpcd(intel_dp);
4079
4080 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4081 /* No need to proceed if we are going to do full detect */
4082 return false;
4083 }
4084
4085 /* Try to read the source of the interrupt */
4086 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4087 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
4088 sink_irq_vector != 0) {
4089 /* Clear interrupt source */
4090 drm_dp_dpcd_writeb(&intel_dp->aux,
4091 DP_DEVICE_SERVICE_IRQ_VECTOR,
4092 sink_irq_vector);
4093
4094 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4095 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4096 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4097 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4098 }
4099
4100 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4101 intel_dp_check_link_status(intel_dp);
4102 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4103
4104 return true;
4105 }
4106
4107 /* XXX this is probably wrong for multiple downstream ports */
4108 static enum drm_connector_status
4109 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4110 {
4111 uint8_t *dpcd = intel_dp->dpcd;
4112 uint8_t type;
4113
4114 if (!intel_dp_get_dpcd(intel_dp))
4115 return connector_status_disconnected;
4116
4117 if (is_edp(intel_dp))
4118 return connector_status_connected;
4119
4120 /* if there's no downstream port, we're done */
4121 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4122 return connector_status_connected;
4123
4124 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4125 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4126 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4127
4128 return intel_dp->sink_count ?
4129 connector_status_connected : connector_status_disconnected;
4130 }
4131
4132 if (intel_dp_can_mst(intel_dp))
4133 return connector_status_connected;
4134
4135 /* If no HPD, poke DDC gently */
4136 if (drm_probe_ddc(&intel_dp->aux.ddc))
4137 return connector_status_connected;
4138
4139 /* Well we tried, say unknown for unreliable port types */
4140 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4141 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4142 if (type == DP_DS_PORT_TYPE_VGA ||
4143 type == DP_DS_PORT_TYPE_NON_EDID)
4144 return connector_status_unknown;
4145 } else {
4146 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4147 DP_DWN_STRM_PORT_TYPE_MASK;
4148 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4149 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4150 return connector_status_unknown;
4151 }
4152
4153 /* Anything else is out of spec, warn and ignore */
4154 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4155 return connector_status_disconnected;
4156 }
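/*
 * Informal summary of the decision ladder above:
 *
 *	eDP                            -> connected
 *	no downstream port             -> connected
 *	HPD-capable branch (DPCD 1.1+) -> SINK_COUNT decides
 *	MST capable                    -> connected
 *	DDC probe succeeds             -> connected
 *	VGA/NON_EDID downstream port   -> unknown
 *	anything else                  -> disconnected
 */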
4157
4158 static enum drm_connector_status
4159 edp_detect(struct intel_dp *intel_dp)
4160 {
4161 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4162 enum drm_connector_status status;
4163
4164 status = intel_panel_detect(dev);
4165 if (status == connector_status_unknown)
4166 status = connector_status_connected;
4167
4168 return status;
4169 }
4170
4171 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4172 struct intel_digital_port *port)
4173 {
4174 u32 bit;
4175
4176 switch (port->port) {
4177 case PORT_A:
4178 return true;
4179 case PORT_B:
4180 bit = SDE_PORTB_HOTPLUG;
4181 break;
4182 case PORT_C:
4183 bit = SDE_PORTC_HOTPLUG;
4184 break;
4185 case PORT_D:
4186 bit = SDE_PORTD_HOTPLUG;
4187 break;
4188 default:
4189 MISSING_CASE(port->port);
4190 return false;
4191 }
4192
4193 return I915_READ(SDEISR) & bit;
4194 }
4195
4196 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4197 struct intel_digital_port *port)
4198 {
4199 u32 bit;
4200
4201 switch (port->port) {
4202 case PORT_A:
4203 return true;
4204 case PORT_B:
4205 bit = SDE_PORTB_HOTPLUG_CPT;
4206 break;
4207 case PORT_C:
4208 bit = SDE_PORTC_HOTPLUG_CPT;
4209 break;
4210 case PORT_D:
4211 bit = SDE_PORTD_HOTPLUG_CPT;
4212 break;
4213 case PORT_E:
4214 bit = SDE_PORTE_HOTPLUG_SPT;
4215 break;
4216 default:
4217 MISSING_CASE(port->port);
4218 return false;
4219 }
4220
4221 return I915_READ(SDEISR) & bit;
4222 }
4223
4224 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4225 struct intel_digital_port *port)
4226 {
4227 u32 bit;
4228
4229 switch (port->port) {
4230 case PORT_B:
4231 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4232 break;
4233 case PORT_C:
4234 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4235 break;
4236 case PORT_D:
4237 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4238 break;
4239 default:
4240 MISSING_CASE(port->port);
4241 return false;
4242 }
4243
4244 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4245 }
4246
4247 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4248 struct intel_digital_port *port)
4249 {
4250 u32 bit;
4251
4252 switch (port->port) {
4253 case PORT_B:
4254 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4255 break;
4256 case PORT_C:
4257 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4258 break;
4259 case PORT_D:
4260 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4261 break;
4262 default:
4263 MISSING_CASE(port->port);
4264 return false;
4265 }
4266
4267 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4268 }
4269
4270 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4271 struct intel_digital_port *intel_dig_port)
4272 {
4273 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4274 enum port port;
4275 u32 bit;
4276
4277 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4278 switch (port) {
4279 case PORT_A:
4280 bit = BXT_DE_PORT_HP_DDIA;
4281 break;
4282 case PORT_B:
4283 bit = BXT_DE_PORT_HP_DDIB;
4284 break;
4285 case PORT_C:
4286 bit = BXT_DE_PORT_HP_DDIC;
4287 break;
4288 default:
4289 MISSING_CASE(port);
4290 return false;
4291 }
4292
4293 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4294 }
4295
4296 /*
4297 * intel_digital_port_connected - is the specified port connected?
4298 * @dev_priv: i915 private structure
4299 * @port: the port to test
4300 *
4301 * Return %true if @port is connected, %false otherwise.
4302 */
4303 static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4304 struct intel_digital_port *port)
4305 {
4306 if (HAS_PCH_IBX(dev_priv))
4307 return ibx_digital_port_connected(dev_priv, port);
4308 else if (HAS_PCH_SPLIT(dev_priv))
4309 return cpt_digital_port_connected(dev_priv, port);
4310 else if (IS_BROXTON(dev_priv))
4311 return bxt_digital_port_connected(dev_priv, port);
4312 else if (IS_GM45(dev_priv))
4313 return gm45_digital_port_connected(dev_priv, port);
4314 else
4315 return g4x_digital_port_connected(dev_priv, port);
4316 }
4317
4318 static struct edid *
4319 intel_dp_get_edid(struct intel_dp *intel_dp)
4320 {
4321 struct intel_connector *intel_connector = intel_dp->attached_connector;
4322
4323 /* use cached edid if we have one */
4324 if (intel_connector->edid) {
4325 /* invalid edid */
4326 if (IS_ERR(intel_connector->edid))
4327 return NULL;
4328
4329 return drm_edid_duplicate(intel_connector->edid);
4330 } else
4331 return drm_get_edid(&intel_connector->base,
4332 &intel_dp->aux.ddc);
4333 }
4334
4335 static void
4336 intel_dp_set_edid(struct intel_dp *intel_dp)
4337 {
4338 struct intel_connector *intel_connector = intel_dp->attached_connector;
4339 struct edid *edid;
4340
4341 intel_dp_unset_edid(intel_dp);
4342 edid = intel_dp_get_edid(intel_dp);
4343 intel_connector->detect_edid = edid;
4344
4345 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4346 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4347 else
4348 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4349 }
4350
4351 static void
4352 intel_dp_unset_edid(struct intel_dp *intel_dp)
4353 {
4354 struct intel_connector *intel_connector = intel_dp->attached_connector;
4355
4356 kfree(intel_connector->detect_edid);
4357 intel_connector->detect_edid = NULL;
4358
4359 intel_dp->has_audio = false;
4360 }
4361
4362 static enum drm_connector_status
4363 intel_dp_long_pulse(struct intel_connector *intel_connector)
4364 {
4365 struct drm_connector *connector = &intel_connector->base;
4366 struct intel_dp *intel_dp = intel_attached_dp(connector);
4367 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4368 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4369 struct drm_device *dev = connector->dev;
4370 enum drm_connector_status status;
4371 enum intel_display_power_domain power_domain;
4372 u8 sink_irq_vector = 0;
4373
4374 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4375 intel_display_power_get(to_i915(dev), power_domain);
4376
4377 /* Can't disconnect eDP, but you can close the lid... */
4378 if (is_edp(intel_dp))
4379 status = edp_detect(intel_dp);
4380 else if (intel_digital_port_connected(to_i915(dev),
4381 dp_to_dig_port(intel_dp)))
4382 status = intel_dp_detect_dpcd(intel_dp);
4383 else
4384 status = connector_status_disconnected;
4385
4386 if (status == connector_status_disconnected) {
4387 intel_dp->compliance_test_active = 0;
4388 intel_dp->compliance_test_type = 0;
4389 intel_dp->compliance_test_data = 0;
4390
4391 if (intel_dp->is_mst) {
4392 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
4393 intel_dp->is_mst,
4394 intel_dp->mst_mgr.mst_state);
4395 intel_dp->is_mst = false;
4396 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4397 intel_dp->is_mst);
4398 }
4399
4400 goto out;
4401 }
4402
4403 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4404 intel_encoder->type = INTEL_OUTPUT_DP;
4405
4406 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4407 yesno(intel_dp_source_supports_hbr2(intel_dp)),
4408 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4409
4410 intel_dp_print_rates(intel_dp);
4411
4412 intel_dp_probe_oui(intel_dp);
4413
4414 intel_dp_print_hw_revision(intel_dp);
4415 intel_dp_print_sw_revision(intel_dp);
4416
4417 intel_dp_configure_mst(intel_dp);
4418
4419 if (intel_dp->is_mst) {
4420 /*
4421 * If we are in MST mode then this connector
4422 		 * won't appear connected and won't report
4423 		 * an EDID of its own.
4424 */
4425 status = connector_status_disconnected;
4426 goto out;
4427 } else if (connector->status == connector_status_connected) {
4428 /*
4429 		 * If the display was already connected and still is,
4430 		 * check the link status; there have been known issues of
4431 		 * link loss triggering a long pulse.
4432 */
4433 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4434 intel_dp_check_link_status(intel_dp);
4435 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4436 goto out;
4437 }
4438
4439 /*
4440 	 * Clear the NACK and defer counts so that the EDID read below
4441 	 * yields the exact values required by compliance tests
4442 	 * 4.2.2.4 and 4.2.2.5.
4443 */
4444 intel_dp->aux.i2c_nack_count = 0;
4445 intel_dp->aux.i2c_defer_count = 0;
4446
4447 intel_dp_set_edid(intel_dp);
4448 if (is_edp(intel_dp) || intel_connector->detect_edid)
4449 status = connector_status_connected;
4450 intel_dp->detect_done = true;
4451
4452 /* Try to read the source of the interrupt */
4453 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4454 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
4455 sink_irq_vector != 0) {
4456 /* Clear interrupt source */
4457 drm_dp_dpcd_writeb(&intel_dp->aux,
4458 DP_DEVICE_SERVICE_IRQ_VECTOR,
4459 sink_irq_vector);
4460
4461 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4462 intel_dp_handle_test_request(intel_dp);
4463 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4464 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4465 }
4466
4467 out:
4468 if (status != connector_status_connected && !intel_dp->is_mst)
4469 intel_dp_unset_edid(intel_dp);
4470
4471 intel_display_power_put(to_i915(dev), power_domain);
4472 return status;
4473 }
4474
4475 static enum drm_connector_status
4476 intel_dp_detect(struct drm_connector *connector, bool force)
4477 {
4478 struct intel_dp *intel_dp = intel_attached_dp(connector);
4479 enum drm_connector_status status = connector->status;
4480
4481 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4482 connector->base.id, connector->name);
4483
4484 	/* If a full detect has not been performed yet, do one now */
4485 if (!intel_dp->detect_done)
4486 status = intel_dp_long_pulse(intel_dp->attached_connector);
4487
4488 intel_dp->detect_done = false;
4489
4490 return status;
4491 }
4492
4493 static void
4494 intel_dp_force(struct drm_connector *connector)
4495 {
4496 struct intel_dp *intel_dp = intel_attached_dp(connector);
4497 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4498 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4499 enum intel_display_power_domain power_domain;
4500
4501 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4502 connector->base.id, connector->name);
4503 intel_dp_unset_edid(intel_dp);
4504
4505 if (connector->status != connector_status_connected)
4506 return;
4507
4508 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4509 intel_display_power_get(dev_priv, power_domain);
4510
4511 intel_dp_set_edid(intel_dp);
4512
4513 intel_display_power_put(dev_priv, power_domain);
4514
4515 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4516 intel_encoder->type = INTEL_OUTPUT_DP;
4517 }
4518
4519 static int intel_dp_get_modes(struct drm_connector *connector)
4520 {
4521 struct intel_connector *intel_connector = to_intel_connector(connector);
4522 struct edid *edid;
4523
4524 edid = intel_connector->detect_edid;
4525 if (edid) {
4526 int ret = intel_connector_update_modes(connector, edid);
4527 if (ret)
4528 return ret;
4529 }
4530
4531 /* if eDP has no EDID, fall back to fixed mode */
4532 if (is_edp(intel_attached_dp(connector)) &&
4533 intel_connector->panel.fixed_mode) {
4534 struct drm_display_mode *mode;
4535
4536 mode = drm_mode_duplicate(connector->dev,
4537 intel_connector->panel.fixed_mode);
4538 if (mode) {
4539 drm_mode_probed_add(connector, mode);
4540 return 1;
4541 }
4542 }
4543
4544 return 0;
4545 }
4546
4547 static bool
4548 intel_dp_detect_audio(struct drm_connector *connector)
4549 {
4550 bool has_audio = false;
4551 struct edid *edid;
4552
4553 edid = to_intel_connector(connector)->detect_edid;
4554 if (edid)
4555 has_audio = drm_detect_monitor_audio(edid);
4556
4557 return has_audio;
4558 }
4559
4560 static int
4561 intel_dp_set_property(struct drm_connector *connector,
4562 struct drm_property *property,
4563 uint64_t val)
4564 {
4565 struct drm_i915_private *dev_priv = to_i915(connector->dev);
4566 struct intel_connector *intel_connector = to_intel_connector(connector);
4567 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4568 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4569 int ret;
4570
4571 ret = drm_object_property_set_value(&connector->base, property, val);
4572 if (ret)
4573 return ret;
4574
4575 if (property == dev_priv->force_audio_property) {
4576 int i = val;
4577 bool has_audio;
4578
4579 if (i == intel_dp->force_audio)
4580 return 0;
4581
4582 intel_dp->force_audio = i;
4583
4584 if (i == HDMI_AUDIO_AUTO)
4585 has_audio = intel_dp_detect_audio(connector);
4586 else
4587 has_audio = (i == HDMI_AUDIO_ON);
4588
4589 if (has_audio == intel_dp->has_audio)
4590 return 0;
4591
4592 intel_dp->has_audio = has_audio;
4593 goto done;
4594 }
4595
4596 if (property == dev_priv->broadcast_rgb_property) {
4597 bool old_auto = intel_dp->color_range_auto;
4598 bool old_range = intel_dp->limited_color_range;
4599
4600 switch (val) {
4601 case INTEL_BROADCAST_RGB_AUTO:
4602 intel_dp->color_range_auto = true;
4603 break;
4604 case INTEL_BROADCAST_RGB_FULL:
4605 intel_dp->color_range_auto = false;
4606 intel_dp->limited_color_range = false;
4607 break;
4608 case INTEL_BROADCAST_RGB_LIMITED:
4609 intel_dp->color_range_auto = false;
4610 intel_dp->limited_color_range = true;
4611 break;
4612 default:
4613 return -EINVAL;
4614 }
4615
4616 if (old_auto == intel_dp->color_range_auto &&
4617 old_range == intel_dp->limited_color_range)
4618 return 0;
4619
4620 goto done;
4621 }
4622
4623 if (is_edp(intel_dp) &&
4624 property == connector->dev->mode_config.scaling_mode_property) {
4625 if (val == DRM_MODE_SCALE_NONE) {
4626 			DRM_DEBUG_KMS("\"no scaling\" not supported\n");
4627 return -EINVAL;
4628 }
4629 if (HAS_GMCH_DISPLAY(dev_priv) &&
4630 val == DRM_MODE_SCALE_CENTER) {
4631 DRM_DEBUG_KMS("centering not supported\n");
4632 return -EINVAL;
4633 }
4634
4635 if (intel_connector->panel.fitting_mode == val) {
4636 /* the eDP scaling property is not changed */
4637 return 0;
4638 }
4639 intel_connector->panel.fitting_mode = val;
4640
4641 goto done;
4642 }
4643
4644 return -EINVAL;
4645
4646 done:
4647 if (intel_encoder->base.crtc)
4648 intel_crtc_restore_mode(intel_encoder->base.crtc);
4649
4650 return 0;
4651 }
4652
4653 static int
4654 intel_dp_connector_register(struct drm_connector *connector)
4655 {
4656 struct intel_dp *intel_dp = intel_attached_dp(connector);
4657 int ret;
4658
4659 ret = intel_connector_register(connector);
4660 if (ret)
4661 return ret;
4662
4663 i915_debugfs_connector_add(connector);
4664
4665 DRM_DEBUG_KMS("registering %s bus for %s\n",
4666 intel_dp->aux.name, connector->kdev->kobj.name);
4667
4668 intel_dp->aux.dev = connector->kdev;
4669 return drm_dp_aux_register(&intel_dp->aux);
4670 }
4671
4672 static void
4673 intel_dp_connector_unregister(struct drm_connector *connector)
4674 {
4675 drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4676 intel_connector_unregister(connector);
4677 }
4678
4679 static void
4680 intel_dp_connector_destroy(struct drm_connector *connector)
4681 {
4682 struct intel_connector *intel_connector = to_intel_connector(connector);
4683
4684 kfree(intel_connector->detect_edid);
4685
4686 if (!IS_ERR_OR_NULL(intel_connector->edid))
4687 kfree(intel_connector->edid);
4688
4689 /* Can't call is_edp() since the encoder may have been destroyed
4690 * already. */
4691 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4692 intel_panel_fini(&intel_connector->panel);
4693
4694 drm_connector_cleanup(connector);
4695 kfree(connector);
4696 }
4697
4698 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4699 {
4700 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4701 struct intel_dp *intel_dp = &intel_dig_port->dp;
4702
4703 intel_dp_mst_encoder_cleanup(intel_dig_port);
4704 if (is_edp(intel_dp)) {
4705 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4706 /*
4707 		 * vdd might still be enabled due to the delayed vdd off.
4708 * Make sure vdd is actually turned off here.
4709 */
4710 pps_lock(intel_dp);
4711 edp_panel_vdd_off_sync(intel_dp);
4712 pps_unlock(intel_dp);
4713
4714 if (intel_dp->edp_notifier.notifier_call) {
4715 unregister_reboot_notifier(&intel_dp->edp_notifier);
4716 intel_dp->edp_notifier.notifier_call = NULL;
4717 }
4718 }
4719
4720 intel_dp_aux_fini(intel_dp);
4721
4722 drm_encoder_cleanup(encoder);
4723 kfree(intel_dig_port);
4724 }
4725
4726 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4727 {
4728 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4729
4730 if (!is_edp(intel_dp))
4731 return;
4732
4733 /*
4734 	 * vdd might still be enabled due to the delayed vdd off.
4735 * Make sure vdd is actually turned off here.
4736 */
4737 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4738 pps_lock(intel_dp);
4739 edp_panel_vdd_off_sync(intel_dp);
4740 pps_unlock(intel_dp);
4741 }
4742
4743 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4744 {
4745 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4746 struct drm_device *dev = intel_dig_port->base.base.dev;
4747 struct drm_i915_private *dev_priv = to_i915(dev);
4748 enum intel_display_power_domain power_domain;
4749
4750 lockdep_assert_held(&dev_priv->pps_mutex);
4751
4752 if (!edp_have_panel_vdd(intel_dp))
4753 return;
4754
4755 /*
4756 * The VDD bit needs a power domain reference, so if the bit is
4757 * already enabled when we boot or resume, grab this reference and
4758 * schedule a vdd off, so we don't hold on to the reference
4759 * indefinitely.
4760 */
4761 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4762 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4763 intel_display_power_get(dev_priv, power_domain);
4764
4765 edp_panel_vdd_schedule_off(intel_dp);
4766 }
4767
4768 void intel_dp_encoder_reset(struct drm_encoder *encoder)
4769 {
4770 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
4771 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4772
4773 if (!HAS_DDI(dev_priv))
4774 intel_dp->DP = I915_READ(intel_dp->output_reg);
4775
4776 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4777 return;
4778
4779 pps_lock(intel_dp);
4780
4781 /* Reinit the power sequencer, in case BIOS did something with it. */
4782 intel_dp_pps_init(encoder->dev, intel_dp);
4783 intel_edp_panel_vdd_sanitize(intel_dp);
4784
4785 pps_unlock(intel_dp);
4786 }
4787
4788 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4789 .dpms = drm_atomic_helper_connector_dpms,
4790 .detect = intel_dp_detect,
4791 .force = intel_dp_force,
4792 .fill_modes = drm_helper_probe_single_connector_modes,
4793 .set_property = intel_dp_set_property,
4794 .atomic_get_property = intel_connector_atomic_get_property,
4795 .late_register = intel_dp_connector_register,
4796 .early_unregister = intel_dp_connector_unregister,
4797 .destroy = intel_dp_connector_destroy,
4798 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4799 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4800 };
4801
4802 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4803 .get_modes = intel_dp_get_modes,
4804 .mode_valid = intel_dp_mode_valid,
4805 };
4806
4807 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4808 .reset = intel_dp_encoder_reset,
4809 .destroy = intel_dp_encoder_destroy,
4810 };
4811
4812 enum irqreturn
4813 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4814 {
4815 struct intel_dp *intel_dp = &intel_dig_port->dp;
4816 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4817 struct drm_device *dev = intel_dig_port->base.base.dev;
4818 struct drm_i915_private *dev_priv = to_i915(dev);
4819 enum intel_display_power_domain power_domain;
4820 enum irqreturn ret = IRQ_NONE;
4821
4822 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
4823 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
4824 intel_dig_port->base.type = INTEL_OUTPUT_DP;
4825
4826 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4827 /*
4828 * vdd off can generate a long pulse on eDP which
4829 * would require vdd on to handle it, and thus we
4830 * would end up in an endless cycle of
4831 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4832 */
4833 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4834 port_name(intel_dig_port->port));
4835 return IRQ_HANDLED;
4836 }
4837
4838 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4839 port_name(intel_dig_port->port),
4840 long_hpd ? "long" : "short");
4841
4842 if (long_hpd) {
4843 intel_dp->detect_done = false;
4844 return IRQ_NONE;
4845 }
4846
4847 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4848 intel_display_power_get(dev_priv, power_domain);
4849
4850 if (intel_dp->is_mst) {
4851 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
4852 /*
4853 			 * If we were in MST mode, and the device is not
4854 * there, get out of MST mode
4855 */
4856 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
4857 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4858 intel_dp->is_mst = false;
4859 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4860 intel_dp->is_mst);
4861 intel_dp->detect_done = false;
4862 goto put_power;
4863 }
4864 }
4865
4866 if (!intel_dp->is_mst) {
4867 if (!intel_dp_short_pulse(intel_dp)) {
4868 intel_dp->detect_done = false;
4869 goto put_power;
4870 }
4871 }
4872
4873 ret = IRQ_HANDLED;
4874
4875 put_power:
4876 intel_display_power_put(dev_priv, power_domain);
4877
4878 return ret;
4879 }
4880
4881 /* check the VBT to see whether the eDP is on another port */
4882 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4883 {
4884 struct drm_i915_private *dev_priv = to_i915(dev);
4885
4886 /*
4887 	 * eDP is not supported on g4x, so bail out early just
4888 	 * for a bit of extra safety in case the VBT is bonkers.
4889 */
4890 if (INTEL_INFO(dev)->gen < 5)
4891 return false;
4892
4893 if (port == PORT_A)
4894 return true;
4895
4896 return intel_bios_is_port_edp(dev_priv, port);
4897 }
4898
4899 void
4900 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4901 {
4902 struct intel_connector *intel_connector = to_intel_connector(connector);
4903
4904 intel_attach_force_audio_property(connector);
4905 intel_attach_broadcast_rgb_property(connector);
4906 intel_dp->color_range_auto = true;
4907
4908 if (is_edp(intel_dp)) {
4909 drm_mode_create_scaling_mode_property(connector->dev);
4910 drm_object_attach_property(
4911 &connector->base,
4912 connector->dev->mode_config.scaling_mode_property,
4913 DRM_MODE_SCALE_ASPECT);
4914 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4915 }
4916 }
4917
4918 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4919 {
4920 intel_dp->panel_power_off_time = ktime_get_boottime();
4921 intel_dp->last_power_on = jiffies;
4922 intel_dp->last_backlight_off = jiffies;
4923 }
4924
4925 static void
4926 intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
4927 struct intel_dp *intel_dp, struct edp_power_seq *seq)
4928 {
4929 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
4930 struct pps_registers regs;
4931
4932 	intel_pps_get_registers(dev_priv, intel_dp, &regs);
4933
4934 /* Workaround: Need to write PP_CONTROL with the unlock key as
4935 * the very first thing. */
4936 pp_ctl = ironlake_get_pp_control(intel_dp);
4937
4938 pp_on = I915_READ(regs.pp_on);
4939 pp_off = I915_READ(regs.pp_off);
4940 if (!IS_BROXTON(dev_priv)) {
4941 I915_WRITE(regs.pp_ctrl, pp_ctl);
4942 pp_div = I915_READ(regs.pp_div);
4943 }
4944
4945 /* Pull timing values out of registers */
4946 seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4947 PANEL_POWER_UP_DELAY_SHIFT;
4948
4949 seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4950 PANEL_LIGHT_ON_DELAY_SHIFT;
4951
4952 seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4953 PANEL_LIGHT_OFF_DELAY_SHIFT;
4954
4955 seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4956 PANEL_POWER_DOWN_DELAY_SHIFT;
4957
4958 if (IS_BROXTON(dev_priv)) {
4959 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
4960 BXT_POWER_CYCLE_DELAY_SHIFT;
4961 if (tmp > 0)
4962 seq->t11_t12 = (tmp - 1) * 1000;
4963 else
4964 seq->t11_t12 = 0;
4965 } else {
4966 seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4967 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4968 }
4969 }
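/*
 * Worked example for the extraction above (hypothetical register value,
 * assuming the usual mask 0x1fff0000 and shift 16 for the power-up
 * delay field): with pp_on = 0x07d00001,
 *
 *	t1_t3 = (0x07d00001 & PANEL_POWER_UP_DELAY_MASK) >> 16 = 2000
 *
 * i.e. 200 ms in the hardware's 100 us units, while the low bits give
 * t8 = 1.
 */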
4970
4971 static void
4972 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
4973 {
4974 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4975 state_name,
4976 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
4977 }
4978
4979 static void
4980 intel_pps_verify_state(struct drm_i915_private *dev_priv,
4981 struct intel_dp *intel_dp)
4982 {
4983 struct edp_power_seq hw;
4984 struct edp_power_seq *sw = &intel_dp->pps_delays;
4985
4986 intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
4987
4988 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
4989 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
4990 DRM_ERROR("PPS state mismatch\n");
4991 intel_pps_dump_state("sw", sw);
4992 intel_pps_dump_state("hw", &hw);
4993 }
4994 }
4995
4996 static void
4997 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4998 struct intel_dp *intel_dp)
4999 {
5000 struct drm_i915_private *dev_priv = to_i915(dev);
5001 struct edp_power_seq cur, vbt, spec,
5002 *final = &intel_dp->pps_delays;
5003
5004 lockdep_assert_held(&dev_priv->pps_mutex);
5005
5006 /* already initialized? */
5007 if (final->t11_t12 != 0)
5008 return;
5009
5010 intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
5011
5012 intel_pps_dump_state("cur", &cur);
5013
5014 vbt = dev_priv->vbt.edp.pps;
5015
5016 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5017 * our hw here, which are all in 100usec. */
5018 spec.t1_t3 = 210 * 10;
5019 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5020 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5021 spec.t10 = 500 * 10;
5022 /* This one is special and actually in units of 100ms, but zero
5023 * based in the hw (so we need to add 100 ms). But the sw vbt
5024 	 * table multiplies it by 1000 to put it in units of 100usec,
5025 * too. */
5026 spec.t11_t12 = (510 + 100) * 10;
5027
5028 intel_pps_dump_state("vbt", &vbt);
5029
5030 /* Use the max of the register settings and vbt. If both are
5031 * unset, fall back to the spec limits. */
5032 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5033 spec.field : \
5034 max(cur.field, vbt.field))
5035 assign_final(t1_t3);
5036 assign_final(t8);
5037 assign_final(t9);
5038 assign_final(t10);
5039 assign_final(t11_t12);
5040 #undef assign_final
5041
5042 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5043 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5044 intel_dp->backlight_on_delay = get_delay(t8);
5045 intel_dp->backlight_off_delay = get_delay(t9);
5046 intel_dp->panel_power_down_delay = get_delay(t10);
5047 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5048 #undef get_delay
5049
5050 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5051 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5052 intel_dp->panel_power_cycle_delay);
5053
5054 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5055 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5056
5057 /*
5058 * We override the HW backlight delays to 1 because we do manual waits
5059 * on them. For T8, even BSpec recommends doing it. For T9, if we
5060 * don't do this, we'll end up waiting for the backlight off delay
5061 * twice: once when we do the manual sleep, and once when we disable
5062 * the panel and wait for the PP_STATUS bit to become zero.
5063 */
5064 final->t8 = 1;
5065 final->t9 = 1;
5066
5067 /*
5068 * HW has only a 100msec granularity for t11_t12 so round it up
5069 * accordingly.
5070 */
5071 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
5072 }
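/*
 * Worked example for assign_final()/get_delay() above (hypothetical
 * numbers): if the registers report t1_t3 = 0 and the VBT says
 * t1_t3 = 2100 (210 ms in 100 us units), assign_final() picks
 * max(0, 2100) = 2100; only if both had been zero would it fall back
 * to the spec limit of 210 * 10. get_delay() then rounds up to
 * milliseconds:
 *
 *	DIV_ROUND_UP(2100, 10) = 210	// panel_power_up_delay in ms
 */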
5073
5074 static void
5075 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5076 struct intel_dp *intel_dp,
5077 bool force_disable_vdd)
5078 {
5079 struct drm_i915_private *dev_priv = to_i915(dev);
5080 u32 pp_on, pp_off, pp_div, port_sel = 0;
5081 int div = dev_priv->rawclk_freq / 1000;
5082 struct pps_registers regs;
5083 enum port port = dp_to_dig_port(intel_dp)->port;
5084 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5085
5086 lockdep_assert_held(&dev_priv->pps_mutex);
5087
5088 	intel_pps_get_registers(dev_priv, intel_dp, &regs);
5089
5090 /*
5091 * On some VLV machines the BIOS can leave the VDD
5092 	 * enabled even on power sequencers which aren't
5093 * hooked up to any port. This would mess up the
5094 * power domain tracking the first time we pick
5095 * one of these power sequencers for use since
5096 * edp_panel_vdd_on() would notice that the VDD was
5097 * already on and therefore wouldn't grab the power
5098 * domain reference. Disable VDD first to avoid this.
5099 * This also avoids spuriously turning the VDD on as
5100 	 * soon as the new power sequencer gets initialized.
5101 */
5102 if (force_disable_vdd) {
5103 u32 pp = ironlake_get_pp_control(intel_dp);
5104
5105 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
5106
5107 if (pp & EDP_FORCE_VDD)
5108 DRM_DEBUG_KMS("VDD already on, disabling first\n");
5109
5110 pp &= ~EDP_FORCE_VDD;
5111
5112 I915_WRITE(regs.pp_ctrl, pp);
5113 }
5114
5115 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5116 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
5117 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5118 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5119 /* Compute the divisor for the pp clock, simply match the Bspec
5120 * formula. */
5121 if (IS_BROXTON(dev)) {
5122 pp_div = I915_READ(regs.pp_ctrl);
5123 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5124 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5125 << BXT_POWER_CYCLE_DELAY_SHIFT);
5126 } else {
5127 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5128 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5129 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5130 }
5131
5132 /* Haswell doesn't have any port selection bits for the panel
5133 * power sequencer any more. */
5134 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5135 port_sel = PANEL_PORT_SELECT_VLV(port);
5136 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5137 if (port == PORT_A)
5138 port_sel = PANEL_PORT_SELECT_DPA;
5139 else
5140 port_sel = PANEL_PORT_SELECT_DPD;
5141 }
5142
5143 pp_on |= port_sel;
5144
5145 I915_WRITE(regs.pp_on, pp_on);
5146 I915_WRITE(regs.pp_off, pp_off);
5147 if (IS_BROXTON(dev))
5148 I915_WRITE(regs.pp_ctrl, pp_div);
5149 else
5150 I915_WRITE(regs.pp_div, pp_div);
5151
5152 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5153 I915_READ(regs.pp_on),
5154 I915_READ(regs.pp_off),
5155 IS_BROXTON(dev) ?
5156 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
5157 I915_READ(regs.pp_div));
5158 }
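/*
 * Illustrative sketch of the PP_ON packing done above (t1_t3 value
 * made up, field layout as per the shifts used in this function):
 * with t1_t3 = 2100 (210ms) and t8 = 1 (forced earlier because we do
 * the backlight waits manually), the register would be written as
 *
 *	pp_on = (2100 << PANEL_POWER_UP_DELAY_SHIFT) |
 *		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
 *
 * with the port select bits ORed in afterwards on VLV/CHV and
 * IBX/CPT.
 */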
5159
5160 static void intel_dp_pps_init(struct drm_device *dev,
5161 struct intel_dp *intel_dp)
5162 {
5163 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5164 vlv_initial_power_sequencer_setup(intel_dp);
5165 } else {
5166 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5167 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
5168 }
5169 }
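/*
 * Note on the split above: on VLV/CHV the panel power sequencer is
 * per-pipe and may already have been claimed by the BIOS, so the
 * initial setup has its own path; on other platforms the sequencer
 * delays are read back and (re)programmed directly via the two
 * helpers above.
 */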
5170
5171 /**
5172 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5173 * @dev_priv: i915 device
5174 * @crtc_state: a pointer to the active intel_crtc_state
5175 * @refresh_rate: RR to be programmed
5176 *
5177 * This function gets called when refresh rate (RR) has to be changed from
5178 * one frequency to another. Switches can be between high and low RR
5179 * supported by the panel or to any other RR based on media playback (in
5180 * this case, RR value needs to be passed from user space).
5181 *
5182 * The caller of this function needs to take a lock on dev_priv->drrs.mutex.
5183 */
5184 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5185 struct intel_crtc_state *crtc_state,
5186 int refresh_rate)
5187 {
5188 struct intel_encoder *encoder;
5189 struct intel_digital_port *dig_port = NULL;
5190 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
5192 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5193
5194 if (refresh_rate <= 0) {
5195 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5196 return;
5197 }
5198
5199 if (intel_dp == NULL) {
5200 DRM_DEBUG_KMS("DRRS not supported.\n");
5201 return;
5202 }
5203
5204 /*
5205 * FIXME: This needs proper synchronization with psr state for some
5206 * platforms that cannot have PSR and DRRS enabled at the same time.
5207 */
5208
5209 dig_port = dp_to_dig_port(intel_dp);
5210 encoder = &dig_port->base;
5211 intel_crtc = to_intel_crtc(encoder->base.crtc);
5212
5213 if (!intel_crtc) {
5214 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5215 return;
5216 }
5217
5218 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5219 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5220 return;
5221 }
5222
5223 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5224 refresh_rate)
5225 index = DRRS_LOW_RR;
5226
5227 if (index == dev_priv->drrs.refresh_rate_type) {
5228 DRM_DEBUG_KMS(
5229 "DRRS requested for previously set RR...ignoring\n");
5230 return;
5231 }
5232
5233 if (!crtc_state->base.active) {
5234 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5235 return;
5236 }
5237
5238 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
5239 switch (index) {
5240 case DRRS_HIGH_RR:
5241 intel_dp_set_m_n(intel_crtc, M1_N1);
5242 break;
5243 case DRRS_LOW_RR:
5244 intel_dp_set_m_n(intel_crtc, M2_N2);
5245 break;
5246 case DRRS_MAX_RR:
5247 default:
5248 DRM_ERROR("Unsupported refresh rate type\n");
5249 }
5250 } else if (INTEL_GEN(dev_priv) > 6) {
5251 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
5252 u32 val;
5253
5254 val = I915_READ(reg);
5255 if (index > DRRS_HIGH_RR) {
5256 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5257 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5258 else
5259 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5260 } else {
5261 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5262 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5263 else
5264 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5265 }
5266 I915_WRITE(reg, val);
5267 }
5268
5269 dev_priv->drrs.refresh_rate_type = index;
5270
5271 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5272 }
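/*
 * As the kernel-doc above notes, callers must hold the DRRS mutex
 * around the switch. A minimal (illustrative) call site:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv, crtc_state, refresh_rate);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */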
5273
5274 /**
5275 * intel_edp_drrs_enable - init drrs struct if supported
5276 * @intel_dp: DP struct
5277 * @crtc_state: A pointer to the active crtc state.
5278 *
5279 * Initializes frontbuffer_bits and drrs.dp
5280 */
5281 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5282 struct intel_crtc_state *crtc_state)
5283 {
5284 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5285 struct drm_i915_private *dev_priv = to_i915(dev);
5286
5287 if (!crtc_state->has_drrs) {
5288 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5289 return;
5290 }
5291
5292 mutex_lock(&dev_priv->drrs.mutex);
5293 if (WARN_ON(dev_priv->drrs.dp)) {
5294 DRM_ERROR("DRRS already enabled\n");
5295 goto unlock;
5296 }
5297
5298 dev_priv->drrs.busy_frontbuffer_bits = 0;
5299
5300 dev_priv->drrs.dp = intel_dp;
5301
5302 unlock:
5303 mutex_unlock(&dev_priv->drrs.mutex);
5304 }
5305
5306 /**
5307 * intel_edp_drrs_disable - Disable DRRS
5308 * @intel_dp: DP struct
5309 * @old_crtc_state: Pointer to old crtc_state.
5310 *
5311 */
5312 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5313 struct intel_crtc_state *old_crtc_state)
5314 {
5315 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5316 struct drm_i915_private *dev_priv = to_i915(dev);
5317
5318 if (!old_crtc_state->has_drrs)
5319 return;
5320
5321 mutex_lock(&dev_priv->drrs.mutex);
5322 if (!dev_priv->drrs.dp) {
5323 mutex_unlock(&dev_priv->drrs.mutex);
5324 return;
5325 }
5326
5327 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5328 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
5329 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
5330
5331 dev_priv->drrs.dp = NULL;
5332 mutex_unlock(&dev_priv->drrs.mutex);
5333
5334 cancel_delayed_work_sync(&dev_priv->drrs.work);
5335 }
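/*
 * Enable and disable are meant to bracket the active life of an eDP
 * pipe; a hypothetical modeset path (names illustrative only) would
 * look like:
 *
 *	intel_edp_drrs_disable(intel_dp, old_crtc_state);
 *	... reprogram the pipe ...
 *	intel_edp_drrs_enable(intel_dp, new_crtc_state);
 */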
5336
5337 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5338 {
5339 struct drm_i915_private *dev_priv =
5340 container_of(work, typeof(*dev_priv), drrs.work.work);
5341 struct intel_dp *intel_dp;
5342
5343 mutex_lock(&dev_priv->drrs.mutex);
5344
5345 intel_dp = dev_priv->drrs.dp;
5346
5347 if (!intel_dp)
5348 goto unlock;
5349
5350 /*
5351 * The delayed work can race with an invalidate hence we need to
5352 * recheck.
5353 */
5354
5355 if (dev_priv->drrs.busy_frontbuffer_bits)
5356 goto unlock;
5357
5358 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
5359 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
5360
5361 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5362 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
5363 }
5364
5365 unlock:
5366 mutex_unlock(&dev_priv->drrs.mutex);
5367 }
5368
5369 /**
5370 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5371 * @dev_priv: i915 device
5372 * @frontbuffer_bits: frontbuffer plane tracking bits
5373 *
5374 * This function gets called every time rendering on the given planes starts.
5375 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
5376 *
5377 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5378 */
5379 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
5380 unsigned int frontbuffer_bits)
5381 {
5382 struct drm_crtc *crtc;
5383 enum pipe pipe;
5384
5385 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5386 return;
5387
5388 cancel_delayed_work(&dev_priv->drrs.work);
5389
5390 mutex_lock(&dev_priv->drrs.mutex);
5391 if (!dev_priv->drrs.dp) {
5392 mutex_unlock(&dev_priv->drrs.mutex);
5393 return;
5394 }
5395
5396 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5397 pipe = to_intel_crtc(crtc)->pipe;
5398
5399 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5400 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5401
5402 /* invalidate means busy screen hence upclock */
5403 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5404 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5405 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5406
5407 mutex_unlock(&dev_priv->drrs.mutex);
5408 }
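/*
 * A sketch of how a frontbuffer write might be reported here, assuming
 * INTEL_FRONTBUFFER_ALL_MASK() covers every plane of the given pipe:
 *
 *	unsigned int bits = INTEL_FRONTBUFFER_ALL_MASK(PIPE_A);
 *
 *	intel_edp_drrs_invalidate(dev_priv, bits);
 *
 * If the panel was in DRRS_LOW_RR this immediately upclocks it back
 * to the high refresh rate.
 */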
5409
5410 /**
5411 * intel_edp_drrs_flush - Restart Idleness DRRS
5412 * @dev_priv: i915 device
5413 * @frontbuffer_bits: frontbuffer plane tracking bits
5414 *
5415 * This function gets called every time rendering on the given planes has
5416 * completed or flip on a crtc is completed. So DRRS should be upclocked
5417 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5418 * if no other planes are dirty.
5419 *
5420 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5421 */
5422 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
5423 unsigned int frontbuffer_bits)
5424 {
5425 struct drm_crtc *crtc;
5426 enum pipe pipe;
5427
5428 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5429 return;
5430
5431 cancel_delayed_work(&dev_priv->drrs.work);
5432
5433 mutex_lock(&dev_priv->drrs.mutex);
5434 if (!dev_priv->drrs.dp) {
5435 mutex_unlock(&dev_priv->drrs.mutex);
5436 return;
5437 }
5438
5439 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5440 pipe = to_intel_crtc(crtc)->pipe;
5441
5442 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5443 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5444
5445 /* flush means busy screen hence upclock */
5446 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5447 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5448 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5449
5450 /*
5451 * flush also means no more activity hence schedule downclock, if all
5452 * other fbs are quiescent too
5453 */
5454 if (!dev_priv->drrs.busy_frontbuffer_bits)
5455 schedule_delayed_work(&dev_priv->drrs.work,
5456 msecs_to_jiffies(1000));
5457 mutex_unlock(&dev_priv->drrs.mutex);
5458 }
5459
5460 /**
5461 * DOC: Display Refresh Rate Switching (DRRS)
5462 *
5463 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5464 * which enables switching between low and high refresh rates
5465 * dynamically, based on the usage scenario. This feature is applicable
5466 * for internal panels.
5467 *
5468 * Indication that the panel supports DRRS is given by the panel EDID, which
5469 * would list multiple refresh rates for one resolution.
5470 *
5471 * DRRS is of 2 types - static and seamless.
5472 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5473 * (may appear as a blink on screen) and is used in dock-undock scenario.
5474 * Seamless DRRS involves changing RR without any visual effect to the user
5475 * and can be used during normal system usage. This is done by programming
5476 * certain registers.
5477 *
5478 * Support for static/seamless DRRS may be indicated in the VBT based on
5479 * inputs from the panel spec.
5480 *
5481 * DRRS saves power by switching to low RR based on usage scenarios.
5482 *
5483 * The implementation is based on frontbuffer tracking implementation. When
5484 * there is a disturbance on the screen triggered by user activity or a periodic
5485 * system activity, DRRS is disabled (RR is changed to high RR). When there is
5486 * no movement on screen, after a timeout of 1 second, a switch to low RR is
5487 * made.
5488 *
5489 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5490 * and intel_edp_drrs_flush() are called.
5491 *
5492 * DRRS can be further extended to support other internal panels and also
5493 * the scenario of video playback wherein RR is set based on the rate
5494 * requested by userspace.
5495 */
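/*
 * An illustrative (hypothetical) sequence tying the pieces above
 * together, as seen from the frontbuffer tracking side:
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *		-> rendering started, panel is upclocked to high RR
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *		-> rendering done; if no other frontbuffers are busy,
 *		   the delayed work is scheduled
 *	intel_edp_drrs_downclock_work()
 *		-> fires after 1s of idleness and drops to low RR
 */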
5496
5497 /**
5498 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5499 * @intel_connector: eDP connector
5500 * @fixed_mode: preferred mode of panel
5501 *
5502 * This function is called only once at driver load to initialize basic
5503 * DRRS stuff.
5504 *
5505 * Returns:
5506 * Downclock mode if panel supports it, else return NULL.
5507 * DRRS support is determined by the presence of downclock mode (apart
5508 * from VBT setting).
5509 */
5510 static struct drm_display_mode *
5511 intel_dp_drrs_init(struct intel_connector *intel_connector,
5512 struct drm_display_mode *fixed_mode)
5513 {
5514 struct drm_connector *connector = &intel_connector->base;
5515 struct drm_device *dev = connector->dev;
5516 struct drm_i915_private *dev_priv = to_i915(dev);
5517 struct drm_display_mode *downclock_mode = NULL;
5518
5519 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5520 mutex_init(&dev_priv->drrs.mutex);
5521
5522 if (INTEL_INFO(dev)->gen <= 6) {
5523 DRM_DEBUG_KMS("DRRS is supported only on Gen7 and above\n");
5524 return NULL;
5525 }
5526
5527 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5528 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5529 return NULL;
5530 }
5531
5532 downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5533 connector);
5534
5535 if (!downclock_mode) {
5536 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5537 return NULL;
5538 }
5539
5540 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5541
5542 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5543 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5544 return downclock_mode;
5545 }
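/*
 * Example of what makes a panel qualify here: an EDID that lists, say,
 * 1920x1080@60 as the preferred mode plus a 1920x1080@40 variant lets
 * intel_find_panel_downclock() return the 40Hz mode as the downclock
 * mode, which in turn enables seamless DRRS (numbers illustrative).
 */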
5546
5547 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5548 struct intel_connector *intel_connector)
5549 {
5550 struct drm_connector *connector = &intel_connector->base;
5551 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5552 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5553 struct drm_device *dev = intel_encoder->base.dev;
5554 struct drm_i915_private *dev_priv = to_i915(dev);
5555 struct drm_display_mode *fixed_mode = NULL;
5556 struct drm_display_mode *downclock_mode = NULL;
5557 bool has_dpcd;
5558 struct drm_display_mode *scan;
5559 struct edid *edid;
5560 enum pipe pipe = INVALID_PIPE;
5561
5562 if (!is_edp(intel_dp))
5563 return true;
5564
5565 /*
5566 * On IBX/CPT we may get here with LVDS already registered. Since the
5567 * driver uses the only internal power sequencer available for both
5568 * eDP and LVDS, bail out early in this case to prevent interfering
5569 * with an already powered-on LVDS power sequencer.
5570 */
5571 if (intel_get_lvds_encoder(dev)) {
5572 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
5573 DRM_INFO("LVDS was detected, not registering eDP\n");
5574
5575 return false;
5576 }
5577
5578 pps_lock(intel_dp);
5579
5580 intel_dp_init_panel_power_timestamps(intel_dp);
5581 intel_dp_pps_init(dev, intel_dp);
5582 intel_edp_panel_vdd_sanitize(intel_dp);
5583
5584 pps_unlock(intel_dp);
5585
5586 /* Cache DPCD and EDID for edp. */
5587 has_dpcd = intel_edp_init_dpcd(intel_dp);
5588
5589 if (!has_dpcd) {
5590 /* if this fails, presume the device is a ghost */
5591 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5592 goto out_vdd_off;
5593 }
5594
5595 mutex_lock(&dev->mode_config.mutex);
5596 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5597 if (edid) {
5598 if (drm_add_edid_modes(connector, edid)) {
5599 drm_mode_connector_update_edid_property(connector,
5600 edid);
5601 drm_edid_to_eld(connector, edid);
5602 } else {
5603 kfree(edid);
5604 edid = ERR_PTR(-EINVAL);
5605 }
5606 } else {
5607 edid = ERR_PTR(-ENOENT);
5608 }
5609 intel_connector->edid = edid;
5610
5611 /* prefer fixed mode from EDID if available */
5612 list_for_each_entry(scan, &connector->probed_modes, head) {
5613 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5614 fixed_mode = drm_mode_duplicate(dev, scan);
5615 downclock_mode = intel_dp_drrs_init(
5616 intel_connector, fixed_mode);
5617 break;
5618 }
5619 }
5620
5621 /* fallback to VBT if available for eDP */
5622 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5623 fixed_mode = drm_mode_duplicate(dev,
5624 dev_priv->vbt.lfp_lvds_vbt_mode);
5625 if (fixed_mode) {
5626 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5627 connector->display_info.width_mm = fixed_mode->width_mm;
5628 connector->display_info.height_mm = fixed_mode->height_mm;
5629 }
5630 }
5631 mutex_unlock(&dev->mode_config.mutex);
5632
5633 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5634 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5635 register_reboot_notifier(&intel_dp->edp_notifier);
5636
5637 /*
5638 * Figure out the current pipe for the initial backlight setup.
5639 * If the current pipe isn't valid, try the PPS pipe, and if that
5640 * fails just assume pipe A.
5641 */
5642 if (IS_CHERRYVIEW(dev))
5643 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5644 else
5645 pipe = PORT_TO_PIPE(intel_dp->DP);
5646
5647 if (pipe != PIPE_A && pipe != PIPE_B)
5648 pipe = intel_dp->pps_pipe;
5649
5650 if (pipe != PIPE_A && pipe != PIPE_B)
5651 pipe = PIPE_A;
5652
5653 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5654 pipe_name(pipe));
5655 }
5656
5657 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5658 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5659 intel_panel_setup_backlight(connector, pipe);
5660
5661 return true;
5662
5663 out_vdd_off:
5664 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5665 /*
5666 * vdd might still be enabled due to the delayed vdd off.
5667 * Make sure vdd is actually turned off here.
5668 */
5669 pps_lock(intel_dp);
5670 edp_panel_vdd_off_sync(intel_dp);
5671 pps_unlock(intel_dp);
5672
5673 return false;
5674 }
5675
5676 bool
5677 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5678 struct intel_connector *intel_connector)
5679 {
5680 struct drm_connector *connector = &intel_connector->base;
5681 struct intel_dp *intel_dp = &intel_dig_port->dp;
5682 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5683 struct drm_device *dev = intel_encoder->base.dev;
5684 struct drm_i915_private *dev_priv = to_i915(dev);
5685 enum port port = intel_dig_port->port;
5686 int type;
5687
5688 if (WARN(intel_dig_port->max_lanes < 1,
5689 "Not enough lanes (%d) for DP on port %c\n",
5690 intel_dig_port->max_lanes, port_name(port)))
5691 return false;
5692
5693 intel_dp->pps_pipe = INVALID_PIPE;
5694
5695 /* intel_dp vfuncs */
5696 if (INTEL_INFO(dev)->gen >= 9)
5697 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5698 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5699 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5700 else if (HAS_PCH_SPLIT(dev))
5701 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5702 else
5703 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
5704
5705 if (INTEL_INFO(dev)->gen >= 9)
5706 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5707 else
5708 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
5709
5710 if (HAS_DDI(dev))
5711 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5712
5713 /* Preserve the current hw state. */
5714 intel_dp->DP = I915_READ(intel_dp->output_reg);
5715 intel_dp->attached_connector = intel_connector;
5716
5717 if (intel_dp_is_edp(dev, port))
5718 type = DRM_MODE_CONNECTOR_eDP;
5719 else
5720 type = DRM_MODE_CONNECTOR_DisplayPort;
5721
5722 /*
5723 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5724 * for DP the encoder type can be set by the caller to
5725 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5726 */
5727 if (type == DRM_MODE_CONNECTOR_eDP)
5728 intel_encoder->type = INTEL_OUTPUT_EDP;
5729
5730 /* eDP only on port B and/or C on vlv/chv */
5731 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5732 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5733 return false;
5734
5735 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5736 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5737 port_name(port));
5738
5739 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5740 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5741
5742 connector->interlace_allowed = true;
5743 connector->doublescan_allowed = 0;
5744
5745 intel_dp_aux_init(intel_dp);
5746
5747 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5748 edp_panel_vdd_work);
5749
5750 intel_connector_attach_encoder(intel_connector, intel_encoder);
5751
5752 if (HAS_DDI(dev))
5753 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5754 else
5755 intel_connector->get_hw_state = intel_connector_get_hw_state;
5756
5757 /* Set up the hotplug pin. */
5758 switch (port) {
5759 case PORT_A:
5760 intel_encoder->hpd_pin = HPD_PORT_A;
5761 break;
5762 case PORT_B:
5763 intel_encoder->hpd_pin = HPD_PORT_B;
5764 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5765 intel_encoder->hpd_pin = HPD_PORT_A;
5766 break;
5767 case PORT_C:
5768 intel_encoder->hpd_pin = HPD_PORT_C;
5769 break;
5770 case PORT_D:
5771 intel_encoder->hpd_pin = HPD_PORT_D;
5772 break;
5773 case PORT_E:
5774 intel_encoder->hpd_pin = HPD_PORT_E;
5775 break;
5776 default:
5777 BUG();
5778 }
5779
5780 /* init MST on ports that can support it */
5781 if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
5782 (port == PORT_B || port == PORT_C || port == PORT_D))
5783 intel_dp_mst_encoder_init(intel_dig_port,
5784 intel_connector->base.base.id);
5785
5786 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5787 intel_dp_aux_fini(intel_dp);
5788 intel_dp_mst_encoder_cleanup(intel_dig_port);
5789 goto fail;
5790 }
5791
5792 intel_dp_add_properties(intel_dp, connector);
5793
5794 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5795 * 0xd. Failure to do so will result in spurious interrupts being
5796 * generated on the port when a cable is not attached.
5797 */
5798 if (IS_G4X(dev) && !IS_GM45(dev)) {
5799 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5800 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5801 }
5802
5803 return true;
5804
5805 fail:
5806 drm_connector_cleanup(connector);
5807
5808 return false;
5809 }
5810
5811 bool intel_dp_init(struct drm_device *dev,
5812 i915_reg_t output_reg,
5813 enum port port)
5814 {
5815 struct drm_i915_private *dev_priv = to_i915(dev);
5816 struct intel_digital_port *intel_dig_port;
5817 struct intel_encoder *intel_encoder;
5818 struct drm_encoder *encoder;
5819 struct intel_connector *intel_connector;
5820
5821 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5822 if (!intel_dig_port)
5823 return false;
5824
5825 intel_connector = intel_connector_alloc();
5826 if (!intel_connector)
5827 goto err_connector_alloc;
5828
5829 intel_encoder = &intel_dig_port->base;
5830 encoder = &intel_encoder->base;
5831
5832 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5833 DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
5834 goto err_encoder_init;
5835
5836 intel_encoder->compute_config = intel_dp_compute_config;
5837 intel_encoder->disable = intel_disable_dp;
5838 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5839 intel_encoder->get_config = intel_dp_get_config;
5840 intel_encoder->suspend = intel_dp_encoder_suspend;
5841 if (IS_CHERRYVIEW(dev)) {
5842 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5843 intel_encoder->pre_enable = chv_pre_enable_dp;
5844 intel_encoder->enable = vlv_enable_dp;
5845 intel_encoder->post_disable = chv_post_disable_dp;
5846 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
5847 } else if (IS_VALLEYVIEW(dev)) {
5848 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5849 intel_encoder->pre_enable = vlv_pre_enable_dp;
5850 intel_encoder->enable = vlv_enable_dp;
5851 intel_encoder->post_disable = vlv_post_disable_dp;
5852 } else {
5853 intel_encoder->pre_enable = g4x_pre_enable_dp;
5854 intel_encoder->enable = g4x_enable_dp;
5855 if (INTEL_INFO(dev)->gen >= 5)
5856 intel_encoder->post_disable = ilk_post_disable_dp;
5857 }
5858
5859 intel_dig_port->port = port;
5860 intel_dig_port->dp.output_reg = output_reg;
5861 intel_dig_port->max_lanes = 4;
5862
5863 intel_encoder->type = INTEL_OUTPUT_DP;
5864 if (IS_CHERRYVIEW(dev)) {
5865 if (port == PORT_D)
5866 intel_encoder->crtc_mask = 1 << 2;
5867 else
5868 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5869 } else {
5870 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5871 }
5872 intel_encoder->cloneable = 0;
5873
5874 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5875 dev_priv->hotplug.irq_port[port] = intel_dig_port;
5876
5877 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5878 goto err_init_connector;
5879
5880 return true;
5881
5882 err_init_connector:
5883 drm_encoder_cleanup(encoder);
5884 err_encoder_init:
5885 kfree(intel_connector);
5886 err_connector_alloc:
5887 kfree(intel_dig_port);
5888 return false;
5889 }
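/*
 * Illustrative caller of intel_dp_init() (a sketch; the real call
 * sites live in the platform output setup code):
 *
 *	if (I915_READ(DP_B) & DP_DETECTED)
 *		intel_dp_init(dev, DP_B, PORT_B);
 */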
5890
5891 void intel_dp_mst_suspend(struct drm_device *dev)
5892 {
5893 struct drm_i915_private *dev_priv = to_i915(dev);
5894 int i;
5895
5896 /* disable MST */
5897 for (i = 0; i < I915_MAX_PORTS; i++) {
5898 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5899
5900 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
5901 continue;
5902
5903 if (intel_dig_port->dp.is_mst)
5904 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5905 }
5906 }
5907
5908 void intel_dp_mst_resume(struct drm_device *dev)
5909 {
5910 struct drm_i915_private *dev_priv = to_i915(dev);
5911 int i;
5912
5913 for (i = 0; i < I915_MAX_PORTS; i++) {
5914 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5915 int ret;
5916
5917 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
5918 continue;
5919
5920 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5921 if (ret)
5922 intel_dp_check_mst_status(&intel_dig_port->dp);
5923 }
5924 }
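/*
 * These two are intended to bracket system suspend/resume of the
 * display hardware, e.g. (sketch):
 *
 *	intel_dp_mst_suspend(dev);	(before the display is powered down)
 *	...
 *	intel_dp_mst_resume(dev);	(after the display is powered back up)
 */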
5925