/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "dp.h"
#include "conn.h"
#include "head.h"
#include "ior.h"

#include <drm/display/drm_dp.h>

#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>

#include <nvif/event.h>

/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
 * the x86 option ROM.  However, the relevant VBIOS table versions weren't modified,
 * so we're unable to detect this in a nice way.
 */
#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)

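/* Link-training state shared by the helpers below: the output being trained,
 * which LTTPR (if any) is currently being addressed, and cached copies of the
 * sink's DPCD lane status/adjust and post-cursor2 bytes.
 */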
struct lt_state {
	struct nvkm_outp *outp;

	int repeaters;
	int repeater;

	u8  stat[6];
	u8  conf[4];
	bool pc2;
	u8  pc2stat;
	u8  pc2conf[2];
};

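/* Wait out the AUX read interval, then read lane status (and, if requested,
 * post-cursor2 status) from the sink or from the LTTPR currently being trained.
 */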
static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
	struct nvkm_outp *outp = lt->outp;
	u32 addr;
	int ret;

	usleep_range(delay, delay * 2);

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_1_STATUS(lt->repeater);
	else
		addr = DPCD_LS02;

	ret = nvkm_rdaux(outp->dp.aux, addr, &lt->stat[0], 3);
	if (ret)
		return ret;

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_1_ADJUST(lt->repeater);
	else
		addr = DPCD_LS06;

	ret = nvkm_rdaux(outp->dp.aux, addr, &lt->stat[4], 2);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_rdaux(outp->dp.aux, DPCD_LS0C, &lt->pc2stat, 1);
		if (ret)
			lt->pc2stat = 0x00;

		OUTP_TRACE(outp, "status %6ph pc2 %02x", lt->stat, lt->pc2stat);
	} else {
		OUTP_TRACE(outp, "status %6ph", lt->stat);
	}

	return 0;
}

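/* Translate the adjustment requests from the last status read into new drive
 * settings (voltage swing, pre-emphasis and, when enabled, post-cursor2),
 * clamping each lane at its maximum level.  The OR drive levels are looked up
 * in the VBIOS DP config tables, and the resulting lane configuration is
 * written back to the sink or LTTPR being trained.
 */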
static int
nvkm_dp_train_drive(struct lt_state *lt, bool pc)
{
	struct nvkm_outp *outp = lt->outp;
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_bios *bios = ior->disp->engine.subdev.device->bios;
	struct nvbios_dpout info;
	struct nvbios_dpcfg ocfg;
	u8  ver, hdr, cnt, len;
	u32 addr;
	u32 data;
	int ret, i;

	for (i = 0; i < ior->dp.nr; i++) {
		u8 lane = (lt->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
		u8 lpc2 = (lt->pc2stat >> (i * 2)) & 0x3;
		u8 lpre = (lane & 0x0c) >> 2;
		u8 lvsw = (lane & 0x03) >> 0;
		u8 hivs = 3 - lpre;
		u8 hipe = 3;
		u8 hipc = 3;

		if (lpc2 >= hipc)
			lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
		if (lpre >= hipe) {
			lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
			lvsw = hivs = 3 - (lpre & 3);
		} else
		if (lvsw >= hivs) {
			lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
		}

		lt->conf[i] = (lpre << 3) | lvsw;
		lt->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);

		OUTP_TRACE(outp, "config lane %d %02x %02x", i, lt->conf[i], lpc2);

		if (lt->repeater != lt->repeaters)
			continue;

		data = nvbios_dpout_match(bios, outp->info.hasht, outp->info.hashm,
					  &ver, &hdr, &cnt, &len, &info);
		if (!data)
			continue;

		data = nvbios_dpcfg_match(bios, data, lpc2 & 3, lvsw & 3, lpre & 3,
					  &ver, &hdr, &cnt, &len, &ocfg);
		if (!data)
			continue;

		ior->func->dp->drive(ior, i, ocfg.pc, ocfg.dc, ocfg.pe, ocfg.tx_pu);
	}

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_SET(lt->repeater);
	else
		addr = DPCD_LC03(0);

	ret = nvkm_wraux(outp->dp.aux, addr, lt->conf, 4);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_wraux(outp->dp.aux, DPCD_LC0F, lt->pc2conf, 2);
		if (ret)
			return ret;
	}

	return 0;
}

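/* Select a training pattern on both the source (via the OR) and the sink (or
 * LTTPR), disabling scrambling for any pattern other than "off".  Pattern 4
 * is encoded as the value 7 in the DPCD TRAINING_PATTERN_SET field.
 */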
static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
	struct nvkm_outp *outp = lt->outp;
	u32 addr;
	u8 sink_tp;

	OUTP_TRACE(outp, "training pattern %d", pattern);
	outp->ior->func->dp->pattern(outp->ior, pattern);

	if (lt->repeater)
		addr = DPCD_LTTPR_PATTERN_SET(lt->repeater);
	else
		addr = DPCD_LC02;

	nvkm_rdaux(outp->dp.aux, addr, &sink_tp, 1);
	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
	sink_tp |= (pattern != 4) ? pattern : 7;

	if (pattern != 0)
		sink_tp |=  DPCD_LC02_SCRAMBLING_DISABLE;
	else
		sink_tp &= ~DPCD_LC02_SCRAMBLING_DISABLE;
	nvkm_wraux(outp->dp.aux, addr, &sink_tp, 1);
}

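/* Channel-equalisation phase of link training.  LTTPRs are trained with TPS4;
 * for the sink the highest supported pattern (TPS2/TPS3/TPS4) is chosen from
 * the DPCD.  Drive settings are then adjusted until every active lane reports
 * CHANNEL_EQ_DONE and SYMBOL_LOCKED with inter-lane alignment, giving up after
 * five retries or if clock recovery is lost.
 */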
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
	struct nvkm_i2c_aux *aux = lt->outp->dp.aux;
	bool eq_done = false, cr_done = true;
	int tries = 0, usec = 0, i;
	u8 data;

	if (lt->repeater) {
		if (!nvkm_rdaux(aux, DPCD_LTTPR_AUX_RD_INTERVAL(lt->repeater), &data, sizeof(data)))
			usec = (data & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;

		nvkm_dp_train_pattern(lt, 4);
	} else {
		if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] >= 0x14 &&
		    lt->outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)
			nvkm_dp_train_pattern(lt, 4);
		else
		if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] >= 0x12 &&
		    lt->outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
			nvkm_dp_train_pattern(lt, 3);
		else
			nvkm_dp_train_pattern(lt, 2);

		usec = (lt->outp->dp.dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
	}

	do {
		if ((tries &&
		    nvkm_dp_train_drive(lt, lt->pc2)) ||
		    nvkm_dp_train_sense(lt, lt->pc2, usec ? usec : 400))
			break;

		eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
		for (i = 0; i < lt->outp->ior->dp.nr && eq_done; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE))
				cr_done = false;
			if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
				eq_done = false;
		}
	} while (!eq_done && cr_done && ++tries <= 5);

	return eq_done ? 0 : -1;
}

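/* Clock-recovery phase of link training: transmit TPS1 and adjust voltage
 * swing/pre-emphasis until every active lane reports CR_DONE.  The attempt is
 * abandoned after five tries at the same voltage level, or once a lane fails
 * with maximum swing already reached.
 */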
static int
nvkm_dp_train_cr(struct lt_state *lt)
{
	bool cr_done = false, abort = false;
	int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
	int tries = 0, usec = 0, i;

	nvkm_dp_train_pattern(lt, 1);

	if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x14 && !lt->repeater)
		usec = (lt->outp->dp.dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;

	do {
		if (nvkm_dp_train_drive(lt, false) ||
		    nvkm_dp_train_sense(lt, false, usec ? usec : 100))
			break;

		cr_done = true;
		for (i = 0; i < lt->outp->ior->dp.nr; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
				cr_done = false;
				if (lt->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
					abort = true;
				break;
			}
		}

		if ((lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
			voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
			tries = 0;
		}
	} while (!cr_done && !abort && ++tries < 5);

	return cr_done ? 0 : -1;
}

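/* Train a single link configuration (lane count/bandwidth): run the relevant
 * VBIOS scripts, program the source and sink link settings, select LTTPR
 * transparent/non-transparent mode, then perform clock recovery and channel
 * equalisation for each LTTPR (if any) and finally for the sink itself.
 */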
static int
nvkm_dp_train_links(struct nvkm_outp *outp, int rate)
{
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_disp *disp = outp->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct lt_state lt = {
		.outp = outp,
	};
	u32 lnkcmp;
	u8 sink[2], data;
	int ret;

	OUTP_DBG(outp, "training %d x %d MB/s", ior->dp.nr, ior->dp.bw * 27);

	/* Intersect misc. capabilities of the OR and sink. */
	if (disp->engine.subdev.device->chipset < 0x110)
		outp->dp.dpcd[DPCD_RC03] &= ~DPCD_RC03_TPS4_SUPPORTED;
	if (disp->engine.subdev.device->chipset < 0xd0)
		outp->dp.dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
	lt.pc2 = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;

	if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.outp->dp.info.script[0])) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
			lnkcmp += 3;
		lnkcmp = nvbios_rd16(bios, lnkcmp + 1);

		nvbios_init(&outp->disp->engine.subdev, lnkcmp,
			init.outp = &outp->info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	/* Set desired link configuration on the source. */
	if ((lnkcmp = lt.outp->dp.info.lnkcmp)) {
		if (outp->dp.version < 0x30) {
			while ((ior->dp.bw * 2700) < nvbios_rd16(bios, lnkcmp))
				lnkcmp += 4;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 2);
		} else {
			while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
				lnkcmp += 3;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
		}

		nvbios_init(subdev, lnkcmp,
			init.outp = &outp->info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	ret = ior->func->dp->links(ior, outp->dp.aux);
	if (ret) {
		if (ret < 0) {
			OUTP_ERR(outp, "train failed with %d", ret);
			return ret;
		}
		return 0;
	}

	ior->func->dp->power(ior, ior->dp.nr);

	/* Select LTTPR non-transparent mode if we have a valid configuration,
	 * use transparent mode otherwise.
	 */
	if (outp->dp.lttpr[0] >= 0x14) {
		data = DPCD_LTTPR_MODE_TRANSPARENT;
		nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));

		if (outp->dp.lttprs) {
			data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
			nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
			lt.repeaters = outp->dp.lttprs;
		}
	}

	/* Set desired link configuration on the sink. */
	sink[0] = (outp->dp.rate[rate].dpcd < 0) ? ior->dp.bw : 0;
	sink[1] = ior->dp.nr;
	if (ior->dp.ef)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;

	ret = nvkm_wraux(outp->dp.aux, DPCD_LC00_LINK_BW_SET, sink, 2);
	if (ret)
		return ret;

	if (outp->dp.rate[rate].dpcd >= 0) {
		ret = nvkm_rdaux(outp->dp.aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
		if (ret)
			return ret;

		sink[0] &= ~DPCD_LC15_LINK_RATE_SET_MASK;
		sink[0] |= outp->dp.rate[rate].dpcd;

		ret = nvkm_wraux(outp->dp.aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
		if (ret)
			return ret;
	}

	/* Attempt to train the link in this configuration. */
	for (lt.repeater = lt.repeaters; lt.repeater >= 0; lt.repeater--) {
		if (lt.repeater)
			OUTP_DBG(outp, "training LTTPR%d", lt.repeater);
		else
			OUTP_DBG(outp, "training sink");

		memset(lt.stat, 0x00, sizeof(lt.stat));
		ret = nvkm_dp_train_cr(&lt);
		if (ret == 0)
			ret = nvkm_dp_train_eq(&lt);
		nvkm_dp_train_pattern(&lt, 0);
	}

	return ret;
}

static void
nvkm_dp_train_fini(struct nvkm_outp *outp)
{
	/* Execute AfterLinkTraining script from DP Info table. */
	nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[1],
		init.outp = &outp->info;
		init.or   = outp->ior->id;
		init.link = outp->ior->asy.link;
	);
}

static void
nvkm_dp_train_init(struct nvkm_outp *outp)
{
	/* Execute EnableSpread/DisableSpread script from DP Info table. */
	if (outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD) {
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[2],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	} else {
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[3],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	}

	if (!AMPERE_IED_HACK(outp->disp)) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[0],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	}
}

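/* Top-level link training: wake the sink from a low-power state, then walk
 * the supported lane counts and link rates from fastest to slowest until a
 * configuration both satisfies the requested data rate and trains
 * successfully.
 */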
static int
nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
{
	struct nvkm_ior *ior = outp->ior;
	int ret = -EINVAL, nr, rate;
	u8  pwr;

	/* Ensure sink is not in a low-power state. */
	if (!nvkm_rdaux(outp->dp.aux, DPCD_SC00, &pwr, 1)) {
		if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
			pwr &= ~DPCD_SC00_SET_POWER;
			pwr |=  DPCD_SC00_SET_POWER_D0;
			nvkm_wraux(outp->dp.aux, DPCD_SC00, &pwr, 1);
		}
	}

	ior->dp.mst = outp->dp.lt.mst;
	ior->dp.ef = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
	ior->dp.nr = 0;

	/* Link training. */
	OUTP_DBG(outp, "training");
	nvkm_dp_train_init(outp);
	for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
		for (rate = 0; ret < 0 && rate < outp->dp.rates; rate++) {
			if (outp->dp.rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
				/* Program selected link configuration. */
				ior->dp.bw = outp->dp.rate[rate].rate / 27000;
				ior->dp.nr = nr;
				ret = nvkm_dp_train_links(outp, rate);
			}
		}
	}
	nvkm_dp_train_fini(outp);
	if (ret < 0)
		OUTP_ERR(outp, "training failed");
	else
		OUTP_DBG(outp, "training done");
	atomic_set(&outp->dp.lt.done, 1);
	return ret;
}

/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
 * converted to work inside nvkm. This is a temporary holdover until we start
 * passing the drm_dp_aux device through NVKM
 */
static int
nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
{
	struct nvkm_i2c_aux *aux = outp->dp.aux;
	u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
	int ret;

	ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
	if (ret < 0)
		return ret;

	/*
	 * Prior to DP1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * If it is set DP_DPCD_REV at 0000h could be at a value less than
	 * the true capability of the panel. The only way to check is to
	 * then compare 0000h and 2200h.
	 */
	if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return 0;

	ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
	if (ret < 0)
		return ret;

	if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
		OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
			 outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
		return 0;
	}

	if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return 0;

	memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));

	return 0;
}

void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
	/* Execute DisableLT script from DP Info Table. */
	nvbios_init(&ior->disp->engine.subdev, outp->dp.info.script[4],
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->arm.link;
	);
}

static void
nvkm_dp_release(struct nvkm_outp *outp)
{
	/* Prevent link from being retrained if sink sends an IRQ. */
	atomic_set(&outp->dp.lt.done, 0);
	outp->ior->dp.nr = 0;
}

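/* Called when a head is attached to the output: verify that the currently
 * trained link still satisfies the bandwidth/MST requirements of the attached
 * heads and that all lanes remain trained, retraining the link if not.
 */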
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_head *head;
	bool retrain = true;
	u32 datakbps = 0;
	u32 dataKBps;
	u32 linkKBps;
	u8  stat[3];
	int ret, i;

	mutex_lock(&outp->dp.mutex);

	/* Check that link configuration meets current requirements. */
	list_for_each_entry(head, &outp->disp->heads, head) {
		if (ior->asy.head & (1 << head->id)) {
			u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
			datakbps += khz * head->asy.or.depth;
		}
	}

	linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
	dataKBps = DIV_ROUND_UP(datakbps, 8);
	OUTP_DBG(outp, "data %d KB/s link %d KB/s mst %d->%d",
		 dataKBps, linkKBps, ior->dp.mst, outp->dp.lt.mst);
	if (linkKBps < dataKBps || ior->dp.mst != outp->dp.lt.mst) {
		OUTP_DBG(outp, "link requirements changed");
		goto done;
	}

	/* Check that link is still trained. */
	ret = nvkm_rdaux(outp->dp.aux, DPCD_LS02, stat, 3);
	if (ret) {
		OUTP_DBG(outp, "failed to read link status, assuming no sink");
		goto done;
	}

	if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
		for (i = 0; i < ior->dp.nr; i++) {
			u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
				OUTP_DBG(outp, "lane %d not equalised", lane);
				goto done;
			}
		}
		retrain = false;
	} else {
		OUTP_DBG(outp, "no inter-lane alignment");
	}

done:
	if (retrain || !atomic_read(&outp->dp.lt.done))
		ret = nvkm_dp_train(outp, dataKBps);
	mutex_unlock(&outp->dp.mutex);
	return ret;
}

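/* For eDP 1.3+ sinks, read the SUPPORTED_LINK_RATES table from the DPCD and
 * build outp->dp.rate[] in descending order of rate, skipping anything the
 * output itself cannot drive.  Returns true if at least one rate was added.
 */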
static bool
nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
{
	u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
	int i, j, k;

	if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
	    outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
	    nvkm_rdaux(outp->dp.aux, DPCD_RC10_SUPPORTED_LINK_RATES(0),
		       sink_rates, sizeof(sink_rates)))
		return false;

	for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
		const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;

		if (!rate || WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
			break;

		if (rate > outp->info.dpconf.link_bw * 27000) {
			OUTP_DBG(outp, "rate %d !outp", rate);
			continue;
		}

		for (j = 0; j < outp->dp.rates; j++) {
			if (rate > outp->dp.rate[j].rate) {
				for (k = outp->dp.rates; k > j; k--)
					outp->dp.rate[k] = outp->dp.rate[k - 1];
				break;
			}
		}

		outp->dp.rate[j].dpcd = i / 2;
		outp->dp.rate[j].rate = rate;
		outp->dp.rates++;
	}

	for (i = 0; i < outp->dp.rates; i++)
		OUTP_DBG(outp, "link_rate[%d] = %d", outp->dp.rate[i].dpcd, outp->dp.rate[i].rate);

	return outp->dp.rates != 0;
}

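/* Power the AUX channel up or down.  On enable, detect any LTTPRs, read the
 * sink's DPCD receiver caps, and build the list of usable lane counts and
 * link rates (clamped by both the output's DCB entry and any LTTPR limits).
 * Returns true if a sink was successfully probed.
 */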
static bool
nvkm_dp_enable(struct nvkm_outp *outp, bool enable)
{
	struct nvkm_i2c_aux *aux = outp->dp.aux;

	if (enable) {
		if (!outp->dp.present) {
			OUTP_DBG(outp, "aux power -> always");
			nvkm_i2c_aux_monitor(aux, true);
			outp->dp.present = true;
		}

		/* Detect any LTTPRs before reading DPCD receiver caps. */
		if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, outp->dp.lttpr, sizeof(outp->dp.lttpr)) &&
		    outp->dp.lttpr[0] >= 0x14 && outp->dp.lttpr[2]) {
			switch (outp->dp.lttpr[2]) {
			case 0x80: outp->dp.lttprs = 1; break;
			case 0x40: outp->dp.lttprs = 2; break;
			case 0x20: outp->dp.lttprs = 3; break;
			case 0x10: outp->dp.lttprs = 4; break;
			case 0x08: outp->dp.lttprs = 5; break;
			case 0x04: outp->dp.lttprs = 6; break;
			case 0x02: outp->dp.lttprs = 7; break;
			case 0x01: outp->dp.lttprs = 8; break;
			default:
				/* Unknown LTTPR count, we'll switch to transparent mode. */
				WARN_ON(1);
				outp->dp.lttprs = 0;
				break;
			}
		} else {
			/* No LTTPR support, or zero LTTPR count - don't touch it at all. */
			memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
		}

		if (!nvkm_dp_read_dpcd_caps(outp)) {
			const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
			const u8 *rate;
			int rate_max;

			outp->dp.rates = 0;
			outp->dp.links = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
			outp->dp.links = min(outp->dp.links, outp->info.dpconf.link_nr);
			if (outp->dp.lttprs && outp->dp.lttpr[4])
				outp->dp.links = min_t(int, outp->dp.links, outp->dp.lttpr[4]);

			rate_max = outp->dp.dpcd[DPCD_RC01_MAX_LINK_RATE];
			rate_max = min(rate_max, outp->info.dpconf.link_bw);
			if (outp->dp.lttprs && outp->dp.lttpr[1])
				rate_max = min_t(int, rate_max, outp->dp.lttpr[1]);

			if (!nvkm_dp_enable_supported_link_rates(outp)) {
				for (rate = rates; *rate; rate++) {
					if (*rate > rate_max)
						continue;

					if (WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
						break;

					outp->dp.rate[outp->dp.rates].dpcd = -1;
					outp->dp.rate[outp->dp.rates].rate = *rate * 27000;
					outp->dp.rates++;
				}
			}

			return true;
		}
	}

	if (outp->dp.present) {
		OUTP_DBG(outp, "aux power -> demand");
		nvkm_i2c_aux_monitor(aux, false);
		outp->dp.present = false;
	}

	atomic_set(&outp->dp.lt.done, 0);
	return false;
}

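/* AUX channel hotplug/IRQ notifier: an IRQ from an already-trained sink kicks
 * the acquire path to re-check the link, other events re-probe the sink, and
 * everything is forwarded to the display's HPD event queue for userspace.
 */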
static int
nvkm_dp_hpd(struct nvkm_notify *notify)
{
	const struct nvkm_i2c_ntfy_rep *line = notify->data;
	struct nvkm_outp *outp = container_of(notify, typeof(*outp), dp.hpd);
	struct nvkm_conn *conn = outp->conn;
	struct nvkm_disp *disp = outp->disp;
	struct nvif_notify_conn_rep_v0 rep = {};

	OUTP_DBG(outp, "HPD: %d", line->mask);
	if (line->mask & NVKM_I2C_IRQ) {
		if (atomic_read(&outp->dp.lt.done))
			outp->func->acquire(outp);
		rep.mask |= NVIF_NOTIFY_CONN_V0_IRQ;
	} else {
		nvkm_dp_enable(outp, true);
	}

	if (line->mask & NVKM_I2C_UNPLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
	if (line->mask & NVKM_I2C_PLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;

	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
	return NVKM_NOTIFY_KEEP;
}

static void
nvkm_dp_fini(struct nvkm_outp *outp)
{
	nvkm_notify_put(&outp->dp.hpd);
	nvkm_dp_enable(outp, false);
}

static void
nvkm_dp_init(struct nvkm_outp *outp)
{
	struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;

	nvkm_notify_put(&outp->conn->hpd);

	/* eDP panels need powering on by us (if the VBIOS doesn't default it
	 * to on) before doing any AUX channel transactions.  LVDS panel power
	 * is handled by the SOR itself, and not required for LVDS DDC.
	 */
	if (outp->conn->info.type == DCB_CONNECTOR_eDP) {
		int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
		if (power == 0)
			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);

		/* We delay here unconditionally, even if already powered,
		 * because some laptop panels have a significant resume
		 * delay before the panel begins responding.
		 *
		 * This is likely a bit of a hack, but no better idea for
		 * handling this at the moment.
		 */
		msleep(300);

		/* If the eDP panel can't be detected, we need to restore
		 * the panel power GPIO to avoid breaking another output.
		 */
		if (!nvkm_dp_enable(outp, true) && power == 0)
			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
	} else {
		nvkm_dp_enable(outp, true);
	}

	nvkm_notify_get(&outp->dp.hpd);
}

static void *
nvkm_dp_dtor(struct nvkm_outp *outp)
{
	nvkm_notify_fini(&outp->dp.hpd);
	return outp;
}

static const struct nvkm_outp_func
nvkm_dp_func = {
	.dtor = nvkm_dp_dtor,
	.init = nvkm_dp_init,
	.fini = nvkm_dp_fini,
	.acquire = nvkm_dp_acquire,
	.release = nvkm_dp_release,
	.disable = nvkm_dp_disable,
};

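/* Constructor for a DP output: locate its AUX channel and VBIOS DP table
 * entry, and register an AUX hotplug/IRQ notifier in place of the GPIO-based
 * HPD mechanism used by other output types.
 */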
int
nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct nvkm_outp **poutp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_i2c *i2c = device->i2c;
	struct nvkm_outp *outp;
	u8  hdr, cnt, len;
	u32 data;
	int ret;

	ret = nvkm_outp_new_(&nvkm_dp_func, disp, index, dcbE, poutp);
	outp = *poutp;
	if (ret)
		return ret;

	if (dcbE->location == 0)
		outp->dp.aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(dcbE->i2c_index));
	else
		outp->dp.aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
	if (!outp->dp.aux) {
		OUTP_ERR(outp, "no aux");
		return -EINVAL;
	}

	/* bios data is not optional */
	data = nvbios_dpout_match(bios, outp->info.hasht, outp->info.hashm,
				  &outp->dp.version, &hdr, &cnt, &len, &outp->dp.info);
	if (!data) {
		OUTP_ERR(outp, "no bios dp data");
		return -EINVAL;
	}

	OUTP_DBG(outp, "bios dp %02x %02x %02x %02x", outp->dp.version, hdr, cnt, len);

	/* hotplug detect, replaces gpio-based mechanism with aux events */
	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_dp_hpd, true,
			       &(struct nvkm_i2c_ntfy_req) {
				.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG |
					NVKM_I2C_IRQ,
				.port = outp->dp.aux->id,
			       },
			       sizeof(struct nvkm_i2c_ntfy_req),
			       sizeof(struct nvkm_i2c_ntfy_rep),
			       &outp->dp.hpd);
	if (ret) {
		OUTP_ERR(outp, "error monitoring aux hpd: %d", ret);
		return ret;
	}

	mutex_init(&outp->dp.mutex);
	atomic_set(&outp->dp.lt.done, 0);
	return 0;
}