• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "priv.h"
25 #include "chan.h"
26 #include "head.h"
27 #include "ior.h"
28 #include "outp.h"
29 
30 #include <core/client.h>
31 #include <core/ramht.h>
32 #include <subdev/bios.h>
33 #include <subdev/bios/disp.h>
34 #include <subdev/bios/init.h>
35 #include <subdev/bios/pll.h>
36 #include <subdev/devinit.h>
37 #include <subdev/i2c.h>
38 #include <subdev/mmu.h>
39 #include <subdev/timer.h>
40 
41 #include <nvif/class.h>
42 #include <nvif/unpack.h>
43 
44 static void
nv50_pior_clock(struct nvkm_ior * pior)45 nv50_pior_clock(struct nvkm_ior *pior)
46 {
47 	struct nvkm_device *device = pior->disp->engine.subdev.device;
48 	const u32 poff = nv50_ior_base(pior);
49 
50 	nvkm_mask(device, 0x614380 + poff, 0x00000707, 0x00000001);
51 }
52 
/* Configure DP link parameters (lane count, bandwidth, enhanced framing)
 * on the external encoder behind this PIOR, via its aux channel.
 *
 * Returns a negative error code if the aux transaction fails, otherwise 1.
 */
static int
nv50_pior_dp_links(struct nvkm_ior *pior, struct nvkm_i2c_aux *aux)
{
	int ret = nvkm_i2c_aux_lnk_ctl(aux, pior->dp.nr, pior->dp.bw, pior->dp.ef);
	if (ret)
		return ret;

	return 1;
}

/* DisplayPort hooks for PIOR (external encoder) outputs. */
static const struct nvkm_ior_func_dp
nv50_pior_dp = {
	.links = nv50_pior_dp_links,
};
67 
/* Busy-wait (up to 2ms) for the PIOR power-control register's busy bit
 * (0x80000000) to clear before/after changing power state. */
static void
nv50_pior_power_wait(struct nvkm_device *device, u32 poff)
{
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61e004 + poff) & 0x80000000))
			break;
	);
}

/* Update the PIOR power state.
 *
 * @normal selects which half of the register is written (shift 0 for the
 * "normal" state, shift 16 otherwise); @pu powers the output up/down.
 * The data/vsync/hsync arguments are unused on this hardware generation.
 * The register is only written between two idle waits, as the hardware
 * appears to require the control register to be non-busy around updates.
 */
static void
nv50_pior_power(struct nvkm_ior *pior, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
	struct nvkm_device *device = pior->disp->engine.subdev.device;
	const u32  poff = nv50_ior_base(pior);
	const u32 shift = normal ? 0 : 16;
	const u32 state = 0x80000000 | (0x00000001 * !!pu) << shift;
	const u32 field = 0x80000000 | (0x00000101 << shift);

	nv50_pior_power_wait(device, poff);
	nvkm_mask(device, 0x61e004 + poff, field, state);
	nv50_pior_power_wait(device, poff);
}
90 
91 void
nv50_pior_depth(struct nvkm_ior * ior,struct nvkm_ior_state * state,u32 ctrl)92 nv50_pior_depth(struct nvkm_ior *ior, struct nvkm_ior_state *state, u32 ctrl)
93 {
94 	/* GF119 moves this information to per-head methods, which is
95 	 * a lot more convenient, and where our shared code expect it.
96 	 */
97 	if (state->head && state == &ior->asy) {
98 		struct nvkm_head *head = nvkm_head_find(ior->disp, __ffs(state->head));
99 
100 		if (!WARN_ON(!head)) {
101 			struct nvkm_head_state *state = &head->asy;
102 			switch ((ctrl & 0x000f0000) >> 16) {
103 			case 6: state->or.depth = 30; break;
104 			case 5: state->or.depth = 24; break;
105 			case 2: state->or.depth = 18; break;
106 			case 0: state->or.depth = 18; break; /*XXX*/
107 			default:
108 				state->or.depth = 18;
109 				WARN_ON(1);
110 				break;
111 			}
112 		}
113 	}
114 }
115 
/* Read back a PIOR's armed or asserted (arm/asy) hardware state from its
 * control word at 0x610b80, decoding protocol, head mask and depth. */
static void
nv50_pior_state(struct nvkm_ior *pior, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = pior->disp->engine.subdev.device;
	/* +4 selects the armed copy of the control word. */
	const u32 coff = pior->id * 8 + (state == &pior->arm) * 4;
	u32 ctrl = nvkm_rd32(device, 0x610b80 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	state->rgdiv = 1;
	switch (state->proto_evo) {
	case 0: state->proto = TMDS; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	state->head = ctrl & 0x00000003;
	nv50_pior_depth(pior, state, ctrl);
}

/* PIOR function table for NV50-generation display. */
static const struct nvkm_ior_func
nv50_pior = {
	.state = nv50_pior_state,
	.power = nv50_pior_power,
	.clock = nv50_pior_clock,
	.dp = &nv50_pior_dp,
};
143 
/* Construct PIOR @id for @disp using the NV50 function table. */
int
nv50_pior_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&nv50_pior, disp, PIOR, id, false);
}

/* Report the number of PIORs (3) and fill *pmask with the bitmask of
 * those actually present, read from the 0x610184 capability register. */
int
nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	*pmask = (nvkm_rd32(device, 0x610184) & 0x70000000) >> 28;
	return 3;
}
158 
/* Program the SOR clock control register; the divider bit is set for
 * both sublinks when driving a dual-link (link == 3) configuration. */
void
nv50_sor_clock(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const int  div = sor->asy.link == 3;
	const u32 soff = nv50_ior_base(sor);

	nvkm_mask(device, 0x614300 + soff, 0x00000707, (div << 8) | div);
}

/* Busy-wait (up to 2ms) for the SOR power-control register's busy bit
 * (0x80000000) to clear. */
static void
nv50_sor_power_wait(struct nvkm_device *device, u32 soff)
{
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
			break;
	);
}

/* Update the SOR power state.
 *
 * @normal selects which half of the register is written (shift 0 vs 16);
 * @pu powers the output up/down.  data/vsync/hsync are unused here.
 * After the write, also wait for a status bit in 0x61c030 to clear;
 * exact semantics of that bit are undocumented.
 */
void
nv50_sor_power(struct nvkm_ior *sor, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32  soff = nv50_ior_base(sor);
	const u32 shift = normal ? 0 : 16;
	const u32 state = 0x80000000 | (0x00000001 * !!pu) << shift;
	const u32 field = 0x80000000 | (0x00000001 << shift);

	nv50_sor_power_wait(device, soff);
	nvkm_mask(device, 0x61c004 + soff, field, state);
	nv50_sor_power_wait(device, soff);

	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
			break;
	);
}
196 
/* Read back a SOR's armed or asserted (arm/asy) hardware state from its
 * control word at 0x610b70, decoding protocol, link and head mask. */
void
nv50_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	/* +4 selects the armed copy of the control word. */
	const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
	u32 ctrl = nvkm_rd32(device, 0x610b70 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = LVDS; state->link = 1; break;
	case 1: state->proto = TMDS; state->link = 1; break;
	case 2: state->proto = TMDS; state->link = 2; break;
	case 5: state->proto = TMDS; state->link = 3; break; /* dual-link */
	default:
		state->proto = UNKNOWN;
		break;
	}

	state->head = ctrl & 0x00000003;
}

/* SOR function table for NV50-generation display. */
static const struct nvkm_ior_func
nv50_sor = {
	.state = nv50_sor_state,
	.power = nv50_sor_power,
	.clock = nv50_sor_clock,
};
224 
/* Construct SOR @id for @disp using the NV50 function table. */
static int
nv50_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&nv50_sor, disp, SOR, id, false);
}

/* Report the number of SORs (2) and fill *pmask with the bitmask of
 * those actually present, read from the 0x610184 capability register. */
int
nv50_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	*pmask = (nvkm_rd32(device, 0x610184) & 0x03000000) >> 24;
	return 2;
}
239 
240 static void
nv50_dac_clock(struct nvkm_ior * dac)241 nv50_dac_clock(struct nvkm_ior *dac)
242 {
243 	struct nvkm_device *device = dac->disp->engine.subdev.device;
244 	const u32 doff = nv50_ior_base(dac);
245 
246 	nvkm_mask(device, 0x614280 + doff, 0x07070707, 0x00000000);
247 }
248 
/* Perform DAC load detection.
 *
 * Powers the DAC up (in "safe" mode), drives @loadval into the sense
 * register, waits for the measurement to settle, then reads the result
 * and powers the DAC back down.
 *
 * Returns the 3-bit sense status on success, or -ETIMEDOUT if the
 * hardware never reported a valid (bit 31) measurement.
 */
int
nv50_dac_sense(struct nvkm_ior *dac, u32 loadval)
{
	struct nvkm_device *device = dac->disp->engine.subdev.device;
	const u32 doff = nv50_ior_base(dac);

	dac->func->power(dac, false, true, false, false, false);

	nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
	/* Settle time; split into mdelay+udelay as per the original code. */
	mdelay(9);
	udelay(500);
	/* Read-and-clear the sense register in one operation. */
	loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);

	dac->func->power(dac, false, false, false, false, false);
	if (!(loadval & 0x80000000))
		return -ETIMEDOUT;

	return (loadval & 0x38000000) >> 27;
}
268 
/* Busy-wait (up to 2ms) for the DAC power-control register's busy bit
 * (0x80000000) to clear. */
static void
nv50_dac_power_wait(struct nvkm_device *device, const u32 doff)
{
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
			break;
	);
}

/* Update the DAC power state.
 *
 * Unlike the SOR/PIOR variants, the DAC control word carries separate
 * active-low enable bits for power, data, vsync and hsync; @normal
 * selects which half of the register (shift 0 vs 16) is written.
 * The register is only written between two idle waits.
 */
void
nv50_dac_power(struct nvkm_ior *dac, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
	struct nvkm_device *device = dac->disp->engine.subdev.device;
	const u32  doff = nv50_ior_base(dac);
	const u32 shift = normal ? 0 : 16;
	const u32 state = 0x80000000 | (0x00000040 * !    pu |
					0x00000010 * !  data |
					0x00000004 * ! vsync |
					0x00000001 * ! hsync) << shift;
	const u32 field = 0xc0000000 | (0x00000055 << shift);

	nv50_dac_power_wait(device, doff);
	nvkm_mask(device, 0x61a004 + doff, field, state);
	nv50_dac_power_wait(device, doff);
}
294 
/* Read back a DAC's armed or asserted (arm/asy) hardware state from its
 * control word at 0x610b58, decoding protocol and head mask. */
static void
nv50_dac_state(struct nvkm_ior *dac, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = dac->disp->engine.subdev.device;
	/* +4 selects the armed copy of the control word. */
	const u32 coff = dac->id * 8 + (state == &dac->arm) * 4;
	u32 ctrl = nvkm_rd32(device, 0x610b58 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = CRT; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	state->head = ctrl & 0x00000003;
}

/* DAC function table for NV50-generation display. */
static const struct nvkm_ior_func
nv50_dac = {
	.state = nv50_dac_state,
	.power = nv50_dac_power,
	.sense = nv50_dac_sense,
	.clock = nv50_dac_clock,
};
320 
/* Construct DAC @id for @disp using the NV50 function table. */
int
nv50_dac_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&nv50_dac, disp, DAC, id, false);
}

/* Report the number of DACs (3) and fill *pmask with the bitmask of
 * those actually present, read from the 0x610184 capability register. */
int
nv50_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	*pmask = (nvkm_rd32(device, 0x610184) & 0x00700000) >> 20;
	return 3;
}
335 
336 static void
nv50_head_vblank_put(struct nvkm_head * head)337 nv50_head_vblank_put(struct nvkm_head *head)
338 {
339 	struct nvkm_device *device = head->disp->engine.subdev.device;
340 
341 	nvkm_mask(device, 0x61002c, (4 << head->id), 0);
342 }
343 
344 static void
nv50_head_vblank_get(struct nvkm_head * head)345 nv50_head_vblank_get(struct nvkm_head *head)
346 {
347 	struct nvkm_device *device = head->disp->engine.subdev.device;
348 
349 	nvkm_mask(device, 0x61002c, (4 << head->id), (4 << head->id));
350 }
351 
/* Program the raster generator clock divider for @head. */
static void
nv50_head_rgclk(struct nvkm_head *head, int div)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;

	nvkm_mask(device, 0x614200 + (head->id * 0x800), 0x0000000f, div);
}

/* Sample the raster generator's current horizontal/vertical scanout
 * position for @head. */
void
nv50_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	const u32 hoff = head->id * 0x800;

	/* vline read locks hline. */
	*vline = nvkm_rd32(device, 0x616340 + hoff) & 0x0000ffff;
	*hline = nvkm_rd32(device, 0x616344 + hoff) & 0x0000ffff;
}
370 
/* Read back a head's armed or asserted (arm/asy) mode timings from the
 * 0x610ae8.. register block: blanking, sync, totals and pixel clock. */
static void
nv50_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	/* +4 selects the armed copy of each register pair. */
	const u32 hoff = head->id * 0x540 + (state == &head->arm) * 4;
	u32 data;

	data = nvkm_rd32(device, 0x610ae8 + hoff);
	state->vblanke = (data & 0xffff0000) >> 16;
	state->hblanke = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x610af0 + hoff);
	state->vblanks = (data & 0xffff0000) >> 16;
	state->hblanks = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x610af8 + hoff);
	state->vtotal = (data & 0xffff0000) >> 16;
	state->htotal = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x610b00 + hoff);
	state->vsynce = (data & 0xffff0000) >> 16;
	state->hsynce = (data & 0x0000ffff);
	/* Pixel clock is stored in kHz; convert to Hz. */
	state->hz = (nvkm_rd32(device, 0x610ad0 + hoff) & 0x003fffff) * 1000;
}

/* Head function table for NV50-generation display. */
static const struct nvkm_head_func
nv50_head = {
	.state = nv50_head_state,
	.rgpos = nv50_head_rgpos,
	.rgclk = nv50_head_rgclk,
	.vblank_get = nv50_head_vblank_get,
	.vblank_put = nv50_head_vblank_put,
};

/* Construct head @id for @disp using the NV50 function table. */
int
nv50_head_new(struct nvkm_disp *disp, int id)
{
	return nvkm_head_new_(&nv50_head, disp, id);
}
407 
/* Report the head count for NV50-generation display: two heads, both
 * always present (mask 0x3).  @disp is unused on this generation. */
int
nv50_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	enum { NV50_HEAD_MASK = 0x3, NV50_HEAD_NR = 2 };

	*pmask = NV50_HEAD_MASK;
	return NV50_HEAD_NR;
}
414 
415 
/* Dump one method list of a display channel for debugging.
 *
 * For each method in @list, reads the shadow copy at @base (the "next"
 * value) and at @base + @c (the "previous" value), and prints both;
 * when they differ, the pending update is shown as "-> xxxxxxxx".
 * @inst scales the method offset for multi-instance lists.
 */
static void
nv50_disp_mthd_list(struct nvkm_disp *disp, int debug, u32 base, int c,
		    const struct nvkm_disp_mthd_list *list, int inst)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	for (i = 0; list->data[i].mthd; i++) {
		if (list->data[i].addr) {
			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
			u32 mthd = list->data[i].mthd + (list->mthd * inst);
			const char *name = list->data[i].name;
			char mods[16];

			if (prev != next)
				snprintf(mods, sizeof(mods), "-> %08x", next);
			else
				snprintf(mods, sizeof(mods), "%13c", ' ');

			nvkm_printk_(subdev, debug, info,
				     "\t%04x: %08x %s%s%s\n",
				     mthd, prev, mods, name ? " // " : "",
				     name ? name : "");
		}
	}
}
444 
/* Dump all method state of a display channel at the given debug level.
 *
 * Walks the channel's method table (if any), printing a titled section
 * per method list and per instance; no-ops when @debug is below the
 * subdev's current debug level or the channel has no method table.
 */
void
nv50_disp_chan_mthd(struct nvkm_disp_chan *chan, int debug)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	const struct nvkm_disp_chan_mthd *mthd = chan->mthd;
	const struct nvkm_disp_mthd_list *list;
	int i, j;

	if (debug > subdev->debug)
		return;
	if (!mthd)
		return;

	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
		u32 base = chan->head * mthd->addr;
		for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
			const char *cname = mthd->name;
			const char *sname = "";
			char cname_[16], sname_[16];

			/* Per-channel register stride implies a per-channel
			 * instance; include the channel id in the title. */
			if (mthd->addr) {
				snprintf(cname_, sizeof(cname_), "%s %d",
					 mthd->name, chan->chid.user);
				cname = cname_;
			}

			/* Multiple instances of this list; name each one. */
			if (mthd->data[i].nr > 1) {
				snprintf(sname_, sizeof(sname_), " - %s %d",
					 mthd->data[i].name, j);
				sname = sname_;
			}

			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
			nv50_disp_mthd_list(disp, debug, base, mthd->prev,
					    list, j);
		}
	}
}
484 
485 static void
nv50_disp_chan_uevent_fini(struct nvkm_event * event,int type,int index)486 nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
487 {
488 	struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
489 	struct nvkm_device *device = disp->engine.subdev.device;
490 	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
491 	nvkm_wr32(device, 0x610020, 0x00000001 << index);
492 }
493 
/* Enable the channel-awaken interrupt for channel @index: first clear
 * any stale pending bit in 0x610020, then set the enable bit in
 * 0x610028. */
static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
	struct nvkm_device *device = disp->engine.subdev.device;
	nvkm_wr32(device, 0x610020, 0x00000001 << index);
	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
}
502 
/* Notify listeners that display channel @chid has awoken. */
void
nv50_disp_chan_uevent_send(struct nvkm_disp *disp, int chid)
{
	nvkm_event_send(&disp->uevent, NVKM_DISP_EVENT_CHAN_AWAKEN, chid, NULL, 0);
}

/* Event hooks for per-channel awaken notifications. */
const struct nvkm_event_func
nv50_disp_chan_uevent = {
	.init = nv50_disp_chan_uevent_init,
	.fini = nv50_disp_chan_uevent_fini,
};
514 
/* Return the BAR offset and size (one 4KiB page per channel) of the
 * user-accessible register window for @chan. */
u64
nv50_disp_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
{
	*psize = 0x1000;
	return 0x640000 + (chan->chid.user * 0x1000);
}

/* Enable/disable the interrupt for @chan.  The register packs an enable
 * bit (high half) and a pending bit (low half) per channel; disabling
 * clears both. */
void
nv50_disp_chan_intr(struct nvkm_disp_chan *chan, bool en)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 mask = 0x00010001 << chan->chid.user;
	const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
	nvkm_mask(device, 0x610028, mask, data);
}
530 
/* Shut down a PIO display channel: clear the enable bit in its control
 * register and wait (up to 2ms) for the channel-state field (0x00030000)
 * to go idle, logging a timeout otherwise. */
static void
nv50_disp_pioc_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout: %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
	}
}
549 
/* Bring up a PIO display channel.
 *
 * First resets the channel (0x00002000) and waits for the state field
 * to clear, then enables it (0x00000001) and waits for the state field
 * to report active (0x00010000).  Returns -EBUSY if either transition
 * times out (2ms each).
 */
static int
nv50_disp_pioc_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
		return -EBUSY;
	}

	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
		if ((tmp & 0x00030000) == 0x00010000)
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}
582 
/* Channel hooks for PIO (non-DMA) display channels. */
const struct nvkm_disp_chan_func
nv50_disp_pioc_func = {
	.init = nv50_disp_pioc_init,
	.fini = nv50_disp_pioc_fini,
	.intr = nv50_disp_chan_intr,
	.user = nv50_disp_chan_user,
};

/* Bind a DMA object to a display DMA channel by inserting @handle into
 * the display RAMHT, tagged with the channel id. */
int
nv50_disp_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
{
	return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -10, handle,
				 chan->chid.user << 28 | chan->chid.user);
}
597 
/* Shut down a DMA display channel: deactivate it, wait (up to 2ms) for
 * the state field (0x001e0000) to go idle, and save the current PUT
 * pointer so the channel can be resumed after suspend. */
static void
nv50_disp_dmac_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* deactivate channel */
	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
	}

	/* Preserve PUT for restore on resume. */
	chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
}
619 
/* Bring up a DMA display channel: program the push-buffer handle and
 * parameters, restore the saved PUT pointer, enable the channel, and
 * wait (up to 2ms) for it to leave its busy state.  Returns -EBUSY on
 * timeout. */
static int
nv50_disp_dmac_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
	nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
	nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}
648 
/* Resolve and validate the userspace-supplied push buffer for a display
 * DMA channel.
 *
 * Looks the memory object up by @object handle, requires it to be at
 * least one page (0x1000 bytes), then encodes its memory target (VRAM /
 * non-coherent / host) in the low bits and its address >> 8 in the rest
 * of chan->push, matching the hardware's push-buffer descriptor format.
 * Returns 0 on success or a negative error code.
 */
int
nv50_disp_dmac_push(struct nvkm_disp_chan *chan, u64 object)
{
	chan->memory = nvkm_umem_search(chan->object.client, object);
	if (IS_ERR(chan->memory))
		return PTR_ERR(chan->memory);

	if (nvkm_memory_size(chan->memory) < 0x1000)
		return -EINVAL;

	switch (nvkm_memory_target(chan->memory)) {
	case NVKM_MEM_TARGET_VRAM: chan->push = 0x00000001; break;
	case NVKM_MEM_TARGET_NCOH: chan->push = 0x00000002; break;
	case NVKM_MEM_TARGET_HOST: chan->push = 0x00000003; break;
	default:
		return -EINVAL;
	}

	chan->push |= nvkm_memory_addr(chan->memory) >> 8;
	return 0;
}
670 
/* Channel hooks for DMA display channels. */
const struct nvkm_disp_chan_func
nv50_disp_dmac_func = {
	.push = nv50_disp_dmac_push,
	.init = nv50_disp_dmac_init,
	.fini = nv50_disp_dmac_fini,
	.intr = nv50_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = nv50_disp_dmac_bind,
};

/* Cursor immediate channel (PIO, channel 7). */
const struct nvkm_disp_chan_user
nv50_disp_curs = {
	.func = &nv50_disp_pioc_func,
	.ctrl = 7,
	.user = 7,
};

/* Overlay immediate channel (PIO, channel 5). */
const struct nvkm_disp_chan_user
nv50_disp_oimm = {
	.func = &nv50_disp_pioc_func,
	.ctrl = 5,
	.user = 5,
};
694 
/* Overlay channel method-to-register map: each entry pairs an EVO method
 * offset with the register holding its shadow state (for debug dumps). */
static const struct nvkm_disp_mthd_list
nv50_disp_ovly_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0009a0 },
		{ 0x0088, 0x0009c0 },
		{ 0x008c, 0x0009c8 },
		{ 0x0090, 0x6109b4 },
		{ 0x0094, 0x610970 },
		{ 0x00a0, 0x610998 },
		{ 0x00a4, 0x610964 },
		{ 0x00c0, 0x610958 },
		{ 0x00e0, 0x6109a8 },
		{ 0x00e4, 0x6109d0 },
		{ 0x00e8, 0x6109d8 },
		{ 0x0100, 0x61094c },
		{ 0x0104, 0x610984 },
		{ 0x0108, 0x61098c },
		{ 0x0800, 0x6109f8 },
		{ 0x0808, 0x610a08 },
		{ 0x080c, 0x610a10 },
		{ 0x0810, 0x610a00 },
		{}
	}
};

/* Overlay channel method table (one global list per head). */
static const struct nvkm_disp_chan_mthd
nv50_disp_ovly_mthd = {
	.name = "Overlay",
	.addr = 0x000540,
	.prev = 0x000004,
	.data = {
		{ "Global", 1, &nv50_disp_ovly_mthd_base },
		{}
	}
};

/* Overlay channel (DMA, channel 3). */
static const struct nvkm_disp_chan_user
nv50_disp_ovly = {
	.func = &nv50_disp_dmac_func,
	.ctrl = 3,
	.user = 3,
	.mthd = &nv50_disp_ovly_mthd,
};
741 
/* Base channel global method-to-register map (for debug dumps). */
static const struct nvkm_disp_mthd_list
nv50_disp_base_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0008c4 },
		{ 0x0088, 0x0008d0 },
		{ 0x008c, 0x0008dc },
		{ 0x0090, 0x0008e4 },
		{ 0x0094, 0x610884 },
		{ 0x00a0, 0x6108a0 },
		{ 0x00a4, 0x610878 },
		{ 0x00c0, 0x61086c },
		{ 0x00e0, 0x610858 },
		{ 0x00e4, 0x610860 },
		{ 0x00e8, 0x6108ac },
		{ 0x00ec, 0x6108b4 },
		{ 0x0100, 0x610894 },
		{ 0x0110, 0x6108bc },
		{ 0x0114, 0x61088c },
		{}
	}
};

/* Base channel per-image method-to-register map (two instances). */
const struct nvkm_disp_mthd_list
nv50_disp_base_mthd_image = {
	.mthd = 0x0400,
	.addr = 0x000000,
	.data = {
		{ 0x0800, 0x6108f0 },
		{ 0x0804, 0x6108fc },
		{ 0x0808, 0x61090c },
		{ 0x080c, 0x610914 },
		{ 0x0810, 0x610904 },
		{}
	}
};

/* Base channel method table: one global list, two image lists. */
static const struct nvkm_disp_chan_mthd
nv50_disp_base_mthd = {
	.name = "Base",
	.addr = 0x000540,
	.prev = 0x000004,
	.data = {
		{ "Global", 1, &nv50_disp_base_mthd_base },
		{  "Image", 2, &nv50_disp_base_mthd_image },
		{}
	}
};

/* Base channel (DMA, channel 1). */
static const struct nvkm_disp_chan_user
nv50_disp_base = {
	.func = &nv50_disp_dmac_func,
	.ctrl = 1,
	.user = 1,
	.mthd = &nv50_disp_base_mthd,
};
800 
/* Core channel global method-to-register map (for debug dumps). */
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x610bb8 },
		{ 0x0088, 0x610b9c },
		{ 0x008c, 0x000000 },
		{}
	}
};

/* Core channel per-DAC method-to-register map (three instances). */
static const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_dac = {
	.mthd = 0x0080,
	.addr = 0x000008,
	.data = {
		{ 0x0400, 0x610b58 },
		{ 0x0404, 0x610bdc },
		{ 0x0420, 0x610828 },
		{}
	}
};

/* Core channel per-SOR method-to-register map (two instances). */
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_sor = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0600, 0x610b70 },
		{}
	}
};

/* Core channel per-PIOR method-to-register map (three instances). */
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_pior = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0700, 0x610b80 },
		{}
	}
};
845 
/* Core channel per-head method-to-register map (two instances). */
static const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_head = {
	.mthd = 0x0400,
	.addr = 0x000540,
	.data = {
		{ 0x0800, 0x610ad8 },
		{ 0x0804, 0x610ad0 },
		{ 0x0808, 0x610a48 },
		{ 0x080c, 0x610a78 },
		{ 0x0810, 0x610ac0 },
		{ 0x0814, 0x610af8 },
		{ 0x0818, 0x610b00 },
		{ 0x081c, 0x610ae8 },
		{ 0x0820, 0x610af0 },
		{ 0x0824, 0x610b08 },
		{ 0x0828, 0x610b10 },
		{ 0x082c, 0x610a68 },
		{ 0x0830, 0x610a60 },
		{ 0x0834, 0x000000 },
		{ 0x0838, 0x610a40 },
		{ 0x0840, 0x610a24 },
		{ 0x0844, 0x610a2c },
		{ 0x0848, 0x610aa8 },
		{ 0x084c, 0x610ab0 },
		{ 0x0860, 0x610a84 },
		{ 0x0864, 0x610a90 },
		{ 0x0868, 0x610b18 },
		{ 0x086c, 0x610b20 },
		{ 0x0870, 0x610ac8 },
		{ 0x0874, 0x610a38 },
		{ 0x0880, 0x610a58 },
		{ 0x0884, 0x610a9c },
		{ 0x08a0, 0x610a70 },
		{ 0x08a4, 0x610a50 },
		{ 0x08a8, 0x610ae0 },
		{ 0x08c0, 0x610b28 },
		{ 0x08c4, 0x610b30 },
		{ 0x08c8, 0x610b40 },
		{ 0x08d4, 0x610b38 },
		{ 0x08d8, 0x610b48 },
		{ 0x08dc, 0x610b50 },
		{ 0x0900, 0x610a18 },
		{ 0x0904, 0x610ab8 },
		{}
	}
};

/* Core channel method table: global state plus per-OR and per-head
 * lists, matching the physical OR/head counts on NV50. */
static const struct nvkm_disp_chan_mthd
nv50_disp_core_mthd = {
	.name = "Core",
	.addr = 0x000000,
	.prev = 0x000004,
	.data = {
		{ "Global", 1, &nv50_disp_core_mthd_base },
		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
		{   "HEAD", 2, &nv50_disp_core_mthd_head },
		{}
	}
};
907 
/* Shut down the core display channel (control register 0x610200):
 * deactivate it, wait (up to 2ms) for the state field to go idle, and
 * save the current PUT pointer for restore on resume. */
static void
nv50_disp_core_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* deactivate channel */
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "core fini: %08x\n",
			   nvkm_rd32(device, 0x610200));
	}

	/* Preserve PUT for restore on resume. */
	chan->suspend_put = nvkm_rd32(device, 0x640000);
}
927 
/* Bring up the core display channel.
 *
 * Attempts a workaround to unstick the channel from certain stuck
 * states first, then programs the push buffer, restores the saved PUT
 * pointer, enables the channel, and waits (up to 2ms) for it to leave
 * its busy state.  Returns -EBUSY on timeout.
 */
static int
nv50_disp_core_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* attempt to unstick channel from some unknown state */
	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
	if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
		nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204, chan->push);
	nvkm_wr32(device, 0x610208, 0x00010000);
	nvkm_wr32(device, 0x61020c, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000, chan->suspend_put);
	nvkm_wr32(device, 0x610200, 0x01000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "core init: %08x\n",
			   nvkm_rd32(device, 0x610200));
		return -EBUSY;
	}

	return 0;
}
960 
/* Channel hooks for the core display channel (DMA, but with its own
 * init/fini as its control registers are not per-channel strided). */
const struct nvkm_disp_chan_func
nv50_disp_core_func = {
	.push = nv50_disp_dmac_push,
	.init = nv50_disp_core_init,
	.fini = nv50_disp_core_fini,
	.intr = nv50_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = nv50_disp_dmac_bind,
};

/* Core channel (channel 0). */
static const struct nvkm_disp_chan_user
nv50_disp_core = {
	.func = &nv50_disp_core_func,
	.ctrl = 0,
	.user = 0,
	.mthd = &nv50_disp_core_mthd,
};
978 
/* Look up the VBIOS Init-Each-Device Table (IEDT) entry for @outp when
 * driven from @head.
 *
 * The match key combines the head bit, link index and OR number as the
 * table expects; on a miss, a debug message is logged and 0 returned.
 */
static u32
nv50_disp_super_iedt(struct nvkm_head *head, struct nvkm_outp *outp,
		     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		     struct nvbios_outp *iedt)
{
	struct nvkm_bios *bios = head->disp->engine.subdev.device->bios;
	const u8  l = ffs(outp->info.link);
	const u16 t = outp->info.hasht;
	const u16 m = (0x0100 << head->id) | (l << 6) | outp->info.or;
	u32 data = nvbios_outp_match(bios, t, m, ver, hdr, cnt, len, iedt);
	if (!data)
		OUTP_DBG(outp, "missing IEDT for %04x:%04x", t, m);
	return data;
}
993 
/* Execute the VBIOS IED "on" script for the output path being attached
 * to the head: id selects which clock-comparison script set to use
 * (callers pass 0 during supervisor 2.2 / OnInt2, 1 during 3.0 / OnInt3),
 * and khz is the pixel clock used to pick the matching script.
 */
static void
nv50_disp_super_ied_on(struct nvkm_head *head,
		       struct nvkm_ior *ior, int id, u32 khz)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp = ior->asy.outp;
	struct nvbios_ocfg iedtrs;
	struct nvbios_outp iedt;
	u8  ver, hdr, cnt, len, flags = 0x00;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing to attach");
		return;
	}

	/* Lookup IED table for the device. */
	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	/* Lookup IEDT runtime settings for the current configuration. */
	if (ior->type == SOR) {
		if (ior->asy.proto == LVDS) {
			/* flag 0x02 selects the 24bpp LVDS variant. */
			if (head->asy.or.depth == 24)
				flags |= 0x02;
		}
		/* flag 0x01 selects the both-sublinks (link == 3) variant. */
		if (ior->asy.link == 3)
			flags |= 0x01;
	}

	data = nvbios_ocfg_match(bios, data, ior->asy.proto_evo, flags,
				 &ver, &hdr, &cnt, &len, &iedtrs);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RS for %02x:%02x",
			 ior->asy.proto_evo, flags);
		return;
	}

	/* Execute the OnInt[23] script for the current frequency. */
	data = nvbios_oclk_match(bios, iedtrs.clkcmp[id], khz);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RSS %d for %02x:%02x %d khz",
			 id, ior->asy.proto_evo, flags, khz);
		return;
	}

	/* Run the script with the output/OR/link/head context filled in. */
	nvbios_init(subdev, data,
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->asy.link;
		init.head = head->id;
	);
}
1049 
/* Execute the VBIOS IED "off" script (iedt.script[id]) for the output
 * path currently attached (armed) on the head.  No-op when nothing is
 * attached or the IED table entry can't be found.
 */
static void
nv50_disp_super_ied_off(struct nvkm_head *head, struct nvkm_ior *ior, int id)
{
	struct nvkm_outp *outp = ior->arm.outp;
	struct nvbios_outp iedt;
	u8  ver, hdr, cnt, len;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing attached");
		return;
	}

	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	/* Run the script with the output/OR/link/head context filled in. */
	nvbios_init(&head->disp->engine.subdev, iedt.script[id],
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->arm.link;
		init.head = head->id;
	);
}
1074 
1075 static struct nvkm_ior *
nv50_disp_super_ior_asy(struct nvkm_head * head)1076 nv50_disp_super_ior_asy(struct nvkm_head *head)
1077 {
1078 	struct nvkm_ior *ior;
1079 	list_for_each_entry(ior, &head->disp->iors, head) {
1080 		if (ior->asy.head & (1 << head->id)) {
1081 			HEAD_DBG(head, "to %s", ior->name);
1082 			return ior;
1083 		}
1084 	}
1085 	HEAD_DBG(head, "nothing to attach");
1086 	return NULL;
1087 }
1088 
1089 static struct nvkm_ior *
nv50_disp_super_ior_arm(struct nvkm_head * head)1090 nv50_disp_super_ior_arm(struct nvkm_head *head)
1091 {
1092 	struct nvkm_ior *ior;
1093 	list_for_each_entry(ior, &head->disp->iors, head) {
1094 		if (ior->arm.head & (1 << head->id)) {
1095 			HEAD_DBG(head, "on %s", ior->name);
1096 			return ior;
1097 		}
1098 	}
1099 	HEAD_DBG(head, "nothing attached");
1100 	return NULL;
1101 }
1102 
/* Supervisor 3.0: post-attach stage - run the OnInt3 IED script for
 * the newly attached output path, then any OR-specific workaround.
 */
void
nv50_disp_super_3_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're attaching to the head. */
	HEAD_DBG(head, "supervisor 3.0");
	ior = nv50_disp_super_ior_asy(head);
	if (!ior)
		return;

	/* Execute OnInt3 IED script. */
	nv50_disp_super_ied_on(head, ior, 1, head->asy.hz / 1000);

	/* OR-specific handling. */
	if (ior->func->war_3)
		ior->func->war_3(ior);
}
1121 
/* DP-specific configuration during supervisor 2.2: compute and program
 * the audio symbol counts, the transfer-unit (TU) valid-symbol fraction
 * (activesym), and the watermark for the mode being set.
 *
 * NOTE(review): ior->dp.nr (lane count) is assumed non-zero here; both
 * symbol calculations divide by it - confirm against link training.
 */
static void
nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	const u32      khz = head->asy.hz / 1000;
	const u32 linkKBps = ior->dp.bw * 27000;
	const u32   symbol = 100000;	/* fixed-point scale for the TU fraction */
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u64 h, v;

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	h = head->asy.hblanke + head->asy.htotal - head->asy.hblanks - 7;
	h = h * linkKBps;
	do_div(h, khz);
	h = h - (3 * ior->dp.ef) - (12 / ior->dp.nr);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	v = head->asy.vblanks - head->asy.vblanke - 25;
	v = v * linkKBps;
	do_div(v, khz);
	v = v - ((36 / ior->dp.nr) + 3) - 1;

	ior->func->dp->audio_sym(ior, head->id, h, v);

	/* watermark / activesym */
	link_data_rate = (khz * head->asy.or.depth / 8) / ior->dp.nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, linkKBps);

	/* Search TU sizes 64..32 for the hw fraction (VTUi + VTUf/VTUa)
	 * closest to the ideal valid-symbols-per-TU value; loop is skipped
	 * entirely when the OR has no activesym method.
	 */
	for (TU = 64; ior->func->dp->activesym && TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa  = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa  = 0;
					VTUf  = 1;
					calc += symbol;
				}
			} else {
				VTUa  = 0;
				VTUf  = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero.  decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (ior->func->dp->activesym) {
		if (!bestTU) {
			nvkm_error(subdev, "unable to determine dp config\n");
			return;
		}

		ior->func->dp->activesym(ior, head->id, bestTU, bestVTUa, bestVTUf, bestVTUi);
	} else {
		bestTU = 64;
	}

	/* XXX close to vbios numbers, but not right */
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	ior->func->dp->watermark(ior, head->id, unk);
}
1227 
/* Supervisor 2.2: attach stage - acquire the output path, run the
 * OnInt2 IED script, program clocks/dividers and DP parameters for
 * the OR being attached to this head.
 */
void
nv50_disp_super_2_2(struct nvkm_disp *disp, struct nvkm_head *head)
{
	const u32 khz = head->asy.hz / 1000;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're attaching from the head. */
	HEAD_DBG(head, "supervisor 2.2");
	ior = nv50_disp_super_ior_asy(head);
	if (!ior)
		return;

	/* For some reason, NVIDIA decided not to:
	 *
	 * A) Give dual-link LVDS a separate EVO protocol, like for TMDS.
	 *  and
	 * B) Use SetControlOutputResource.PixelDepth on LVDS.
	 *
	 * Override the values we usually read from HW with the same
	 * data we pass though an ioctl instead.
	 */
	if (ior->type == SOR && ior->asy.proto == LVDS) {
		head->asy.or.depth = (disp->sor.lvdsconf & 0x0200) ? 24 : 18;
		ior->asy.link      = (disp->sor.lvdsconf & 0x0100) ? 3  : 1;
	}

	/* Handle any link training, etc. */
	if ((outp = ior->asy.outp) && outp->func->acquire)
		outp->func->acquire(outp);

	/* Execute OnInt2 IED script. */
	nv50_disp_super_ied_on(head, ior, 0, khz);

	/* Program RG clock divider. */
	head->func->rgclk(head, ior->asy.rgdiv);

	/* Mode-specific internal DP configuration. */
	if (ior->type == SOR && ior->asy.proto == DP)
		nv50_disp_super_2_2_dp(head, ior);

	/* OR-specific handling. */
	ior->func->clock(ior);
	if (ior->func->war_2)
		ior->func->war_2(ior);
}
1274 
1275 void
nv50_disp_super_2_1(struct nvkm_disp * disp,struct nvkm_head * head)1276 nv50_disp_super_2_1(struct nvkm_disp *disp, struct nvkm_head *head)
1277 {
1278 	struct nvkm_devinit *devinit = disp->engine.subdev.device->devinit;
1279 	const u32 khz = head->asy.hz / 1000;
1280 	HEAD_DBG(head, "supervisor 2.1 - %d khz", khz);
1281 	if (khz)
1282 		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head->id, khz);
1283 }
1284 
/* Supervisor 2.0: detach stage - run the OffInt2 IED script for the
 * OR currently driving this head and, if this was the OR's only
 * active head, disable the output path.
 */
void
nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're detaching from the head. */
	HEAD_DBG(head, "supervisor 2.0");
	ior = nv50_disp_super_ior_arm(head);
	if (!ior)
		return;

	/* Execute OffInt2 IED script. */
	nv50_disp_super_ied_off(head, ior, 2);

	/* If we're shutting down the OR's only active head, execute
	 * the output path's disable function.
	 */
	if (ior->arm.head == (1 << head->id)) {
		if ((outp = ior->arm.outp) && outp->func->disable)
			outp->func->disable(outp, ior);
	}
}
1308 
/* Supervisor 1.0: run the OffInt1 IED script for the OR currently
 * attached to this head, if any.
 */
void
nv50_disp_super_1_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're detaching from the head. */
	HEAD_DBG(head, "supervisor 1.0");
	ior = nv50_disp_super_ior_arm(head);
	if (!ior)
		return;

	/* Execute OffInt1 IED script. */
	nv50_disp_super_ied_off(head, ior, 1);
}
1323 
1324 void
nv50_disp_super_1(struct nvkm_disp * disp)1325 nv50_disp_super_1(struct nvkm_disp *disp)
1326 {
1327 	struct nvkm_head *head;
1328 	struct nvkm_ior *ior;
1329 
1330 	list_for_each_entry(head, &disp->heads, head) {
1331 		head->func->state(head, &head->arm);
1332 		head->func->state(head, &head->asy);
1333 	}
1334 
1335 	list_for_each_entry(ior, &disp->iors, head) {
1336 		ior->func->state(ior, &ior->arm);
1337 		ior->func->state(ior, &ior->asy);
1338 	}
1339 }
1340 
/* Supervisor (modeset sequencing) handler, run from disp->super.wq.
 *
 * disp->super.pending, recorded by nv50_disp_intr(), selects the stage:
 * 0x10 -> supervisor 1 (state capture + OffInt1), 0x20 -> supervisor 2
 * (detach, route, VPLL, attach), 0x40 -> supervisor 3 (OnInt3).  The
 * per-head bits read from 0x610030 select which heads each stage
 * applies to.
 */
void
nv50_disp_super(struct work_struct *work)
{
	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 super;

	mutex_lock(&disp->super.mutex);
	super = nvkm_rd32(device, 0x610030);

	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super.pending, super);

	if (disp->super.pending & 0x00000010) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000020 << head->id)))
				continue;
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super.pending & 0x00000020) {
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		/* Re-route output paths between the 2.0 (detach) and
		 * 2.1/2.2 (clock/attach) sub-stages. */
		nvkm_outp_route(disp);
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000200 << head->id)))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super.pending & 0x00000040) {
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	/* Signal completion of the supervisor request to the hardware. */
	nvkm_wr32(device, 0x610030, 0x80000000);
	mutex_unlock(&disp->super.mutex);
}
1395 
/* Names for the TYPE field (bits 14:12 of 0x610080) of EVO channel
 * error interrupts; used by nv50_disp_intr_error() for logging.
 */
const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 0, "NONE" },
	{ 1, "PUSHBUFFER_ERR" },
	{ 2, "TRAP" },
	{ 3, "RESERVED_METHOD" },
	{ 4, "INVALID_ARG" },
	{ 5, "INVALID_STATE" },
	{ 7, "UNRESOLVABLE_HANDLE" },
	{}
};
1407 
/* Names for the CODE field (bits 23:16 of 0x610080); only 0x00 is
 * given a (blank) name on NV50.
 */
static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};
1413 
/* Decode, log and acknowledge an EVO channel error interrupt.
 * chid: index of the channel whose error bit was set in 0x610020.
 */
static void
nv50_disp_intr_error(struct nvkm_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;	/* error code field */
	u32 type = (addr & 0x00007000) >> 12;	/* error type field */
	u32 mthd = (addr & 0x00000ffc);		/* faulting method offset */
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ? ec->name : "",
		   chid, mthd, data);

	/* Dump channel method state at error level for mthd 0x0080. */
	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	/* Acknowledge the interrupt and reset the channel error state. */
	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}
1447 
/* Top-level display interrupt handler: dispatch channel errors,
 * channel user events, per-head vblanks, and supervisor requests.
 */
void
nv50_disp_intr(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

	/* Channel error interrupts (bits 20:16 of 0x610020). */
	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	/* Channel user-event interrupts (bits 4:0 of 0x610020). */
	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	/* Vblank for head 0. */
	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(disp, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	/* Vblank for head 1. */
	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(disp, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	/* Supervisor request: record which stage bits fired, then defer
	 * the actual handling to the nv50_disp_super() work item. */
	if (intr1 & 0x00000070) {
		disp->super.pending = (intr1 & 0x00000070);
		queue_work(disp->super.wq, &disp->super.work);
		nvkm_wr32(device, 0x610024, disp->super.pending);
	}
}
1483 
/* Shut the display engine down by masking all of its interrupts. */
void
nv50_disp_fini(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);
}
1492 
/* Bring up the display engine: mirror capability registers into the
 * EVO-visible area, take control away from the VBIOS, point the
 * hardware at the display object memory, and enable only supervisor
 * interrupts.  Returns -EBUSY if the VBIOS handover times out.
 */
int
nv50_disp_init(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.  NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->heads, head) {
		tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
		nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
		nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
		nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
		nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		/* wait up to 2ms for the hardware to acknowledge */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}
1557 
/* One-time setup: enumerate windows, heads, DACs, PIORs and SORs via
 * the implementation's count callbacks and construct each, then
 * allocate the instance memory and RAMHT used for display objects.
 * Returns 0 on success, or the first constructor/allocation error.
 */
int
nv50_disp_oneinit(struct nvkm_disp *disp)
{
	const struct nvkm_disp_func *func = disp->func;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	/* Windows only exist on implementations providing wndw.cnt. */
	if (func->wndw.cnt) {
		disp->wndw.nr = func->wndw.cnt(disp, &disp->wndw.mask);
		nvkm_debug(subdev, "Window(s): %d (%08lx)\n", disp->wndw.nr, disp->wndw.mask);
	}

	disp->head.nr = func->head.cnt(disp, &disp->head.mask);
	nvkm_debug(subdev, "  Head(s): %d (%02lx)\n", disp->head.nr, disp->head.mask);
	for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
		ret = func->head.new(disp, i);
		if (ret)
			return ret;
	}

	if (func->dac.cnt) {
		disp->dac.nr = func->dac.cnt(disp, &disp->dac.mask);
		nvkm_debug(subdev, "   DAC(s): %d (%02lx)\n", disp->dac.nr, disp->dac.mask);
		for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
			ret = func->dac.new(disp, i);
			if (ret)
				return ret;
		}
	}

	if (func->pior.cnt) {
		disp->pior.nr = func->pior.cnt(disp, &disp->pior.mask);
		nvkm_debug(subdev, "  PIOR(s): %d (%02lx)\n", disp->pior.nr, disp->pior.mask);
		for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
			ret = func->pior.new(disp, i);
			if (ret)
				return ret;
		}
	}

	disp->sor.nr = func->sor.cnt(disp, &disp->sor.mask);
	nvkm_debug(subdev, "   SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
	for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
		ret = func->sor.new(disp, i);
		if (ret)
			return ret;
	}

	/* Instance memory backing the display hash table and objects. */
	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
	if (ret)
		return ret;

	/* RAMHT size defaults to 0x1000 unless overridden by the impl. */
	return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
			      0x1000, 0, disp->inst, &disp->ramht);
}
1614 
/* Implementation description for the original NV50 display engine:
 * lifecycle hooks, OR constructors, and the user-visible channel
 * classes exposed under the NV50_DISP root object.
 */
static const struct nvkm_disp_func
nv50_disp = {
	.oneinit = nv50_disp_oneinit,
	.init = nv50_disp_init,
	.fini = nv50_disp_fini,
	.intr = nv50_disp_intr,
	.super = nv50_disp_super,
	.uevent = &nv50_disp_chan_uevent,
	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
	.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
	.root = { 0, 0, NV50_DISP },
	.user = {
		{{0,0,NV50_DISP_CURSOR             }, nvkm_disp_chan_new, &nv50_disp_curs },
		{{0,0,NV50_DISP_OVERLAY            }, nvkm_disp_chan_new, &nv50_disp_oimm },
		{{0,0,NV50_DISP_BASE_CHANNEL_DMA   }, nvkm_disp_chan_new, &nv50_disp_base },
		{{0,0,NV50_DISP_CORE_CHANNEL_DMA   }, nvkm_disp_core_new, &nv50_disp_core },
		{{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, &nv50_disp_ovly },
		{}
	}
};
1637 
/* Constructor entry point: create an NV50 display engine instance. */
int
nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_disp **pdisp)
{
	return nvkm_disp_new_(&nv50_disp, device, type, inst, pdisp);
}
1644