1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25 #include <core/object.h>
26 #include <core/client.h>
27 #include <core/parent.h>
28 #include <core/handle.h>
29 #include <nvif/unpack.h>
30 #include <nvif/class.h>
31
32 #include <engine/disp.h>
33
34 #include <subdev/bios.h>
35 #include <subdev/bios/dcb.h>
36 #include <subdev/bios/disp.h>
37 #include <subdev/bios/init.h>
38 #include <subdev/bios/pll.h>
39 #include <subdev/devinit.h>
40 #include <subdev/fb.h>
41 #include <subdev/timer.h>
42
43 #include "nv50.h"
44
45 /*******************************************************************************
46 * EVO channel base class
47 ******************************************************************************/
48
49 static void
nvd0_disp_chan_uevent_fini(struct nvkm_event * event,int type,int index)50 nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
51 {
52 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
53 nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
54 }
55
56 static void
nvd0_disp_chan_uevent_init(struct nvkm_event * event,int types,int index)57 nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
58 {
59 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
60 nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
61 }
62
/* user-event handlers for EVO channels: ctor is shared with nv50,
 * init/fini toggle the per-channel notify enable bit in 0x610090 */
const struct nvkm_event_func
nvd0_disp_chan_uevent = {
	.ctor = nv50_disp_chan_uevent_ctor,
	.init = nvd0_disp_chan_uevent_init,
	.fini = nvd0_disp_chan_uevent_fini,
};
69
70 /*******************************************************************************
71 * EVO DMA channel base class
72 ******************************************************************************/
73
/* Attach an object to a DMA channel by inserting a RAMHT entry.
 * The entry data packs: channel id (bits 31:27), gpuobj offset
 * (shifted into bits 26:9) and a valid bit (bit 0).
 */
static int
nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
			     struct nouveau_object *object, u32 name)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	struct nv50_disp_chan *chan = (void *)parent;
	u32 addr = nv_gpuobj(object)->node->offset;
	u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
	return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
}
84
85 static void
nvd0_disp_dmac_object_detach(struct nouveau_object * parent,int cookie)86 nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
87 {
88 struct nv50_disp_base *base = (void *)parent->parent;
89 nouveau_ramht_remove(base->ramht, cookie);
90 }
91
/* Bring up an EVO DMA channel: base-class init, unmask its error
 * interrupt, then program the push buffer and kick the channel.
 * The register writes are order-sensitive hardware initialisation.
 */
static int
nvd0_disp_dmac_init(struct nouveau_object *object)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	int chid = dmac->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&dmac->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);

	/* initialise channel for dma command submission */
	nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
	nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
	nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
	nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
		nv_error(dmac, "init: 0x%08x\n",
			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}
124
/* Tear down an EVO DMA channel.  On timeout the failure only aborts a
 * suspend; during normal teardown we press on and disable interrupts
 * regardless, then defer to the base-class fini.
 */
static int
nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	int chid = dmac->base.chid;

	/* deactivate channel */
	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
		nv_error(dmac, "fini: 0x%08x\n",
			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notification */
	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);

	return nv50_disp_chan_fini(&dmac->base, suspend);
}
148
149 /*******************************************************************************
150 * EVO master channel object
151 ******************************************************************************/
152
/* Core (master) channel method tables: each entry maps an EVO method
 * offset to the 0x66xxxx debug/shadow register it is mirrored at.
 * Terminated by an empty entry.
 */

/* global methods */
const struct nv50_disp_mthd_list
nvd0_disp_mast_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x660080 },
		{ 0x0084, 0x660084 },
		{ 0x0088, 0x660088 },
		{ 0x008c, 0x000000 },
		{}
	}
};

/* per-DAC methods, stride 0x20 per output */
const struct nv50_disp_mthd_list
nvd0_disp_mast_mthd_dac = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0180, 0x660180 },
		{ 0x0184, 0x660184 },
		{ 0x0188, 0x660188 },
		{ 0x0190, 0x660190 },
		{}
	}
};

/* per-SOR methods, stride 0x20 per output */
const struct nv50_disp_mthd_list
nvd0_disp_mast_mthd_sor = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0200, 0x660200 },
		{ 0x0204, 0x660204 },
		{ 0x0208, 0x660208 },
		{ 0x0210, 0x660210 },
		{}
	}
};

/* per-PIOR methods, stride 0x20 per output */
const struct nv50_disp_mthd_list
nvd0_disp_mast_mthd_pior = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0300, 0x660300 },
		{ 0x0304, 0x660304 },
		{ 0x0308, 0x660308 },
		{ 0x0310, 0x660310 },
		{}
	}
};
204
/* per-head core-channel methods, stride 0x300 per head */
static const struct nv50_disp_mthd_list
nvd0_disp_mast_mthd_head = {
	.mthd = 0x0300,
	.addr = 0x000300,
	.data = {
		{ 0x0400, 0x660400 },
		{ 0x0404, 0x660404 },
		{ 0x0408, 0x660408 },
		{ 0x040c, 0x66040c },
		{ 0x0410, 0x660410 },
		{ 0x0414, 0x660414 },
		{ 0x0418, 0x660418 },
		{ 0x041c, 0x66041c },
		{ 0x0420, 0x660420 },
		{ 0x0424, 0x660424 },
		{ 0x0428, 0x660428 },
		{ 0x042c, 0x66042c },
		{ 0x0430, 0x660430 },
		{ 0x0434, 0x660434 },
		{ 0x0438, 0x660438 },
		{ 0x0440, 0x660440 },
		{ 0x0444, 0x660444 },
		{ 0x0448, 0x660448 },
		{ 0x044c, 0x66044c },
		{ 0x0450, 0x660450 },
		{ 0x0454, 0x660454 },
		{ 0x0458, 0x660458 },
		{ 0x045c, 0x66045c },
		{ 0x0460, 0x660460 },
		{ 0x0468, 0x660468 },
		{ 0x046c, 0x66046c },
		{ 0x0470, 0x660470 },
		{ 0x0474, 0x660474 },
		{ 0x0480, 0x660480 },
		{ 0x0484, 0x660484 },
		{ 0x048c, 0x66048c },
		{ 0x0490, 0x660490 },
		{ 0x0494, 0x660494 },
		{ 0x0498, 0x660498 },
		{ 0x04b0, 0x6604b0 },
		{ 0x04b8, 0x6604b8 },
		{ 0x04bc, 0x6604bc },
		{ 0x04c0, 0x6604c0 },
		{ 0x04c4, 0x6604c4 },
		{ 0x04c8, 0x6604c8 },
		{ 0x04d0, 0x6604d0 },
		{ 0x04d4, 0x6604d4 },
		{ 0x04e0, 0x6604e0 },
		{ 0x04e4, 0x6604e4 },
		{ 0x04e8, 0x6604e8 },
		{ 0x04ec, 0x6604ec },
		{ 0x04f0, 0x6604f0 },
		{ 0x04f4, 0x6604f4 },
		{ 0x04f8, 0x6604f8 },
		{ 0x04fc, 0x6604fc },
		{ 0x0500, 0x660500 },
		{ 0x0504, 0x660504 },
		{ 0x0508, 0x660508 },
		{ 0x050c, 0x66050c },
		{ 0x0510, 0x660510 },
		{ 0x0514, 0x660514 },
		{ 0x0518, 0x660518 },
		{ 0x051c, 0x66051c },
		{ 0x052c, 0x66052c },
		{ 0x0530, 0x660530 },
		{ 0x054c, 0x66054c },
		{ 0x0550, 0x660550 },
		{ 0x0554, 0x660554 },
		{ 0x0558, 0x660558 },
		{ 0x055c, 0x66055c },
		{}
	}
};
278
/* Core channel method-list index: sub-lists with their instance counts
 * (1 global block, 3 DACs, 8 SORs, 4 PIORs, 4 heads).
 */
static const struct nv50_disp_mthd_chan
nvd0_disp_mast_mthd_chan = {
	.name = "Core",
	.addr = 0x000000,
	.data = {
		{ "Global", 1, &nvd0_disp_mast_mthd_base },
		{    "DAC", 3, &nvd0_disp_mast_mthd_dac  },
		{    "SOR", 8, &nvd0_disp_mast_mthd_sor  },
		{   "PIOR", 4, &nvd0_disp_mast_mthd_pior },
		{   "HEAD", 4, &nvd0_disp_mast_mthd_head },
		{}
	}
};
292
/* Bring up the EVO core (master) channel, chid 0.  Same shape as
 * nvd0_disp_dmac_init() but with fixed channel-0 registers and a
 * different kick value (0x01000013).  Order-sensitive register writes.
 */
static int
nvd0_disp_mast_init(struct nouveau_object *object)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;
	int ret;

	ret = nv50_disp_chan_init(&mast->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);

	/* initialise channel for dma command submission */
	nv_wr32(priv, 0x610494, mast->push);
	nv_wr32(priv, 0x610498, 0x00010000);
	nv_wr32(priv, 0x61049c, 0x00000001);
	nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
	nv_wr32(priv, 0x640000, 0x00000000);
	nv_wr32(priv, 0x610490, 0x01000013);

	/* wait for it to go inactive */
	if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
		return -EBUSY;
	}

	return 0;
}
323
/* Tear down the EVO core channel.  A timeout only fails a suspend;
 * otherwise interrupts are disabled and base-class fini runs anyway.
 */
static int
nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;

	/* deactivate channel */
	nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
	nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
	if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notification */
	nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);

	return nv50_disp_chan_fini(&mast->base, suspend);
}
345
/* object functions for the core channel (chid 0); ctor/dtor and the
 * accessors are shared with nv50, init/fini are nvd0-specific */
struct nv50_disp_chan_impl
nvd0_disp_mast_ofuncs = {
	.base.ctor = nv50_disp_mast_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nvd0_disp_mast_init,
	.base.fini = nvd0_disp_mast_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 0,
	.attach = nvd0_disp_dmac_object_attach,
	.detach = nvd0_disp_dmac_object_detach,
};
360
361 /*******************************************************************************
362 * EVO sync channel objects
363 ******************************************************************************/
364
/* base (sync) channel global methods, mirrored at 0x6610xx */
static const struct nv50_disp_mthd_list
nvd0_disp_sync_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x661080 },
		{ 0x0084, 0x661084 },
		{ 0x0088, 0x661088 },
		{ 0x008c, 0x66108c },
		{ 0x0090, 0x661090 },
		{ 0x0094, 0x661094 },
		{ 0x00a0, 0x6610a0 },
		{ 0x00a4, 0x6610a4 },
		{ 0x00c0, 0x6610c0 },
		{ 0x00c4, 0x6610c4 },
		{ 0x00c8, 0x6610c8 },
		{ 0x00cc, 0x6610cc },
		{ 0x00e0, 0x6610e0 },
		{ 0x00e4, 0x6610e4 },
		{ 0x00e8, 0x6610e8 },
		{ 0x00ec, 0x6610ec },
		{ 0x00fc, 0x6610fc },
		{ 0x0100, 0x661100 },
		{ 0x0104, 0x661104 },
		{ 0x0108, 0x661108 },
		{ 0x010c, 0x66110c },
		{ 0x0110, 0x661110 },
		{ 0x0114, 0x661114 },
		{ 0x0118, 0x661118 },
		{ 0x011c, 0x66111c },
		{ 0x0130, 0x661130 },
		{ 0x0134, 0x661134 },
		{ 0x0138, 0x661138 },
		{ 0x013c, 0x66113c },
		{ 0x0140, 0x661140 },
		{ 0x0144, 0x661144 },
		{ 0x0148, 0x661148 },
		{ 0x014c, 0x66114c },
		{ 0x0150, 0x661150 },
		{ 0x0154, 0x661154 },
		{ 0x0158, 0x661158 },
		{ 0x015c, 0x66115c },
		{ 0x0160, 0x661160 },
		{ 0x0164, 0x661164 },
		{ 0x0168, 0x661168 },
		{ 0x016c, 0x66116c },
		{}
	}
};
414
/* base (sync) channel image methods, two instances per channel */
static const struct nv50_disp_mthd_list
nvd0_disp_sync_mthd_image = {
	.mthd = 0x0400,
	.addr = 0x000400,
	.data = {
		{ 0x0400, 0x661400 },
		{ 0x0404, 0x661404 },
		{ 0x0408, 0x661408 },
		{ 0x040c, 0x66140c },
		{ 0x0410, 0x661410 },
		{}
	}
};
428
/* base (sync) channel method-list index; channels are spaced 0x1000
 * apart in the shadow area */
const struct nv50_disp_mthd_chan
nvd0_disp_sync_mthd_chan = {
	.name = "Base",
	.addr = 0x001000,
	.data = {
		{ "Global", 1, &nvd0_disp_sync_mthd_base },
		{  "Image", 2, &nvd0_disp_sync_mthd_image },
		{}
	}
};
439
/* object functions for base (sync) channels; chid 1 is the first of
 * the per-head base channels */
struct nv50_disp_chan_impl
nvd0_disp_sync_ofuncs = {
	.base.ctor = nv50_disp_sync_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nvd0_disp_dmac_init,
	.base.fini = nvd0_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 1,
	.attach = nvd0_disp_dmac_object_attach,
	.detach = nvd0_disp_dmac_object_detach,
};
454
455 /*******************************************************************************
456 * EVO overlay channel objects
457 ******************************************************************************/
458
/* overlay channel global methods, mirrored at 0x6650xx.
 * NOTE(review): unlike the other lists in this file, .addr is not set
 * here and relies on zero-initialisation — confirm this is intentional.
 */
static const struct nv50_disp_mthd_list
nvd0_disp_ovly_mthd_base = {
	.mthd = 0x0000,
	.data = {
		{ 0x0080, 0x665080 },
		{ 0x0084, 0x665084 },
		{ 0x0088, 0x665088 },
		{ 0x008c, 0x66508c },
		{ 0x0090, 0x665090 },
		{ 0x0094, 0x665094 },
		{ 0x00a0, 0x6650a0 },
		{ 0x00a4, 0x6650a4 },
		{ 0x00b0, 0x6650b0 },
		{ 0x00b4, 0x6650b4 },
		{ 0x00b8, 0x6650b8 },
		{ 0x00c0, 0x6650c0 },
		{ 0x00e0, 0x6650e0 },
		{ 0x00e4, 0x6650e4 },
		{ 0x00e8, 0x6650e8 },
		{ 0x0100, 0x665100 },
		{ 0x0104, 0x665104 },
		{ 0x0108, 0x665108 },
		{ 0x010c, 0x66510c },
		{ 0x0110, 0x665110 },
		{ 0x0118, 0x665118 },
		{ 0x011c, 0x66511c },
		{ 0x0120, 0x665120 },
		{ 0x0124, 0x665124 },
		{ 0x0130, 0x665130 },
		{ 0x0134, 0x665134 },
		{ 0x0138, 0x665138 },
		{ 0x013c, 0x66513c },
		{ 0x0140, 0x665140 },
		{ 0x0144, 0x665144 },
		{ 0x0148, 0x665148 },
		{ 0x014c, 0x66514c },
		{ 0x0150, 0x665150 },
		{ 0x0154, 0x665154 },
		{ 0x0158, 0x665158 },
		{ 0x015c, 0x66515c },
		{ 0x0160, 0x665160 },
		{ 0x0164, 0x665164 },
		{ 0x0168, 0x665168 },
		{ 0x016c, 0x66516c },
		{ 0x0400, 0x665400 },
		{ 0x0408, 0x665408 },
		{ 0x040c, 0x66540c },
		{ 0x0410, 0x665410 },
		{}
	}
};
510
/* overlay channel method-list index */
static const struct nv50_disp_mthd_chan
nvd0_disp_ovly_mthd_chan = {
	.name = "Overlay",
	.addr = 0x001000,
	.data = {
		{ "Global", 1, &nvd0_disp_ovly_mthd_base },
		{}
	}
};
520
/* object functions for overlay DMA channels; chid 5 is the first of
 * the per-head overlay channels */
struct nv50_disp_chan_impl
nvd0_disp_ovly_ofuncs = {
	.base.ctor = nv50_disp_ovly_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = nvd0_disp_dmac_init,
	.base.fini = nvd0_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 5,
	.attach = nvd0_disp_dmac_object_attach,
	.detach = nvd0_disp_dmac_object_detach,
};
535
536 /*******************************************************************************
537 * EVO PIO channel base class
538 ******************************************************************************/
539
/* Bring up an EVO PIO channel: no push buffer here, just unmask the
 * channel's error interrupt and flip the activate bit, then wait for
 * the channel state field to report active (0x00010000).
 */
static int
nvd0_disp_pioc_init(struct nouveau_object *object)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_pioc *pioc = (void *)object;
	int chid = pioc->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&pioc->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);

	/* activate channel */
	nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
		nv_error(pioc, "init: 0x%08x\n",
			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}
565
/* Tear down an EVO PIO channel.  A timeout only fails a suspend; in
 * the normal path interrupts are masked and base-class fini runs.
 */
static int
nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_pioc *pioc = (void *)object;
	int chid = pioc->base.chid;

	/* deactivate channel and wait for the state field to clear */
	nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
		nv_error(pioc, "timeout: 0x%08x\n",
			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notification */
	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);

	return nv50_disp_chan_fini(&pioc->base, suspend);
}
587
588 /*******************************************************************************
589 * EVO immediate overlay channel objects
590 ******************************************************************************/
591
/* object functions for immediate-overlay PIO channels; chid 9 is the
 * first of the per-head oimm channels (no attach/detach: PIO channels
 * carry no DMA objects) */
struct nv50_disp_chan_impl
nvd0_disp_oimm_ofuncs = {
	.base.ctor = nv50_disp_oimm_ctor,
	.base.dtor = nv50_disp_pioc_dtor,
	.base.init = nvd0_disp_pioc_init,
	.base.fini = nvd0_disp_pioc_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 9,
};
604
605 /*******************************************************************************
606 * EVO cursor channel objects
607 ******************************************************************************/
608
/* object functions for cursor PIO channels; chid 13 is the first of
 * the per-head cursor channels */
struct nv50_disp_chan_impl
nvd0_disp_curs_ofuncs = {
	.base.ctor = nv50_disp_curs_ctor,
	.base.dtor = nv50_disp_pioc_dtor,
	.base.init = nvd0_disp_pioc_init,
	.base.fini = nvd0_disp_pioc_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 13,
};
621
622 /*******************************************************************************
623 * Base display object
624 ******************************************************************************/
625
/* SCANOUTPOS method: report blanking/total timings and a timestamped
 * vline/hline sample for head.  Parameters (object, priv, head, data,
 * size) come from the NV50_DISP_MTHD_V0 macro expansion.
 * NOTE(review): 'ret' looks uninitialised, but the nvif_unpack() macro
 * assigns it as a side effect — do not "fix" without checking the macro.
 */
int
nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
{
	/* armed core-channel state for this head, stride 0x300 */
	const u32 total  = nv_rd32(priv, 0x640414 + (head * 0x300));
	const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
	const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
	union {
		struct nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
		/* vertical position in the high half, horizontal in the low */
		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
		args->v0.hblanke = (blanke & 0x0000ffff);
		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
		args->v0.hblanks = (blanks & 0x0000ffff);
		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
		args->v0.htotal  = ( total & 0x0000ffff);
		args->v0.time[0] = ktime_to_ns(ktime_get());
		args->v0.vline = /* vline read locks hline */
			nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline =
			nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
	} else
		return ret;

	return 0;
}
657
/* Initialise the display engine: mirror capability registers into the
 * EVO shadow area, take ownership of the display from the VBIOS, point
 * the hardware at our hash table, and set up interrupt masks.
 */
static int
nvd0_disp_base_init(struct nouveau_object *object)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	int ret, i;
	u32 tmp;

	ret = nouveau_parent_init(&base->base);
	if (ret)
		return ret;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.
	 */

	/* ... CRTC caps */
	for (i = 0; i < priv->head.nr; i++) {
		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
		nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
		nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
		nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < priv->dac.nr; i++) {
		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
		nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < priv->sor.nr; i++) {
		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
		nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
		nv_wr32(priv, 0x6100ac, 0x00000100);
		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
			nv_error(priv, "timeout acquiring display\n");
			return -EBUSY;
		}
	}

	/* point at display engine memory area (hash table, objects) */
	nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nv_wr32(priv, 0x610090, 0x00000000);
	nv_wr32(priv, 0x6100a0, 0x00000000);
	nv_wr32(priv, 0x6100b0, 0x00000307);

	/* disable underflow reporting, preventing an intermittent issue
	 * on some nve4 boards where the production vbios left this
	 * setting enabled by default.
	 *
	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
	 */
	for (i = 0; i < priv->head.nr; i++)
		nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);

	return 0;
}
726
727 static int
nvd0_disp_base_fini(struct nouveau_object * object,bool suspend)728 nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
729 {
730 struct nv50_disp_priv *priv = (void *)object->engine;
731 struct nv50_disp_base *base = (void *)object;
732
733 /* disable all interrupts */
734 nv_wr32(priv, 0x6100b0, 0x00000000);
735
736 return nouveau_parent_fini(&base->base, suspend);
737 }
738
/* object functions for the base display object; ctor/dtor/mthd are
 * shared with nv50, init/fini are nvd0-specific */
struct nouveau_ofuncs
nvd0_disp_base_ofuncs = {
	.ctor = nv50_disp_base_ctor,
	.dtor = nv50_disp_base_dtor,
	.init = nvd0_disp_base_init,
	.fini = nvd0_disp_base_fini,
	.mthd = nv50_disp_base_mthd,
	.ntfy = nouveau_disp_ntfy,
};
748
/* class list exposing the GF110 display object */
static struct nouveau_oclass
nvd0_disp_base_oclass[] = {
	{ GF110_DISP, &nvd0_disp_base_ofuncs },
	{}
};
754
/* channel classes instantiable beneath the display object */
static struct nouveau_oclass
nvd0_disp_sclass[] = {
	{ GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
	{ GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
	{ GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
	{ GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
	{ GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
	{}
};
764
765 /*******************************************************************************
766 * Display engine implementation
767 ******************************************************************************/
768
769 static void
nvd0_disp_vblank_init(struct nvkm_event * event,int type,int head)770 nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head)
771 {
772 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
773 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
774 }
775
776 static void
nvd0_disp_vblank_fini(struct nvkm_event * event,int type,int head)777 nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head)
778 {
779 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
780 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
781 }
782
/* vblank event handlers: common ctor, nvd0-specific enable/disable */
const struct nvkm_event_func
nvd0_disp_vblank_func = {
	.ctor = nouveau_disp_vblank_ctor,
	.init = nvd0_disp_vblank_init,
	.fini = nvd0_disp_vblank_fini,
};
789
790 static struct nvkm_output *
exec_lookup(struct nv50_disp_priv * priv,int head,int or,u32 ctrl,u32 * data,u8 * ver,u8 * hdr,u8 * cnt,u8 * len,struct nvbios_outp * info)791 exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
792 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
793 struct nvbios_outp *info)
794 {
795 struct nouveau_bios *bios = nouveau_bios(priv);
796 struct nvkm_output *outp;
797 u16 mask, type;
798
799 if (or < 4) {
800 type = DCB_OUTPUT_ANALOG;
801 mask = 0;
802 } else {
803 or -= 4;
804 switch (ctrl & 0x00000f00) {
805 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
806 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
807 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
808 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
809 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
810 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
811 default:
812 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
813 return 0x0000;
814 }
815 }
816
817 mask = 0x00c0 & (mask << 6);
818 mask |= 0x0001 << or;
819 mask |= 0x0100 << head;
820
821 list_for_each_entry(outp, &priv->base.outp, head) {
822 if ((outp->info.hasht & 0xff) == type &&
823 (outp->info.hashm & mask) == mask) {
824 *data = nvbios_outp_match(bios, outp->info.hasht,
825 outp->info.hashm,
826 ver, hdr, cnt, len, info);
827 if (!*data)
828 return NULL;
829 return outp;
830 }
831 }
832
833 return NULL;
834 }
835
/* Scan the armed method-control registers (0x640180, stride 0x20) for
 * the output routed to head, then execute script 'id' from its VBIOS
 * output table entry.  Returns the output found, or NULL.
 */
static struct nvkm_output *
exec_script(struct nv50_disp_priv *priv, int head, int id)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nvkm_output *outp;
	struct nvbios_outp info;
	u8  ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	int or;

	/* find the output routed to this head */
	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
		ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
		if (ctrl & (1 << head))
			break;
	}

	/* no output owns this head */
	if (or == 8)
		return NULL;

	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(priv),
			.bios = bios,
			.offset = info.script[id],
			.outp = &outp->info,
			.crtc = head,
			.execute = 1,
		};

		nvbios_exec(&init);
	}

	return outp;
}
871
/* Locate the output driving 'head' (via the 0x660180 shadow registers),
 * derive its configuration value from the method control word, then
 * match and execute the VBIOS clock-comparison script for pclk.
 * id == 0xff means "lookup only, run no script".  Returns the output
 * (with *conf set), or NULL if none drives this head.
 */
static struct nvkm_output *
exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nvkm_output *outp;
	struct nvbios_outp info1;
	struct nvbios_ocfg info2;
	u8  ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	int or;

	/* find the output routed to this head */
	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
		ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
		if (ctrl & (1 << head))
			break;
	}

	if (or == 8)
		return NULL;

	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
	if (!outp)
		return NULL;

	/* pick the ocfg match value for this output type */
	switch (outp->info.type) {
	case DCB_OUTPUT_TMDS:
		*conf = (ctrl & 0x00000f00) >> 8;
		/* dual-link threshold */
		if (pclk >= 165000)
			*conf |= 0x0100;
		break;
	case DCB_OUTPUT_LVDS:
		*conf = priv->sor.lvdsconf;
		break;
	case DCB_OUTPUT_DP:
		*conf = (ctrl & 0x00000f00) >> 8;
		break;
	case DCB_OUTPUT_ANALOG:
	default:
		*conf = 0x00ff;
		break;
	}

	/* run the matching clock script, unless lookup-only (id == 0xff) */
	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
	if (data && id < 0xff) {
		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
		if (data) {
			struct nvbios_init init = {
				.subdev = nv_subdev(priv),
				.bios = bios,
				.offset = data,
				.outp = &outp->info,
				.crtc = head,
				.execute = 1,
			};

			nvbios_exec(&init);
		}
	}

	return outp;
}
933
/* supervisor 1.0: run VBIOS script 1 for the output on this head */
static void
nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
	exec_script(priv, head, 1);
}
939
/* supervisor 2.0: run VBIOS script 2 for the output on this head; for
 * DP outputs additionally run the detach script (script[4]) and mark
 * link training as not done.
 */
static void
nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
{
	struct nvkm_output *outp = exec_script(priv, head, 2);

	/* see note in nv50_disp_intr_unk20_0() */
	if (outp && outp->info.type == DCB_OUTPUT_DP) {
		struct nvkm_output_dp *outpdp = (void *)outp;
		struct nvbios_init init = {
			.subdev = nv_subdev(priv),
			.bios = nouveau_bios(priv),
			.outp = &outp->info,
			.crtc = head,
			.offset = outpdp->info.script[4],
			.execute = 1,
		};

		nvbios_exec(&init);
		atomic_set(&outpdp->lt.done, 0);
	}
}
961
/* supervisor 2.1: program the head's VPLL to the armed pixel clock
 * (0x660450, in Hz/1000 after division) and clear 0x612200 */
static void
nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
{
	struct nouveau_devinit *devinit = nouveau_devinit(priv);
	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
	if (pclk)
		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
	nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
}
971
/* Program DisplayPort transfer-unit parameters for a head: symbols
 * available per hblank/vblank period and the TU watermark, derived
 * from link width/rate and pixel clock (algorithm per the comments
 * taken from the tegra driver).
 * NOTE(review): link_nr comes from popcount of dpctrl bits 19:16 and
 * is used as a divisor — presumably the link is always trained (>0)
 * when this runs; confirm against the supervisor sequencing.
 */
static void
nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
			 struct dcb_output *outp)
{
	const int or = ffs(outp->or) - 1;
	const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
	const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
	const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
	const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
	const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
	const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
	/* protocol value 0x8 selects link A, anything else link B */
	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
	const u32 hoff = (head * 0x800);
	const u32 soff = (  or * 0x800);
	const u32 loff = (link * 0x080) + soff;
	const u32 symbol = 100000;
	const u32 TU = 64;
	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
	u32 clksor = nv_rd32(priv, 0x612300 + soff);
	u32 datarate, link_nr, link_bw, bits;
	u64 ratio, value;

	/* lane count from enabled-lane bits; link rate in 27MHz units */
	link_nr = hweight32(dpctrl & 0x000f0000);
	link_bw = (clksor & 0x007c0000) >> 18;
	link_bw *= 27000;

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	value = vblanke + vactive - vblanks - 7;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
	nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	value = vblanks - vblanke - 25;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - ((36 / link_nr) + 3) - 1;
	nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);

	/* watermark */
	if      ((conf & 0x3c0) == 0x180) bits = 30;
	else if ((conf & 0x3c0) == 0x140) bits = 24;
	else                              bits = 18;
	datarate = (pclk * bits) / 8;

	ratio  = datarate;
	ratio *= symbol;
	do_div(ratio, link_nr * link_bw);

	value  = (symbol - ratio) * TU;
	value *= ratio;
	do_div(value, symbol);
	do_div(value, symbol);

	value += 5;
	value |= 0x08000000;

	nv_wr32(priv, 0x616610 + hoff, value);
}
1032
1033 static void
nvd0_disp_intr_unk2_2(struct nv50_disp_priv * priv,int head)1034 nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
1035 {
1036 struct nvkm_output *outp;
1037 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1038 u32 conf, addr, data;
1039
1040 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
1041 if (!outp)
1042 return;
1043
1044 /* see note in nv50_disp_intr_unk20_2() */
1045 if (outp->info.type == DCB_OUTPUT_DP) {
1046 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
1047 switch ((sync & 0x000003c0) >> 6) {
1048 case 6: pclk = pclk * 30; break;
1049 case 5: pclk = pclk * 24; break;
1050 case 2:
1051 default:
1052 pclk = pclk * 18;
1053 break;
1054 }
1055
1056 if (nvkm_output_dp_train(outp, pclk, true))
1057 ERR("link not trained before attach\n");
1058 }
1059
1060 exec_clkcmp(priv, head, 0, pclk, &conf);
1061
1062 if (outp->info.type == DCB_OUTPUT_ANALOG) {
1063 addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
1064 data = 0x00000000;
1065 } else {
1066 if (outp->info.type == DCB_OUTPUT_DP)
1067 nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
1068 addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
1069 data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1070 }
1071
1072 nv_mask(priv, addr, 0x00000707, data);
1073 }
1074
1075 static void
nvd0_disp_intr_unk4_0(struct nv50_disp_priv * priv,int head)1076 nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
1077 {
1078 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1079 u32 conf;
1080
1081 exec_clkcmp(priv, head, 1, pclk, &conf);
1082 }
1083
void
nvd0_disp_intr_supervisor(struct work_struct *work)
{
	/* Deferred handler for display supervisor interrupts; runs the
	 * per-stage (1.0, 2.0/2.1/2.2, 3.0) handlers for each head whose
	 * status register flags it, then acks the supervisor request.
	 */
	struct nv50_disp_priv *priv =
		container_of(work, struct nv50_disp_priv, supervisor);
	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
	/* per-head supervisor status words.
	 * NOTE(review): fixed size 4 assumes priv->head.nr <= 4; confirm
	 * that holds for every chipset routed through this function
	 */
	u32 mask[4];
	int head;

	/* priv->super holds the stage bit captured by the interrupt handler */
	nv_debug(priv, "supervisor %d\n", ffs(priv->super));
	for (head = 0; head < priv->head.nr; head++) {
		mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
		nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
	}

	if (priv->super & 0x00000001) {
		/* stage 1: dump core channel methods, then per-head 1.0 */
		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nv_debug(priv, "supervisor 1.0 - head %d\n", head);
			nvd0_disp_intr_unk1_0(priv, head);
		}
	} else
	if (priv->super & 0x00000002) {
		/* stage 2: three sub-stages, each a full pass over heads */
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nv_debug(priv, "supervisor 2.0 - head %d\n", head);
			nvd0_disp_intr_unk2_0(priv, head);
		}
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00010000))
				continue;
			nv_debug(priv, "supervisor 2.1 - head %d\n", head);
			nvd0_disp_intr_unk2_1(priv, head);
		}
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nv_debug(priv, "supervisor 2.2 - head %d\n", head);
			nvd0_disp_intr_unk2_2(priv, head);
		}
	} else
	if (priv->super & 0x00000004) {
		/* stage 3 */
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nv_debug(priv, "supervisor 3.0 - head %d\n", head);
			nvd0_disp_intr_unk4_0(priv, head);
		}
	}

	/* clear per-head status and signal supervisor completion */
	for (head = 0; head < priv->head.nr; head++)
		nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
	nv_wr32(priv, 0x6101d0, 0x80000000);
}
1141
1142 static void
nvd0_disp_intr_error(struct nv50_disp_priv * priv,int chid)1143 nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
1144 {
1145 const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1146 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
1147 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
1148 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
1149
1150 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
1151 "0x%08x 0x%08x\n",
1152 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1153
1154 if (chid == 0) {
1155 switch (mthd & 0xffc) {
1156 case 0x0080:
1157 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
1158 impl->mthd.core);
1159 break;
1160 default:
1161 break;
1162 }
1163 } else
1164 if (chid <= 4) {
1165 switch (mthd & 0xffc) {
1166 case 0x0080:
1167 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
1168 impl->mthd.base);
1169 break;
1170 default:
1171 break;
1172 }
1173 } else
1174 if (chid <= 8) {
1175 switch (mthd & 0xffc) {
1176 case 0x0080:
1177 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
1178 impl->mthd.ovly);
1179 break;
1180 default:
1181 break;
1182 }
1183 }
1184
1185 nv_wr32(priv, 0x61009c, (1 << chid));
1186 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
1187 }
1188
void
nvd0_disp_intr(struct nouveau_subdev *subdev)
{
	/* Top-level display interrupt handler: dispatches channel events,
	 * channel errors, supervisor requests, and per-head vblank.
	 */
	struct nv50_disp_priv *priv = (void *)subdev;
	u32 intr = nv_rd32(priv, 0x610088);
	int i;

	/* bit 0: per-channel uevents; ack each channel as it's delivered */
	if (intr & 0x00000001) {
		u32 stat = nv_rd32(priv, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(priv, chid);
			nv_wr32(priv, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}

	/* bit 1: channel error; only the lowest pending channel is handled
	 * here (the error handler acks its own status bit)
	 */
	if (intr & 0x00000002) {
		u32 stat = nv_rd32(priv, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			nvd0_disp_intr_error(priv, chid);
		intr &= ~0x00000002;
	}

	/* bit 20: supervisor request; record the stage bits and defer the
	 * real work to process context (it may sleep, e.g. link training)
	 */
	if (intr & 0x00100000) {
		u32 stat = nv_rd32(priv, 0x6100ac);
		if (stat & 0x00000007) {
			priv->super = (stat & 0x00000007);
			schedule_work(&priv->supervisor);
			nv_wr32(priv, 0x6100ac, priv->super);
			stat &= ~0x00000007;
		}

		if (stat) {
			nv_info(priv, "unknown intr24 0x%08x\n", stat);
			nv_wr32(priv, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

	/* bits 24+: one per head; bit 0 of the head status is vblank */
	for (i = 0; i < priv->head.nr; i++) {
		u32 mask = 0x01000000 << i;
		if (mask & intr) {
			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
			if (stat & 0x00000001)
				nouveau_disp_vblank(&priv->base, i);
			/* read-modify-write of 0 bits + readback, presumably
			 * to ack/flush the head status - confirm intent
			 */
			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
			nv_rd32(priv, 0x6100c0 + (i * 0x800));
		}
	}
}
1242
static int
nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	/* Constructor for the nvd0 display engine: creates the base disp
	 * object sized by the hardware's head count, sets up the channel
	 * uevent source, and wires in the nvd0-specific classes, interrupt
	 * handler, supervisor work and output method pointers.
	 *
	 * Returns 0 on success or a negative error code.
	 */
	struct nv50_disp_priv *priv;
	int heads = nv_rd32(parent, 0x022448);	/* head count from hardware */
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, heads,
				  "PDISP", "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* 17 event slots, matching the EVO channel count handled here */
	ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nvd0_disp_base_oclass;
	nv_engine(priv)->cclass = &nv50_disp_cclass;
	nv_subdev(priv)->intr = nvd0_disp_intr;
	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
	priv->sclass = nvd0_disp_sclass;
	priv->head.nr = heads;
	priv->dac.nr = 3;
	priv->sor.nr = 4;
	priv->dac.power = nv50_dac_power;
	priv->dac.sense = nv50_dac_sense;
	priv->sor.power = nv50_sor_power;
	priv->sor.hda_eld = nvd0_hda_eld;
	priv->sor.hdmi = nvd0_hdmi_ctrl;
	return 0;
}
1277
/* Output classes supported by this display implementation; NULL-terminated.
 * Only the DP SOR implementation needs an explicit class here.
 */
struct nouveau_oclass *
nvd0_disp_outp_sclass[] = {
	&nvd0_sor_dp_impl.base.base,
	NULL
};
1283
/* Implementation descriptor for the nvd0 display engine (class 0x90):
 * object lifecycle hooks, vblank/output tables, and the per-channel
 * method lists used for debug/error state dumps.
 */
struct nouveau_oclass *
nvd0_disp_oclass = &(struct nv50_disp_impl) {
	.base.base.handle = NV_ENGINE(DISP, 0x90),
	.base.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvd0_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
	.base.vblank = &nvd0_disp_vblank_func,
	.base.outp =  nvd0_disp_outp_sclass,
	.mthd.core = &nvd0_disp_mast_mthd_chan,
	.mthd.base = &nvd0_disp_sync_mthd_chan,
	.mthd.ovly = &nvd0_disp_ovly_mthd_chan,
	.mthd.prev = -0x020000,	/* offset applied when dumping method state */
	.head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
1301