/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "conn.h"
#include "outp.h"

#include <core/client.h>
#include <core/notify.h>
#include <core/oproxy.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

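/* Per-head vblank interrupt on/off hooks, wired into the disp->vblank event
 * below; they simply forward to the chipset-specific head methods.
 */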
static void
nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	disp->func->head.vblank_fini(disp, head);
}

static void
nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	disp->func->head.vblank_init(disp, head);
}

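/* Validate a vblank notify request from NVIF: the requested head must be in
 * range, and the notifier is set up to report on that head only.
 */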
static int
nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), vblank);
	union {
		struct nvif_notify_head_req_v0 v0;
	} *req = data;
	int ret;

	if (nvif_unpack(req->v0, 0, 0, false)) {
		notify->size = sizeof(struct nvif_notify_head_rep_v0);
		if (ret = -ENXIO, req->v0.head < disp->vblank.index_nr) {
			notify->types = 1;
			notify->index = req->v0.head;
			return 0;
		}
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nvkm_disp_vblank_init,
	.fini = nvkm_disp_vblank_fini,
};

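/* Send a vblank event for the given head to any registered notifiers. */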
void
nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
	struct nvif_notify_head_rep_v0 rep = {};
	nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
}

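/* Validate a connector hotplug notify request from NVIF: the connector index
 * must belong to a known output path, and that connector must have a hotplug
 * event behind it.
 */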
static int
nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
		   struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), hpd);
	union {
		struct nvif_notify_conn_req_v0 v0;
	} *req = data;
	struct nvkm_output *outp;
	int ret;

	if (nvif_unpack(req->v0, 0, 0, false)) {
		notify->size = sizeof(struct nvif_notify_conn_rep_v0);
		list_for_each_entry(outp, &disp->outp, head) {
			if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
				if (ret = -ENODEV, outp->conn->hpd.event) {
					notify->types = req->v0.mask;
					notify->index = req->v0.conn;
					ret = 0;
				}
				break;
			}
		}
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_disp_hpd_func = {
	.ctor = nvkm_disp_hpd_ctor
};

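/* Map NV04_DISP_NTFY_* notify types requested on a display object onto the
 * engine's vblank and hotplug event sources.
 */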
int
nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
{
	struct nvkm_disp *disp = nvkm_disp(object->engine);
	switch (type) {
	case NV04_DISP_NTFY_VBLANK:
		*event = &disp->vblank;
		return 0;
	case NV04_DISP_NTFY_CONN:
		*event = &disp->hpd;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

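/* Called when a client's root object proxy is destroyed; drops the exclusive
 * claim on the display taken in nvkm_disp_class_new().
 */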
static void
nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
{
	struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
	mutex_lock(&disp->engine.subdev.mutex);
	if (disp->client == oproxy)
		disp->client = NULL;
	mutex_unlock(&disp->engine.subdev.mutex);
}

static const struct nvkm_oproxy_func
nvkm_disp_class = {
	.dtor[1] = nvkm_disp_class_del,
};

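/* Instantiate the chipset's root display class, wrapped in an object proxy so
 * that only a single client can own the display at any one time.
 */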
static int
nvkm_disp_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	const struct nvkm_disp_oclass *sclass = oclass->engn;
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	struct nvkm_oproxy *oproxy;
	int ret;

	ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
	if (ret)
		return ret;
	*pobject = &oproxy->base;

	mutex_lock(&disp->engine.subdev.mutex);
	if (disp->client) {
		mutex_unlock(&disp->engine.subdev.mutex);
		return -EBUSY;
	}
	disp->client = oproxy;
	mutex_unlock(&disp->engine.subdev.mutex);

	return sclass->ctor(disp, oclass, data, size, &oproxy->object);
}

static const struct nvkm_device_oclass
nvkm_disp_sclass = {
	.ctor = nvkm_disp_class_new,
};

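/* The display engine exposes exactly one class: the root class supplied by
 * the chipset-specific implementation via disp->func->root().
 */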
static int
nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	if (index == 0) {
		const struct nvkm_disp_oclass *root = disp->func->root(disp);
		oclass->base = root->base;
		oclass->engn = root;
		*class = &nvkm_disp_sclass;
		return 0;
	}
	return 1;
}

static void
nvkm_disp_intr(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	disp->func->intr(disp);
}

static int
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_connector *conn;
	struct nvkm_output *outp;

	list_for_each_entry(outp, &disp->outp, head) {
		nvkm_output_fini(outp);
	}

	list_for_each_entry(conn, &disp->conn, head) {
		nvkm_connector_fini(conn);
	}

	return 0;
}

static int
nvkm_disp_init(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_connector *conn;
	struct nvkm_output *outp;

	list_for_each_entry(conn, &disp->conn, head) {
		nvkm_connector_init(conn);
	}

	list_for_each_entry(outp, &disp->outp, head) {
		nvkm_output_init(outp);
	}

	return 0;
}

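/* Tear down chipset-specific state first, then the event sources, then any
 * remaining output and connector objects.
 */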
static void *
nvkm_disp_dtor(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_connector *conn;
	struct nvkm_output *outp;
	void *data = disp;

	if (disp->func->dtor)
		data = disp->func->dtor(disp);

	nvkm_event_fini(&disp->vblank);
	nvkm_event_fini(&disp->hpd);

	while (!list_empty(&disp->outp)) {
		outp = list_first_entry(&disp->outp, typeof(*outp), head);
		list_del(&outp->head);
		nvkm_output_del(&outp);
	}

	while (!list_empty(&disp->conn)) {
		conn = list_first_entry(&disp->conn, typeof(*conn), head);
		list_del(&conn->head);
		nvkm_connector_del(&conn);
	}

	return data;
}

static const struct nvkm_engine_func
nvkm_disp = {
	.dtor = nvkm_disp_dtor,
	.init = nvkm_disp_init,
	.fini = nvkm_disp_fini,
	.intr = nvkm_disp_intr,
	.base.sclass = nvkm_disp_class_get,
};

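/* Common constructor: build the output and connector lists from the DCB and
 * connector tables in the VBIOS, then set up the hotplug and vblank events.
 */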
int
nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, int heads, struct nvkm_disp *disp)
{
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp, *outt, *pair;
	struct nvkm_connector *conn;
	struct nvbios_connE connE;
	struct dcb_output dcbE;
	u8 hpd = 0, ver, hdr;
	u32 data;
	int ret, i;

	INIT_LIST_HEAD(&disp->outp);
	INIT_LIST_HEAD(&disp->conn);
	disp->func = func;
	disp->head.nr = heads;

	ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
			       true, &disp->engine);
	if (ret)
		return ret;

	/* create output objects for each display path in the vbios */
	i = -1;
	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
		const struct nvkm_disp_func_outp *outps;
		int (*ctor)(struct nvkm_disp *, int, struct dcb_output *,
			    struct nvkm_output **);

		if (dcbE.type == DCB_OUTPUT_UNUSED)
			continue;
		if (dcbE.type == DCB_OUTPUT_EOL)
			break;
		outp = NULL;

		switch (dcbE.location) {
		case 0: outps = &disp->func->outp.internal; break;
		case 1: outps = &disp->func->outp.external; break;
		default:
			nvkm_warn(&disp->engine.subdev,
				  "dcb %d locn %d unknown\n", i, dcbE.location);
			continue;
		}

		switch (dcbE.type) {
		case DCB_OUTPUT_ANALOG: ctor = outps->crt ; break;
		case DCB_OUTPUT_TV    : ctor = outps->tv  ; break;
		case DCB_OUTPUT_TMDS  : ctor = outps->tmds; break;
		case DCB_OUTPUT_LVDS  : ctor = outps->lvds; break;
		case DCB_OUTPUT_DP    : ctor = outps->dp  ; break;
		default:
			nvkm_warn(&disp->engine.subdev,
				  "dcb %d type %d unknown\n", i, dcbE.type);
			continue;
		}

		if (ctor)
			ret = ctor(disp, i, &dcbE, &outp);
		else
			ret = -ENODEV;

		if (ret) {
			if (ret == -ENODEV) {
				nvkm_debug(&disp->engine.subdev,
					   "dcb %d %d/%d not supported\n",
					   i, dcbE.location, dcbE.type);
				continue;
			}
			nvkm_error(&disp->engine.subdev,
				   "failed to create output %d\n", i);
			nvkm_output_del(&outp);
			continue;
		}

		list_add_tail(&outp->head, &disp->outp);
		hpd = max(hpd, (u8)(dcbE.connector + 1));
	}

	/* create connector objects based on the outputs we support */
	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
		/* bios data *should* give us the most useful information */
		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
				     &connE);

		/* no bios connector data... */
		if (!data) {
			/* heuristic: anything with the same ccb index is
			 * considered to be on the same connector, any
			 * output path without an associated ccb entry will
			 * be put on its own connector
			 */
			int ccb_index = outp->info.i2c_index;
			if (ccb_index != 0xf) {
				list_for_each_entry(pair, &disp->outp, head) {
					if (pair->info.i2c_index == ccb_index) {
						outp->conn = pair->conn;
						break;
					}
				}
			}

			/* connector shared with another output path */
			if (outp->conn)
				continue;

			memset(&connE, 0x00, sizeof(connE));
			connE.type = DCB_CONNECTOR_NONE;
			i = -1;
		} else {
			i = outp->info.connector;
		}

		/* check that we haven't already created this connector */
		list_for_each_entry(conn, &disp->conn, head) {
			if (conn->index == outp->info.connector) {
				outp->conn = conn;
				break;
			}
		}

		if (outp->conn)
			continue;

		/* apparently we need to create a new one! */
		ret = nvkm_connector_new(disp, i, &connE, &outp->conn);
		if (ret) {
			nvkm_error(&disp->engine.subdev,
				   "failed to create output %d conn: %d\n",
				   outp->index, ret);
			nvkm_connector_del(&outp->conn);
			list_del(&outp->head);
			nvkm_output_del(&outp);
			continue;
		}

		list_add_tail(&outp->conn->head, &disp->conn);
	}

	ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
	if (ret)
		return ret;

	ret = nvkm_event_init(&nvkm_disp_vblank_func, 1, heads, &disp->vblank);
	if (ret)
		return ret;

	return 0;
}

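/* Allocate and construct a display engine instance; chipsets that embed
 * struct nvkm_disp in a larger structure call nvkm_disp_ctor() directly.
 */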
int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, int heads, struct nvkm_disp **pdisp)
{
	if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_disp_ctor(func, device, index, heads, *pdisp);
}