/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/client.h>
#include <core/handle.h>
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/event.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>

#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/vm.h>

#include <engine/dmaobj.h>
#include <engine/fifo.h>

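/* Table of runlist engines: each entry pairs an engine's subdev index with
 * the engine mask passed to nouveau_fifo_channel_create() for channels that
 * target it; the _() macro always ORs in the engine's own bit, so the GR
 * entry additionally covers the software engine.
 */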
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;
	u64 mask;
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW)),
	_(NVDEV_ENGINE_VP      , 0),
	_(NVDEV_ENGINE_PPP     , 0),
	_(NVDEV_ENGINE_BSP     , 0),
	_(NVDEV_ENGINE_COPY0   , 0),
	_(NVDEV_ENGINE_COPY1   , 0),
	_(NVDEV_ENGINE_VENC    , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

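/* Per-engine scheduling state; the playlist is double-buffered, presumably
 * so a new list can be built while the previous one may still be read by
 * the hardware.
 */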
struct nve0_fifo_engn {
	struct nouveau_gpuobj *playlist[2];
	int cur_playlist;
};

struct nve0_fifo_priv {
	struct nouveau_fifo base;
	struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nouveau_gpuobj *mem;
		struct nouveau_vma bar;
	} user;
	int spoon_nr;
};

struct nve0_fifo_base {
	struct nouveau_fifo_base base;
	struct nouveau_gpuobj *pgd;
	struct nouveau_vm *vm;
};

struct nve0_fifo_chan {
	struct nouveau_fifo_chan base;
	u32 engine;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

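/*
 * Rebuild and submit the playlist for one engine: scan every channel control
 * register for entries that are enabled and bound to this engine, write their
 * channel IDs into the current playlist buffer, then hand the buffer to the
 * hardware and wait for the update to complete.
 */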
static void
nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
{
	struct nouveau_bar *bar = nouveau_bar(priv);
	struct nve0_fifo_engn *engn = &priv->engine[engine];
	struct nouveau_gpuobj *cur;
	u32 match = (engine << 16) | 0x00000001;
	int i, p;

	mutex_lock(&nv_subdev(priv)->mutex);
	cur = engn->playlist[engn->cur_playlist];
	if (unlikely(cur == NULL)) {
		int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
					     0x8000, 0x1000, 0, &cur);
		if (ret) {
			mutex_unlock(&nv_subdev(priv)->mutex);
			nv_error(priv, "playlist alloc failed\n");
			return;
		}

		engn->playlist[engn->cur_playlist] = cur;
	}

	engn->cur_playlist = !engn->cur_playlist;

	for (i = 0, p = 0; i < priv->base.max; i++) {
		u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
		if (ctrl != match)
			continue;
		nv_wo32(cur, p + 0, i);
		nv_wo32(cur, p + 4, 0x00000000);
		p += 8;
	}
	bar->flush(bar);

	nv_wr32(priv, 0x002270, cur->addr >> 12);
	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
	if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
		nv_error(priv, "playlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(priv)->mutex);
}

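/*
 * Map an engine context into the channel's VM and write its address into the
 * engine-specific slot of the channel's instance block.  The software engine
 * has no hardware state and is skipped.
 */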
static int
nve0_fifo_context_attach(struct nouveau_object *parent,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_base *base = (void *)parent->parent;
	struct nouveau_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   :
	case NVDEV_ENGINE_COPY0:
	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					    NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}

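/*
 * Detach an engine context: kick the channel (write its chid to 0x002634 and
 * wait for the register to read back that value) before clearing the context
 * pointer from the instance block.  A kick timeout only fails the detach
 * (-EBUSY) when suspending.
 */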
static int
nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_priv *priv = (void *)parent->engine;
	struct nve0_fifo_base *base = (void *)parent->parent;
	struct nve0_fifo_chan *chan = (void *)parent;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   :
	case NVDEV_ENGINE_COPY0:
	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	nv_wr32(priv, 0x002634, chan->base.chid);
	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
		nv_error(priv, "channel %d [%s] kick timeout\n",
			 chan->base.chid, nouveau_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nv_wo32(base, addr + 0x00, 0x00000000);
	nv_wo32(base, addr + 0x04, 0x00000000);
	bar->flush(bar);
	return 0;
}

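/*
 * Channel constructor: pick the first requested engine that is actually
 * present, create the FIFO channel with that engine's mask, zero the
 * channel's 0x200-byte slice of the user control area, and fill in the
 * instance block (user area address, GPFIFO base and log2 size, plus a
 * number of fixed values).
 */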
static int
nve0_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_priv *priv = (void *)engine;
	struct nve0_fifo_base *base = (void *)parent;
	struct nve0_fifo_chan *chan;
	struct nve0_channel_ind_class *args = data;
	u64 usermem, ioffset, ilength;
	int ret, i;

	if (size < sizeof(*args))
		return -EINVAL;

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if (args->engine & (1 << i)) {
			if (nouveau_engine(parent, fifo_engine[i].subdev)) {
				args->engine = (1 << i);
				break;
			}
		}
	}

	if (i == FIFO_ENGINE_NR)
		return -ENODEV;

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
					  priv->user.bar.offset, 0x200,
					  args->pushbuf,
					  fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nve0_fifo_context_attach;
	nv_parent(chan)->context_detach = nve0_fifo_context_detach;
	chan->engine = i;

	usermem = chan->base.chid * 0x200;
	ioffset = args->ioffset;
	ilength = log2i(args->ilength / 8);

	for (i = 0; i < 0x200; i += 4)
		nv_wo32(priv->user.mem, usermem + i, 0x00000000);

	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xe8, chan->base.chid);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}

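/*
 * Bind the channel to its engine, point the hardware at its instance block,
 * enable it and trigger a playlist update so it gets scheduled.
 */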
static int
nve0_fifo_chan_init(struct nouveau_object *object)
{
	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
	struct nve0_fifo_priv *priv = (void *)object->engine;
	struct nve0_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;
	int ret;

	ret = nouveau_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	nve0_fifo_playlist_update(priv, chan->engine);
	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	return 0;
}

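/*
 * Take the channel off the hardware: flag it in its control register, rebuild
 * the engine's playlist without it, then clear its channel register.
 */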
static int
nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
	struct nve0_fifo_priv *priv = (void *)object->engine;
	struct nve0_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
	nve0_fifo_playlist_update(priv, chan->engine);
	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);

	return nouveau_fifo_channel_fini(&chan->base, suspend);
}

static struct nouveau_ofuncs
nve0_fifo_ofuncs = {
	.ctor = nve0_fifo_chan_ctor,
	.dtor = _nouveau_fifo_channel_dtor,
	.init = nve0_fifo_chan_init,
	.fini = nve0_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};

static struct nouveau_oclass
nve0_fifo_sclass[] = {
	{ NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

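/*
 * The FIFO context is the channel's instance block: the constructor allocates
 * a page directory, writes its address and what appears to be the VM address
 * limit into the block, then wires the client's VM to that PGD.
 */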
static int
nve0_fifo_context_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nve0_fifo_base *base;
	int ret;

	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				          0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
				&base->pgd);
	if (ret)
		return ret;

	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
nve0_fifo_context_dtor(struct nouveau_object *object)
{
	struct nve0_fifo_base *base = (void *)object;
	nouveau_vm_ref(NULL, &base->vm, base->pgd);
	nouveau_gpuobj_ref(NULL, &base->pgd);
	nouveau_fifo_context_destroy(&base->base);
}

static struct nouveau_oclass
nve0_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_fifo_context_ctor,
		.dtor = nve0_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

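/* Fault and interrupt decode tables; the unit and client tables are left
 * empty here, only the fault reasons and SUBFIFO interrupt bits are named.
 */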
static const struct nouveau_enum nve0_fifo_fault_unit[] = {
	{}
};

static const struct nouveau_enum nve0_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
	{}
};

static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
	{}
};

static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

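/*
 * Decode and log an MMU fault: read the faulting instance, virtual address
 * and status from the per-unit fault registers, print the reason, source
 * unit and client, and try to resolve the instance to a channel's client
 * name via the owning engine's context list.
 */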
static void
nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
{
	u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
	u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
	u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
	u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
	u32 client = (stat & 0x00001f00) >> 8;
	const struct nouveau_enum *en;
	struct nouveau_engine *engine;
	struct nouveau_object *engctx = NULL;

	nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
		       "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
	pr_cont("] from ");
	en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
	if (stat & 0x00000040) {
		pr_cont("/");
		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
	} else {
		pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
	}

	if (en && en->data2) {
		engine = nouveau_engine(priv, en->data2);
		if (engine)
			engctx = nouveau_engctx_get(engine, inst);

	}

	pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
			nouveau_client_name(engctx));

	nouveau_engctx_put(engctx);
}

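/*
 * Forward a software method to the SW class object (0x906e) bound to the
 * channel, if any; a zero return lets the caller suppress the corresponding
 * interrupt bit.
 */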
static int
nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
	struct nve0_fifo_chan *chan = NULL;
	struct nouveau_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nouveau_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return ret;
}

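/*
 * Handle a SUBFIFO interrupt: ILLEGAL_MTHD on method 0x0054 and EMPTY_SUBC
 * events are first offered to the channel's software object; whatever cannot
 * be handled that way is logged, then the interrupt is acknowledged.
 */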
static void
nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
{
	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	if (stat & 0x00200000) {
		if (mthd == 0x0054) {
			if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
				show &= ~0x00200000;
		}
	}

	if (stat & 0x00800000) {
		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nv_error(priv, "SUBFIFO%d:", unit);
		nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
		pr_cont("\n");
		nv_error(priv,
			 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 unit, chid,
			 nouveau_client_name_for_fifo_chid(&priv->base, chid),
			 subc, mthd, data);
	}

	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}

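/*
 * Top-level PFIFO interrupt handler: MMU faults (0x10000000) and SUBFIFO
 * interrupts (0x20000000) are dispatched per-unit from their status masks,
 * 0x80000000 signals the user event, and anything unrecognised is logged,
 * acknowledged and its interrupt enable mask cleared.
 */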
static void
nve0_fifo_intr(struct nouveau_subdev *subdev)
{
	struct nve0_fifo_priv *priv = (void *)subdev;
	u32 mask = nv_rd32(priv, 0x002140);
	u32 stat = nv_rd32(priv, 0x002100) & mask;

	if (stat & 0x00000100) {
		nv_warn(priv, "unknown status 0x00000100\n");
		nv_wr32(priv, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x10000000) {
		u32 units = nv_rd32(priv, 0x00259c);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nve0_fifo_isr_vm_fault(priv, i);
			u &= ~(1 << i);
		}

		nv_wr32(priv, 0x00259c, units);
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 units = nv_rd32(priv, 0x0025a0);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nve0_fifo_isr_subfifo_intr(priv, i);
			u &= ~(1 << i);
		}

		nv_wr32(priv, 0x0025a0, units);
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		nv_warn(priv, "unknown status 0x40000000\n");
		nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nouveau_event_trigger(priv->base.uevent, 0);
		nv_wr32(priv, 0x002100, 0x80000000);
		stat &= ~0x80000000;
	}

	if (stat) {
		nv_fatal(priv, "unhandled status 0x%08x\n", stat);
		nv_wr32(priv, 0x002100, stat);
		nv_wr32(priv, 0x002140, 0);
	}
}

static void
nve0_fifo_uevent_enable(struct nouveau_event *event, int index)
{
	struct nve0_fifo_priv *priv = event->priv;
	nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
}

static void
nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
{
	struct nve0_fifo_priv *priv = event->priv;
	nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
}

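/*
 * Engine constructor: 4096 channels, a 0x200-byte user control area per
 * channel mapped through the BAR, and uevent hooks that gate the 0x80000000
 * interrupt enable bit.
 */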
static int
nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nve0_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
				&priv->user.bar);
	if (ret)
		return ret;

	priv->base.uevent->enable = nve0_fifo_uevent_enable;
	priv->base.uevent->disable = nve0_fifo_uevent_disable;
	priv->base.uevent->priv = priv;

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nve0_fifo_intr;
	nv_engine(priv)->cclass = &nve0_fifo_cclass;
	nv_engine(priv)->sclass = nve0_fifo_sclass;
	return 0;
}

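/* Destructor: drop the user area mapping and object, release both playlist
 * buffers for every engine, then tear down the base FIFO.
 */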
static void
nve0_fifo_dtor(struct nouveau_object *object)
{
	struct nve0_fifo_priv *priv = (void *)object;
	int i;

	nouveau_gpuobj_unmap(&priv->user.bar);
	nouveau_gpuobj_ref(NULL, &priv->user.mem);

	for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
	}

	nouveau_fifo_destroy(&priv->base);
}

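/*
 * Hardware init: enable every available SUBFIFO unit, clear and unmask their
 * interrupts, point the hardware at the user control area, then clear and
 * unmask the top-level PFIFO interrupts.
 */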
static int
nve0_fifo_init(struct nouveau_object *object)
{
	struct nve0_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	/* enable all available PSUBFIFOs */
	nv_wr32(priv, 0x000204, 0xffffffff);
	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
	nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);

	/* PSUBFIFO[n] */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

	nv_wr32(priv, 0x002a00, 0xffffffff);
	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0x3fffffff);
	return 0;
}

struct nouveau_oclass
nve0_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_fifo_ctor,
		.dtor = nve0_fifo_dtor,
		.init = nve0_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};