Lines matching refs: chan (nvkm fifo channel code, nvkm_chan_* helpers)
42 nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx) in nvkm_chan_cctx_bind() argument
44 struct nvkm_cgrp *cgrp = chan->cgrp; in nvkm_chan_cctx_bind()
51 CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name); in nvkm_chan_cctx_bind()
59 nvkm_chan_block(chan); in nvkm_chan_cctx_bind()
60 nvkm_chan_preempt(chan, true); in nvkm_chan_cctx_bind()
63 engn->func->bind(engn, cctx, chan); in nvkm_chan_cctx_bind()
69 nvkm_chan_allow(chan); in nvkm_chan_cctx_bind()
73 nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx) in nvkm_chan_cctx_put() argument
80 if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) { in nvkm_chan_cctx_put()
81 CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name); in nvkm_chan_cctx_put()
82 nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx); in nvkm_chan_cctx_put()
85 mutex_unlock(&chan->cgrp->mutex); in nvkm_chan_cctx_put()
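The put path above hinges on refcount_dec_and_mutex_lock() (line 80): it only returns true, with chan->cgrp->mutex held, when the reference being dropped was the last one, and only then is the cctx torn down (its vctx released at line 82) before the mutex is dropped at line 85. A minimal userspace analogy of that primitive follows; ref_dec_and_lock() is a hypothetical stand-in, not nouveau or kernel code, but it shows why the final decrement has to happen under the lock that guards lookups.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical analogy of refcount_dec_and_mutex_lock(): drop a reference,
 * and only if it was the last one, return true with the lookup lock held so
 * the caller can unlink and free the object without racing a new lookup. */
static bool ref_dec_and_lock(atomic_int *refs, pthread_mutex_t *lock)
{
	int old = atomic_load(refs);

	/* Fast path: provably not the last reference, no lock needed. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(refs, &old, old - 1))
			return false;
	}

	/* Possibly the last reference: take the lock that guards lookups
	 * first, then decrement, so nobody can find and re-reference a
	 * dying object between the final drop and its destruction. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(refs, 1) != 1) {
		pthread_mutex_unlock(lock);
		return false;
	}
	return true;	/* caller destroys the object, then unlocks */
}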
93 nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx, in nvkm_chan_cctx_get() argument
96 struct nvkm_cgrp *cgrp = chan->cgrp; in nvkm_chan_cctx_get()
103 cctx = nvkm_list_find(cctx, &chan->cctxs, head, in nvkm_chan_cctx_get()
104 cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm); in nvkm_chan_cctx_get()
108 mutex_unlock(&chan->cgrp->mutex); in nvkm_chan_cctx_get()
113 ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client); in nvkm_chan_cctx_get()
115 CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret); in nvkm_chan_cctx_get()
120 CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name); in nvkm_chan_cctx_get()
130 list_add_tail(&cctx->head, &chan->cctxs); in nvkm_chan_cctx_get()
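Taken together, the three cctx helpers give each channel a per-engine context with lookup-or-create semantics: get either finds an existing cctx for this engine and the channel's VMM on chan->cctxs (lines 103-104) or constructs one around a cgrp-level vctx from nvkm_cgrp_vctx_get() (line 113) and links it in (line 130); bind quiesces the channel (block + preempt, lines 59-60) around engn->func->bind() (line 63) before re-allowing it; put drops the reference and destroys the context when it was the last user. A rough caller-side sketch against the signatures listed above (not a verbatim excerpt from the driver; chan, engn and client are assumed to be in scope and error handling is trimmed):

struct nvkm_cctx *cctx = NULL;
int ret;

/* Look up or construct the engine context for this channel's VMM. */
ret = nvkm_chan_cctx_get(chan, engn, &cctx, client);
if (ret)
	return ret;

/* Point the hardware channel at it; the channel is blocked and
 * preempted around the actual engn->func->bind() call. */
nvkm_chan_cctx_bind(chan, engn, cctx);

/* ... work is submitted against the engine ... */

/* Teardown: unbind first, then drop the reference. */
nvkm_chan_cctx_bind(chan, engn, NULL);
nvkm_chan_cctx_put(chan, &cctx);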
137 nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait) in nvkm_chan_preempt_locked() argument
139 struct nvkm_runl *runl = chan->cgrp->runl; in nvkm_chan_preempt_locked()
141 CHAN_TRACE(chan, "preempt"); in nvkm_chan_preempt_locked()
142 chan->func->preempt(chan); in nvkm_chan_preempt_locked()
150 nvkm_chan_preempt(struct nvkm_chan *chan, bool wait) in nvkm_chan_preempt() argument
154 if (!chan->func->preempt) in nvkm_chan_preempt()
157 mutex_lock(&chan->cgrp->runl->mutex); in nvkm_chan_preempt()
158 ret = nvkm_chan_preempt_locked(chan, wait); in nvkm_chan_preempt()
159 mutex_unlock(&chan->cgrp->runl->mutex); in nvkm_chan_preempt()
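nvkm_chan_preempt() follows the file's locked/unlocked naming convention: the plain entry point bails out when the channel has no preempt hook (line 154), takes chan->cgrp->runl->mutex and defers to nvkm_chan_preempt_locked(), while callers that already hold the runlist mutex, such as nvkm_chan_remove() below, call the _locked variant directly. A stripped-down, self-contained illustration of the convention (hypothetical names, not the nouveau API):

#include <pthread.h>

static pthread_mutex_t runl_mutex = PTHREAD_MUTEX_INITIALIZER;

/* _locked variant: caller must already hold runl_mutex. */
static int chan_preempt_locked(int wait)
{
	(void)wait;	/* placeholder: issue the preempt, optionally poll */
	return 0;
}

/* Unlocked wrapper: take the mutex, do the work, release it. */
static int chan_preempt(int wait)
{
	int ret;

	pthread_mutex_lock(&runl_mutex);
	ret = chan_preempt_locked(wait);
	pthread_mutex_unlock(&runl_mutex);
	return ret;
}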
164 nvkm_chan_remove_locked(struct nvkm_chan *chan) in nvkm_chan_remove_locked() argument
166 struct nvkm_cgrp *cgrp = chan->cgrp; in nvkm_chan_remove_locked()
169 if (list_empty(&chan->head)) in nvkm_chan_remove_locked()
172 CHAN_TRACE(chan, "remove"); in nvkm_chan_remove_locked()
178 list_del_init(&chan->head); in nvkm_chan_remove_locked()
183 nvkm_chan_remove(struct nvkm_chan *chan, bool preempt) in nvkm_chan_remove() argument
185 struct nvkm_runl *runl = chan->cgrp->runl; in nvkm_chan_remove()
188 if (preempt && chan->func->preempt) in nvkm_chan_remove()
189 nvkm_chan_preempt_locked(chan, true); in nvkm_chan_remove()
190 nvkm_chan_remove_locked(chan); in nvkm_chan_remove()
196 nvkm_chan_insert(struct nvkm_chan *chan) in nvkm_chan_insert() argument
198 struct nvkm_cgrp *cgrp = chan->cgrp; in nvkm_chan_insert()
202 if (WARN_ON(!list_empty(&chan->head))) { in nvkm_chan_insert()
207 CHAN_TRACE(chan, "insert"); in nvkm_chan_insert()
208 list_add_tail(&chan->head, &cgrp->chans); in nvkm_chan_insert()
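Runlist membership is tracked entirely through chan->head: nvkm_chan_insert() links the channel onto its cgrp's chans list (line 208), nvkm_chan_remove_locked() unlinks it with list_del_init() (line 178), and because list_del_init() leaves the node pointing at itself, list_empty(&chan->head) doubles as the "is this channel currently inserted?" test used at line 169 and in the WARN_ON at line 202. A self-contained demonstration of that idiom (plain C, no kernel headers; the two-pointer node is a minimal stand-in for struct list_head):

#include <assert.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* list_del_init(): unlink and re-point the node at itself, so that
 * list_empty(&node) afterwards reports "not on any list". */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

int main(void)
{
	struct list_head chans, head;		/* cgrp->chans and chan->head */

	INIT_LIST_HEAD(&chans);
	INIT_LIST_HEAD(&head);
	assert(list_empty(&head));		/* freshly created: not inserted */
	list_add_tail(&head, &chans);		/* nvkm_chan_insert() */
	assert(!list_empty(&head));		/* membership test now true */
	list_del_init(&head);			/* nvkm_chan_remove_locked() */
	assert(list_empty(&head));		/* safe to test or remove again */
	return 0;
}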
220 nvkm_chan_block_locked(struct nvkm_chan *chan) in nvkm_chan_block_locked() argument
222 CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked)); in nvkm_chan_block_locked()
223 if (atomic_inc_return(&chan->blocked) == 1) in nvkm_chan_block_locked()
224 chan->func->stop(chan); in nvkm_chan_block_locked()
228 nvkm_chan_error(struct nvkm_chan *chan, bool preempt) in nvkm_chan_error() argument
232 spin_lock_irqsave(&chan->lock, flags); in nvkm_chan_error()
233 if (atomic_inc_return(&chan->errored) == 1) { in nvkm_chan_error()
234 CHAN_ERROR(chan, "errored - disabling channel"); in nvkm_chan_error()
235 nvkm_chan_block_locked(chan); in nvkm_chan_error()
237 chan->func->preempt(chan); in nvkm_chan_error()
238 nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED); in nvkm_chan_error()
240 spin_unlock_irqrestore(&chan->lock, flags); in nvkm_chan_error()
244 nvkm_chan_block(struct nvkm_chan *chan) in nvkm_chan_block() argument
246 spin_lock_irq(&chan->lock); in nvkm_chan_block()
247 nvkm_chan_block_locked(chan); in nvkm_chan_block()
248 spin_unlock_irq(&chan->lock); in nvkm_chan_block()
252 nvkm_chan_allow(struct nvkm_chan *chan) in nvkm_chan_allow() argument
254 spin_lock_irq(&chan->lock); in nvkm_chan_allow()
255 CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked)); in nvkm_chan_allow()
256 if (atomic_dec_and_test(&chan->blocked)) in nvkm_chan_allow()
257 chan->func->start(chan); in nvkm_chan_allow()
258 spin_unlock_irq(&chan->lock); in nvkm_chan_allow()
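chan->blocked is a nesting counter manipulated under chan->lock: only the 0->1 and 1->0 transitions reach the hardware through chan->func->stop() (line 224) and chan->func->start() (line 257), so block/allow pairs from different paths can overlap safely. Channels are created with blocked already set to 1 (line 379 below), so the first nvkm_chan_allow() is what actually starts them, and nvkm_chan_error() blocks the channel with no matching allow, optionally preempts it (line 237), and signals NVKM_CHAN_EVENT_ERRORED (line 238), leaving an errored channel stopped. A small self-contained analogy of the counting scheme (atomics stand in for the spinlocked counter; chan_block()/chan_allow() are hypothetical names, not nouveau code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int blocked = 1;	/* like nvkm_chan_new_(): born blocked */

static void chan_block(void)
{
	if (atomic_fetch_add(&blocked, 1) == 0)	/* 0 -> 1: really stop */
		puts("stop channel");		/* stand-in for chan->func->stop() */
}

static void chan_allow(void)
{
	if (atomic_fetch_sub(&blocked, 1) == 1)	/* 1 -> 0: really start */
		puts("start channel");		/* stand-in for chan->func->start() */
}

int main(void)
{
	chan_allow();	/* first allow starts the freshly created channel */
	chan_block();	/* outer block stops it ... */
	chan_block();	/* ... nested block is a no-op on the hardware ... */
	chan_allow();
	chan_allow();	/* ... and only the outermost allow restarts it */
	return 0;
}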
264 struct nvkm_chan *chan = *pchan; in nvkm_chan_del() local
266 if (!chan) in nvkm_chan_del()
269 if (chan->func->ramfc->clear) in nvkm_chan_del()
270 chan->func->ramfc->clear(chan); in nvkm_chan_del()
272 nvkm_ramht_del(&chan->ramht); in nvkm_chan_del()
273 nvkm_gpuobj_del(&chan->pgd); in nvkm_chan_del()
274 nvkm_gpuobj_del(&chan->eng); in nvkm_chan_del()
275 nvkm_gpuobj_del(&chan->cache); in nvkm_chan_del()
276 nvkm_gpuobj_del(&chan->ramfc); in nvkm_chan_del()
278 nvkm_memory_unref(&chan->userd.mem); in nvkm_chan_del()
280 if (chan->cgrp) { in nvkm_chan_del()
281 nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock); in nvkm_chan_del()
282 nvkm_cgrp_unref(&chan->cgrp); in nvkm_chan_del()
285 if (chan->vmm) { in nvkm_chan_del()
286 nvkm_vmm_part(chan->vmm, chan->inst->memory); in nvkm_chan_del()
287 nvkm_vmm_unref(&chan->vmm); in nvkm_chan_del()
290 nvkm_gpuobj_del(&chan->push); in nvkm_chan_del()
291 nvkm_gpuobj_del(&chan->inst); in nvkm_chan_del()
292 kfree(chan); in nvkm_chan_del()
298 struct nvkm_chan *chan = *pchan; in nvkm_chan_put() local
300 if (!chan) in nvkm_chan_put()
304 spin_unlock_irqrestore(&chan->cgrp->lock, irqflags); in nvkm_chan_put()
313 struct nvkm_chan *chan; in nvkm_chan_get_inst() local
318 chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags); in nvkm_chan_get_inst()
319 if (chan || engn->engine == engine) in nvkm_chan_get_inst()
320 return chan; in nvkm_chan_get_inst()
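nvkm_chan_get_inst() walks each runlist looking for the channel whose instance block matches, and returns with the owning cgrp's spinlock held and interrupts saved; nvkm_chan_put() is the matching release (the spin_unlock_irqrestore at line 304). A hedged caller-side sketch of that bracket, with the signature assumed from the fragments above rather than copied from the driver, and engine/inst assumed to be in scope:

unsigned long irqflags;
struct nvkm_chan *chan;

/* Find the channel behind an instance address; on success the cgrp lock
 * is held and IRQs are saved until nvkm_chan_put() releases them. */
chan = nvkm_chan_get_inst(engine, inst, &irqflags);
if (chan) {
	/* ... e.g. attribute a fault to it or mark it errored ... */
	nvkm_chan_put(&chan, irqflags);
}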
353 struct nvkm_chan *chan; in nvkm_chan_new_() local
371 if (!(chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL))) in nvkm_chan_new_()
374 chan->func = func; in nvkm_chan_new_()
375 strscpy(chan->name, name, sizeof(chan->name)); in nvkm_chan_new_()
376 chan->runq = runq; in nvkm_chan_new_()
377 chan->id = -1; in nvkm_chan_new_()
378 spin_lock_init(&chan->lock); in nvkm_chan_new_()
379 atomic_set(&chan->blocked, 1); in nvkm_chan_new_()
380 atomic_set(&chan->errored, 0); in nvkm_chan_new_()
381 INIT_LIST_HEAD(&chan->cctxs); in nvkm_chan_new_()
382 INIT_LIST_HEAD(&chan->head); in nvkm_chan_new_()
393 ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp); in nvkm_chan_new_()
399 cgrp = chan->cgrp; in nvkm_chan_new_()
406 chan->cgrp = nvkm_cgrp_ref(cgrp); in nvkm_chan_new_()
411 &chan->inst); in nvkm_chan_new_()
422 ret = nvkm_vmm_join(vmm, chan->inst->memory); in nvkm_chan_new_()
428 chan->vmm = nvkm_vmm_ref(vmm); in nvkm_chan_new_()
433 ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push); in nvkm_chan_new_()
441 chan->id = nvkm_chid_get(runl->chid, chan); in nvkm_chan_new_()
442 if (chan->id < 0) { in nvkm_chan_new_()
448 cgrp->id = chan->id; in nvkm_chan_new_()
452 if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) { in nvkm_chan_new_()
457 ret = nvkm_memory_kmap(userd, &chan->userd.mem); in nvkm_chan_new_()
463 chan->userd.base = ouserd; in nvkm_chan_new_()
465 chan->userd.mem = nvkm_memory_ref(fifo->userd.mem); in nvkm_chan_new_()
466 chan->userd.base = chan->id * chan->func->userd->size; in nvkm_chan_new_()
469 if (chan->func->userd->clear) in nvkm_chan_new_()
470 chan->func->userd->clear(chan); in nvkm_chan_new_()
473 ret = chan->func->ramfc->write(chan, offset, length, devm, priv); in nvkm_chan_new_()
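The constructor lines above run through allocation and basic init (lines 371-382), channel-group setup (a fresh cgrp or a reference on an existing one, lines 393-406), instance memory and VMM join (lines 411-428), push-buffer binding (line 433), channel-ID allocation (lines 441-448) and finally USERD placement and the RAMFC write (line 473). For USERD there are two placements visible: a caller-supplied memory object whose offset must be bounds-checked (line 452), or a per-channel slice of the FIFO-wide USERD buffer sized by chan->func->userd->size (line 466). A tiny self-contained model of that placement decision (userd_place() is a hypothetical helper, not the driver's code):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical model of the USERD placement above: either validate a
 * caller-supplied offset into its own memory object, or fall back to a
 * per-channel slice of the FIFO-wide USERD allocation. */
static bool userd_place(bool caller_mem, uint64_t ouserd, uint64_t mem_size,
			int chan_id, uint32_t userd_size, uint64_t *base)
{
	if (caller_mem) {
		if (ouserd + userd_size >= mem_size)	/* mirrors the check at line 452 */
			return false;			/* rejected as out of bounds */
		*base = ouserd;				/* chan->userd.base = ouserd */
	} else {
		/* chan->id * chan->func->userd->size, as at line 466 */
		*base = (uint64_t)chan_id * userd_size;
	}
	return true;
}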