• Home
  • Raw
  • Download

Lines Matching refs:subdev

44 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_engine_status() local
45 struct nvkm_device *device = subdev->device; in gk104_fifo_engine_status()
76 nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d " in gk104_fifo_engine_status()
128 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_fini()
135 struct nvkm_device *device = fifo->engine.subdev.device; in gk104_fifo_uevent_init()
143 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_runlist_commit() local
144 struct nvkm_device *device = subdev->device; in gk104_fifo_runlist_commit()
163 nvkm_error(subdev, "runlist %d update timeout\n", runl); in gk104_fifo_runlist_commit()
242 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_pbdma_init()
249 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_pbdma_nr()
265 return nvkm_device_engine(base->engine.subdev.device, NVKM_ENGINE_SW, 0); in gk104_fifo_id_engine()
276 if (engine->subdev.type == NVKM_ENGINE_SW) in gk104_fifo_engine_id()
292 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_recover_work()
309 nvkm_subdev_fini(&engine->subdev, false); in gk104_fifo_recover_work()
310 WARN_ON(nvkm_subdev_init(&engine->subdev)); in gk104_fifo_recover_work()
326 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_runl() local
327 struct nvkm_device *device = subdev->device; in gk104_fifo_recover_runl()
339 nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl); in gk104_fifo_recover_runl()
373 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_chan() local
374 struct nvkm_device *device = subdev->device; in gk104_fifo_recover_chan()
394 nvkm_warn(subdev, "channel %d: killed\n", chid); in gk104_fifo_recover_chan()
413 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_recover_engn() local
414 struct nvkm_device *device = subdev->device; in gk104_fifo_recover_engn()
439 mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst); in gk104_fifo_recover_engn()
443 if (en->data2 == engine->subdev.type && in gk104_fifo_recover_engn()
444 en->inst == engine->subdev.inst) { in gk104_fifo_recover_engn()
475 nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn); in gk104_fifo_recover_engn()
483 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_fault() local
484 struct nvkm_device *device = subdev->device; in gk104_fifo_fault()
520 struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine); in gk104_fifo_fault() local
521 if (subdev) { in gk104_fifo_fault()
522 if (subdev->func == &nvkm_engine) in gk104_fifo_fault()
523 engine = container_of(subdev, typeof(*engine), subdev); in gk104_fifo_fault()
524 en = engine->subdev.name; in gk104_fifo_fault()
533 nvkm_error(subdev, in gk104_fifo_fault()
573 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_bind() local
574 struct nvkm_device *device = subdev->device; in gk104_fifo_intr_bind()
580 nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : ""); in gk104_fifo_intr_bind()
592 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_intr_sched_ctxsw()
623 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_sched() local
624 struct nvkm_device *device = subdev->device; in gk104_fifo_intr_sched()
630 nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : ""); in gk104_fifo_intr_sched()
644 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_chsw() local
645 struct nvkm_device *device = subdev->device; in gk104_fifo_intr_chsw()
647 nvkm_error(subdev, "CHSW_ERROR %08x\n", stat); in gk104_fifo_intr_chsw()
654 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_dropped_fault() local
655 struct nvkm_device *device = subdev->device; in gk104_fifo_intr_dropped_fault()
657 nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat); in gk104_fifo_intr_dropped_fault()
697 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_pbdma_0() local
698 struct nvkm_device *device = subdev->device; in gk104_fifo_intr_pbdma_0()
723 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] " in gk104_fifo_intr_pbdma_0()
746 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr_pbdma_1() local
747 struct nvkm_device *device = subdev->device; in gk104_fifo_intr_pbdma_1()
755 nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n", in gk104_fifo_intr_pbdma_1()
767 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_intr_runlist()
787 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_intr() local
788 struct nvkm_device *device = subdev->device; in gk104_fifo_intr()
799 nvkm_error(subdev, "PIO_ERROR\n"); in gk104_fifo_intr()
817 nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n"); in gk104_fifo_intr()
823 nvkm_error(subdev, "LB_ERROR\n"); in gk104_fifo_intr()
869 nvkm_error(subdev, "INTR %08x\n", stat); in gk104_fifo_intr()
879 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_fini()
902 switch (engine->subdev.type) { in gk104_fifo_info()
939 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_oneinit() local
940 struct nvkm_device *device = subdev->device; in gk104_fifo_oneinit()
947 nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr); in gk104_fifo_oneinit()
978 en = fifo->engine[engn].engine->subdev.name; in gk104_fifo_oneinit()
981 nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n", in gk104_fifo_oneinit()
1030 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_init()
1062 struct nvkm_device *device = fifo->base.engine.subdev.device; in gk104_fifo_dtor()