
Lines Matching refs:svmm

66 struct nouveau_svmm *svmm; member
87 struct nouveau_svmm *svmm; member
171 if (!cli->svm.svmm) { in nouveau_svmm_bind()
188 nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr, in nouveau_svmm_bind()
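
The two nouveau_svmm_bind() matches (171, 188) show the bind ioctl refusing to proceed when no SVM mirror has been initialised for the client, then handing each intersecting VMA chunk to the device-memory migration helper. A minimal sketch of that guard-then-migrate loop, reconstructed from the matches rather than copied from the driver; the helper name below is hypothetical, and the loop bounds, VMA walk, and error code are assumptions:

/* Hypothetical condensed view of the loop the matches above come from. */
static int
svmm_bind_sketch(struct nouveau_cli *cli, struct mm_struct *mm,
		 u64 va_start, u64 va_end)
{
	u64 addr, next;

	if (!cli->svm.svmm)
		return -EINVAL;			/* assumed error code */

	for (addr = va_start; addr < va_end; addr = next) {
		struct vm_area_struct *vma;

		vma = find_vma_intersection(mm, addr, va_end);
		if (!vma)
			break;
		addr = max_t(u64, addr, vma->vm_start);
		next = min_t(u64, vma->vm_end, va_end);
		/* Migrate this piece to device memory (match 188). */
		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma,
					 addr, next);
	}
	return 0;
}
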
208 nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst) in nouveau_svmm_part() argument
211 if (svmm) { in nouveau_svmm_part()
212 mutex_lock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_part()
213 ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst); in nouveau_svmm_part()
218 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_part()
224 nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst) in nouveau_svmm_join() argument
227 if (svmm) { in nouveau_svmm_join()
230 ivmm->svmm = svmm; in nouveau_svmm_join()
233 mutex_lock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_join()
234 list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst); in nouveau_svmm_join()
235 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex); in nouveau_svmm_join()
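
Matches 208-235 are the channel bookkeeping: nouveau_svmm_join() records which SVM mirror owns a channel instance on the drm-wide svm->inst list, and nouveau_svmm_part() unlinks it again, both under svm->mutex. A sketch of that pairing; the nouveau_ivmm field layout beyond .svmm and .head, and the allocation details, are assumptions:

struct nouveau_ivmm {
	struct nouveau_svmm *svmm;	/* mirror owning this channel instance */
	u64 inst;			/* channel instance address (assumed field) */
	struct list_head head;		/* linkage on drm->svm->inst */
};

static void
svmm_join_sketch(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;

	if (!svmm || !(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
		return;
	ivmm->svmm = svmm;
	ivmm->inst = inst;

	mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
	list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
	mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
}

static void
svmm_part_sketch(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;

	if (!svmm)
		return;
	mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
	ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
	if (ivmm) {
		list_del(&ivmm->head);
		kfree(ivmm);
	}
	mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
}
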
242 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) in nouveau_svmm_invalidate() argument
245 nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR, in nouveau_svmm_invalidate()
257 struct nouveau_svmm *svmm = in nouveau_svmm_invalidate_range_start() local
265 SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit); in nouveau_svmm_invalidate_range_start()
267 mutex_lock(&svmm->mutex); in nouveau_svmm_invalidate_range_start()
268 if (unlikely(!svmm->vmm)) in nouveau_svmm_invalidate_range_start()
276 update->owner == svmm->vmm->cli->drm->dev) in nouveau_svmm_invalidate_range_start()
279 if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) { in nouveau_svmm_invalidate_range_start()
280 if (start < svmm->unmanaged.start) { in nouveau_svmm_invalidate_range_start()
281 nouveau_svmm_invalidate(svmm, start, in nouveau_svmm_invalidate_range_start()
282 svmm->unmanaged.limit); in nouveau_svmm_invalidate_range_start()
284 start = svmm->unmanaged.limit; in nouveau_svmm_invalidate_range_start()
287 nouveau_svmm_invalidate(svmm, start, limit); in nouveau_svmm_invalidate_range_start()
290 mutex_unlock(&svmm->mutex); in nouveau_svmm_invalidate_range_start()
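
Matches 242-290 cover the CPU-side invalidation path: the whole-mm notifier callback takes svmm->mutex, returns early if the mirror has already been torn down or if the event is a device-private migration raised by this driver itself, clips the range against the unmanaged window set up at init time, and finally clears the mirrored GPU page-table entries through the NVIF PFNCLR method seen in match 245. A condensed sketch; the pfnclr argument struct layout is a reasonable assumption rather than something shown in the matches:

static void
svmm_pfnclr_sketch(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
	if (limit > start) {
		/* Ask the VMM object to drop the mirrored PFNs; the
		 * argument layout (addr/size) is assumed. */
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
	}
}

static int
svmm_range_start_sketch(struct mmu_notifier *mn,
			const struct mmu_notifier_range *update)
{
	struct nouveau_svmm *svmm =
		container_of(mn, struct nouveau_svmm, notifier);
	unsigned long start = update->start, limit = update->end;

	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;

	mutex_lock(&svmm->mutex);
	if (unlikely(!svmm->vmm))
		goto out;		/* mirror already being torn down */

	/* Migrations to device memory initiated by this driver are
	 * invalidated as part of the migration itself (match 276). */
	if (update->event == MMU_NOTIFY_MIGRATE &&
	    update->owner == svmm->vmm->cli->drm->dev)
		goto out;

	/* Clip against the unmanaged window reserved at init time. */
	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
		if (start < svmm->unmanaged.start)
			svmm_pfnclr_sketch(svmm, start, svmm->unmanaged.limit);
		start = svmm->unmanaged.limit;
	}

	svmm_pfnclr_sketch(svmm, start, limit);
out:
	mutex_unlock(&svmm->mutex);
	return 0;
}
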
307 struct nouveau_svmm *svmm = *psvmm; in nouveau_svmm_fini() local
308 if (svmm) { in nouveau_svmm_fini()
309 mutex_lock(&svmm->mutex); in nouveau_svmm_fini()
310 svmm->vmm = NULL; in nouveau_svmm_fini()
311 mutex_unlock(&svmm->mutex); in nouveau_svmm_fini()
312 mmu_notifier_put(&svmm->notifier); in nouveau_svmm_fini()
322 struct nouveau_svmm *svmm; in nouveau_svmm_init() local
331 if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL))) in nouveau_svmm_init()
333 svmm->vmm = &cli->svm; in nouveau_svmm_init()
334 svmm->unmanaged.start = args->unmanaged_addr; in nouveau_svmm_init()
335 svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size; in nouveau_svmm_init()
336 mutex_init(&svmm->mutex); in nouveau_svmm_init()
361 svmm->notifier.ops = &nouveau_mn_ops; in nouveau_svmm_init()
362 ret = __mmu_notifier_register(&svmm->notifier, current->mm); in nouveau_svmm_init()
367 cli->svm.svmm = svmm; in nouveau_svmm_init()
377 kfree(svmm); in nouveau_svmm_init()
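
Matches 307-377 give the mirror's lifecycle. Creation allocates the svmm, points it at the client's VMM, records the unmanaged window from the ioctl arguments, and registers a whole-mm mmu_notifier; teardown does not free the structure directly but detaches the VMM under the mutex and lets mmu_notifier_put() drop the final reference. A sketch of both ends, with the intermediate VMM setup elided and the error unwinding simplified to what match 377 shows:

static int
svmm_init_sketch(struct nouveau_cli *cli, u64 unmanaged_addr,
		 u64 unmanaged_size)
{
	struct nouveau_svmm *svmm;
	int ret;

	svmm = kzalloc(sizeof(*svmm), GFP_KERNEL);
	if (!svmm)
		return -ENOMEM;
	svmm->vmm = &cli->svm;
	svmm->unmanaged.start = unmanaged_addr;
	svmm->unmanaged.limit = unmanaged_addr + unmanaged_size;
	mutex_init(&svmm->mutex);

	/* ... replacement VMM allocation elided (matches 336-361) ... */

	svmm->notifier.ops = &nouveau_mn_ops;
	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
	if (ret) {
		kfree(svmm);
		return ret;
	}
	cli->svm.svmm = svmm;
	return 0;
}

static void
svmm_fini_sketch(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;

	if (svmm) {
		mutex_lock(&svmm->mutex);
		svmm->vmm = NULL;	/* later callbacks see a dead mirror */
		mutex_unlock(&svmm->mutex);
		mmu_notifier_put(&svmm->notifier);
		*psvmm = NULL;
	}
}
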
505 struct nouveau_svmm *svmm; member
516 range->owner == sn->svmm->vmm->cli->drm->dev) in nouveau_svm_range_invalidate()
527 mutex_lock(&sn->svmm->mutex); in nouveau_svm_range_invalidate()
528 else if (!mutex_trylock(&sn->svmm->mutex)) in nouveau_svm_range_invalidate()
531 mutex_unlock(&sn->svmm->mutex); in nouveau_svm_range_invalidate()
587 static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, in nouveau_atomic_range_fault() argument
594 struct mm_struct *mm = svmm->notifier.mm; in nouveau_atomic_range_fault()
622 mutex_lock(&svmm->mutex); in nouveau_atomic_range_fault()
626 mutex_unlock(&svmm->mutex); in nouveau_atomic_range_fault()
639 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); in nouveau_atomic_range_fault()
640 mutex_unlock(&svmm->mutex); in nouveau_atomic_range_fault()
650 static int nouveau_range_fault(struct nouveau_svmm *svmm, in nouveau_range_fault() argument
666 struct mm_struct *mm = svmm->notifier.mm; in nouveau_range_fault()
694 mutex_lock(&svmm->mutex); in nouveau_range_fault()
697 mutex_unlock(&svmm->mutex); in nouveau_range_fault()
705 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); in nouveau_range_fault()
706 mutex_unlock(&svmm->mutex); in nouveau_range_fault()
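
Matches 587-706 are the two fault-service paths. Both follow the same HMM mirroring discipline: install an interval notifier over the faulting window, fault the pages with hmm_range_fault() outside svmm->mutex, then take svmm->mutex and only push the result to the GPU via nvif_object_ioctl() if the notifier sequence count shows no racing CPU invalidation, retrying otherwise. A condensed sketch of the non-atomic path; the notifier wrapper struct, the ops table, the one-page fault window, and the pfn-conversion step are assumptions or elisions:

struct svm_notifier_sketch {
	struct mmu_interval_notifier notifier;
	struct nouveau_svmm *svmm;
};

static int
range_fault_sketch(struct nouveau_svmm *svmm, struct nouveau_drm *drm,
		   void *args, u32 size, u64 addr,
		   struct svm_notifier_sketch *sn,
		   const struct mmu_interval_notifier_ops *ops)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	unsigned long hmm_pfns[1];	/* one page per fault (assumed) */
	struct hmm_range range = {
		.notifier = &sn->notifier,
		.default_flags = HMM_PFN_REQ_FAULT, /* | HMM_PFN_REQ_WRITE */
		.hmm_pfns = hmm_pfns,
		.dev_private_owner = drm->dev,
	};
	struct mm_struct *mm = svmm->notifier.mm;
	int ret;

	ret = mmu_interval_notifier_insert(&sn->notifier, mm, addr,
					   PAGE_SIZE, ops);
	if (ret)
		return ret;
	range.start = sn->notifier.interval_tree.start;
	range.end = sn->notifier.interval_tree.last + 1;

	for (;;) {
		if (time_after(jiffies, timeout)) {
			ret = -EBUSY;
			goto out;
		}

		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;
			goto out;
		}

		mutex_lock(&svmm->mutex);
		if (mmu_interval_read_retry(range.notifier,
					    range.notifier_seq)) {
			mutex_unlock(&svmm->mutex);
			continue;	/* raced with a CPU invalidation */
		}
		break;
	}

	/* Translate hmm_pfns[] into the NVIF PFNMAP payload (helper elided),
	 * then map the pages on the GPU while still holding svmm->mutex. */
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	mutex_unlock(&svmm->mutex);
out:
	mmu_interval_notifier_remove(&sn->notifier);
	return ret;
}
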
722 struct nouveau_svmm *svmm; in nouveau_svm_fault() local
762 for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) { in nouveau_svm_fault()
763 if (!svmm || buffer->fault[fi]->inst != inst) { in nouveau_svm_fault()
766 svmm = ivmm ? ivmm->svmm : NULL; in nouveau_svm_fault()
768 SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm); in nouveau_svm_fault()
770 buffer->fault[fi]->svmm = svmm; in nouveau_svm_fault()
786 if (!(svmm = buffer->fault[fi]->svmm)) { in nouveau_svm_fault()
790 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr); in nouveau_svm_fault()
797 if (start < svmm->unmanaged.limit) in nouveau_svm_fault()
798 limit = min_t(u64, limit, svmm->unmanaged.start); in nouveau_svm_fault()
827 mm = svmm->notifier.mm; in nouveau_svm_fault()
833 notifier.svmm = svmm; in nouveau_svm_fault()
835 ret = nouveau_atomic_range_fault(svmm, svm->drm, in nouveau_svm_fault()
839 ret = nouveau_range_fault(svmm, svm->drm, &args.i, in nouveau_svm_fault()
855 if (buffer->fault[fn]->svmm != svmm || in nouveau_svm_fault()
918 nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, in nouveau_pfns_map() argument
927 mutex_lock(&svmm->mutex); in nouveau_pfns_map()
929 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, in nouveau_pfns_map()
932 mutex_unlock(&svmm->mutex); in nouveau_pfns_map()
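
The nouveau_pfns_map() matches (918-932) show the same locking rule one more time: GPU page tables are only updated through the VMM object while svmm->mutex is held, so updates cannot race with the invalidation paths above. A minimal sketch, assuming the caller has already packed the PFN array into an NVIF PFNMAP argument buffer of size bytes:

static void
pfns_map_sketch(struct nouveau_svmm *svmm, void *args, u32 size)
{
	mutex_lock(&svmm->mutex);
	/* Push the prepared PFNMAP payload to the GPU VMM; any concurrent
	 * CPU-side invalidation is serialised behind the same mutex. */
	nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	mutex_unlock(&svmm->mutex);
}
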