Lines matching refs:vm (each hit shows the source line number, the matching line, and its enclosing function)
32 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_at() local
33 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_at()
38 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_map_at()
49 struct nvkm_memory *pgt = vm->pgt[pde].mem[big]; in nvkm_vm_map_at()
70 mmu->func->flush(vm); in nvkm_vm_map_at()
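The nvkm_vm_map_at() hits above outline the whole hot path: resolve vm and mmu from the vma, derive the page-directory index from pgt_bits, pick the per-PDE page table out of vm->pgt[], and finish with a single TLB flush. A condensed sketch of the walk those lines belong to, with the chunk splitting filled in from the mainline nouveau driver (the map() hook signature and the nvkm_mem argument are assumptions beyond what the listing shows; min() is the kernel macro):

    /* Sketch: write `num` PTEs for one physically contiguous region,
     * splitting the write wherever it crosses into the next page table.
     * Mirrors the pde/pte arithmetic referenced at lines 38 and 49. */
    static void
    sketch_map_contig(struct nvkm_vma *vma, struct nvkm_mem *node,
                      u64 phys, u32 num)
    {
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 bits = vma->node->type - 12;
        u32 offset = vma->node->offset;
        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (mmu->func->pgt_bits - bits); /* PTEs per table */
        u64 delta = 0;

        while (num) {
            struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
            u32 len = min(num, max - pte);

            mmu->func->map(vma, pgt, node, pte, len, phys, delta);
            delta += (u64)len << vma->node->type;
            phys  += (u64)len << (bits + 12);
            num -= len;
            pte += len;
            if (pte == max) { /* spilled into the next PDE */
                pde++;
                pte = 0;
            }
        }
        mmu->func->flush(vm); /* one flush for the whole batch */
    }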
77 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg_table() local
78 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_sg_table()
83 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_map_sg_table()
92 struct nvkm_memory *pgt = vm->pgt[pde].mem[big]; in nvkm_vm_map_sg_table()
128 mmu->func->flush(vm); in nvkm_vm_map_sg_table()
135 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg() local
136 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_sg()
142 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_map_sg()
148 struct nvkm_memory *pgt = vm->pgt[pde].mem[big]; in nvkm_vm_map_sg()
166 mmu->func->flush(vm); in nvkm_vm_map_sg()
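nvkm_vm_map_sg_table() and nvkm_vm_map_sg() (lines 77-166) repeat exactly the same pde/pte bookkeeping, but feed per-page DMA addresses through the map_sg() hook instead of map(). Condensed in the same style (hook signature per mainline; the scatterlist iteration that produces `list` is elided):

    /* Sketch: map `num` DMA pages from `list`, one page table's
     * worth at a time, flushing once at the end (lines 128, 166). */
    static void
    sketch_map_sg(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *mem,
                  dma_addr_t *list, u32 num)
    {
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (mmu->func->pgt_bits - bits);

        while (num) {
            struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
            u32 len = min(num, max - pte);

            mmu->func->map_sg(vma, pgt, mem, pte, len, list);
            list += len;
            num -= len;
            pte += len;
            if (pte == max) {
                pde++;
                pte = 0;
            }
        }
        mmu->func->flush(vm);
    }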
184 struct nvkm_vm *vm = vma->vm; in nvkm_vm_unmap_at() local
185 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_unmap_at()
190 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_unmap_at()
196 struct nvkm_memory *pgt = vm->pgt[pde].mem[big]; in nvkm_vm_unmap_at()
213 mmu->func->flush(vm); in nvkm_vm_unmap_at()
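nvkm_vm_unmap_at() is the mirror image: identical index arithmetic, but the per-chip unmap() hook clears PTEs and no physical addresses are involved. Sketch under the same assumptions (unmap() signature taken from mainline):

    /* Sketch: clear the PTEs backing `length` bytes at `delta` into
     * the vma, page table by page table, then flush (line 213). */
    static void
    sketch_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
    {
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (mmu->func->pgt_bits - bits);
        u32 num = length >> vma->node->type;

        while (num) {
            struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
            u32 len = min(num, max - pte);

            mmu->func->unmap(vma, pgt, pte, len);
            num -= len;
            pte += len;
            if (pte == max) {
                pde++;
                pte = 0;
            }
        }
        mmu->func->flush(vm);
    }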
223 nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) in nvkm_vm_unmap_pgt() argument
225 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_unmap_pgt()
232 vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_unmap_pgt()
239 list_for_each_entry(vpgd, &vm->pgd_list, head) { in nvkm_vm_unmap_pgt()
243 mmu->func->flush(vm); in nvkm_vm_unmap_pgt()
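nvkm_vm_unmap_pgt() drops page-table references across a PDE range; when a table's refcount reaches zero it is unhooked from every linked page directory (the pgd_list walk at line 239), the TLB is flushed, and the table's backing memory is released. Reconstructed around the listed lines (nvkm_memory_del() and the map_pgt() hook usage come from mainline nouveau, not from the listing):

    static void
    nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
    {
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgt *vpgt;
        struct nvkm_vm_pgd *vpgd;
        struct nvkm_memory *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
            vpgt = &vm->pgt[pde - vm->fpde];
            if (--vpgt->refcount[big])
                continue; /* table still referenced elsewhere */

            pgt = vpgt->mem[big];
            vpgt->mem[big] = NULL;

            /* clear this PDE in every attached page directory */
            list_for_each_entry(vpgd, &vm->pgd_list, head)
                mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);

            mmu->func->flush(vm);
            nvkm_memory_del(&pgt);
        }
    }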
250 nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type) in nvkm_vm_map_pgt() argument
252 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_pgt()
253 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_map_pgt()
267 list_for_each_entry(vpgd, &vm->pgd_list, head) { in nvkm_vm_map_pgt()
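The converse, nvkm_vm_map_pgt(), allocates a page table sized from pgt_bits and the requested page type, then publishes it into every page directory on vm->pgd_list before bumping the refcount. Gaps between the listed lines are filled from mainline (the nvkm_memory_new() call and the 8-bytes-per-PTE sizing are assumptions):

    static int
    nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
    {
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nvkm_vm_pgd *vpgd;
        int big = (type != mmu->func->spg_shift);
        u32 pgt_size;
        int ret;

        /* one 8-byte PTE per page of size 2^type within the PDE span */
        pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              pgt_size, 0x1000, true, &vpgt->mem[big]);
        if (unlikely(ret))
            return ret;

        /* make the new table visible in every attached PGD */
        list_for_each_entry(vpgd, &vm->pgd_list, head)
            mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);

        vpgt->refcount[big]++;
        return 0;
    }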
276 nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access, in nvkm_vm_get() argument
279 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_get()
285 mutex_lock(&vm->mutex); in nvkm_vm_get()
286 ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align, in nvkm_vm_get()
289 mutex_unlock(&vm->mutex); in nvkm_vm_get()
297 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_get()
305 ret = nvkm_vm_map_pgt(vm, pde, vma->node->type); in nvkm_vm_get()
308 nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1); in nvkm_vm_get()
309 nvkm_mm_free(&vm->mm, &vma->node); in nvkm_vm_get()
310 mutex_unlock(&vm->mutex); in nvkm_vm_get()
314 mutex_unlock(&vm->mutex); in nvkm_vm_get()
316 vma->vm = NULL; in nvkm_vm_get()
317 nvkm_vm_ref(vm, &vma->vm, NULL); in nvkm_vm_get()
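nvkm_vm_get() shows why vm->mutex matters: address-space allocation via nvkm_mm_head() and page-table population must happen atomically, and a mid-loop failure has to unwind only the PDEs populated so far (lines 308-309) before releasing the node. Joining the referenced lines gives roughly the following (the refcount fast path and the vma finalization are filled in from mainline):

    int
    nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
                struct nvkm_vma *vma)
    {
        struct nvkm_mmu *mmu = vm->mmu;
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&vm->mutex);
        ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
                           &vma->node);
        if (unlikely(ret != 0)) {
            mutex_unlock(&vm->mutex);
            return ret;
        }

        fpde = (vma->node->offset >> mmu->func->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1)
               >> mmu->func->pgt_bits;

        for (pde = fpde; pde <= lpde; pde++) {
            struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
            int big = (vma->node->type != mmu->func->spg_shift);

            if (likely(vpgt->refcount[big])) {
                vpgt->refcount[big]++; /* table already exists */
                continue;
            }

            ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
            if (ret) {
                /* unwind only the PDEs populated so far */
                if (pde != fpde)
                    nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
                nvkm_mm_free(&vm->mm, &vma->node);
                mutex_unlock(&vm->mutex);
                return ret;
            }
        }
        mutex_unlock(&vm->mutex);

        vma->vm = NULL;
        nvkm_vm_ref(vm, &vma->vm, NULL); /* vma holds a vm reference */
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
    }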
327 struct nvkm_vm *vm; in nvkm_vm_put() local
332 vm = vma->vm; in nvkm_vm_put()
333 mmu = vm->mmu; in nvkm_vm_put()
338 mutex_lock(&vm->mutex); in nvkm_vm_put()
339 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde); in nvkm_vm_put()
340 nvkm_mm_free(&vm->mm, &vma->node); in nvkm_vm_put()
341 mutex_unlock(&vm->mutex); in nvkm_vm_put()
343 nvkm_vm_ref(NULL, &vma->vm, NULL); in nvkm_vm_put()
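nvkm_vm_put() undoes all of that in reverse: compute the vma's PDE span, drop the page-table references under the mutex, return the range to the allocator, and finally release the vma's reference on the vm. The listing covers nearly every statement; joined together (the NULL-node early-out follows mainline):

    void
    nvkm_vm_put(struct nvkm_vma *vma)
    {
        struct nvkm_mmu *mmu;
        struct nvkm_vm *vm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
            return;
        vm = vma->vm;
        mmu = vm->mmu;

        fpde = (vma->node->offset >> mmu->func->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1)
               >> mmu->func->pgt_bits;

        mutex_lock(&vm->mutex);
        nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift,
                          fpde, lpde);
        nvkm_mm_free(&vm->mm, &vma->node);
        mutex_unlock(&vm->mutex);

        nvkm_vm_ref(NULL, &vma->vm, NULL); /* drop the vm reference */
    }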
347 nvkm_vm_boot(struct nvkm_vm *vm, u64 size) in nvkm_vm_boot() argument
349 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_boot()
356 vm->pgt[0].refcount[0] = 1; in nvkm_vm_boot()
357 vm->pgt[0].mem[0] = pgt; in nvkm_vm_boot()
358 nvkm_memory_boot(pgt, vm); in nvkm_vm_boot()
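nvkm_vm_boot() bootstraps the very first small-page table by hand: allocate a table large enough to cover `size`, install it at vm->pgt[0] with a refcount of 1, and ask the memory object to boot itself into the new VM. Sketch (the nvkm_memory_new() parameters, including the 8-bytes-per-PTE sizing, are inferred from mainline rather than the listing):

    int
    nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
    {
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_memory *pgt;
        int ret;

        /* one 8-byte PTE per small page covering `size` */
        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              (size >> mmu->func->spg_shift) * 8,
                              0x1000, true, &pgt);
        if (ret == 0) {
            vm->pgt[0].refcount[0] = 1;
            vm->pgt[0].mem[0] = pgt;
            nvkm_memory_boot(pgt, vm);
        }
        return ret;
    }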
369 struct nvkm_vm *vm; in nvkm_vm_create() local
373 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in nvkm_vm_create()
374 if (!vm) in nvkm_vm_create()
377 __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key); in nvkm_vm_create()
378 INIT_LIST_HEAD(&vm->pgd_list); in nvkm_vm_create()
379 vm->mmu = mmu; in nvkm_vm_create()
380 kref_init(&vm->refcount); in nvkm_vm_create()
381 vm->fpde = offset >> (mmu->func->pgt_bits + 12); in nvkm_vm_create()
382 vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12); in nvkm_vm_create()
384 vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt)); in nvkm_vm_create()
385 if (!vm->pgt) { in nvkm_vm_create()
386 kfree(vm); in nvkm_vm_create()
390 ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, in nvkm_vm_create()
393 vfree(vm->pgt); in nvkm_vm_create()
394 kfree(vm); in nvkm_vm_create()
398 *pvm = vm; in nvkm_vm_create()
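nvkm_vm_create() is almost fully present in the listing: zero-allocate the vm, give its mutex a caller-supplied lockdep class (falling back to a static one), compute the first and last PDE from pgt_bits + 12 (a PDE spans 2^pgt_bits pages of 2^12 bytes), vzalloc the PDE-indexed pgt array, and seed the address-space allocator. Joining the hits, with the signature, mm_length computation, and error codes taken from mainline:

    int
    nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length,
                   u64 mm_offset, u32 block, struct lock_class_key *key,
                   struct nvkm_vm **pvm)
    {
        static struct lock_class_key _key;
        struct nvkm_vm *vm;
        u64 mm_length = (offset + length) - mm_offset;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
            return -ENOMEM;

        __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
        INIT_LIST_HEAD(&vm->pgd_list);
        vm->mmu = mmu;
        kref_init(&vm->refcount);
        vm->fpde = offset >> (mmu->func->pgt_bits + 12);
        vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

        vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
        if (!vm->pgt) {
            kfree(vm);
            return -ENOMEM;
        }

        /* allocator works in units of 4 KiB pages, hence the >> 12 */
        ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
                           block >> 12);
        if (ret) {
            vfree(vm->pgt);
            kfree(vm);
            return ret;
        }

        *pvm = vm;
        return 0;
    }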
414 nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd) in nvkm_vm_link() argument
416 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_link()
429 mutex_lock(&vm->mutex); in nvkm_vm_link()
430 for (i = vm->fpde; i <= vm->lpde; i++) in nvkm_vm_link()
431 mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem); in nvkm_vm_link()
432 list_add(&vpgd->head, &vm->pgd_list); in nvkm_vm_link()
433 mutex_unlock(&vm->mutex); in nvkm_vm_link()
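nvkm_vm_link() attaches a hardware page directory: under the mutex it writes every current page-table pointer into the new PGD via map_pgt(), then adds it to pgd_list so later map and unmap operations keep it in sync. Sketch (the kzalloc of the vpgd wrapper and the NULL-pgd early-out follow mainline):

    int
    nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
    {
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgd *vpgd;
        int i;

        if (!pgd)
            return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
            return -ENOMEM;

        vpgd->obj = pgd;

        mutex_lock(&vm->mutex);
        /* replay every existing page-table pointer into the new PGD */
        for (i = vm->fpde; i <= vm->lpde; i++)
            mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&vm->mutex);
        return 0;
    }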
438 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd) in nvkm_vm_unlink() argument
445 mutex_lock(&vm->mutex); in nvkm_vm_unlink()
446 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { in nvkm_vm_unlink()
453 mutex_unlock(&vm->mutex); in nvkm_vm_unlink()
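nvkm_vm_unlink() is the counterpart: it finds the matching wrapper on pgd_list under the mutex and removes it; list_for_each_entry_safe is required because the entry is deleted mid-walk. Sketch per the listing:

    static void
    nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
    {
        struct nvkm_vm_pgd *vpgd, *tmp;

        if (!mpgd)
            return;

        mutex_lock(&vm->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
            if (vpgd->obj == mpgd) {
                list_del(&vpgd->head);
                kfree(vpgd);
                break;
            }
        }
        mutex_unlock(&vm->mutex);
    }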
459 struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount); in nvkm_vm_del() local
462 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { in nvkm_vm_del()
463 nvkm_vm_unlink(vm, vpgd->obj); in nvkm_vm_del()
466 nvkm_mm_fini(&vm->mm); in nvkm_vm_del()
467 vfree(vm->pgt); in nvkm_vm_del()
468 kfree(vm); in nvkm_vm_del()
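Finally, nvkm_vm_del() is the kref release callback (note the container_of at line 459): it unlinks any remaining page directories, tears down the allocator, and frees the pgt array with vfree to match the vzalloc in nvkm_vm_create(). Sketch per the listing:

    static void
    nvkm_vm_del(struct kref *kref)
    {
        struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
        struct nvkm_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head)
            nvkm_vm_unlink(vm, vpgd->obj);

        nvkm_mm_fini(&vm->mm);
        vfree(vm->pgt);  /* matches vzalloc in nvkm_vm_create() */
        kfree(vm);
    }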