
Lines Matching refs:vmm

nv50_vmm_pgt_pte():
     32  nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,    (vmm as argument)
     53  VMM_WO064(pt, vmm, ptei++ * 8, data);

nv50_vmm_pgt_sgl():
     58  nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,    (vmm as argument)
     61  VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);

nv50_vmm_pgt_dma():
     65  nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,    (vmm as argument)
     69  VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
     73  VMM_WO064(pt, vmm, ptei++ * 8, data);
     80  VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);

nv50_vmm_pgt_mem():
     84  nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,    (vmm as argument)
     87  VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);

nv50_vmm_pgt_unmap():
     91  nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,    (vmm as argument)
     94  VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);

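The hits above cover the nv50 page-table fill helpers: nv50_vmm_pgt_sgl(), _dma() and _mem() only select a VMM_MAP_ITER_* walker, nv50_vmm_pgt_pte() is the callback that actually writes 64-bit PTEs through VMM_WO064(), and VMM_FO064() fills a run with a constant on unmap. The following is a simplified sketch of that callback shape only, assuming the nvkm/subdev/mmu "vmm.h" internals are in scope and a reduced PTE encoding (map->type as the type/valid bits, map->next as the per-page increment); it is not the file's real encoding logic.

/* Simplified sketch of a PTE-fill callback, not the literal vmmnv50.c code.
 * Assumes nouveau's nvkm/subdev/mmu "vmm.h" internals are in scope.
 * A VMM_MAP_ITER_* walker hands it a run of PTEs [ptei, ptei + ptes) plus a
 * physical address; it encodes each page and writes it with VMM_WO064().
 */
static void
sketch_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
               u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 data = addr | map->type;            /* address plus type/valid bits (assumed encoding) */

        while (ptes--) {
                VMM_WO064(pt, vmm, ptei++ * 8, data);   /* PTEs are 8 bytes apart */
                data += map->next;                      /* step to the next page */
        }
}
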
nv50_vmm_pde():
    106  nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)    (vmm as argument)

nv50_vmm_pgd_pde():
    145  nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)    (vmm as argument)
    148  u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
    151  if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
    154  list_for_each_entry(join, &vmm->join, head) {

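nv50_vmm_pgd_pde() recomputes one page-directory entry and then writes it into every channel instance block that has joined this VMM, which is what the vmm->join walk at line 154 is doing. A hedged sketch of that broadcast step, assuming a minimal struct nv50_vmm_join layout and the generic nvkm_kmap()/nvkm_wo64()/nvkm_done() memory accessors:

/* Hedged sketch: mirror PDE 'data' for index 'pdei' into each joined
 * instance block.  Assumes the nvkm/subdev/mmu "vmm.h" internals are in
 * scope; the join layout below is an assumption for illustration.
 */
struct nv50_vmm_join {
        struct nvkm_memory *inst;       /* instance block holding a copy of the page directory */
        struct list_head head;          /* entry on vmm->join */
};

static void
sketch_pgd_pde(struct nvkm_vmm *vmm, u32 pdei, u64 data)
{
        const u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
        struct nv50_vmm_join *join;

        list_for_each_entry(join, &vmm->join, head) {
                nvkm_kmap(join->inst);
                nvkm_wo64(join->inst, pdeo, data);      /* write the 8-byte PDE */
                nvkm_done(join->inst);
        }
}
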
nv50_vmm_flush():
    181  nv50_vmm_flush(struct nvkm_vmm *vmm, int level)    (vmm as argument)
    183  struct nvkm_subdev *subdev = &vmm->mmu->subdev;
    187  mutex_lock(&vmm->mmu->mutex);
    189  if (!atomic_read(&vmm->engref[i]))
    222  mutex_unlock(&vmm->mmu->mutex);

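nv50_vmm_flush() serialises TLB invalidation through vmm->mmu->mutex and skips every engine class whose vmm->engref reference count is zero, so only engines actually using this address space get flushed. Structurally it is roughly the sketch below; the per-engine VM-engine IDs and the flush register writes are deliberately left as a placeholder since they are not visible in the hits above.

/* Hedged structural sketch of the flush path (assumes nvkm internals are
 * in scope); the actual per-engine TLB-flush register writes are elided.
 */
static void
sketch_vmm_flush(struct nvkm_vmm *vmm, int level)
{
        int i;

        mutex_lock(&vmm->mmu->mutex);
        for (i = 0; i < NVKM_SUBDEV_NR; i++) {
                if (!atomic_read(&vmm->engref[i]))
                        continue;       /* no active references from this engine class */

                /* ...translate subdev index 'i' to a VM engine ID, write the
                 * TLB-flush register and wait for completion (omitted)... */
        }
        mutex_unlock(&vmm->mmu->mutex);
}
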
nv50_vmm_valid():
    226  nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,    (vmm as argument)
    234  struct nvkm_device *device = vmm->mmu->subdev.device;
    256  VMM_DEBUG(vmm, "args");
    280  kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
    282  VMM_DEBUG(vmm, "kind %02x", kind);
    287  VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
    295  VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
    302  VMM_DEBUG(vmm, "comp %d", ret);

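Most of the nv50_vmm_valid() hits are VMM_DEBUG() rejections along its argument-validation path. The kind lookup at line 280 is the core of it: the MMU's kind table translates the requested kind byte, and anything outside the table or equal to the invalid marker is refused. A hedged sketch of just that check; the exact types of kindn/kind_inv are assumed.

/* Hedged sketch of the memory-kind check in the map-validation path.
 * Assumes the nvkm/subdev/mmu "vmm.h" internals are in scope.
 */
static int
sketch_check_kind(struct nvkm_vmm *vmm, u8 kind)
{
        int kindn;
        u8 kind_inv;
        const u8 *kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);

        if (kind >= kindn || kindm[kind] == kind_inv) {
                VMM_DEBUG(vmm, "kind %02x", kind);      /* unknown or invalid storage kind */
                return -EINVAL;
        }
        return 0;
}
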
nv50_vmm_part():
    324  nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)    (vmm as argument)
    328  list_for_each_entry(join, &vmm->join, head) {

nv50_vmm_join():
    338  nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)    (vmm as argument)
    340  const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
    349  list_add_tail(&join->head, &vmm->join);
    352  for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
    353  if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
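
nv50_vmm_join() registers an instance block with the VMM and then copies the current page directory into it: each top-level PDE spans 512 MiB (1 << 29 bytes), so the loop at line 352 walks every PDE index covered by [vmm->start, vmm->limit) and writes the value produced by nv50_vmm_pde() at pd_offset + pdei * 8. A hedged sketch, reusing the assumed struct nv50_vmm_join from the earlier sketch, with allocation and error handling simplified:

/* Hedged sketch of the join path; assumes the nvkm/subdev/mmu "vmm.h"
 * internals are in scope, error handling simplified for illustration.
 */
static int
sketch_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
        struct nv50_vmm_join *join;
        u32 pdei;
        u64 data;

        if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
                return -ENOMEM;
        join->inst = inst;
        list_add_tail(&join->head, &vmm->join);

        nvkm_kmap(join->inst);
        for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
                if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
                        nvkm_done(join->inst);  /* could not encode this PDE: bail out */
                        return -EINVAL;
                }
                nvkm_wo64(join->inst, pd_offset + pdei * 8, data);
        }
        nvkm_done(join->inst);
        return 0;
}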