
Lines matching refs:page in drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c (numbers are that file's line numbers)

41 const struct nvkm_vmm_page *page) in nvkm_vmm_pt_new() argument
49 const struct nvkm_vmm_desc *pair = page[-1].desc; in nvkm_vmm_pt_new()
58 pgt->page = page ? page->shift : 0; in nvkm_vmm_pt_new()
73 const struct nvkm_vmm_page *page; member
200 const struct nvkm_vmm_desc *pair = it->page[-1].desc; in nvkm_vmm_unref_sptes()
299 const struct nvkm_vmm_desc *pair = it->page[-1].desc; in nvkm_vmm_ref_sptes()
489 pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page); in nvkm_vmm_ref_swpt()
501 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_iter() argument
507 const struct nvkm_vmm_desc *desc = page->desc; in nvkm_vmm_iter()
509 u64 bits = addr >> page->shift; in nvkm_vmm_iter()
511 it.page = page; in nvkm_vmm_iter()
514 it.cnt = size >> page->shift; in nvkm_vmm_iter()
527 addr, size, page->shift, it.cnt); in nvkm_vmm_iter()
598 return addr << page->shift; in nvkm_vmm_iter()
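
The iterator at lines 501-598 above is the workhorse behind all the ptes_* helpers: byte addresses and sizes are converted to PTE units on entry (lines 509 and 514), and the failing position is converted back to a byte address on exit (line 598). A minimal sketch of that unit convention, with a hypothetical iter() and callback standing in for nvkm_vmm_iter() and its REF_PTES/MAP_PTES callbacks:

#include <stdint.h>
#include <stdio.h>

typedef int (*pte_fn)(uint64_t ptei);  /* 0 = continue, nonzero = stop */

/* Returns ~0ULL on success, else the byte address of the first PTE
 * that failed -- the same return convention as nvkm_vmm_iter(). */
static uint64_t iter(uint64_t addr, uint64_t size, uint8_t shift, pte_fn fn)
{
    uint64_t ptei = addr >> shift;   /* cf. line 509 */
    uint64_t cnt  = size >> shift;   /* cf. line 514 */

    while (cnt--) {
        if (fn(ptei))
            return ptei << shift;    /* cf. line 598: back to bytes */
        ptei++;
    }
    return ~0ULL;
}

static int reject_pte_3(uint64_t ptei) { return ptei == 3; }

int main(void)
{
    /* PTEs 1..4 at 4KiB granularity; the callback rejects PTE 3, so
     * the failure comes back as byte address 0x3000. */
    uint64_t fail = iter(0x1000, 0x4000, 12, reject_pte_3);
    printf("fail=0x%llx\n", (unsigned long long)fail);
    return 0;
}
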
602 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_sparse_put() argument
605 nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false, in nvkm_vmm_ptes_sparse_put()
607 page->desc->func->invalid ? in nvkm_vmm_ptes_sparse_put()
608 page->desc->func->invalid : page->desc->func->unmap); in nvkm_vmm_ptes_sparse_put()
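
Lines 607-608 encode a small but important fallback: when dropping sparse references, the code prefers the descriptor's invalid op (which writes sparse/invalid PTEs) and falls back to plain unmap only when the backend provides none. A reduced sketch of that pattern; struct desc_func and sparse_put() below are illustrative stand-ins, not the driver's real types:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for nvkm_vmm_desc_func: 'invalid' is optional. */
struct desc_func {
    void (*unmap)(uint64_t addr, uint64_t size);
    void (*invalid)(uint64_t addr, uint64_t size);
};

static void do_unmap(uint64_t addr, uint64_t size)
{
    printf("unmap 0x%llx+0x%llx\n",
           (unsigned long long)addr, (unsigned long long)size);
}

static void sparse_put(const struct desc_func *func, uint64_t addr, uint64_t size)
{
    /* Prefer writing invalid (sparse) PTEs; otherwise fall back to a
     * plain unmap, as lines 607-608 do. */
    void (*op)(uint64_t, uint64_t) =
        func->invalid ? func->invalid : func->unmap;
    op(addr, size);
}

int main(void)
{
    const struct desc_func func = { .unmap = do_unmap, .invalid = NULL };
    sparse_put(&func, 0x100000, 0x200000);
    return 0;
}
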
612 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_sparse_get() argument
615 if ((page->type & NVKM_VMM_PAGE_SPARSE)) { in nvkm_vmm_ptes_sparse_get()
616 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref", in nvkm_vmm_ptes_sparse_get()
618 NULL, NULL, page->desc->func->sparse); in nvkm_vmm_ptes_sparse_get()
621 nvkm_vmm_ptes_sparse_put(vmm, page, addr, size); in nvkm_vmm_ptes_sparse_get()
632 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_ptes_sparse() local
639 while (size < (1ULL << page[m].shift)) in nvkm_vmm_ptes_sparse()
644 while (!IS_ALIGNED(addr, 1ULL << page[i].shift)) in nvkm_vmm_ptes_sparse()
650 u64 next = 1ULL << page[i - 1].shift; in nvkm_vmm_ptes_sparse()
653 block = (part >> page[i].shift) << page[i].shift; in nvkm_vmm_ptes_sparse()
655 block = (size >> page[i].shift) << page[i].shift; in nvkm_vmm_ptes_sparse()
657 block = (size >> page[i].shift) << page[i].shift; in nvkm_vmm_ptes_sparse()
662 int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block); in nvkm_vmm_ptes_sparse()
669 nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block); in nvkm_vmm_ptes_sparse()
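
nvkm_vmm_ptes_sparse() (lines 632-669 above) carves an arbitrary range into blocks, at each step picking the largest page size that both fits the remaining size and matches the current alignment, and capping the block at the next larger page's boundary so bigger pages become usable as early as possible. A standalone sketch of that splitting logic, assuming addr and size are multiples of the smallest page and using a hypothetical shift table shaped like nouveau's descending, zero-terminated page arrays:

#include <stdio.h>
#include <stdint.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* shift[] must be in descending order; addr/size must be aligned to
 * the smallest entry, or the index walks run off the table. */
static void split(uint64_t addr, uint64_t size, const unsigned *shift)
{
    int m = 0, i;

    while (size) {
        /* Limit the maximum page size by the remaining size... */
        while (size < (1ULL << shift[m]))
            m++;
        i = m;

        /* ...then by the current address's alignment. */
        while (!IS_ALIGNED(addr, 1ULL << shift[i]))
            i++;

        uint64_t block;
        if (i != m) {
            /* Stop at the next larger page's boundary if enough of
             * the range remains to use that page size afterwards. */
            uint64_t next = 1ULL << shift[i - 1];
            uint64_t part = ALIGN(addr, next) - addr;
            if (size - part >= next)
                block = (part >> shift[i]) << shift[i];
            else
                block = (size >> shift[i]) << shift[i];
        } else {
            block = (size >> shift[i]) << shift[i];
        }

        printf("0x%09llx + 0x%09llx as %u-bit pages\n",
               (unsigned long long)addr, (unsigned long long)block,
               shift[i]);
        addr += block;
        size -= block;
    }
}

int main(void)
{
    static const unsigned shifts[] = { 29, 21, 12 }; /* e.g. 512M/2M/4K */
    split(0x1ff000, 0x40201000, shifts);
    return 0;
}
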
680 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_unmap() argument
683 const struct nvkm_vmm_desc_func *func = page->desc->func; in nvkm_vmm_ptes_unmap()
686 nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn, in nvkm_vmm_ptes_unmap()
694 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_map() argument
699 nvkm_vmm_iter(vmm, page, addr, size, "map", false, false, in nvkm_vmm_ptes_map()
705 nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_put_locked() argument
708 nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false, in nvkm_vmm_ptes_put_locked()
713 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_put() argument
717 nvkm_vmm_ptes_put_locked(vmm, page, addr, size); in nvkm_vmm_ptes_put()
722 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_get() argument
728 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false, in nvkm_vmm_ptes_get()
732 nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr); in nvkm_vmm_ptes_get()
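
nvkm_vmm_ptes_get() (lines 722-732) shows the rollback discipline used throughout this file: the iterator returns the first address it failed at, and the caller releases exactly the sub-range that had already succeeded. A toy version of the pattern; vmm_ref_ptes()/vmm_put_ptes() merely simulate the iterator and are not the driver's API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define VMM_ITER_OK (~0ULL)

/* Pretend PTE references fail once we cross this address. */
static const uint64_t fail_at = 0x3000;

static uint64_t vmm_ref_ptes(uint64_t addr, uint64_t size)
{
    for (uint64_t a = addr; a < addr + size; a += 0x1000) {
        if (a >= fail_at)
            return a;        /* first address we could NOT reference */
    }
    return VMM_ITER_OK;
}

static void vmm_put_ptes(uint64_t addr, uint64_t size)
{
    printf("rolling back [0x%llx, 0x%llx)\n",
           (unsigned long long)addr, (unsigned long long)(addr + size));
}

/* The pattern from nvkm_vmm_ptes_get(): on a partial failure, release
 * exactly the 'fail - addr' bytes that succeeded, then bail out. */
static int vmm_ptes_get(uint64_t addr, uint64_t size)
{
    uint64_t fail = vmm_ref_ptes(addr, size);
    if (fail != VMM_ITER_OK) {
        if (fail != addr)
            vmm_put_ptes(addr, fail - addr);
        return -ENOMEM;
    }
    return 0;
}

int main(void)
{
    return vmm_ptes_get(0x1000, 0x4000) ? 1 : 0;
}
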
741 __nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in __nvkm_vmm_ptes_unmap_put() argument
744 const struct nvkm_vmm_desc_func *func = page->desc->func; in __nvkm_vmm_ptes_unmap_put()
746 nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref", in __nvkm_vmm_ptes_unmap_put()
753 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_unmap_put() argument
757 nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn); in nvkm_vmm_ptes_unmap_put()
758 nvkm_vmm_ptes_put(vmm, page, addr, size); in nvkm_vmm_ptes_unmap_put()
760 __nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn); in nvkm_vmm_ptes_unmap_put()
765 __nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in __nvkm_vmm_ptes_get_map() argument
769 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true, in __nvkm_vmm_ptes_get_map()
773 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false); in __nvkm_vmm_ptes_get_map()
780 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_get_map() argument
787 ret = nvkm_vmm_ptes_get(vmm, page, addr, size); in nvkm_vmm_ptes_get_map()
791 nvkm_vmm_ptes_map(vmm, page, addr, size, map, func); in nvkm_vmm_ptes_get_map()
795 return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func); in nvkm_vmm_ptes_get_map()
806 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
825 new->page = vma->page; in nvkm_vma_tail()
1010 vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-', in nvkm_vma_dump()
1042 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_dtor() local
1045 while (page[1].shift) in nvkm_vmm_dtor()
1046 page++; in nvkm_vmm_dtor()
1049 nvkm_vmm_ptes_put(vmm, page, vmm->start, limit); in nvkm_vmm_dtor()
1089 const struct nvkm_vmm_page *page = func->page; in nvkm_vmm_ctor() local
1107 while (page[1].shift) in nvkm_vmm_ctor()
1108 page++; in nvkm_vmm_ctor()
1114 for (levels = 0, desc = page->desc; desc->bits; desc++, levels++) in nvkm_vmm_ctor()
1116 bits += page->shift; in nvkm_vmm_ctor()
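
Two idioms recur in the constructor lines above. The walk "while (page[1].shift) page++;" (lines 1107-1108, also at 1045 and 1923) relies on the per-chipset page[] array being sorted from largest to smallest shift and zero-terminated, so it stops on the smallest supported page size. Lines 1114-1116 then sum the bits translated by each page-table level, plus the page offset, to get the width of the virtual address space. A self-contained sketch with illustrative values (not any real GPU's layout):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical, simplified echoes of the nvkm structures: a
 * zero-terminated page[] array in descending shift order, and a
 * descriptor chain where each page-table level translates 'bits'
 * address bits. */
struct vmm_desc { unsigned bits; };
struct vmm_page { uint8_t shift; const struct vmm_desc *desc; };

static const struct vmm_desc desc_12[] = { {9}, {9}, {9}, {8}, {0} };
static const struct vmm_page pages[]   = {
    { 21, desc_12 }, { 12, desc_12 }, { 0, NULL },
};

int main(void)
{
    const struct vmm_page *page = pages;
    unsigned bits, levels = 0;

    /* Lines 1107-1108 (and 1045, 1923): walk to the last real entry,
     * i.e. the smallest supported page size. */
    while (page[1].shift)
        page++;

    /* Lines 1114-1116: bits per level plus the in-page offset give
     * the virtual address width. */
    bits = page->shift;
    for (const struct vmm_desc *d = page->desc; d->bits; d++, levels++)
        bits += d->bits;

    printf("%u levels, %u-bit VA\n", levels, bits);
    return 0;
}
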
1209 u64 addr, u64 size, u8 page, bool map) in nvkm_vmm_pfn_split_merge() argument
1247 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd], in nvkm_vmm_pfn_unmap()
1269 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_pfn_map() local
1279 while (page->shift && (page->shift != shift || in nvkm_vmm_pfn_map()
1280 page->desc->func->pfn == NULL)) in nvkm_vmm_pfn_map()
1281 page++; in nvkm_vmm_pfn_map()
1283 if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) || in nvkm_vmm_pfn_map()
1287 shift, page->shift, addr, size); in nvkm_vmm_pfn_map()
1308 size = min_t(u64, size, pn << page->shift); in nvkm_vmm_pfn_map()
1332 page - in nvkm_vmm_pfn_map()
1333 vmm->func->page, map); in nvkm_vmm_pfn_map()
1340 tmp->refd = page - vmm->func->page; in nvkm_vmm_pfn_map()
1349 args.page = page; in nvkm_vmm_pfn_map()
1353 ret = nvkm_vmm_ptes_get_map(vmm, page, addr, in nvkm_vmm_pfn_map()
1354 size, &args, page-> in nvkm_vmm_pfn_map()
1357 nvkm_vmm_ptes_map(vmm, page, addr, size, &args, in nvkm_vmm_pfn_map()
1358 page->desc->func->pfn); in nvkm_vmm_pfn_map()
1362 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, in nvkm_vmm_pfn_map()
1379 size -= 1 << page->shift; in nvkm_vmm_pfn_map()
1382 pi += size >> page->shift; in nvkm_vmm_pfn_map()
1409 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd]; in nvkm_vmm_unmap_locked() local
1412 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1415 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1437 if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) { in nvkm_vmm_map_valid()
1438 VMM_DEBUG(vmm, "%d !VRAM", map->page->shift); in nvkm_vmm_map_valid()
1444 if (!(map->page->type & NVKM_VMM_PAGE_HOST)) { in nvkm_vmm_map_valid()
1445 VMM_DEBUG(vmm, "%d !HOST", map->page->shift); in nvkm_vmm_map_valid()
1454 if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1455 !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1456 !IS_ALIGNED( map->offset, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1457 nvkm_memory_page(map->memory) < map->page->shift) { in nvkm_vmm_map_valid()
1459 vma->addr, (u64)vma->size, map->offset, map->page->shift, in nvkm_vmm_map_valid()
1471 for (map->page = vmm->func->page; map->page->shift; map->page++) { in nvkm_vmm_map_choose()
1472 VMM_DEBUG(vmm, "trying %d", map->page->shift); in nvkm_vmm_map_choose()
1497 if (vma->page == NVKM_VMA_PAGE_NONE && in nvkm_vmm_map_locked()
1512 map->page = &vmm->func->page[vma->refd]; in nvkm_vmm_map_locked()
1514 map->page = &vmm->func->page[vma->page]; in nvkm_vmm_map_locked()
1532 func = map->page->desc->func->mem; in nvkm_vmm_map_locked()
1541 func = map->page->desc->func->sgl; in nvkm_vmm_map_locked()
1545 func = map->page->desc->func->dma; in nvkm_vmm_map_locked()
1550 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1554 vma->refd = map->page - vmm->func->page; in nvkm_vmm_map_locked()
1556 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
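
Lines 1532-1545 choose the PTE writer by where the backing memory lives: VRAM-backed nvkm_memory uses the descriptor's mem op, host memory goes through the sgl (scatter-gather) or dma ops. A compressed sketch of that dispatch; the enum and types below are simplified stand-ins for the real nvkm ones:

#include <stdint.h>
#include <stdio.h>

typedef void (*pte_func)(uint64_t ptei, uint64_t data);

static void write_vram(uint64_t ptei, uint64_t data)
{ (void)data; printf("vram pte %llu\n", (unsigned long long)ptei); }
static void write_sgl(uint64_t ptei, uint64_t data)
{ (void)data; printf("sgl  pte %llu\n", (unsigned long long)ptei); }
static void write_dma(uint64_t ptei, uint64_t data)
{ (void)data; printf("dma  pte %llu\n", (unsigned long long)ptei); }

/* Stand-in for nvkm_vmm_desc_func: one writer per memory flavour. */
struct desc_func { pte_func mem, sgl, dma; };

enum mem_kind { MEM_VRAM, MEM_SGL, MEM_DMA };

/* Mirrors the selection at lines 1532-1545. */
static pte_func pick(const struct desc_func *func, enum mem_kind kind)
{
    switch (kind) {
    case MEM_VRAM: return func->mem;  /* contiguous VRAM */
    case MEM_SGL:  return func->sgl;  /* host scatter-gather list */
    default:       return func->dma;  /* flat DMA address array */
    }
}

int main(void)
{
    const struct desc_func func = { write_vram, write_sgl, write_dma };
    pick(&func, MEM_DMA)(0, 0);
    return 0;
}
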
1606 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_put_locked() local
1631 nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr, in nvkm_vmm_put_locked()
1637 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size); in nvkm_vmm_put_locked()
1661 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size); in nvkm_vmm_put_locked()
1679 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vmm_put_locked()
1701 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE]; in nvkm_vmm_get_locked() local
1734 for (page = vmm->func->page; page->shift; page++) { in nvkm_vmm_get_locked()
1735 if (shift == page->shift) in nvkm_vmm_get_locked()
1739 if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) { in nvkm_vmm_get_locked()
1770 const int p = page - vmm->func->page; in nvkm_vmm_get_locked()
1773 if (vmm->func->page_block && prev && prev->page != p) in nvkm_vmm_get_locked()
1778 if (vmm->func->page_block && next && next->page != p) in nvkm_vmm_get_locked()
1813 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1817 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1827 vma->page = page - vmm->func->page; in nvkm_vmm_get_locked()
1828 vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE; in nvkm_vmm_get_locked()
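
The allocation path in nvkm_vmm_get_locked() (lines 1701-1828) first resolves the requested page shift against the supported set: lines 1734-1739 scan the zero-terminated page array for a matching shift and reject the request if none matches or the size is not a multiple of that page size. A standalone sketch of that validation, using a hypothetical pages[] table:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

struct vmm_page { uint8_t shift; };

/* Hypothetical supported page sizes, descending, zero-terminated. */
static const struct vmm_page pages[] = { {29}, {21}, {12}, {0} };

/* Mirrors lines 1734-1739: locate the requested shift, then require
 * the allocation size to be a multiple of that page size. */
static int validate(uint8_t shift, uint64_t size,
                    const struct vmm_page **ppage)
{
    const struct vmm_page *page;

    for (page = pages; page->shift; page++) {
        if (shift == page->shift)
            break;
    }

    if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift))
        return -EINVAL;

    *ppage = page;
    return 0;
}

int main(void)
{
    const struct vmm_page *page;
    printf("16M @ 2M pages: %d\n", validate(21, 16ULL << 20, &page));
    printf("1M  @ 2M pages: %d\n", validate(21,  1ULL << 20, &page));
    return 0;
}
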
1836 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) in nvkm_vmm_get() argument
1840 ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma); in nvkm_vmm_get()
1849 const struct nvkm_vmm_page *page = &vmm->func->page[refd]; in nvkm_vmm_raw_unmap() local
1851 nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false); in nvkm_vmm_raw_unmap()
1857 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_raw_put() local
1859 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size); in nvkm_vmm_raw_put()
1865 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_raw_get() local
1870 return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size); in nvkm_vmm_raw_get()
1919 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_boot() local
1923 while (page[1].shift) in nvkm_vmm_boot()
1924 page++; in nvkm_vmm_boot()
1926 ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit); in nvkm_vmm_boot()
1930 nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false, in nvkm_vmm_boot()