Lines Matching refs:vma
802 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); in nvkm_vma_new() local
803 if (vma) { in nvkm_vma_new()
804 vma->addr = addr; in nvkm_vma_new()
805 vma->size = size; in nvkm_vma_new()
806 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
807 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
809 return vma; in nvkm_vma_new()
813 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) in nvkm_vma_tail() argument
817 BUG_ON(vma->size == tail); in nvkm_vma_tail()
819 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail))) in nvkm_vma_tail()
821 vma->size -= tail; in nvkm_vma_tail()
823 new->mapref = vma->mapref; in nvkm_vma_tail()
824 new->sparse = vma->sparse; in nvkm_vma_tail()
825 new->page = vma->page; in nvkm_vma_tail()
826 new->refd = vma->refd; in nvkm_vma_tail()
827 new->used = vma->used; in nvkm_vma_tail()
828 new->part = vma->part; in nvkm_vma_tail()
829 new->busy = vma->busy; in nvkm_vma_tail()
830 new->mapped = vma->mapped; in nvkm_vma_tail()
831 list_add(&new->head, &vma->head); in nvkm_vma_tail()
836 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_remove() argument
838 rb_erase(&vma->tree, &vmm->free); in nvkm_vmm_free_remove()
842 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_delete() argument
844 nvkm_vmm_free_remove(vmm, vma); in nvkm_vmm_free_delete()
845 list_del(&vma->head); in nvkm_vmm_free_delete()
846 kfree(vma); in nvkm_vmm_free_delete()
850 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_insert() argument
858 if (vma->size < this->size) in nvkm_vmm_free_insert()
861 if (vma->size > this->size) in nvkm_vmm_free_insert()
864 if (vma->addr < this->addr) in nvkm_vmm_free_insert()
867 if (vma->addr > this->addr) in nvkm_vmm_free_insert()
873 rb_link_node(&vma->tree, parent, ptr); in nvkm_vmm_free_insert()
874 rb_insert_color(&vma->tree, &vmm->free); in nvkm_vmm_free_insert()
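The vmm->free tree excerpted above holds unused address ranges in an rbtree ordered first by size and then by address, so allocation can look for the smallest hole that still fits. A minimal sketch of that comparator, using a cut-down, hypothetical demo_vma node (the walk-and-link pattern is the stock Linux rbtree idiom, not a copy of the elided nouveau lines):

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_vma {
    struct rb_node tree;    /* node in the size-ordered free tree */
    u64 addr;
    u64 size;
};

/* insert a free region keyed by (size, addr), as in the excerpt above */
static void demo_free_insert(struct rb_root *root, struct demo_vma *vma)
{
    struct rb_node **ptr = &root->rb_node;
    struct rb_node *parent = NULL;

    while (*ptr) {
        struct demo_vma *this = rb_entry(*ptr, typeof(*this), tree);

        parent = *ptr;
        if (vma->size < this->size)
            ptr = &parent->rb_left;
        else if (vma->size > this->size)
            ptr = &parent->rb_right;
        else if (vma->addr < this->addr)
            ptr = &parent->rb_left;
        else
            ptr = &parent->rb_right;
    }

    rb_link_node(&vma->tree, parent, ptr);
    rb_insert_color(&vma->tree, root);
}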
878 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_remove() argument
880 rb_erase(&vma->tree, &vmm->root); in nvkm_vmm_node_remove()
884 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_delete() argument
886 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_node_delete()
887 list_del(&vma->head); in nvkm_vmm_node_delete()
888 kfree(vma); in nvkm_vmm_node_delete()
892 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_insert() argument
900 if (vma->addr < this->addr) in nvkm_vmm_node_insert()
903 if (vma->addr > this->addr) in nvkm_vmm_node_insert()
909 rb_link_node(&vma->tree, parent, ptr); in nvkm_vmm_node_insert()
910 rb_insert_color(&vma->tree, &vmm->root); in nvkm_vmm_node_insert()
918 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree); in nvkm_vmm_node_search() local
919 if (addr < vma->addr) in nvkm_vmm_node_search()
922 if (addr >= vma->addr + vma->size) in nvkm_vmm_node_search()
925 return vma; in nvkm_vmm_node_search()
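The allocated-region tree (vmm->root) is keyed by address instead, and nvkm_vmm_node_search() descends it to find the region whose range contains a given address. A sketch of that lookup, reusing the hypothetical demo_vma node from the free-tree sketch above:

/* find the region whose [addr, addr + size) range contains 'addr' */
static struct demo_vma *demo_node_search(struct rb_root *root, u64 addr)
{
    struct rb_node *node = root->rb_node;

    while (node) {
        struct demo_vma *vma = rb_entry(node, typeof(*vma), tree);

        if (addr < vma->addr)
            node = node->rb_left;
        else if (addr >= vma->addr + vma->size)
            node = node->rb_right;
        else
            return vma;
    }

    return NULL;
}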
935 struct nvkm_vma *vma, struct nvkm_vma *next, u64 size) in nvkm_vmm_node_merge() argument
938 if (vma->size == size) { in nvkm_vmm_node_merge()
939 vma->size += next->size; in nvkm_vmm_node_merge()
942 prev->size += vma->size; in nvkm_vmm_node_merge()
943 nvkm_vmm_node_delete(vmm, vma); in nvkm_vmm_node_merge()
946 return vma; in nvkm_vmm_node_merge()
951 vma->size -= size; in nvkm_vmm_node_merge()
959 if (vma->size != size) { in nvkm_vmm_node_merge()
960 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_node_merge()
962 vma->addr += size; in nvkm_vmm_node_merge()
963 vma->size -= size; in nvkm_vmm_node_merge()
964 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_node_merge()
966 prev->size += vma->size; in nvkm_vmm_node_merge()
967 nvkm_vmm_node_delete(vmm, vma); in nvkm_vmm_node_merge()
972 return vma; in nvkm_vmm_node_merge()
977 struct nvkm_vma *vma, u64 addr, u64 size) in nvkm_vmm_node_split() argument
981 if (vma->addr != addr) { in nvkm_vmm_node_split()
982 prev = vma; in nvkm_vmm_node_split()
983 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) in nvkm_vmm_node_split()
985 vma->part = true; in nvkm_vmm_node_split()
986 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_node_split()
989 if (vma->size != size) { in nvkm_vmm_node_split()
991 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) { in nvkm_vmm_node_split()
992 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size); in nvkm_vmm_node_split()
999 return vma; in nvkm_vmm_node_split()
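nvkm_vmm_node_split() carves the requested [addr, addr + size) range out of an existing region: if the range does not start at the region's base, the head stays with the original node and a new "part" node takes over from addr onwards (via nvkm_vma_tail()); if the range does not reach the region's end, the excess tail is split off the same way. A rough sketch of that carving on the address-ordered list only, with hypothetical demo_* names; the real code also keeps the rbtree and the remaining per-region state in sync, and rolls back through nvkm_vmm_node_merge() when the second split fails:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_vma {
    struct list_head head;    /* address-ordered region list */
    u64 addr;
    u64 size;
    bool part;                /* piece carved off a larger region */
};

/* split 'tail' bytes off the end of 'vma' into a new node placed after it */
static struct demo_vma *demo_vma_tail(struct demo_vma *vma, u64 tail)
{
    struct demo_vma *new = kzalloc(sizeof(*new), GFP_KERNEL);

    if (!new)
        return NULL;
    new->addr = vma->addr + (vma->size - tail);
    new->size = tail;
    vma->size -= tail;
    list_add(&new->head, &vma->head);
    return new;
}

/* carve [addr, addr + size) out of 'vma'; returns the node covering it */
static struct demo_vma *
demo_node_split(struct demo_vma *vma, u64 addr, u64 size)
{
    if (vma->addr != addr) {
        /* keep the head in place, continue with the piece at 'addr' */
        vma = demo_vma_tail(vma, vma->size + vma->addr - addr);
        if (!vma)
            return NULL;
        vma->part = true;
    }

    if (vma->size != size) {
        /* trim the excess tail; the excerpt merges back with prev on failure */
        if (!demo_vma_tail(vma, vma->size - size))
            return NULL;
    }

    return vma;
}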
1003 nvkm_vma_dump(struct nvkm_vma *vma) in nvkm_vma_dump() argument
1006 vma->addr, (u64)vma->size, in nvkm_vma_dump()
1007 vma->used ? '-' : 'F', in nvkm_vma_dump()
1008 vma->mapref ? 'R' : '-', in nvkm_vma_dump()
1009 vma->sparse ? 'S' : '-', in nvkm_vma_dump()
1010 vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-', in nvkm_vma_dump()
1011 vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-', in nvkm_vma_dump()
1012 vma->part ? 'P' : '-', in nvkm_vma_dump()
1013 vma->busy ? 'B' : '-', in nvkm_vma_dump()
1014 vma->mapped ? 'M' : '-', in nvkm_vma_dump()
1015 vma->memory); in nvkm_vma_dump()
1021 struct nvkm_vma *vma; in nvkm_vmm_dump() local
1022 list_for_each_entry(vma, &vmm->list, head) { in nvkm_vmm_dump()
1023 nvkm_vma_dump(vma); in nvkm_vmm_dump()
1030 struct nvkm_vma *vma; in nvkm_vmm_dtor() local
1037 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree); in nvkm_vmm_dtor() local
1038 nvkm_vmm_put(vmm, &vma); in nvkm_vmm_dtor()
1052 vma = list_first_entry(&vmm->list, typeof(*vma), head); in nvkm_vmm_dtor()
1053 list_del(&vma->head); in nvkm_vmm_dtor()
1054 kfree(vma); in nvkm_vmm_dtor()
1071 struct nvkm_vma *vma; in nvkm_vmm_ctor_managed() local
1072 if (!(vma = nvkm_vma_new(addr, size))) in nvkm_vmm_ctor_managed()
1074 vma->mapref = true; in nvkm_vmm_ctor_managed()
1075 vma->sparse = false; in nvkm_vmm_ctor_managed()
1076 vma->used = true; in nvkm_vmm_ctor_managed()
1077 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_ctor_managed()
1078 list_add_tail(&vma->head, &vmm->list); in nvkm_vmm_ctor_managed()
1091 struct nvkm_vma *vma; in nvkm_vmm_ctor() local
1163 if (!(vma = nvkm_vma_new(addr, size))) in nvkm_vmm_ctor()
1165 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
1166 list_add_tail(&vma->head, &vmm->list); in nvkm_vmm_ctor()
1186 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start))) in nvkm_vmm_ctor()
1189 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
1190 list_add(&vma->head, &vmm->list); in nvkm_vmm_ctor()
1208 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_pfn_split_merge() argument
1214 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) { in nvkm_vmm_pfn_split_merge()
1219 if (vma->addr + vma->size == addr + size && (next = node(vma, next))) { in nvkm_vmm_pfn_split_merge()
1226 return nvkm_vmm_node_merge(vmm, prev, vma, next, size); in nvkm_vmm_pfn_split_merge()
1227 return nvkm_vmm_node_split(vmm, vma, addr, size); in nvkm_vmm_pfn_split_merge()
1233 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr); in nvkm_vmm_pfn_unmap() local
1238 if (!vma) in nvkm_vmm_pfn_unmap()
1242 if (!vma->mapped || vma->memory) in nvkm_vmm_pfn_unmap()
1245 size = min(limit - start, vma->size - (start - vma->addr)); in nvkm_vmm_pfn_unmap()
1247 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd], in nvkm_vmm_pfn_unmap()
1250 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false); in nvkm_vmm_pfn_unmap()
1252 vma = next; in nvkm_vmm_pfn_unmap()
1253 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_pfn_unmap()
1254 vma->mapped = false; in nvkm_vmm_pfn_unmap()
1256 } while ((vma = node(vma, next)) && (start = vma->addr) < limit); in nvkm_vmm_pfn_unmap()
1270 struct nvkm_vma *vma, *tmp; in nvkm_vmm_pfn_map() local
1291 if (!(vma = nvkm_vmm_node_search(vmm, addr))) in nvkm_vmm_pfn_map()
1296 bool mapped = vma->mapped; in nvkm_vmm_pfn_map()
1309 size = min_t(u64, size, vma->size + vma->addr - addr); in nvkm_vmm_pfn_map()
1314 if (!vma->mapref || vma->memory) { in nvkm_vmm_pfn_map()
1331 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size, in nvkm_vmm_pfn_map()
1343 vma = tmp; in nvkm_vmm_pfn_map()
1369 if (vma->addr + vma->size == addr + size) in nvkm_vmm_pfn_map()
1370 vma = node(vma, next); in nvkm_vmm_pfn_map()
1384 } while (vma && start < limit); in nvkm_vmm_pfn_map()
1390 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap_region() argument
1395 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_unmap_region()
1396 nvkm_memory_unref(&vma->memory); in nvkm_vmm_unmap_region()
1397 vma->mapped = false; in nvkm_vmm_unmap_region()
1399 if (vma->part && (prev = node(vma, prev)) && prev->mapped) in nvkm_vmm_unmap_region()
1401 if ((next = node(vma, next)) && (!next->part || next->mapped)) in nvkm_vmm_unmap_region()
1403 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size); in nvkm_vmm_unmap_region()
1407 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn) in nvkm_vmm_unmap_locked() argument
1409 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd]; in nvkm_vmm_unmap_locked()
1411 if (vma->mapref) { in nvkm_vmm_unmap_locked()
1412 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1413 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_unmap_locked()
1415 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1418 nvkm_vmm_unmap_region(vmm, vma); in nvkm_vmm_unmap_locked()
1422 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap() argument
1424 if (vma->memory) { in nvkm_vmm_unmap()
1426 nvkm_vmm_unmap_locked(vmm, vma, false); in nvkm_vmm_unmap()
1432 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_valid() argument
1454 if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1455 !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1459 vma->addr, (u64)vma->size, map->offset, map->page->shift, in nvkm_vmm_map_valid()
1468 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_choose() argument
1473 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map)) in nvkm_vmm_map_choose()
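When a region has neither a preferred page size (vma->page) nor a referenced one (vma->refd), nvkm_vmm_map_locked() asks nvkm_vmm_map_choose() to pick the largest page size the mapping allows. The core constraint, visible in nvkm_vmm_map_valid() above, is that the region's address and size (and the memory offset) must be aligned to the chosen page size. A loose illustration of that selection idea only, with a hypothetical demo_page table ordered from largest to smallest and terminated by a zero shift; the real code iterates the vmm's page array and delegates the full check to nvkm_vmm_map_valid():

#include <linux/kernel.h>    /* IS_ALIGNED() */
#include <linux/types.h>

struct demo_page {
    u8 shift;    /* page size is 1ULL << shift; 0 terminates the table */
};

/* pick the largest page size that addr, size and offset are all aligned to */
static const struct demo_page *
demo_map_choose(const struct demo_page *page, u64 addr, u64 size, u64 offset)
{
    for (; page->shift; page++) {
        const u64 align = 1ULL << page->shift;

        if (IS_ALIGNED(addr, align) &&
            IS_ALIGNED(size, align) &&
            IS_ALIGNED(offset, align))
            return page;
    }

    return NULL;
}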
1480 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_locked() argument
1486 map->no_comp = vma->no_comp; in nvkm_vmm_map_locked()
1489 if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) { in nvkm_vmm_map_locked()
1492 map->offset, (u64)vma->size); in nvkm_vmm_map_locked()
1497 if (vma->page == NVKM_VMA_PAGE_NONE && in nvkm_vmm_map_locked()
1498 vma->refd == NVKM_VMA_PAGE_NONE) { in nvkm_vmm_map_locked()
1502 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1506 nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1511 if (vma->refd != NVKM_VMA_PAGE_NONE) in nvkm_vmm_map_locked()
1512 map->page = &vmm->func->page[vma->refd]; in nvkm_vmm_map_locked()
1514 map->page = &vmm->func->page[vma->page]; in nvkm_vmm_map_locked()
1516 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1549 if (vma->refd == NVKM_VMA_PAGE_NONE) { in nvkm_vmm_map_locked()
1550 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1554 vma->refd = map->page - vmm->func->page; in nvkm_vmm_map_locked()
1556 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1559 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_map_locked()
1560 nvkm_memory_unref(&vma->memory); in nvkm_vmm_map_locked()
1561 vma->memory = nvkm_memory_ref(map->memory); in nvkm_vmm_map_locked()
1562 vma->mapped = true; in nvkm_vmm_map_locked()
1563 vma->tags = map->tags; in nvkm_vmm_map_locked()
1568 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, in nvkm_vmm_map() argument
1573 if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) && in nvkm_vmm_map()
1575 return nvkm_vmm_map_locked(vmm, vma, argv, argc, map); in nvkm_vmm_map()
1578 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map); in nvkm_vmm_map()
1579 vma->busy = false; in nvkm_vmm_map()
1585 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_region() argument
1589 if ((prev = node(vma, prev)) && !prev->used) { in nvkm_vmm_put_region()
1590 vma->addr = prev->addr; in nvkm_vmm_put_region()
1591 vma->size += prev->size; in nvkm_vmm_put_region()
1595 if ((next = node(vma, next)) && !next->used) { in nvkm_vmm_put_region()
1596 vma->size += next->size; in nvkm_vmm_put_region()
1600 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_put_region()
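nvkm_vmm_put_region() returns a region to the free pool: any unused neighbour on either side of it in the address-ordered list is absorbed first, and the merged hole is then inserted into the size-ordered free tree. A small sketch of the coalescing step alone, with hypothetical demo_* names; the excerpt additionally removes each absorbed neighbour from the free tree (nvkm_vmm_free_delete()) and reinserts the merged hole at the end:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_vma {
    struct list_head head;    /* address-ordered region list */
    u64 addr;
    u64 size;
    bool used;
};

/* coalesce a freed region with unused neighbours on the address list */
static void demo_put_region(struct list_head *list, struct demo_vma *vma)
{
    struct demo_vma *prev, *next;

    if (vma->head.prev != list) {
        prev = list_prev_entry(vma, head);
        if (!prev->used) {
            vma->addr = prev->addr;
            vma->size += prev->size;
            list_del(&prev->head);
            kfree(prev);
        }
    }

    if (vma->head.next != list) {
        next = list_next_entry(vma, head);
        if (!next->used) {
            vma->size += next->size;
            list_del(&next->head);
            kfree(next);
        }
    }
}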
1604 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_locked() argument
1607 struct nvkm_vma *next = vma; in nvkm_vmm_put_locked()
1609 BUG_ON(vma->part); in nvkm_vmm_put_locked()
1611 if (vma->mapref || !vma->sparse) { in nvkm_vmm_put_locked()
1632 size, vma->sparse, in nvkm_vmm_put_locked()
1646 next = vma; in nvkm_vmm_put_locked()
1650 } while ((next = node(vma, next)) && next->part); in nvkm_vmm_put_locked()
1652 if (vma->sparse && !vma->mapref) { in nvkm_vmm_put_locked()
1661 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size); in nvkm_vmm_put_locked()
1663 if (vma->sparse) { in nvkm_vmm_put_locked()
1672 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false); in nvkm_vmm_put_locked()
1676 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_put_locked()
1679 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vmm_put_locked()
1680 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_put_locked()
1681 vma->used = false; in nvkm_vmm_put_locked()
1682 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_put_locked()
1688 struct nvkm_vma *vma = *pvma; in nvkm_vmm_put() local
1689 if (vma) { in nvkm_vmm_put()
1691 nvkm_vmm_put_locked(vmm, vma); in nvkm_vmm_put()
1703 struct nvkm_vma *vma = NULL, *tmp; in nvkm_vmm_get_locked() local
1783 vma = this; in nvkm_vmm_get_locked()
1788 if (unlikely(!vma)) in nvkm_vmm_get_locked()
1794 if (addr != vma->addr) { in nvkm_vmm_get_locked()
1795 if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) { in nvkm_vmm_get_locked()
1796 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1799 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_get_locked()
1800 vma = tmp; in nvkm_vmm_get_locked()
1803 if (size != vma->size) { in nvkm_vmm_get_locked()
1804 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) { in nvkm_vmm_get_locked()
1805 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1813 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1815 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true); in nvkm_vmm_get_locked()
1817 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1821 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1825 vma->mapref = mapref && !getref; in nvkm_vmm_get_locked()
1826 vma->sparse = sparse; in nvkm_vmm_get_locked()
1827 vma->page = page - vmm->func->page; in nvkm_vmm_get_locked()
1828 vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE; in nvkm_vmm_get_locked()
1829 vma->used = true; in nvkm_vmm_get_locked()
1830 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_get_locked()
1831 *pvma = vma; in nvkm_vmm_get_locked()
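nvkm_vmm_get_locked() allocates from the free tree and, as the excerpt shows, trims the chosen hole with nvkm_vma_tail() when its start address or size does not match the request, leaving the unwanted head and tail as free regions before marking the remainder used and inserting it into the address tree. The search itself is elided from the matches above and also honours alignment and page-size constraints; the following is only a loose, hypothetical best-fit lookup over a size-ordered tree, meant to illustrate why the free tree is keyed by size:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_vma {    /* as in the free-tree sketch above */
    struct rb_node tree;
    u64 addr;
    u64 size;
};

/* find the smallest free region that is at least 'size' bytes */
static struct demo_vma *demo_free_search(struct rb_root *free, u64 size)
{
    struct rb_node *node = free->rb_node;
    struct demo_vma *best = NULL;

    while (node) {
        struct demo_vma *this = rb_entry(node, typeof(*this), tree);

        if (this->size >= size) {
            best = this;            /* candidate; look for a tighter fit */
            node = node->rb_left;
        } else {
            node = node->rb_right;
        }
    }

    return best;
}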