Lines matching the full identifier "va" (Linux kernel, mm/vmalloc.c)
766 * All vmap_area objects in this tree are sorted by va->va_start
784 va_size(struct vmap_area *va) in va_size() argument
786 return (va->va_end - va->va_start); in va_size()
792 struct vmap_area *va; in get_subtree_max_size() local
794 va = rb_entry_safe(node, struct vmap_area, rb_node); in get_subtree_max_size()
795 return va ? va->subtree_max_size : 0; in get_subtree_max_size()
813 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
816 struct vmap_area *va = NULL; in find_vmap_area_exceed_addr() local
826 va = tmp; in find_vmap_area_exceed_addr()
835 return va; in find_vmap_area_exceed_addr()
845 struct vmap_area *va; in __find_vmap_area() local
847 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
848 if (addr < va->va_start) in __find_vmap_area()
850 else if (addr >= va->va_end) in __find_vmap_area()
853 return va; in __find_vmap_area()
868 find_va_links(struct vmap_area *va, in find_va_links() argument
888 * it link, where the new va->rb_node will be attached to. in find_va_links()
898 if (va->va_end <= tmp_va->va_start) in find_va_links()
900 else if (va->va_start >= tmp_va->va_end) in find_va_links()
904 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); in find_va_links()
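The __find_vmap_area() and find_va_links() references above are two descents of the same address-sorted tree of non-overlapping ranges: the first finds the node whose [va_start, va_end) range contains an address, the second finds the parent and the child link where a new, non-overlapping range would be attached. Below is a minimal, self-contained sketch of both descents using a plain binary tree in place of the kernel's rb-tree; struct area, find_area() and find_link() are illustrative names, not kernel identifiers.

#include <stddef.h>

/* Illustrative stand-in for struct vmap_area + its rb_node. */
struct area {
	unsigned long start, end;	/* covers [start, end) */
	struct area *left, *right;	/* sorted by start, ranges disjoint */
};

/* Lookup: return the node whose range contains @addr, or NULL. */
static struct area *find_area(struct area *root, unsigned long addr)
{
	while (root) {
		if (addr < root->start)
			root = root->left;	/* addr lies below this range */
		else if (addr >= root->end)
			root = root->right;	/* addr lies above this range */
		else
			return root;		/* start <= addr < end */
	}
	return NULL;
}

/* Insertion point: return the child link where the non-overlapping
 * range @new_a would be attached, setting *@parent on the way down.
 * Returns NULL if @new_a overlaps an existing node (the kernel version
 * warns and prints both ranges in that case). */
static struct area **find_link(struct area *new_a, struct area **root,
			       struct area **parent)
{
	struct area **link = root;

	*parent = NULL;
	while (*link) {
		*parent = *link;
		if (new_a->end <= (*link)->start)
			link = &(*link)->left;	/* new range sits fully below */
		else if (new_a->start >= (*link)->end)
			link = &(*link)->right;	/* new range sits fully above */
		else
			return NULL;		/* ranges overlap */
	}
	return link;
}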
921 * The red-black tree where we try to find VA neighbors in get_va_next_sibling()
933 __link_va(struct vmap_area *va, struct rb_root *root, in __link_va() argument
938 * VA is still not in the list, but we can in __link_va()
948 rb_link_node(&va->rb_node, parent, link); in __link_va()
952 * to the tree. We do not set va->subtree_max_size to in __link_va()
961 rb_insert_augmented(&va->rb_node, in __link_va()
963 va->subtree_max_size = 0; in __link_va()
965 rb_insert_color(&va->rb_node, root); in __link_va()
969 list_add(&va->list, head); in __link_va()
973 link_va(struct vmap_area *va, struct rb_root *root, in link_va() argument
977 __link_va(va, root, parent, link, head, false); in link_va()
981 link_va_augment(struct vmap_area *va, struct rb_root *root, in link_va_augment() argument
985 __link_va(va, root, parent, link, head, true); in link_va_augment()
989 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) in __unlink_va() argument
991 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) in __unlink_va()
995 rb_erase_augmented(&va->rb_node, in __unlink_va()
998 rb_erase(&va->rb_node, root); in __unlink_va()
1000 list_del_init(&va->list); in __unlink_va()
1001 RB_CLEAR_NODE(&va->rb_node); in __unlink_va()
1005 unlink_va(struct vmap_area *va, struct rb_root *root) in unlink_va() argument
1007 __unlink_va(va, root, false); in unlink_va()
1011 unlink_va_augment(struct vmap_area *va, struct rb_root *root) in unlink_va_augment() argument
1013 __unlink_va(va, root, true); in unlink_va_augment()
1021 compute_subtree_max_size(struct vmap_area *va) in compute_subtree_max_size() argument
1023 return max3(va_size(va), in compute_subtree_max_size()
1024 get_subtree_max_size(va->rb_node.rb_left), in compute_subtree_max_size()
1025 get_subtree_max_size(va->rb_node.rb_right)); in compute_subtree_max_size()
1031 struct vmap_area *va; in augment_tree_propagate_check() local
1034 list_for_each_entry(va, &free_vmap_area_list, list) { in augment_tree_propagate_check()
1035 computed_size = compute_subtree_max_size(va); in augment_tree_propagate_check()
1036 if (computed_size != va->subtree_max_size) in augment_tree_propagate_check()
1038 va_size(va), va->subtree_max_size); in augment_tree_propagate_check()
1045 * levels starting from VA point. The propagation must be done
1046 * when VA size is modified by changing its va_start/va_end. Or
1047 * in case of newly inserting of VA to the tree.
1050 * - After VA has been inserted to the tree(free path);
1051 * - After VA has been shrunk(allocation path);
1052 * - After VA has been increased(merging path).
1071 augment_tree_propagate_from(struct vmap_area *va) in augment_tree_propagate_from() argument
1078 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); in augment_tree_propagate_from()
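compute_subtree_max_size() and the propagation comment above describe the augmented part of the free tree: each node caches the largest free-area size found anywhere in its subtree, and the cached values must be refreshed bottom-up whenever a node's va_start/va_end changes or a node is inserted or removed. The sketch below shows that bottom-up refresh on a parent-linked tree; the kernel does the same work through rb-tree augment callbacks, and struct free_node, propagate_from() and the helpers are illustrative names only.

#include <stddef.h>

/* Conceptual model of the augmented free tree. */
struct free_node {
	unsigned long start, end;		/* free range [start, end) */
	unsigned long subtree_max_size;		/* max (end - start) below */
	struct free_node *left, *right, *parent;
};

static unsigned long node_size(const struct free_node *n)
{
	return n->end - n->start;
}

static unsigned long subtree_max(const struct free_node *n)
{
	return n ? n->subtree_max_size : 0;
}

/* Recompute the cached maximum for @n and walk towards the root while
 * the value keeps changing, as the "propagate from the modified VA"
 * comment above describes. */
static void propagate_from(struct free_node *n)
{
	while (n) {
		unsigned long m = node_size(n);

		if (subtree_max(n->left) > m)
			m = subtree_max(n->left);
		if (subtree_max(n->right) > m)
			m = subtree_max(n->right);

		if (n->subtree_max_size == m)
			break;		/* ancestors are already up to date */

		n->subtree_max_size = m;
		n = n->parent;
	}
}

The early break is safe because an ancestor's cached maximum can only change if the child's cached value changed.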
1086 insert_vmap_area(struct vmap_area *va, in insert_vmap_area() argument
1092 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area()
1094 link_va(va, root, parent, link, head); in insert_vmap_area()
1098 insert_vmap_area_augment(struct vmap_area *va, in insert_vmap_area_augment() argument
1106 link = find_va_links(va, NULL, from, &parent); in insert_vmap_area_augment()
1108 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area_augment()
1111 link_va_augment(va, root, parent, link, head); in insert_vmap_area_augment()
1112 augment_tree_propagate_from(va); in insert_vmap_area_augment()
1117 * Merge de-allocated chunk of VA memory with previous
1119 * free area is inserted. If VA has been merged, it is
1128 __merge_or_add_vmap_area(struct vmap_area *va, in __merge_or_add_vmap_area() argument
1138 * Find a place in the tree where VA potentially will be in __merge_or_add_vmap_area()
1141 link = find_va_links(va, root, NULL, &parent); in __merge_or_add_vmap_area()
1146 * Get next node of VA to check if merging can be done. in __merge_or_add_vmap_area()
1155 * |<------VA------>|<-----Next----->| in __merge_or_add_vmap_area()
1161 if (sibling->va_start == va->va_end) { in __merge_or_add_vmap_area()
1162 sibling->va_start = va->va_start; in __merge_or_add_vmap_area()
1165 kmem_cache_free(vmap_area_cachep, va); in __merge_or_add_vmap_area()
1168 va = sibling; in __merge_or_add_vmap_area()
1176 * |<-----Prev----->|<------VA------>| in __merge_or_add_vmap_area()
1182 if (sibling->va_end == va->va_start) { in __merge_or_add_vmap_area()
1191 __unlink_va(va, root, augment); in __merge_or_add_vmap_area()
1193 sibling->va_end = va->va_end; in __merge_or_add_vmap_area()
1196 kmem_cache_free(vmap_area_cachep, va); in __merge_or_add_vmap_area()
1199 va = sibling; in __merge_or_add_vmap_area()
1206 __link_va(va, root, parent, link, head, augment); in __merge_or_add_vmap_area()
1208 return va; in __merge_or_add_vmap_area()
1212 merge_or_add_vmap_area(struct vmap_area *va, in merge_or_add_vmap_area() argument
1215 return __merge_or_add_vmap_area(va, root, head, false); in merge_or_add_vmap_area()
1219 merge_or_add_vmap_area_augment(struct vmap_area *va, in merge_or_add_vmap_area_augment() argument
1222 va = __merge_or_add_vmap_area(va, root, head, true); in merge_or_add_vmap_area_augment()
1223 if (va) in merge_or_add_vmap_area_augment()
1224 augment_tree_propagate_from(va); in merge_or_add_vmap_area_augment()
1226 return va; in merge_or_add_vmap_area_augment()
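__merge_or_add_vmap_area() and its wrappers above return a freed range to the free structure while coalescing it with any bordering free areas, so no two touching entries ever coexist. A simplified sketch of the same three cases on a sorted, doubly-linked list of free ranges follows; the kernel keeps both an rb-tree and a list and merges the vmap_area nodes in place, and struct range and merge_or_add() here are illustrative names.

#include <stdlib.h>

/* Free ranges on a list sorted by start; touching entries are merged
 * at free time, so neighbours never abut. */
struct range {
	unsigned long start, end;	/* [start, end) */
	struct range *prev, *next;
};

/* Return the freed range [start, end) to the free list. @prev and
 * @next are the list neighbours surrounding the gap (either may be
 * NULL); when the freed range touches one of them it is absorbed
 * instead of linking a new node. Returns the node now holding the
 * range, or NULL if a fresh node was needed but could not be
 * allocated. */
static struct range *merge_or_add(struct range **head, struct range *prev,
				  struct range *next,
				  unsigned long start, unsigned long end)
{
	struct range *r;

	if (prev && prev->end == start) {
		/* |<--prev-->|<--freed-->| : grow prev to the right. */
		prev->end = end;
		if (next && next->start == end) {
			/* ...and it now touches next: absorb next too. */
			prev->end = next->end;
			prev->next = next->next;
			if (next->next)
				next->next->prev = prev;
			free(next);
		}
		return prev;
	}

	if (next && next->start == end) {
		/* |<--freed-->|<--next-->| : grow next to the left. */
		next->start = start;
		return next;
	}

	/* No touching neighbour: link a fresh node between prev and next. */
	r = malloc(sizeof(*r));
	if (!r)
		return NULL;
	r->start = start;
	r->end = end;
	r->prev = prev;
	r->next = next;
	if (prev)
		prev->next = r;
	else
		*head = r;
	if (next)
		next->prev = r;
	return r;
}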
1230 is_within_this_va(struct vmap_area *va, unsigned long size, in is_within_this_va() argument
1235 if (va->va_start > vstart) in is_within_this_va()
1236 nva_start_addr = ALIGN(va->va_start, align); in is_within_this_va()
1245 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
1259 struct vmap_area *va; in find_vmap_lowest_match() local
1270 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
1273 vstart < va->va_start) { in find_vmap_lowest_match()
1276 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
1277 return va; in find_vmap_lowest_match()
1296 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
1297 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
1298 return va; in find_vmap_lowest_match()
1301 vstart <= va->va_start) { in find_vmap_lowest_match()
1308 vstart = va->va_start + 1; in find_vmap_lowest_match()
1326 struct vmap_area *va; in find_vmap_lowest_linear_match() local
1328 list_for_each_entry(va, head, list) { in find_vmap_lowest_linear_match()
1329 if (!is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_linear_match()
1332 return va; in find_vmap_lowest_linear_match()
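is_within_this_va(), find_vmap_lowest_match() and find_vmap_lowest_linear_match() above implement the allocation-side search: round the candidate start address up to the requested alignment (and no lower than vstart), then pick the lowest free area the request still fits into. Below is a self-contained sketch of the fit test plus the straightforward linear scan; the kernel's tree walk returns the same lowest match but uses subtree_max_size to skip subtrees that cannot hold the request. align_up(), fits() and lowest_match() are illustrative names, and @align is assumed to be a power of two.

#include <stddef.h>

struct free_area {
	unsigned long start, end;	/* free range [start, end) */
	struct free_area *next;		/* list sorted by start */
};

/* Round @x up to a multiple of @align (a power of two). */
static unsigned long align_up(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

/* Can @size bytes, aligned to @align and starting no lower than
 * @vstart, be placed inside this free area? */
static int fits(const struct free_area *a, unsigned long size,
		unsigned long align, unsigned long vstart)
{
	unsigned long addr = a->start > vstart ? a->start : vstart;

	addr = align_up(addr, align);
	if (addr + size < addr)
		return 0;		/* overflow from huge size/alignment */

	return addr + size <= a->end;
}

/* Lowest-address fit by linear scan; the kernel's tree walk gives the
 * same answer but prunes whole subtrees via subtree_max_size. */
static struct free_area *lowest_match(struct free_area *head,
				      unsigned long size, unsigned long align,
				      unsigned long vstart)
{
	struct free_area *a;

	for (a = head; a; a = a->next)
		if (fits(a, size, align, vstart))
			return a;
	return NULL;
}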
1367 classify_va_fit_type(struct vmap_area *va, in classify_va_fit_type() argument
1372 /* Check if it is within VA. */ in classify_va_fit_type()
1373 if (nva_start_addr < va->va_start || in classify_va_fit_type()
1374 nva_start_addr + size > va->va_end) in classify_va_fit_type()
1378 if (va->va_start == nva_start_addr) { in classify_va_fit_type()
1379 if (va->va_end == nva_start_addr + size) in classify_va_fit_type()
1383 } else if (va->va_end == nva_start_addr + size) { in classify_va_fit_type()
1394 struct vmap_area *va, unsigned long nva_start_addr, in adjust_va_to_fit_type() argument
1398 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); in adjust_va_to_fit_type()
1402 * No need to split VA, it fully fits. in adjust_va_to_fit_type()
1408 unlink_va_augment(va, root); in adjust_va_to_fit_type()
1409 kmem_cache_free(vmap_area_cachep, va); in adjust_va_to_fit_type()
1412 * Split left edge of fit VA. in adjust_va_to_fit_type()
1418 va->va_start += size; in adjust_va_to_fit_type()
1421 * Split right edge of fit VA. in adjust_va_to_fit_type()
1427 va->va_end = nva_start_addr; in adjust_va_to_fit_type()
1430 * Split no edge of fit VA. in adjust_va_to_fit_type()
1471 lva->va_start = va->va_start; in adjust_va_to_fit_type()
1475 * Shrink this VA to remaining size. in adjust_va_to_fit_type()
1477 va->va_start = nva_start_addr + size; in adjust_va_to_fit_type()
1483 augment_tree_propagate_from(va); in adjust_va_to_fit_type()
1486 insert_vmap_area_augment(lva, &va->rb_node, root, head); in adjust_va_to_fit_type()
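classify_va_fit_type() and adjust_va_to_fit_type() above carve the allocated block out of the chosen free area: the request may cover the whole area, touch its left or right edge, or sit strictly inside it, in which case the area is split and an extra node is needed for the left remainder. The kernel then re-propagates subtree_max_size and links the new node into the augmented tree, as the last two references above show. Below is a sketch of the classification and the carve with that tree/list bookkeeping left out; enum fit, struct hole, classify_fit() and carve() are illustrative names, not the kernel's.

#include <stdlib.h>

enum fit {
	NO_FIT,		/* request does not lie inside the free hole    */
	FULL_FIT,	/* request covers the whole hole                */
	LEFT_FIT,	/* request starts at the hole's left edge       */
	RIGHT_FIT,	/* request ends at the hole's right edge        */
	MIDDLE_FIT,	/* request sits strictly inside: needs a split  */
};

struct hole {
	unsigned long start, end;	/* free range [start, end) */
};

static enum fit classify_fit(const struct hole *h,
			     unsigned long r_start, unsigned long size)
{
	unsigned long r_end = r_start + size;

	if (r_start < h->start || r_end > h->end)
		return NO_FIT;
	if (r_start == h->start)
		return r_end == h->end ? FULL_FIT : LEFT_FIT;
	if (r_end == h->end)
		return RIGHT_FIT;
	return MIDDLE_FIT;
}

/* Carve [r_start, r_start + size) out of @h. Returns 0 on success or
 * -1 when the request does not fit or the extra node for a middle
 * split cannot be allocated (the kernel keeps a preloaded per-CPU node
 * for that case). On success *@left_out holds the left remainder of a
 * middle split (or NULL) and *@empty is set when @h became empty and
 * should be removed by the caller. */
static int carve(struct hole *h, unsigned long r_start, unsigned long size,
		 struct hole **left_out, int *empty)
{
	struct hole *left;

	*left_out = NULL;
	*empty = 0;

	switch (classify_fit(h, r_start, size)) {
	case FULL_FIT:
		*empty = 1;			/* nothing remains */
		return 0;
	case LEFT_FIT:
		h->start += size;		/* keep the right remainder */
		return 0;
	case RIGHT_FIT:
		h->end = r_start;		/* keep the left remainder */
		return 0;
	case MIDDLE_FIT:
		left = malloc(sizeof(*left));
		if (!left)
			return -1;
		left->start = h->start;		/* left remainder */
		left->end = r_start;
		h->start = r_start + size;	/* @h keeps the right part */
		*left_out = left;
		return 0;
	default:				/* NO_FIT */
		return -1;
	}
}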
1503 struct vmap_area *va; in __alloc_vmap_area() local
1518 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); in __alloc_vmap_area()
1519 if (unlikely(!va)) in __alloc_vmap_area()
1522 if (va->va_start > vstart) in __alloc_vmap_area()
1523 nva_start_addr = ALIGN(va->va_start, align); in __alloc_vmap_area()
1532 ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size); in __alloc_vmap_area()
1546 static void free_vmap_area(struct vmap_area *va) in free_vmap_area() argument
1552 unlink_va(va, &vmap_area_root); in free_vmap_area()
1559 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); in free_vmap_area()
1566 struct vmap_area *va = NULL; in preload_this_cpu_lock() local
1578 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in preload_this_cpu_lock()
1582 if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va)) in preload_this_cpu_lock()
1583 kmem_cache_free(vmap_area_cachep, va); in preload_this_cpu_lock()
1596 struct vmap_area *va; in alloc_vmap_area() local
1611 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in alloc_vmap_area()
1612 if (unlikely(!va)) in alloc_vmap_area()
1619 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
1636 va->va_start = addr; in alloc_vmap_area()
1637 va->va_end = addr + size; in alloc_vmap_area()
1638 va->vm = NULL; in alloc_vmap_area()
1639 va->flags = va_flags; in alloc_vmap_area()
1642 insert_vmap_area(va, &vmap_area_root, &vmap_area_list); in alloc_vmap_area()
1645 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
1646 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
1647 BUG_ON(va->va_end > vend); in alloc_vmap_area()
1651 free_vmap_area(va); in alloc_vmap_area()
1655 return va; in alloc_vmap_area()
1676 kmem_cache_free(vmap_area_cachep, va); in alloc_vmap_area()
1737 struct vmap_area *va, *n_va; in __purge_vmap_area_lazy() local
1761 list_for_each_entry_safe(va, n_va, &local_purge_list, list) { in __purge_vmap_area_lazy()
1762 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; in __purge_vmap_area_lazy()
1763 unsigned long orig_start = va->va_start; in __purge_vmap_area_lazy()
1764 unsigned long orig_end = va->va_end; in __purge_vmap_area_lazy()
1771 va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root, in __purge_vmap_area_lazy()
1774 if (!va) in __purge_vmap_area_lazy()
1779 va->va_start, va->va_end); in __purge_vmap_area_lazy()
1825 static void free_vmap_area_noflush(struct vmap_area *va) in free_vmap_area_noflush() argument
1828 unsigned long va_start = va->va_start; in free_vmap_area_noflush()
1831 if (WARN_ON_ONCE(!list_empty(&va->list))) in free_vmap_area_noflush()
1834 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> in free_vmap_area_noflush()
1841 merge_or_add_vmap_area(va, in free_vmap_area_noflush()
1847 /* After this point, we may free va at any time */ in free_vmap_area_noflush()
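free_vmap_area_noflush() above defers the expensive part of freeing: it only adds the area's page count to a global lazy counter and merges the area into the purge list (the merge_or_add_vmap_area() call above), so many unmaps can share a single batched TLB flush later. A minimal sketch of that threshold-style accounting in C11 atomics follows; the kernel uses atomic_long_add_return() on its own counter and then schedules the purge worker, and account_lazy_free() is an illustrative name.

#include <stdatomic.h>
#include <stdbool.h>

/* Pages whose mappings were torn down lazily but whose TLB entries
 * have not been flushed yet (illustrative global). */
static atomic_ulong lazy_pages;

/* Account @nr_pages lazily freed pages; returns true when the total
 * crosses @threshold_pages and the caller should kick off a purge
 * pass (batched unmap plus one TLB flush). */
static bool account_lazy_free(unsigned long nr_pages,
			      unsigned long threshold_pages)
{
	return atomic_fetch_add(&lazy_pages, nr_pages) + nr_pages
		>= threshold_pages;
}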
1855 static void free_unmap_vmap_area(struct vmap_area *va) in free_unmap_vmap_area() argument
1857 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
1858 vunmap_range_noflush(va->va_start, va->va_end); in free_unmap_vmap_area()
1860 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
1862 free_vmap_area_noflush(va); in free_unmap_vmap_area()
1867 struct vmap_area *va; in find_vmap_area() local
1870 va = __find_vmap_area(addr, &vmap_area_root); in find_vmap_area()
1873 return va; in find_vmap_area()
1878 struct vmap_area *va; in find_unlink_vmap_area() local
1881 va = __find_vmap_area(addr, &vmap_area_root); in find_unlink_vmap_area()
1882 if (va) in find_unlink_vmap_area()
1883 unlink_va(va, &vmap_area_root); in find_unlink_vmap_area()
1886 return va; in find_unlink_vmap_area()
1943 struct vmap_area *va; member
2043 struct vmap_area *va; in new_vmap_block() local
2056 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, in new_vmap_block()
2060 if (IS_ERR(va)) { in new_vmap_block()
2062 return ERR_CAST(va); in new_vmap_block()
2065 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
2067 vb->va = va; in new_vmap_block()
2079 xa = addr_to_vb_xa(va->va_start); in new_vmap_block()
2080 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
2084 free_vmap_area(va); in new_vmap_block()
2107 xa = addr_to_vb_xa(vb->va->va_start); in free_vmap_block()
2108 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
2112 unlink_va(vb->va, &vmap_area_root); in free_vmap_block()
2115 free_vmap_area_noflush(vb->va); in free_vmap_block()
2220 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
2310 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
2367 struct vmap_area *va; in vm_unmap_ram() local
2383 va = find_unlink_vmap_area(addr); in vm_unmap_ram()
2384 if (WARN_ON_ONCE(!va)) in vm_unmap_ram()
2387 debug_check_no_locks_freed((void *)va->va_start, in vm_unmap_ram()
2388 (va->va_end - va->va_start)); in vm_unmap_ram()
2389 free_unmap_vmap_area(va); in vm_unmap_ram()
2419 struct vmap_area *va; in vm_map_ram() local
2420 va = alloc_vmap_area(size, PAGE_SIZE, in vm_map_ram()
2423 if (IS_ERR(va)) in vm_map_ram()
2426 addr = va->va_start; in vm_map_ram()
2567 struct vmap_area *va, unsigned long flags, const void *caller) in setup_vmalloc_vm_locked() argument
2570 vm->addr = (void *)va->va_start; in setup_vmalloc_vm_locked()
2571 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm_locked()
2573 va->vm = vm; in setup_vmalloc_vm_locked()
2576 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, in setup_vmalloc_vm() argument
2580 setup_vmalloc_vm_locked(vm, va, flags, caller); in setup_vmalloc_vm()
2600 struct vmap_area *va; in __get_vm_area_node() local
2620 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0); in __get_vm_area_node()
2621 if (IS_ERR(va)) { in __get_vm_area_node()
2626 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2690 struct vmap_area *va; in find_vm_area() local
2692 va = find_vmap_area((unsigned long)addr); in find_vm_area()
2693 if (!va) in find_vm_area()
2696 return va->vm; in find_vm_area()
2711 struct vmap_area *va; in remove_vm_area() local
2720 va = find_unlink_vmap_area((unsigned long)addr); in remove_vm_area()
2721 if (!va || !va->vm) in remove_vm_area()
2723 vm = va->vm; in remove_vm_area()
2730 free_unmap_vmap_area(va); in remove_vm_area()
3698 start = vmap_block_vaddr(vb->va->va_start, rs); in vmap_ram_vread_iter()
3763 struct vmap_area *va; in vread_iter() local
3777 va = find_vmap_area_exceed_addr((unsigned long)addr); in vread_iter()
3778 if (!va) in vread_iter()
3782 if ((unsigned long)addr + remains <= va->va_start) in vread_iter()
3785 list_for_each_entry_from(va, &vmap_area_list, list) { in vread_iter()
3791 vm = va->vm; in vread_iter()
3792 flags = va->flags & VMAP_FLAGS_MASK; in vread_iter()
3808 vaddr = (char *) va->va_start; in vread_iter()
3809 size = vm ? get_vm_area_size(vm) : va_size(va); in vread_iter()
3961 * i.e. va->va_start < addr && va->va_end < addr or NULL
3967 struct vmap_area *va, *tmp; in pvm_find_va_enclose_addr() local
3971 va = NULL; in pvm_find_va_enclose_addr()
3976 va = tmp; in pvm_find_va_enclose_addr()
3986 return va; in pvm_find_va_enclose_addr()
3992 * @va:
3993 * in - the VA we start the search(reverse order);
3994 * out - the VA with the highest aligned end address.
4000 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) in pvm_determine_end_from_reverse() argument
4005 if (likely(*va)) { in pvm_determine_end_from_reverse()
4006 list_for_each_entry_from_reverse((*va), in pvm_determine_end_from_reverse()
4008 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
4009 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
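pvm_find_va_enclose_addr() and pvm_determine_end_from_reverse() above support pcpu_get_vm_areas(): starting from a free area and walking towards lower addresses, find the highest aligned end address that still lies inside a free area and below the vmalloc limit, updating the in/out cursor as documented for @va. A self-contained sketch of that reverse walk over a list of free ranges follows; struct free_span and highest_aligned_end() are illustrative names, and @align is assumed to be a power of two as in the kernel.

#include <stddef.h>

struct free_span {
	unsigned long start, end;	/* free range [start, end) */
	struct free_span *prev;		/* list sorted by start; walk back */
};

/* Starting at *@s and walking towards lower addresses, find the
 * highest address that is aligned down to @align, does not exceed
 * @limit and still lies strictly inside a free span. *@s is updated
 * to that span, mirroring the in/out @va parameter documented above.
 * Returns 0 when no suitable span exists. */
static unsigned long highest_aligned_end(struct free_span **s,
					 unsigned long align,
					 unsigned long limit)
{
	unsigned long addr;

	for (; *s; *s = (*s)->prev) {
		addr = (*s)->end & ~(align - 1);	/* round end down */
		if (addr > limit)
			addr = limit;
		if ((*s)->start < addr)
			return addr;	/* aligned end inside this span */
	}
	return 0;
}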
4047 struct vmap_area **vas, *va; in pcpu_get_vm_areas() local
4100 va = pvm_find_va_enclose_addr(vmalloc_end); in pcpu_get_vm_areas()
4101 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4114 if (va == NULL) in pcpu_get_vm_areas()
4118 * If required width exceeds current VA block, move in pcpu_get_vm_areas()
4121 if (base + end > va->va_end) { in pcpu_get_vm_areas()
4122 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4128 * If this VA does not fit, move base downwards and recheck. in pcpu_get_vm_areas()
4130 if (base + start < va->va_start) { in pcpu_get_vm_areas()
4131 va = node_to_va(rb_prev(&va->rb_node)); in pcpu_get_vm_areas()
4132 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
4147 va = pvm_find_va_enclose_addr(base + end); in pcpu_get_vm_areas()
4150 /* we've found a fitting base, insert all va's */ in pcpu_get_vm_areas()
4157 va = pvm_find_va_enclose_addr(start); in pcpu_get_vm_areas()
4158 if (WARN_ON_ONCE(va == NULL)) in pcpu_get_vm_areas()
4164 va, start, size); in pcpu_get_vm_areas()
4170 va = vas[area]; in pcpu_get_vm_areas()
4171 va->va_start = start; in pcpu_get_vm_areas()
4172 va->va_end = start + size; in pcpu_get_vm_areas()
4216 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4218 if (va) in pcpu_get_vm_areas()
4220 va->va_start, va->va_end); in pcpu_get_vm_areas()
4266 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4268 if (va) in pcpu_get_vm_areas()
4270 va->va_start, va->va_end); in pcpu_get_vm_areas()
4303 struct vmap_area *va; in vmalloc_dump_obj() local
4309 va = __find_vmap_area((unsigned long)objp, &vmap_area_root); in vmalloc_dump_obj()
4310 if (!va) { in vmalloc_dump_obj()
4315 vm = va->vm; in vmalloc_dump_obj()
4380 struct vmap_area *va; in show_purge_info() local
4383 list_for_each_entry(va, &purge_vmap_area_list, list) { in show_purge_info()
4385 (void *)va->va_start, (void *)va->va_end, in show_purge_info()
4386 va->va_end - va->va_start); in show_purge_info()
4393 struct vmap_area *va; in s_show() local
4396 va = list_entry(p, struct vmap_area, list); in s_show()
4398 if (!va->vm) { in s_show()
4399 if (va->flags & VMAP_RAM) in s_show()
4401 (void *)va->va_start, (void *)va->va_end, in s_show()
4402 va->va_end - va->va_start); in s_show()
4407 v = va->vm; in s_show()
4446 if (list_is_last(&va->list, &vmap_area_list)) in s_show()
4475 struct vmap_area *va; in vmalloc_init() local
4499 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); in vmalloc_init()
4500 if (WARN_ON_ONCE(!va)) in vmalloc_init()
4503 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
4504 va->va_end = va->va_start + tmp->size; in vmalloc_init()
4505 va->vm = tmp; in vmalloc_init()
4506 insert_vmap_area(va, &vmap_area_root, &vmap_area_list); in vmalloc_init()