Lines Matching +full:vm +full:- +full:map
6 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
26 #include <linux/radix-tree.h>
55 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) in free_work()
147 return -ENOMEM; in vmap_pte_range()
152 return -EBUSY; in vmap_pte_range()
154 return -ENOMEM; in vmap_pte_range()
169 return -ENOMEM; in vmap_pmd_range()
173 return -ENOMEM; in vmap_pmd_range()
186 return -ENOMEM; in vmap_pud_range()
190 return -ENOMEM; in vmap_pud_range()
203 return -ENOMEM; in vmap_p4d_range()
207 return -ENOMEM; in vmap_p4d_range()
252 * ARM, x86-64 and sparc64 put modules in a special place, in is_vmalloc_or_module_addr()
316 * Map a vmalloc()-space virtual address to the physical page frame number.
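The entry at 316 above is the kerneldoc summary for vmalloc_to_pfn(). A minimal hedged sketch of that helper together with its sibling vmalloc_to_page() follows; the function name, buffer and size are illustrative, not part of the file.

#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>

/* Sketch: resolve the page frame backing the start of a vmalloc buffer. */
static void show_first_pfn(void)
{
	void *buf = vmalloc(4 * PAGE_SIZE);	/* hypothetical buffer */

	if (!buf)
		return;
	pr_info("first backing pfn: %lu (page %p)\n",
		vmalloc_to_pfn(buf), vmalloc_to_page(buf));
	vfree(buf);
}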
352 if (addr < va->va_start) in __find_vmap_area()
353 n = n->rb_left; in __find_vmap_area()
354 else if (addr >= va->va_end) in __find_vmap_area()
355 n = n->rb_right; in __find_vmap_area()
374 if (va->va_start < tmp_va->va_end) in __insert_vmap_area()
375 p = &(*p)->rb_left; in __insert_vmap_area()
376 else if (va->va_end > tmp_va->va_start) in __insert_vmap_area()
377 p = &(*p)->rb_right; in __insert_vmap_area()
382 rb_link_node(&va->rb_node, parent, p); in __insert_vmap_area()
383 rb_insert_color(&va->rb_node, &vmap_area_root); in __insert_vmap_area()
385 /* address-sort this list */ in __insert_vmap_area()
386 tmp = rb_prev(&va->rb_node); in __insert_vmap_area()
390 list_add_rcu(&va->list, &prev->list); in __insert_vmap_area()
392 list_add_rcu(&va->list, &vmap_area_list); in __insert_vmap_area()
423 return ERR_PTR(-ENOMEM); in alloc_vmap_area()
429 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); in alloc_vmap_area()
457 addr = ALIGN(first->va_end, align); in alloc_vmap_area()
474 if (tmp->va_end >= addr) { in alloc_vmap_area()
476 if (tmp->va_start <= addr) in alloc_vmap_area()
478 n = n->rb_left; in alloc_vmap_area()
480 n = n->rb_right; in alloc_vmap_area()
488 while (addr + size > first->va_start && addr + size <= vend) { in alloc_vmap_area()
489 if (addr + cached_hole_size < first->va_start) in alloc_vmap_area()
490 cached_hole_size = first->va_start - addr; in alloc_vmap_area()
491 addr = ALIGN(first->va_end, align); in alloc_vmap_area()
495 if (list_is_last(&first->list, &vmap_area_list)) in alloc_vmap_area()
509 va->va_start = addr; in alloc_vmap_area()
510 va->va_end = addr + size; in alloc_vmap_area()
511 va->flags = 0; in alloc_vmap_area()
513 free_vmap_cache = &va->rb_node; in alloc_vmap_area()
516 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
517 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
518 BUG_ON(va->va_end > vend); in alloc_vmap_area()
543 return ERR_PTR(-EBUSY); in alloc_vmap_area()
560 BUG_ON(RB_EMPTY_NODE(&va->rb_node)); in __free_vmap_area()
563 if (va->va_end < cached_vstart) { in __free_vmap_area()
568 if (va->va_start <= cache->va_start) { in __free_vmap_area()
569 free_vmap_cache = rb_prev(&va->rb_node); in __free_vmap_area()
577 rb_erase(&va->rb_node, &vmap_area_root); in __free_vmap_area()
578 RB_CLEAR_NODE(&va->rb_node); in __free_vmap_area()
579 list_del_rcu(&va->list); in __free_vmap_area()
587 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) in __free_vmap_area()
588 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); in __free_vmap_area()
608 vunmap_page_range(va->va_start, va->va_end); in unmap_vmap_area()
645 /* for per-CPU blocks */
658 * Purges all lazily-freed vmap areas.
671 if (va->va_start < start) in __purge_vmap_area_lazy()
672 start = va->va_start; in __purge_vmap_area_lazy()
673 if (va->va_end > end) in __purge_vmap_area_lazy()
674 end = va->va_end; in __purge_vmap_area_lazy()
685 int nr = (va->va_end - va->va_start) >> PAGE_SHIFT; in __purge_vmap_area_lazy()
727 nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT, in free_vmap_area_noflush()
731 llist_add(&va->purge_list, &vmap_purge_list); in free_vmap_area_noflush()
742 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
745 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
769 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
828 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); in addr_to_vb_idx()
843 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
848 * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
864 return ERR_PTR(-ENOMEM); in new_vmap_block()
881 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
882 spin_lock_init(&vb->lock); in new_vmap_block()
883 vb->va = va; in new_vmap_block()
886 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
887 vb->dirty = 0; in new_vmap_block()
888 vb->dirty_min = VMAP_BBMAP_BITS; in new_vmap_block()
889 vb->dirty_max = 0; in new_vmap_block()
890 INIT_LIST_HEAD(&vb->free_list); in new_vmap_block()
892 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
900 spin_lock(&vbq->lock); in new_vmap_block()
901 list_add_tail_rcu(&vb->free_list, &vbq->free); in new_vmap_block()
902 spin_unlock(&vbq->lock); in new_vmap_block()
913 vb_idx = addr_to_vb_idx(vb->va->va_start); in free_vmap_block()
919 free_vmap_area_noflush(vb->va); in free_vmap_block()
931 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in purge_fragmented_blocks()
933 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) in purge_fragmented_blocks()
936 spin_lock(&vb->lock); in purge_fragmented_blocks()
937 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { in purge_fragmented_blocks()
938 vb->free = 0; /* prevent further allocs after releasing lock */ in purge_fragmented_blocks()
939 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ in purge_fragmented_blocks()
940 vb->dirty_min = 0; in purge_fragmented_blocks()
941 vb->dirty_max = VMAP_BBMAP_BITS; in purge_fragmented_blocks()
942 spin_lock(&vbq->lock); in purge_fragmented_blocks()
943 list_del_rcu(&vb->free_list); in purge_fragmented_blocks()
944 spin_unlock(&vbq->lock); in purge_fragmented_blocks()
945 spin_unlock(&vb->lock); in purge_fragmented_blocks()
946 list_add_tail(&vb->purge, &purge); in purge_fragmented_blocks()
948 spin_unlock(&vb->lock); in purge_fragmented_blocks()
953 list_del(&vb->purge); in purge_fragmented_blocks()
987 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in vb_alloc()
990 spin_lock(&vb->lock); in vb_alloc()
991 if (vb->free < (1UL << order)) { in vb_alloc()
992 spin_unlock(&vb->lock); in vb_alloc()
996 pages_off = VMAP_BBMAP_BITS - vb->free; in vb_alloc()
997 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
998 vb->free -= 1UL << order; in vb_alloc()
999 if (vb->free == 0) { in vb_alloc()
1000 spin_lock(&vbq->lock); in vb_alloc()
1001 list_del_rcu(&vb->free_list); in vb_alloc()
1002 spin_unlock(&vbq->lock); in vb_alloc()
1005 spin_unlock(&vb->lock); in vb_alloc()
1033 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1); in vb_free()
1048 spin_lock(&vb->lock); in vb_free()
1051 vb->dirty_min = min(vb->dirty_min, offset); in vb_free()
1052 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); in vb_free()
1054 vb->dirty += 1UL << order; in vb_free()
1055 if (vb->dirty == VMAP_BBMAP_BITS) { in vb_free()
1056 BUG_ON(vb->free); in vb_free()
1057 spin_unlock(&vb->lock); in vb_free()
1060 spin_unlock(&vb->lock); in vb_free()
1064 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1092 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in vm_unmap_aliases()
1093 spin_lock(&vb->lock); in vm_unmap_aliases()
1094 if (vb->dirty) { in vm_unmap_aliases()
1095 unsigned long va_start = vb->va->va_start; in vm_unmap_aliases()
1098 s = va_start + (vb->dirty_min << PAGE_SHIFT); in vm_unmap_aliases()
1099 e = va_start + (vb->dirty_max << PAGE_SHIFT); in vm_unmap_aliases()
1106 spin_unlock(&vb->lock); in vm_unmap_aliases()
1120 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1144 debug_check_no_locks_freed((void *)va->va_start, in vm_unmap_ram()
1145 (va->va_end - va->va_start)); in vm_unmap_ram()
1151 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1158 * faster than vmap so it's good. But if you mix long-life and short-life
1161 * the end. Please use this function for short-lived objects.
1183 addr = va->va_start; in vm_map_ram()
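The vm_unmap_ram()/vm_map_ram() fragments above (source lines 1120-1183) describe a fast path for short-lived mappings of an existing page array. A hedged usage sketch follows; the function name, 'pages' array and 'nr' count are assumptions, and the count passed to vm_unmap_ram() must match the one passed to vm_map_ram().

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Sketch: briefly map 'nr' already-allocated pages contiguously, then
 * tear the mapping down again. Intended for short-lived objects only,
 * per the comment at source line 1161. */
static int map_briefly(struct page **pages, unsigned int nr)
{
	void *addr = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, nr * PAGE_SIZE);	/* use the contiguous virtual view */
	vm_unmap_ram(addr, nr);			/* count must match the map call */
	return 0;
}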
1196 * vm_area_add_early - add vmap area early during boot
1197 * @vm: vm_struct to add
1199 * This function is used to add fixed kernel vm area to vmlist before
1200 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1205 void __init vm_area_add_early(struct vm_struct *vm) in vm_area_add_early() argument
1210 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { in vm_area_add_early()
1211 if (tmp->addr >= vm->addr) { in vm_area_add_early()
1212 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
1215 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
1217 vm->next = *p; in vm_area_add_early()
1218 *p = vm; in vm_area_add_early()
1222 * vm_area_register_early - register vmap area early during boot
1223 * @vm: vm_struct to register
1226 * This function is used to register kernel vm area before
1227 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1229 * vm->addr contains the allocated address.
1233 void __init vm_area_register_early(struct vm_struct *vm, size_t align) in vm_area_register_early() argument
1239 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; in vm_area_register_early()
1241 vm->addr = (void *)addr; in vm_area_register_early()
1243 vm_area_add_early(vm); in vm_area_register_early()
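vm_area_add_early() and vm_area_register_early() above are boot-time hooks: they link a caller-provided vm_struct into vmlist before vmalloc_init() runs. A heavily hedged sketch modelled on how the per-cpu first-chunk setup uses the register variant; the static structure, size and function name are illustrative.

#include <linux/vmalloc.h>

static struct vm_struct early_vm;	/* hypothetical arch/boot reservation */

static void __init reserve_early_vm(void)
{
	/* Per the kerneldoc, the caller fills size and flags; the function
	 * picks an address, stores it in early_vm.addr and links the area. */
	early_vm.flags = VM_ALLOC;
	early_vm.size = 4 * PAGE_SIZE;
	vm_area_register_early(&early_vm, PAGE_SIZE);
}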
1257 spin_lock_init(&vbq->lock); in vmalloc_init()
1258 INIT_LIST_HEAD(&vbq->free); in vmalloc_init()
1260 init_llist_head(&p->list); in vmalloc_init()
1261 INIT_WORK(&p->wq, free_work); in vmalloc_init()
1265 for (tmp = vmlist; tmp; tmp = tmp->next) { in vmalloc_init()
1267 va->flags = VM_VM_AREA; in vmalloc_init()
1268 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
1269 va->va_end = va->va_start + tmp->size; in vmalloc_init()
1270 va->vm = tmp; in vmalloc_init()
1280 * map_kernel_range_noflush - map kernel VM area with the specified pages
1281 * @addr: start of the VM area to map
1282 * @size: size of the VM area to map
1284 * @pages: pages to map
1286 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
1292 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1296 * The number of pages mapped on success, -errno on failure.
1305 * unmap_kernel_range_noflush - unmap kernel VM area
1306 * @addr: start of the VM area to unmap
1307 * @size: size of the VM area to unmap
1309 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
1315 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1325 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1326 * @addr: start of the VM area to unmap
1327 * @size: size of the VM area to unmap
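The map_kernel_range_noflush() / unmap_kernel_range_noflush() / unmap_kernel_range() entries above are the low-level primitives underneath vmap(): they install or remove page-table entries for an address range the caller already owns, leaving cache and TLB maintenance to the caller in the *_noflush variants. A hedged, core-kernel-only sketch (these helpers are not exported to modules); the address, size, page array and function name are assumptions.

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

/* Sketch: populate a kernel virtual range obtained elsewhere (e.g. via
 * get_vm_area()) with 'pages', then unmap it again with full flushing. */
static int map_then_unmap(unsigned long addr, unsigned long size,
			  struct page **pages)
{
	int ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);

	if (ret < 0)
		return ret;
	flush_cache_vmap(addr, addr + size);	/* caller's job, per the kerneldoc */
	/* ... the range is now usable ... */
	unmap_kernel_range(addr, size);		/* flushes cache and TLB itself */
	return 0;
}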
1344 unsigned long addr = (unsigned long)area->addr; in map_vm_area()
1354 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, in setup_vmalloc_vm() argument
1358 vm->flags = flags; in setup_vmalloc_vm()
1359 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
1360 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm()
1361 vm->caller = caller; in setup_vmalloc_vm()
1362 va->vm = vm; in setup_vmalloc_vm()
1363 va->flags |= VM_VM_AREA; in setup_vmalloc_vm()
1367 static void clear_vm_uninitialized_flag(struct vm_struct *vm) in clear_vm_uninitialized_flag() argument
1371 * we should make sure that vm has proper values. in clear_vm_uninitialized_flag()
1375 vm->flags &= ~VM_UNINITIALIZED; in clear_vm_uninitialized_flag()
1429 * get_vm_area - reserve a contiguous kernel virtual area
1452 * find_vm_area - find a continuous kernel virtual area
1455 * Search for the kernel VM area starting at @addr, and return it.
1464 if (va && va->flags & VM_VM_AREA) in find_vm_area()
1465 return va->vm; in find_vm_area()
1471 * remove_vm_area - find and remove a continuous kernel virtual area
1474 * Search for the kernel VM area starting at @addr, and remove it.
1475 * This function returns the found VM area, but using it is NOT safe
1485 if (va && va->flags & VM_VM_AREA) { in remove_vm_area()
1486 struct vm_struct *vm = va->vm; in remove_vm_area() local
1489 va->vm = NULL; in remove_vm_area()
1490 va->flags &= ~VM_VM_AREA; in remove_vm_area()
1491 va->flags |= VM_LAZY_FREE; in remove_vm_area()
1494 kasan_free_shadow(vm); in remove_vm_area()
1497 return vm; in remove_vm_area()
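find_vm_area() and remove_vm_area() above translate a vmalloc-space pointer back into its vm_struct. A small hedged sketch of the read-only lookup; the function name is illustrative and 'addr' is assumed to be a pointer previously returned by vmalloc() or vmap().

#include <linux/printk.h>
#include <linux/vmalloc.h>

/* Sketch: report the size and flags of the area backing 'addr'. */
static void describe_area(const void *addr)
{
	struct vm_struct *area = find_vm_area(addr);

	if (!area) {
		pr_info("%p is not in a tracked vm area\n", addr);
		return;
	}
	pr_info("area %p: size=%lu flags=%#lx\n",
		area->addr, area->size, area->flags);
}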
1515 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", in __vunmap()
1520 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
1521 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
1527 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
1528 struct page *page = area->pages[i]; in __vunmap()
1534 kvfree(area->pages); in __vunmap()
1551 if (llist_add((struct llist_node *)addr, &p->list)) in __vfree_deferred()
1552 schedule_work(&p->wq); in __vfree_deferred()
1556 * vfree_atomic - release memory allocated by vmalloc()
1574 * vfree - release memory allocated by vmalloc()
1583 * conventions for vfree() arch-dependent would be a really bad idea)
1603 * vunmap - release virtual mapping obtained by vmap()
1621 * vmap - map an array of pages into virtually contiguous space
1623 * @count: number of pages to map
1624 * @flags: vm_area->flags
1647 vunmap(area->addr); in vmap()
1651 return area->addr; in vmap()
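vmap()/vunmap() above map a caller-supplied page array into one contiguous kernel virtual range; unlike vm_map_ram(), the pages stay owned by the caller. A hedged round-trip sketch; the page count and function name are illustrative.

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define NR_DEMO_PAGES 4		/* hypothetical */

/* Sketch: allocate discontiguous pages, view them contiguously, tear down. */
static int vmap_round_trip(void)
{
	struct page *pages[NR_DEMO_PAGES];
	void *virt;
	int i, ret = -ENOMEM;

	for (i = 0; i < NR_DEMO_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	virt = vmap(pages, NR_DEMO_PAGES, VM_MAP, PAGE_KERNEL);
	if (virt) {
		memset(virt, 0, NR_DEMO_PAGES * PAGE_SIZE);
		vunmap(virt);	/* removes the mapping, does not free the pages */
		ret = 0;
	}
out_free:
	while (i--)
		__free_page(pages[i]);
	return ret;
}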
1675 PAGE_KERNEL, node, area->caller); in __vmalloc_area_node()
1681 remove_vm_area(area->addr); in __vmalloc_area_node()
1686 area->pages = pages; in __vmalloc_area_node()
1687 area->nr_pages = nr_pages; in __vmalloc_area_node()
1689 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
1699 area->nr_pages = i; in __vmalloc_area_node()
1702 area->pages[i] = page; in __vmalloc_area_node()
1709 return area->addr; in __vmalloc_area_node()
1714 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
1715 vfree(area->addr); in __vmalloc_area_node()
1720 * __vmalloc_node_range - allocate virtually contiguous memory
1723 * @start: vm area range start
1724 * @end: vm area range end
1727 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
1732 * allocator with @gfp_mask flags. Map them into contiguous
1758 * First make sure the mappings are removed from all page-tables in __vmalloc_node_range()
1781 * __vmalloc_node - allocate virtually contiguous memory
1790 * allocator with @gfp_mask flags. Map them into contiguous
1793 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
1830 * vmalloc - allocate virtually contiguous memory
1833 * allocator and map them into contiguous kernel virtual space.
1846 * vzalloc - allocate virtually contiguous memory with zero fill
1849 * allocator and map them into contiguous kernel virtual space.
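The vmalloc()/vzalloc() summaries above, together with vfree() and vfree_atomic() earlier in the listing, form the basic allocate/free pairing for large, virtually contiguous buffers. A trivial hedged sketch; the helper name is illustrative.

#include <linux/vmalloc.h>

/* Sketch: a large, zeroed, virtually contiguous table. vzalloc() may
 * sleep, so this must be called from process context. */
static void *alloc_big_table(size_t size)
{
	return vzalloc(size);
}

/* Release with vfree(); prefer vfree_atomic() only when the final free
 * can happen in a context that must not sleep. */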
1863 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1880 area->flags |= VM_USERMAP; in vmalloc_user()
1887 * vmalloc_node - allocate memory on a specific node
1892 * allocator and map them into contiguous kernel virtual space.
1905 * vzalloc_node - allocate memory on a specific node with zero fill
1910 * allocator and map them into contiguous kernel virtual space.
1924 * vmalloc_exec - allocate virtually contiguous, executable memory
1927 * Kernel-internal function to allocate enough pages to cover @size
1928 * the page level allocator and map them into contiguous and
1954 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
1958 * page level allocator and map them into contiguous kernel virtual space.
1968 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
1983 area->flags |= VM_USERMAP; in vmalloc_32_user()
2003 length = PAGE_SIZE - offset; in aligned_vread()
2019 void *map = kmap_atomic(p); in aligned_vread() local
2020 memcpy(buf, map + offset, length); in aligned_vread()
2021 kunmap_atomic(map); in aligned_vread()
2028 count -= length; in aligned_vread()
2042 length = PAGE_SIZE - offset; in aligned_vwrite()
2058 void *map = kmap_atomic(p); in aligned_vwrite() local
2059 memcpy(map + offset, buf, length); in aligned_vwrite()
2060 kunmap_atomic(map); in aligned_vwrite()
2065 count -= length; in aligned_vwrite()
2071 * vread() - read vmalloc area in a safe way.
2073 * @addr: vm address.
2083 * proper area of @buf. If there are memory holes, they'll be zero-filled.
2099 struct vm_struct *vm; in vread() local
2106 count = -(unsigned long) addr; in vread()
2113 if (!(va->flags & VM_VM_AREA)) in vread()
2116 vm = va->vm; in vread()
2117 vaddr = (char *) vm->addr; in vread()
2118 if (addr >= vaddr + get_vm_area_size(vm)) in vread()
2126 count--; in vread()
2128 n = vaddr + get_vm_area_size(vm) - addr; in vread()
2131 if (!(vm->flags & VM_IOREMAP)) in vread()
2137 count -= n; in vread()
2144 /* zero-fill memory holes */ in vread()
2146 memset(buf, 0, buflen - (buf - buf_start)); in vread()
2152 * vwrite() - write vmalloc area in a safe way.
2154 * @addr: vm address.
2180 struct vm_struct *vm; in vwrite() local
2187 count = -(unsigned long) addr; in vwrite()
2195 if (!(va->flags & VM_VM_AREA)) in vwrite()
2198 vm = va->vm; in vwrite()
2199 vaddr = (char *) vm->addr; in vwrite()
2200 if (addr >= vaddr + get_vm_area_size(vm)) in vwrite()
2207 count--; in vwrite()
2209 n = vaddr + get_vm_area_size(vm) - addr; in vwrite()
2212 if (!(vm->flags & VM_IOREMAP)) { in vwrite()
2218 count -= n; in vwrite()
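vread()/vwrite() above copy data through vmalloc space while tolerating unmapped holes; vread() is what /proc/kcore uses, and holes come back zero-filled per the kerneldoc at source line 2083. A hedged sketch of the read side; the function name and buffers are assumptions.

#include <linux/vmalloc.h>

/* Sketch: snapshot 'len' bytes starting at a vmalloc-space address into a
 * regular kernel buffer. Returns the number of bytes placed in 'dst'
 * (holes zero-filled), or 0 if the range overlaps no vmalloc area. */
static long snapshot_vmalloc_range(char *dst, char *vmalloc_addr,
				   unsigned long len)
{
	return vread(dst, vmalloc_addr, len);
}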
2228 * remap_vmalloc_range_partial - map vmalloc pages to userspace
2233 * @size: size of map area
2235 * Returns: 0 for success, -Exxx on failure
2253 return -EINVAL; in remap_vmalloc_range_partial()
2258 return -EINVAL; in remap_vmalloc_range_partial()
2262 return -EINVAL; in remap_vmalloc_range_partial()
2264 if (!(area->flags & VM_USERMAP)) in remap_vmalloc_range_partial()
2265 return -EINVAL; in remap_vmalloc_range_partial()
2269 return -EINVAL; in remap_vmalloc_range_partial()
2282 size -= PAGE_SIZE; in remap_vmalloc_range_partial()
2285 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in remap_vmalloc_range_partial()
2292 * remap_vmalloc_range - map vmalloc pages to userspace
2293 * @vma: vma to cover (map full range of vma)
2295 * @pgoff: number of pages into addr before first page to map
2297 * Returns: 0 for success, -Exxx on failure
2308 return remap_vmalloc_range_partial(vma, vma->vm_start, in remap_vmalloc_range()
2310 vma->vm_end - vma->vm_start); in remap_vmalloc_range()
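vmalloc_user() (source line 1863 above) and remap_vmalloc_range() are meant to be used together: the buffer is allocated zeroed with VM_USERMAP set, then exposed to userspace from a driver's mmap handler. A hedged sketch; 'demo_buf', the handler name and the allocation site are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: demo_buf stands in for a per-device buffer set up earlier,
 * e.g. demo_buf = vmalloc_user(DEMO_BUF_SIZE); */
static void *demo_buf;

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* remap_vmalloc_range() rejects areas without VM_USERMAP, which
	 * vmalloc_user() sets; pgoff 0 maps from the start of the buffer. */
	return remap_vmalloc_range(vma, demo_buf, 0);
}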
2319 * mappings are identical in all page-tables in the system.
2341 * alloc_vm_area - allocate a range of kernel address space
2348 * allocates pagetables to map that range. No actual mappings
2351 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
2352 * allocated for the VM area are returned.
2367 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in alloc_vm_area()
2380 ret = remove_vm_area(area->addr); in free_vm_area()
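alloc_vm_area() above reserves a chunk of kernel address space and allocates the page tables covering it without installing any mappings; its main in-tree user is the Xen grant-table code, which fills the returned PTEs itself. A hedged reserve/release sketch; the wrapper name is illustrative.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: reserve a page-table-backed range in vmalloc space. If 'ptes'
 * is non-NULL it receives pointers to the init_mm PTEs for the area, so
 * the caller can populate them directly. */
static struct vm_struct *reserve_hole(size_t size, pte_t **ptes)
{
	return alloc_vm_area(size, ptes);
}

/* Tear down with free_vm_area(area) once any entries the caller installed
 * in those PTEs have been cleared. */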
2393 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
2402 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
2413 if (end < va->va_end) in pvm_find_next_prev()
2414 n = n->rb_left; in pvm_find_next_prev()
2415 else if (end > va->va_end) in pvm_find_next_prev()
2416 n = n->rb_right; in pvm_find_next_prev()
2424 if (va->va_end > end) { in pvm_find_next_prev()
2426 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); in pvm_find_next_prev()
2429 *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); in pvm_find_next_prev()
2435 * pvm_determine_end - find the highest aligned address between two vmap_areas
2454 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); in pvm_determine_end()
2458 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); in pvm_determine_end()
2462 while (*pprev && (*pprev)->va_end > addr) { in pvm_determine_end()
2464 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); in pvm_determine_end()
2471 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
2480 * Percpu allocator wants to use congruent vm areas so that it can
2488 * does everything top-down and scans areas from the end looking for
2499 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); in pcpu_get_vm_areas()
2529 if (vmalloc_end - vmalloc_start < last_end) { in pcpu_get_vm_areas()
2548 /* start scanning - we scan from the top, begin with the last area */ in pcpu_get_vm_areas()
2554 base = vmalloc_end - last_end; in pcpu_get_vm_areas()
2557 base = pvm_determine_end(&next, &prev, align) - end; in pcpu_get_vm_areas()
2560 BUG_ON(next && next->va_end <= base + end); in pcpu_get_vm_areas()
2561 BUG_ON(prev && prev->va_end > base + end); in pcpu_get_vm_areas()
2581 if (next && next->va_start < base + end) { in pcpu_get_vm_areas()
2582 base = pvm_determine_end(&next, &prev, align) - end; in pcpu_get_vm_areas()
2592 if (prev && prev->va_end > base + start) { in pcpu_get_vm_areas()
2594 prev = node_to_va(rb_prev(&next->rb_node)); in pcpu_get_vm_areas()
2595 base = pvm_determine_end(&next, &prev, align) - end; in pcpu_get_vm_areas()
2604 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
2616 va->va_start = base + offsets[area]; in pcpu_get_vm_areas()
2617 va->va_end = va->va_start + sizes[area]; in pcpu_get_vm_areas()
2625 /* insert all vm's */ in pcpu_get_vm_areas()
2645 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
2683 unsigned int nr, *counters = m->private; in show_numa_info()
2688 if (v->flags & VM_UNINITIALIZED) in show_numa_info()
2695 for (nr = 0; nr < v->nr_pages; nr++) in show_numa_info()
2696 counters[page_to_nid(v->pages[nr])]++; in show_numa_info()
2715 if (!(va->flags & VM_VM_AREA)) { in s_show()
2716 seq_printf(m, "0x%pK-0x%pK %7ld %s\n", in s_show()
2717 (void *)va->va_start, (void *)va->va_end, in s_show()
2718 va->va_end - va->va_start, in s_show()
2719 va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram"); in s_show()
2724 v = va->vm; in s_show()
2726 seq_printf(m, "0x%pK-0x%pK %7ld", in s_show()
2727 v->addr, v->addr + v->size, v->size); in s_show()
2729 if (v->caller) in s_show()
2730 seq_printf(m, " %pS", v->caller); in s_show()
2732 if (v->nr_pages) in s_show()
2733 seq_printf(m, " pages=%d", v->nr_pages); in s_show()
2735 if (v->phys_addr) in s_show()
2736 seq_printf(m, " phys=%pa", &v->phys_addr); in s_show()
2738 if (v->flags & VM_IOREMAP) in s_show()
2741 if (v->flags & VM_ALLOC) in s_show()
2744 if (v->flags & VM_MAP) in s_show()
2747 if (v->flags & VM_USERMAP) in s_show()
2750 if (is_vmalloc_addr(v->pages)) in s_show()