Lines matching references to "area" in mm/vmalloc.c
2005 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) in map_vm_area() argument
2007 unsigned long addr = (unsigned long)area->addr; in map_vm_area()
2008 unsigned long end = addr + get_vm_area_size(area); in map_vm_area()
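
map_vm_area() wires a caller-supplied page array into the page tables of a previously reserved vm_struct. Below is a minimal, hedged sketch of that internal pairing with get_vm_area(), assuming kernel context; vmap() further down is the exported wrapper around the same two steps, and map_two_pages is a made-up helper name.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Illustrative sketch only: reserve a virtual range, then map two
 * caller-owned pages into it with map_vm_area().
 */
static void *map_two_pages(struct page **pages)
{
        struct vm_struct *area;

        area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
        if (!area)
                return NULL;

        if (map_vm_area(area, PAGE_KERNEL, pages)) {
                vunmap(area->addr);     /* also releases the vm_struct */
                return NULL;
        }
        return area->addr;
}
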
2045 struct vm_struct *area; in __get_vm_area_node() local
2056 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
2057 if (unlikely(!area)) in __get_vm_area_node()
2065 kfree(area); in __get_vm_area_node()
2069 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2071 return area; in __get_vm_area_node()
2170 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
2175 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2176 if (page_address(area->pages[i])) in set_area_direct_map()
2177 set_direct_map(area->pages[i]); in set_area_direct_map()
2181 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) in vm_remove_mappings() argument
2184 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; in vm_remove_mappings()
2188 remove_vm_area(area->addr); in vm_remove_mappings()
2208 for (i = 0; i < area->nr_pages; i++) { in vm_remove_mappings()
2209 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings()
2222 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_remove_mappings()
2224 set_area_direct_map(area, set_direct_map_default_noflush); in vm_remove_mappings()
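
vm_remove_mappings() and set_area_direct_map() only do extra work when the area carries VM_FLUSH_RESET_PERMS: the direct-map aliases of the pages are invalidated and reset before the pages go back to the allocator. A hedged sketch of the caller-side pattern that relies on this (module/JIT-style users); alloc_ro_buffer is a made-up name.

#include <linux/set_memory.h>
#include <linux/vmalloc.h>

/*
 * Sketch: flag the allocation so that a later vfree() resets and
 * flushes the direct-map permissions that set_memory_ro() changed.
 */
static void *alloc_ro_buffer(size_t size)
{
        void *buf = vmalloc(size);

        if (!buf)
                return NULL;

        set_vm_flush_reset_perms(buf);  /* sets VM_FLUSH_RESET_PERMS */
        set_memory_ro((unsigned long)buf, PAGE_ALIGN(size) >> PAGE_SHIFT);
        return buf;
        /* vfree(buf) then takes the set_area_direct_map() path above */
}
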
2229 struct vm_struct *area; in __vunmap() local
2238 area = find_vm_area(addr); in __vunmap()
2239 if (unlikely(!area)) { in __vunmap()
2245 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2246 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2248 vm_remove_mappings(area, deallocate_pages); in __vunmap()
2253 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
2254 struct page *page = area->pages[i]; in __vunmap()
2259 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2261 kvfree(area->pages); in __vunmap()
2264 kfree(area); in __vunmap()
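
__vunmap() is the common tail of vfree() and vunmap(): it looks up the area, removes the mappings, and, when deallocate_pages is set, returns the pages and the pages array. Since vfree() may sleep, callers that cannot sleep use the deferred variant; a brief hedged sketch with drop_buffer as a made-up helper.

#include <linux/types.h>
#include <linux/vmalloc.h>

static void drop_buffer(void *buf, bool can_sleep)
{
        if (can_sleep)
                vfree(buf);             /* frees pages via __vunmap() */
        else
                vfree_atomic(buf);      /* defers the free to a workqueue */
}
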
2372 struct vm_struct *area; in vmap() local
2381 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
2382 if (!area) in vmap()
2385 if (map_vm_area(area, prot, pages)) { in vmap()
2386 vunmap(area->addr); in vmap()
2390 return area->addr; in vmap()
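
vmap() is the exported form of the get_vm_area_caller() + map_vm_area() pair above. A hedged usage sketch that allocates scattered pages and maps them contiguously; alloc_and_vmap is a made-up name.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Sketch: back one contiguous kernel mapping with nr individually
 * allocated pages; unmap with vunmap() and free the pages afterwards.
 */
static void *alloc_and_vmap(unsigned int nr, struct page ***pagesp)
{
        struct page **pages;
        unsigned int i;
        void *vaddr;

        pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        for (i = 0; i < nr; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto err;
        }

        vaddr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
        if (!vaddr)
                goto err;

        *pagesp = pages;
        return vaddr;
err:
        while (i--)
                __free_page(pages[i]);
        kfree(pages);
        return NULL;
}
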
2397 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
2408 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; in __vmalloc_area_node()
2414 PAGE_KERNEL, node, area->caller); in __vmalloc_area_node()
2420 remove_vm_area(area->addr); in __vmalloc_area_node()
2421 kfree(area); in __vmalloc_area_node()
2425 area->pages = pages; in __vmalloc_area_node()
2426 area->nr_pages = nr_pages; in __vmalloc_area_node()
2428 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
2438 area->nr_pages = i; in __vmalloc_area_node()
2439 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2442 area->pages[i] = page; in __vmalloc_area_node()
2446 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2448 if (map_vm_area(area, prot, pages)) in __vmalloc_area_node()
2450 return area->addr; in __vmalloc_area_node()
2455 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
2456 __vfree(area->addr); in __vmalloc_area_node()
2483 struct vm_struct *area; in __vmalloc_node_range() local
2491 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | in __vmalloc_node_range()
2493 if (!area) in __vmalloc_node_range()
2496 addr = __vmalloc_area_node(area, gfp_mask, prot, node); in __vmalloc_node_range()
2505 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
2507 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range()
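
__vmalloc_node_range() is the backend that vmalloc(), vzalloc(), vmalloc_user() and friends funnel into: reserve the range (__get_vm_area_node), populate it (__vmalloc_area_node), then clear VM_UNINITIALIZED and inform kmemleak. From a caller's point of view the whole story is the sketch below; alloc_big_table/free_big_table are made-up names.

#include <linux/overflow.h>
#include <linux/vmalloc.h>

static int *alloc_big_table(size_t nents)
{
        /* ends up in __vmalloc_node_range() above */
        return vmalloc(array_size(nents, sizeof(int)));
}

static void free_big_table(int *tbl)
{
        vfree(tbl);     /* torn down by __vunmap() above */
}
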
2993 struct vm_struct *area; in remap_vmalloc_range_partial() local
3000 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
3001 if (!area) in remap_vmalloc_range_partial()
3004 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3007 if (kaddr + size > area->addr + get_vm_area_size(area)) in remap_vmalloc_range_partial()
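
remap_vmalloc_range_partial() refuses areas that lack VM_USERMAP or VM_DMA_COHERENT, so the buffer being exported must come from vmalloc_user() (or a DMA-coherent allocator). A hedged sketch of the usual driver ->mmap pattern through the remap_vmalloc_range() wrapper; my_buf and my_mmap are made-up names.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_buf;    /* allocated elsewhere with vmalloc_user() */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* size and offset checks happen in remap_vmalloc_range_partial() */
        return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}
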
3091 struct vm_struct *area; in alloc_vm_area() local
3093 area = get_vm_area_caller(size, VM_IOREMAP, in alloc_vm_area()
3095 if (area == NULL) in alloc_vm_area()
3102 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in alloc_vm_area()
3104 free_vm_area(area); in alloc_vm_area()
3108 return area; in alloc_vm_area()
3112 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
3115 ret = remove_vm_area(area->addr); in free_vm_area()
3116 BUG_ON(ret != area); in free_vm_area()
3117 kfree(area); in free_vm_area()
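
alloc_vm_area() reserves a VM_IOREMAP-flagged range and, when asked, hands back pointers to its page-table entries so the caller can fill them in later (Xen grant mappings are the classic user); free_vm_area() is the strict inverse and BUG()s if the area was never registered. A hedged sketch assuming this era's two-argument prototype, alloc_vm_area(size_t, pte_t **); reserve_one_page/release_area are made-up names.

#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct vm_struct *reserve_one_page(pte_t **ptep)
{
        /* one page of virtual space; PTE pointer kept for later use */
        return alloc_vm_area(PAGE_SIZE, ptep);
}

static void release_area(struct vm_struct *area)
{
        free_vm_area(area);
}
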
3220 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
3227 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3228 start = offsets[area]; in pcpu_get_vm_areas()
3229 end = start + sizes[area]; in pcpu_get_vm_areas()
3232 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
3233 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
3237 last_area = area; in pcpu_get_vm_areas()
3239 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
3258 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3259 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
3260 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
3261 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
3268 area = term_area = last_area; in pcpu_get_vm_areas()
3269 start = offsets[area]; in pcpu_get_vm_areas()
3270 end = start + sizes[area]; in pcpu_get_vm_areas()
3295 term_area = area; in pcpu_get_vm_areas()
3305 term_area = area; in pcpu_get_vm_areas()
3313 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3314 if (area == term_area) in pcpu_get_vm_areas()
3317 start = offsets[area]; in pcpu_get_vm_areas()
3318 end = start + sizes[area]; in pcpu_get_vm_areas()
3323 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3326 start = base + offsets[area]; in pcpu_get_vm_areas()
3327 size = sizes[area]; in pcpu_get_vm_areas()
3344 va = vas[area]; in pcpu_get_vm_areas()
3354 for (area = 0; area < nr_vms; area++) in pcpu_get_vm_areas()
3355 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
3363 while (area--) { in pcpu_get_vm_areas()
3364 __free_vmap_area(vas[area]); in pcpu_get_vm_areas()
3365 vas[area] = NULL; in pcpu_get_vm_areas()
3375 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3376 if (vas[area]) in pcpu_get_vm_areas()
3379 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
3381 if (!vas[area]) in pcpu_get_vm_areas()
3389 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3390 if (vas[area]) in pcpu_get_vm_areas()
3391 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
3393 kfree(vms[area]); in pcpu_get_vm_areas()
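
pcpu_get_vm_areas() congruently allocates nr_vms areas at fixed offsets from a common base address that it picks itself; its in-tree caller is the percpu allocator (mm/percpu-vm.c), and the matching release helper is pcpu_free_vm_areas(). A heavily hedged, purely illustrative sketch: every value below is made up, and each offset and size must be aligned to the requested align or the BUG_ON()s above fire.

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>

static struct vm_struct **grab_two_group_areas(void)
{
        /* two 64K slots, 1M apart, both 64K-aligned as required */
        static const unsigned long offsets[] = { 0, SZ_1M };
        static const size_t sizes[] = { SZ_64K, SZ_64K };

        return pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), SZ_64K);
}
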