Lines Matching refs:area
2061 struct vm_struct *area; in __get_vm_area_node() local
2073 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
2074 if (unlikely(!area)) in __get_vm_area_node()
2082 kfree(area); in __get_vm_area_node()
2088 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2090 return area; in __get_vm_area_node()
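
In __get_vm_area_node() (2061-2090) the vm_struct is allocated with kzalloc_node(), freed again if no vmap_area can be reserved, and otherwise bound to its vmap_area by setup_vmalloc_vm() before being returned. A hedged sketch of reaching this path through the get_vm_area_caller() wrapper, as vmap() does at 2398 below; the function name and flag choice are illustrative only:

#include <linux/vmalloc.h>

/*
 * Sketch: reserve vmalloc address space without backing pages.  The
 * caller pointer recorded by setup_vmalloc_vm() is what later shows up
 * in /proc/vmallocinfo for this range.
 */
static struct vm_struct *reserve_mapping_window(unsigned long size)
{
	return get_vm_area_caller(size, VM_MAP,
				  __builtin_return_address(0));
}
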
2181 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
2186 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2187 if (page_address(area->pages[i])) in set_area_direct_map()
2188 set_direct_map(area->pages[i]); in set_area_direct_map()
2192 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) in vm_remove_mappings() argument
2195 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; in vm_remove_mappings()
2199 remove_vm_area(area->addr); in vm_remove_mappings()
2219 for (i = 0; i < area->nr_pages; i++) { in vm_remove_mappings()
2220 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings()
2233 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_remove_mappings()
2235 set_area_direct_map(area, set_direct_map_default_noflush); in vm_remove_mappings()
2240 struct vm_struct *area; in __vunmap() local
2249 area = find_vm_area(addr); in __vunmap()
2250 if (unlikely(!area)) { in __vunmap()
2256 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2257 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2259 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); in __vunmap()
2261 vm_remove_mappings(area, deallocate_pages); in __vunmap()
2266 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
2267 struct page *page = area->pages[i]; in __vunmap()
2272 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2274 kvfree(area->pages); in __vunmap()
2277 kfree(area); in __vunmap()
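
__vunmap() (2240-2277) runs the teardown: find_vm_area() recovers the vm_struct, the debug and KASAN hooks run, vm_remove_mappings() drops the kernel mapping, and with deallocate_pages set every page in area->pages is released before the pages array and the vm_struct itself are freed. A minimal sketch of the two exported entry points that land here; the wrapper name is made up:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Sketch: vfree() reaches __vunmap() with deallocate_pages == 1, so the
 * backing pages counted in area->nr_pages go back to the page allocator;
 * vunmap() passes 0 and only removes the kernel mapping.
 */
static void release_buffers(void *vmalloc_buf, void *vmap_addr)
{
	vfree(vmalloc_buf);	/* unmap and free area->pages */
	vunmap(vmap_addr);	/* unmap only; caller still owns the pages */
}
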
2389 struct vm_struct *area; in vmap() local
2398 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
2399 if (!area) in vmap()
2402 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), in vmap()
2404 vunmap(area->addr); in vmap()
2409 area->pages = pages; in vmap()
2410 area->nr_pages = count; in vmap()
2412 return area->addr; in vmap()
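
vmap() (2389-2412) obtains the area with get_vm_area_caller(), maps the caller's pages with map_kernel_range(), and records them in area->pages / area->nr_pages so that vunmap() can find them later. A hedged sketch of the API from the caller's side; the demo function is illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/*
 * Sketch: make two scattered pages virtually contiguous.  vmap() stores
 * the page array and count in area->pages / area->nr_pages (2409-2410
 * above); vunmap() only removes the mapping, so the pages are still
 * owned, and freed, by this caller.
 */
static int vmap_two_pages_demo(void)
{
	struct page *pages[2] = { NULL, NULL };
	void *vaddr = NULL;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (pages[0] && pages[1]) {
		vaddr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
		if (vaddr) {
			memset(vaddr, 0, 2 * PAGE_SIZE);  /* one flat buffer */
			vunmap(vaddr);
		}
	}

	if (pages[1])
		__free_page(pages[1]);
	if (pages[0])
		__free_page(pages[0]);
	return vaddr ? 0 : -ENOMEM;
}
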
2445 struct vm_struct *area; in vmap_pfn() local
2447 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, in vmap_pfn()
2449 if (!area) in vmap_pfn()
2451 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2453 free_vm_area(area); in vmap_pfn()
2456 return area->addr; in vmap_pfn()
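
vmap_pfn() (2445-2456) reserves the area with VM_IOREMAP, fills it via apply_to_page_range(), and releases it with free_vm_area() on failure. A hedged sketch of a caller, assuming a kernel with CONFIG_VMAP_PFN; the write-combining protection and helper name are illustrative:

#include <linux/vmalloc.h>

/*
 * Sketch: map `count` raw page-frame numbers (memory that may have no
 * struct page, e.g. a device aperture) into one kernel virtual range.
 * The mapping is torn down again with vunmap().
 */
static void *map_raw_pfns(unsigned long *pfns, unsigned int count)
{
	return vmap_pfn(pfns, count, pgprot_writecombine(PAGE_KERNEL));
}
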
2461 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
2465 unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; in __vmalloc_area_node()
2478 area->caller); in __vmalloc_area_node()
2484 remove_vm_area(area->addr); in __vmalloc_area_node()
2485 kfree(area); in __vmalloc_area_node()
2489 area->pages = pages; in __vmalloc_area_node()
2490 area->nr_pages = nr_pages; in __vmalloc_area_node()
2492 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
2502 area->nr_pages = i; in __vmalloc_area_node()
2503 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2506 area->pages[i] = page; in __vmalloc_area_node()
2510 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2512 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), in __vmalloc_area_node()
2516 return area->addr; in __vmalloc_area_node()
2521 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
2522 __vfree(area->addr); in __vmalloc_area_node()
2549 struct vm_struct *area; in __vmalloc_node_range() local
2557 area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED | in __vmalloc_node_range()
2559 if (!area) in __vmalloc_node_range()
2562 addr = __vmalloc_area_node(area, gfp_mask, prot, node); in __vmalloc_node_range()
2571 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
2573 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range()
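
__vmalloc_node_range() (2549-2573) is the common back end: the area is created with VM_ALLOC | VM_UNINITIALIZED, populated by __vmalloc_area_node(), has its VM_UNINITIALIZED flag cleared once the pages are in place, and is finally reported to kmemleak. A hedged sketch of the kind of call an arch-level user might make; the bounds, protection and flags are illustrative, not taken from any particular architecture:

#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>

/*
 * Sketch: allocate mapped storage inside the regular vmalloc window.
 * VM_FLUSH_RESET_PERMS asks vm_remove_mappings() (2192-2235 above) to
 * reset the direct-map permissions when the area is eventually freed.
 */
static void *alloc_code_area(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE,
				    VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC,
				    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
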
3020 struct vm_struct *area; in remap_vmalloc_range_partial() local
3032 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
3033 if (!area) in remap_vmalloc_range_partial()
3036 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3040 end_index > get_vm_area_size(area)) in remap_vmalloc_range_partial()
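
remap_vmalloc_range_partial() (3020-3040) refuses to map an area into userspace unless it carries VM_USERMAP or VM_DMA_COHERENT and the requested window fits within get_vm_area_size(area). A hedged sketch of the usual route through its remap_vmalloc_range() wrapper from a driver mmap handler, assuming the buffer was allocated with vmalloc_user() so the VM_USERMAP check passes; the names are illustrative:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Sketch: a buffer userspace can mmap().  vmalloc_user() sets VM_USERMAP,
 * which is exactly the flag checked on line 3036 above.
 */
static void *shared_buf;	/* = vmalloc_user(...) at init time */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* thin wrapper around remap_vmalloc_range_partial() */
	return remap_vmalloc_range(vma, shared_buf, vma->vm_pgoff);
}
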
3086 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
3089 ret = remove_vm_area(area->addr); in free_vm_area()
3090 BUG_ON(ret != area); in free_vm_area()
3091 kfree(area); in free_vm_area()
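
free_vm_area() (3086-3091) is the counterpart of get_vm_area(): remove_vm_area() must hand back the very same vm_struct (hence the BUG_ON), which is then kfree()d. A minimal sketch of the reserve/release pairing; what gets mapped into the hole in between is up to the caller:

#include <linux/vmalloc.h>

/*
 * Sketch: reserve a hole in vmalloc space, let some other agent install
 * translations into it, then give the address range back.
 */
static int reserve_and_release(void)
{
	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_IOREMAP);

	if (!area)
		return -ENOMEM;

	/* ... populate area->addr by other means ... */

	free_vm_area(area);	/* remove_vm_area() + kfree(), 3089-3091 */
	return 0;
}
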
3194 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
3201 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3202 start = offsets[area]; in pcpu_get_vm_areas()
3203 end = start + sizes[area]; in pcpu_get_vm_areas()
3206 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
3207 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
3211 last_area = area; in pcpu_get_vm_areas()
3213 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
3232 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3233 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
3234 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
3235 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
3242 area = term_area = last_area; in pcpu_get_vm_areas()
3243 start = offsets[area]; in pcpu_get_vm_areas()
3244 end = start + sizes[area]; in pcpu_get_vm_areas()
3269 term_area = area; in pcpu_get_vm_areas()
3279 term_area = area; in pcpu_get_vm_areas()
3287 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3288 if (area == term_area) in pcpu_get_vm_areas()
3291 start = offsets[area]; in pcpu_get_vm_areas()
3292 end = start + sizes[area]; in pcpu_get_vm_areas()
3297 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3300 start = base + offsets[area]; in pcpu_get_vm_areas()
3301 size = sizes[area]; in pcpu_get_vm_areas()
3318 va = vas[area]; in pcpu_get_vm_areas()
3326 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3327 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
3330 kasan_unpoison_vmalloc((void *)vas[area]->va_start, in pcpu_get_vm_areas()
3331 sizes[area]); in pcpu_get_vm_areas()
3336 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3337 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); in pcpu_get_vm_areas()
3339 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
3354 while (area--) { in pcpu_get_vm_areas()
3355 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3356 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3357 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
3362 vas[area] = NULL; in pcpu_get_vm_areas()
3372 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3373 if (vas[area]) in pcpu_get_vm_areas()
3376 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
3378 if (!vas[area]) in pcpu_get_vm_areas()
3386 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3387 if (vas[area]) in pcpu_get_vm_areas()
3388 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
3390 kfree(vms[area]); in pcpu_get_vm_areas()
3404 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3405 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3406 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3407 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
3412 vas[area] = NULL; in pcpu_get_vm_areas()
3413 kfree(vms[area]); in pcpu_get_vm_areas()
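
In pcpu_get_vm_areas() (3194-3413) `area` is a loop index over the requested percpu groups: the offsets and sizes are validated, a vmap_area and vm_struct are allocated per group, the free vmalloc space is walked for one base address that satisfies every offset, and each area is then inserted and set up, with a recovery path that merges partially placed areas back into the free tree. Its only in-tree caller is the percpu allocator; a hedged sketch of that contract, with made-up group offsets and sizes:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Sketch: ask for two congruent areas in vmalloc space, one per percpu
 * group, sharing a single base so that each unit address is base + offset.
 * Mirrors how pcpu_create_chunk() in mm/percpu-vm.c uses the API; the
 * offsets and sizes below are illustrative only.
 */
static int grab_percpu_style_areas(void)
{
	static const unsigned long offsets[] = { 0, 4 * PAGE_SIZE };
	static const size_t sizes[] = { 2 * PAGE_SIZE, 2 * PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PAGE_SIZE);
	if (!vms)
		return -ENOMEM;

	/* ... map pages into vms[0]->addr and vms[1]->addr ... */

	pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
	return 0;
}
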