Searched refs:area (Results 1 – 9 of 9) sorted by relevance
/mm/
vmalloc.c:
  1273  int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)  in map_vm_area() argument
  1275  unsigned long addr = (unsigned long)area->addr;  in map_vm_area()
  1276  unsigned long end = addr + get_vm_area_size(area);  in map_vm_area()
  1314  struct vm_struct *area;  in __get_vm_area_node() local
  1324  area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);  in __get_vm_area_node()
  1325  if (unlikely(!area))  in __get_vm_area_node()
  1335  kfree(area);  in __get_vm_area_node()
  1339  setup_vmalloc_vm(area, va, flags, caller);  in __get_vm_area_node()
  1341  return area;  in __get_vm_area_node()
  1434  struct vm_struct *area;  in __vunmap() local
  [all …]
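
The vmalloc.c hits above are the core of non-contiguous kernel mappings: __get_vm_area_node() reserves a stretch of kernel virtual address space and map_vm_area() wires individual pages into it. A minimal sketch of that reserve-then-map pattern using the public helpers, assuming a kernel of this vintage (map_vm_area() taking struct page **, as in the hit at line 1273, was removed in later kernels); the helper name is illustrative:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Illustrative helper: give two discontiguous pages one contiguous
     * kernel virtual address, the same sequence vmalloc() performs via
     * __get_vm_area_node() + map_vm_area(). */
    static void *map_two_pages(struct page *pages[2])
    {
            struct vm_struct *area;

            area = get_vm_area(2 * PAGE_SIZE, VM_MAP);  /* reserve VA only */
            if (!area)
                    return NULL;

            if (map_vm_area(area, PAGE_KERNEL, pages)) {
                    vunmap(area->addr);  /* drops mapping and vm_struct */
                    return NULL;
            }
            return area->addr;  /* undo later with vunmap() */
    }
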
zsmalloc.c:
  1026  static inline int __zs_cpu_up(struct mapping_area *area)  in __zs_cpu_up() argument
  1032  if (area->vm)  in __zs_cpu_up()
  1034  area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);  in __zs_cpu_up()
  1035  if (!area->vm)  in __zs_cpu_up()
  1040  static inline void __zs_cpu_down(struct mapping_area *area)  in __zs_cpu_down() argument
  1042  if (area->vm)  in __zs_cpu_down()
  1043  free_vm_area(area->vm);  in __zs_cpu_down()
  1044  area->vm = NULL;  in __zs_cpu_down()
  1047  static inline void *__zs_map_object(struct mapping_area *area,  in __zs_map_object() argument
  1050  BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));  in __zs_map_object()
  [all …]
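
The zsmalloc.c hits show a classic consumer of that API: each CPU keeps a persistent two-page virtual window (alloc_vm_area()) and temporarily maps the pair of pages backing an object that straddles a page boundary. A condensed sketch of that lifecycle, with struct and function names shortened for illustration:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    struct mapping_window {
            struct vm_struct *vm;  /* two-page VA window, no backing pages yet */
    };

    static int window_init(struct mapping_window *w)
    {
            if (w->vm)
                    return 0;
            w->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);  /* reserve VA once */
            return w->vm ? 0 : -ENOMEM;
    }

    /* Map the two pages of a boundary-straddling object into the window. */
    static void *window_map(struct mapping_window *w, struct page *pages[2])
    {
            BUG_ON(map_vm_area(w->vm, PAGE_KERNEL, pages));
            return w->vm->addr;
    }

    static void window_fini(struct mapping_window *w)
    {
            if (w->vm)
                    free_vm_area(w->vm);
            w->vm = NULL;
    }
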
kmemleak.c:
  443  struct kmemleak_scan_area *area;  in free_object_rcu() local
  451  hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {  in free_object_rcu()
  452  hlist_del(&area->node);  in free_object_rcu()
  453  kmem_cache_free(scan_area_cache, area);  in free_object_rcu()
  743  struct kmemleak_scan_area *area;  in add_scan_area() local
  752  area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));  in add_scan_area()
  753  if (!area) {  in add_scan_area()
  764  kmem_cache_free(scan_area_cache, area);  in add_scan_area()
  768  INIT_HLIST_NODE(&area->node);  in add_scan_area()
  769  area->start = ptr;  in add_scan_area()
  [all …]
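
The kmemleak.c hits are plain hlist bookkeeping: add_scan_area() allocates a kmemleak_scan_area from a slab cache and chains it onto the tracked object, and free_object_rcu() tears the chain down with the _safe iterator because each entry is freed mid-walk. A sketch of that teardown idiom (simplified struct; kfree() standing in for the dedicated kmem_cache):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct scan_area {
            struct hlist_node node;
            unsigned long start;
            size_t size;
    };

    static void free_scan_areas(struct hlist_head *area_list)
    {
            struct scan_area *area;
            struct hlist_node *tmp;

            /* The _safe variant caches the next node in 'tmp' so the
             * current entry can be unlinked and freed inside the loop. */
            hlist_for_each_entry_safe(area, tmp, area_list, node) {
                    hlist_del(&area->node);
                    kfree(area);
            }
    }
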
page_alloc.c:
  884  int low, int high, struct free_area *area,  in expand() argument
  890  area--;  in expand()
  912  list_add(&page[size].lru, &area->free_list[migratetype]);  in expand()
  913  area->nr_free++;  in expand()
  979  struct free_area *area;  in __rmqueue_smallest() local
  984  area = &(zone->free_area[current_order]);  in __rmqueue_smallest()
  985  if (list_empty(&area->free_list[migratetype]))  in __rmqueue_smallest()
  988  page = list_entry(area->free_list[migratetype].next,  in __rmqueue_smallest()
  992  area->nr_free--;  in __rmqueue_smallest()
  993  expand(zone, page, order, current_order, area, migratetype);  in __rmqueue_smallest()
  [all …]
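
The page_alloc.c hits are the buddy allocator's split path: __rmqueue_smallest() walks free_area[] upward until some order has a free block, and expand() halves that block back down, parking each unused upper half on the next-lower free list (area--; size >>= 1; list_add(&page[size].lru, ...)). A standalone userspace sketch of just the index arithmetic, runnable as plain C:

    #include <stdio.h>

    int main(void)
    {
            unsigned int low = 0, high = 3;    /* want order 0, found an order-3 block */
            unsigned long base = 0;            /* page-frame offset of the block */
            unsigned long size = 1UL << high;  /* pages in the block */

            while (high > low) {
                    high--;
                    size >>= 1;
                    /* The upper half would be list_add()ed to
                     * free_area[high].free_list[migratetype]. */
                    printf("free buddy at pfn %lu, order %u\n", base + size, high);
            }
            printf("allocated pfn %lu, order %u\n", base, low);
            return 0;
    }
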
compaction.c:
  1125  struct free_area *area = &zone->free_area[order];  in compact_finished() local
  1128  if (!list_empty(&area->free_list[migratetype]))  in compact_finished()
  1132  if (order >= pageblock_order && area->nr_free)  in compact_finished()
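
compaction.c's hits are the success test in compact_finished(): compaction may stop as soon as a free block of at least the requested order exists, either on the caller's own migratetype list or, at pageblock size and above, on any list. A sketch of that check in the same style (struct zone, MAX_ORDER and pageblock_order as in the kernel headers):

    #include <linux/mmzone.h>

    static bool suitable_block_free(struct zone *zone, unsigned int req_order,
                                    int migratetype)
    {
            unsigned int order;

            for (order = req_order; order < MAX_ORDER; order++) {
                    struct free_area *area = &zone->free_area[order];

                    /* A block on our own migratetype list is directly usable. */
                    if (!list_empty(&area->free_list[migratetype]))
                            return true;
                    /* A free block of a whole pageblock or more can be
                     * claimed regardless of its current migratetype. */
                    if (order >= pageblock_order && area->nr_free)
                            return true;
            }
            return false;
    }
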
vmstat.c:
  943  struct free_area *area;  in pagetypeinfo_showfree_print() local
  946  area = &(zone->free_area[order]);  in pagetypeinfo_showfree_print()
  948  list_for_each(curr, &area->free_list[mtype])  in pagetypeinfo_showfree_print()
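
vmstat.c reads the same free_area arrays without modifying them: pagetypeinfo_showfree_print(), which backs /proc/pagetypeinfo, simply walks each free_list and counts entries. The counting idiom in isolation, as a sketch:

    #include <linux/mmzone.h>

    static unsigned long count_free_blocks(struct zone *zone,
                                           unsigned int order, int mtype)
    {
            struct free_area *area = &zone->free_area[order];
            struct list_head *curr;
            unsigned long freecount = 0;

            /* nr_free is kept per order only, not per migratetype,
             * so a per-type count requires walking the list. */
            list_for_each(curr, &area->free_list[mtype])
                    freecount++;
            return freecount;
    }
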
Kconfig:
  340  until a program has madvised that an area is MADV_MERGEABLE, and
  505  pagecache and when a subsystem requests for contiguous area, the
  525  used as device private area. This parameter sets the maximum
  526  number of CMA area in the system.
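
The first Kconfig hit (the KSM help text) stresses that merging is opt-in per mapping: nothing is scanned until userspace marks the area MADV_MERGEABLE. A minimal userspace illustration, assuming a kernel built with CONFIG_KSM:

    #define _DEFAULT_SOURCE
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return 1;
            /* Opt this range into KSM scanning; identical pages within
             * it become merge candidates for ksmd. */
            if (madvise(buf, len, MADV_MERGEABLE))
                    return 1;
            return 0;
    }
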
mmap.c:
  1080  struct vm_area_struct *area, *next;  in vma_merge() local
  1094  area = next;  in vma_merge()
  1137  err = vma_adjust(area, addr, next->vm_end,  in vma_merge()
  1141  khugepaged_enter_vma_merge(area, vm_flags);  in vma_merge()
  1142  return area;  in vma_merge()
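
mmap.c's vma_merge() is the other sense of "area" in mm/: a userspace vm_area_struct. The hits show the merge path absorbing a neighbour via vma_adjust() and then notifying khugepaged. The eligibility test reduces to adjacency plus identical attributes; a deliberately simplified predicate (the real code also compares the backing file, page offset, anon_vma and mempolicy):

    #include <linux/mm_types.h>

    static bool can_extend_prev(struct vm_area_struct *prev,
                                unsigned long addr, unsigned long vm_flags)
    {
            /* The new range must start exactly where 'prev' ends and
             * carry the same protection/behaviour flags. */
            return prev && prev->vm_end == addr && prev->vm_flags == vm_flags;
    }
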
nommu.c:
  490  void free_vm_area(struct vm_struct *area)  in free_vm_area() argument