/mm/
D | vmalloc.c |
    2005 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) in map_vm_area() argument
    2007 unsigned long addr = (unsigned long)area->addr; in map_vm_area()
    2008 unsigned long end = addr + get_vm_area_size(area); in map_vm_area()
    2045 struct vm_struct *area; in __get_vm_area_node() local
    2056 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
    2057 if (unlikely(!area)) in __get_vm_area_node()
    2065 kfree(area); in __get_vm_area_node()
    2069 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
    2071 return area; in __get_vm_area_node()
    2170 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
    [all …]
D | zsmalloc.c |
    1117 static inline int __zs_cpu_up(struct mapping_area *area) in __zs_cpu_up() argument
    1123 if (area->vm) in __zs_cpu_up()
    1125 area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL); in __zs_cpu_up()
    1126 if (!area->vm) in __zs_cpu_up()
    1131 static inline void __zs_cpu_down(struct mapping_area *area) in __zs_cpu_down() argument
    1133 if (area->vm) in __zs_cpu_down()
    1134 free_vm_area(area->vm); in __zs_cpu_down()
    1135 area->vm = NULL; in __zs_cpu_down()
    1138 static inline void *__zs_map_object(struct mapping_area *area, in __zs_map_object() argument
    1141 BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); in __zs_map_object()
    [all …]
D | kmemleak.c |
    467 struct kmemleak_scan_area *area; in free_object_rcu() local
    475 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
    476 hlist_del(&area->node); in free_object_rcu()
    477 kmem_cache_free(scan_area_cache, area); in free_object_rcu()
    789 struct kmemleak_scan_area *area = NULL; in add_scan_area() local
    799 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); in add_scan_area()
    802 if (!area) { in add_scan_area()
    813 kmem_cache_free(scan_area_cache, area); in add_scan_area()
    817 INIT_HLIST_NODE(&area->node); in add_scan_area()
    818 area->start = ptr; in add_scan_area()
    [all …]
D | shuffle.c |
    186 void add_to_free_area_random(struct page *page, struct free_area *area, in add_to_free_area_random() argument
    202 add_to_free_area(page, area, migratetype); in add_to_free_area_random()
    204 add_to_free_area_tail(page, area, migratetype); in add_to_free_area_random()
D | page_alloc.c |
    2029 int low, int high, struct free_area *area, in expand() argument
    2035 area--; in expand()
    2049 add_to_free_area(&page[size], area, migratetype); in expand()
    2198 struct free_area *area; in __rmqueue_smallest() local
    2203 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
    2204 page = get_page_from_free_area(area, migratetype); in __rmqueue_smallest()
    2207 del_page_from_free_area(page, area); in __rmqueue_smallest()
    2208 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
    2398 struct free_area *area; in steal_suitable_fallback() local
    2469 area = &zone->free_area[current_order]; in steal_suitable_fallback()
    [all …]
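The page_alloc.c hits above are the buddy allocator's allocation path: __rmqueue_smallest() walks the zone's free_area lists upward from the requested order until it finds a non-empty one, pulls a block off that list, and expand() returns the unused halves to the lower-order lists. As a rough illustration only (the names and data structures below are invented for this sketch and are not the kernel code), a self-contained toy model of that split:

    /*
     * Toy model of buddy-style allocation: counts of free blocks per order,
     * an "__rmqueue_smallest"-like upward search, and an "expand"-like split.
     * Illustrative only; not the kernel implementation.
     */
    #include <stdio.h>

    #define MAX_ORDER 4

    static unsigned int nr_free[MAX_ORDER + 1];   /* free blocks per order */

    /* Split a 2^high-page block down to 2^low pages, freeing each unused buddy. */
    static void expand_toy(int low, int high)
    {
        while (high > low) {
            high--;
            nr_free[high]++;            /* unused buddy half goes back on a list */
            printf("freed one order-%d buddy\n", high);
        }
    }

    /* Satisfy an order-`order` request from the smallest sufficient block. */
    static int rmqueue_smallest_toy(int order)
    {
        for (int cur = order; cur <= MAX_ORDER; cur++) {
            if (!nr_free[cur])
                continue;               /* nothing free at this order, try bigger */
            nr_free[cur]--;             /* take the block off its free list */
            expand_toy(order, cur);     /* hand back the pieces we don't need */
            return cur;                 /* order the block was actually found at */
        }
        return -1;                      /* out of memory in the toy model */
    }

    int main(void)
    {
        nr_free[MAX_ORDER] = 1;         /* start with one maximal block */
        int found = rmqueue_smallest_toy(0);
        printf("order-0 request satisfied from an order-%d block\n", found);
        return 0;
    }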
D | compaction.c |
    1319 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages() local
    1325 if (!area->nr_free) in fast_isolate_freepages()
    1329 freelist = &area->free_list[MIGRATE_MOVABLE]; in fast_isolate_freepages()
    1676 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock() local
    1681 if (!area->nr_free) in fast_find_migrateblock()
    1685 freelist = &area->free_list[MIGRATE_MOVABLE]; in fast_find_migrateblock()
    1890 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished() local
    1894 if (!free_area_empty(area, migratetype)) in __compact_finished()
    1900 !free_area_empty(area, MIGRATE_CMA)) in __compact_finished()
    1907 if (find_suitable_fallback(area, order, migratetype, in __compact_finished()
D | mmap.c |
    1151 struct vm_area_struct *area, *next; in vma_merge() local
    1165 area = next; in vma_merge()
    1166 if (area && area->vm_end == end) /* cases 6, 7, 8 */ in vma_merge()
    1171 VM_WARN_ON(area && end > area->vm_end); in vma_merge()
    1221 err = __vma_adjust(area, addr, next->vm_end, in vma_merge()
    1228 area = next; in vma_merge()
    1232 khugepaged_enter_vma_merge(area, vm_flags); in vma_merge()
    1233 return area; in vma_merge()
D | Kconfig |
    300 until a program has madvised that an area is MADV_MERGEABLE, and
    482 pagecache and when a subsystem requests for contiguous area, the
    508 used as device private area. This parameter sets the maximum
    509 number of CMA area in the system.
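The Kconfig hit at line 300 is from the KSM help text: pages are only considered for merging after the owning process has marked the area with madvise(MADV_MERGEABLE). A minimal userspace sketch of that opt-in, using the standard Linux madvise(2) call (error handling kept to a minimum; requires a kernel built with CONFIG_KSM):

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 16 * 4096;

        /* anonymous mapping that may end up full of identical pages */
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(buf, 0x5a, len);   /* identical content across all pages */

        /* tell KSM this area is a merge candidate */
        if (madvise(buf, len, MADV_MERGEABLE))
            perror("madvise(MADV_MERGEABLE)");

        munmap(buf, len);
        return 0;
    }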
D | vmstat.c |
    1387 struct free_area *area; in pagetypeinfo_showfree_print() local
    1391 area = &(zone->free_area[order]); in pagetypeinfo_showfree_print()
    1393 list_for_each(curr, &area->free_list[mtype]) { in pagetypeinfo_showfree_print()
D | internal.h | 223 int find_suitable_fallback(struct free_area *area, unsigned int order,
D | nommu.c | 376 void free_vm_area(struct vm_struct *area) in free_vm_area() argument