Search results for "area" under /mm/ (line number, matching source line, and enclosing function where indexed):
vmalloc.c
    2069  struct vm_struct *area;  in __get_vm_area_node() local
    2081  area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);  in __get_vm_area_node()
    2082  if (unlikely(!area))  in __get_vm_area_node()
    2090  kfree(area);  in __get_vm_area_node()
    2096  setup_vmalloc_vm(area, va, flags, caller);  in __get_vm_area_node()
    2098  return area;  in __get_vm_area_node()
    2191  static inline void set_area_direct_map(const struct vm_struct *area,  in set_area_direct_map() argument
    2196  for (i = 0; i < area->nr_pages; i++)  in set_area_direct_map()
    2197  if (page_address(area->pages[i]))  in set_area_direct_map()
    2198  set_direct_map(area->pages[i]);  in set_area_direct_map()
    [all …]
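The vmalloc.c hits above are the internals behind every vmalloc() allocation: __get_vm_area_node() allocates the vm_struct and setup_vmalloc_vm() records it. A minimal sketch of the caller-facing side (not from the file above; find_vm_area() may not be exported to modules on every kernel, so treat this as illustrative):

#include <linux/module.h>
#include <linux/vmalloc.h>

static void *demo_buf;

static int __init area_demo_init(void)
{
	struct vm_struct *area;

	demo_buf = vmalloc(16 * PAGE_SIZE);	/* backed by a vm_struct from __get_vm_area_node() */
	if (!demo_buf)
		return -ENOMEM;

	area = find_vm_area(demo_buf);		/* look up what setup_vmalloc_vm() recorded */
	if (area)
		pr_info("area_demo: addr=%px size=%lu nr_pages=%u\n",
			area->addr, area->size, area->nr_pages);
	return 0;
}

static void __exit area_demo_exit(void)
{
	vfree(demo_buf);			/* frees the pages and the vm_struct */
}

module_init(area_demo_init);
module_exit(area_demo_exit);
MODULE_LICENSE("GPL");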
kmemleak.c
    468  struct kmemleak_scan_area *area;  in free_object_rcu() local
    476  hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {  in free_object_rcu()
    477  hlist_del(&area->node);  in free_object_rcu()
    478  kmem_cache_free(scan_area_cache, area);  in free_object_rcu()
    790  struct kmemleak_scan_area *area = NULL;  in add_scan_area() local
    805  area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));  in add_scan_area()
    808  if (!area) {  in add_scan_area()
    819  kmem_cache_free(scan_area_cache, area);  in add_scan_area()
    823  INIT_HLIST_NODE(&area->node);  in add_scan_area()
    824  area->start = ptr;  in add_scan_area()
    [all …]
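The scan areas built in add_scan_area() above are driven from the public kmemleak_scan_area() hook in <linux/kmemleak.h>. A hedged sketch of a caller that limits leak scanning of a large object to its pointer-bearing header (the struct and function names here are made up):

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

struct big_obj {
	struct list_head link;		/* the only pointers kmemleak needs to see */
	u8 payload[4096];		/* bulk data, no pointers */
};

static struct big_obj *big_obj_create(void)
{
	struct big_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/*
	 * Restrict scanning of this (already tracked) kmalloc object to its
	 * header; add_scan_area() records the range on the tracked object.
	 */
	kmemleak_scan_area(obj, offsetof(struct big_obj, payload), GFP_KERNEL);
	return obj;
}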
zsmalloc.c
    1112  static inline int __zs_cpu_up(struct mapping_area *area)  in __zs_cpu_up() argument
    1118  if (area->vm_buf)  in __zs_cpu_up()
    1120  area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);  in __zs_cpu_up()
    1121  if (!area->vm_buf)  in __zs_cpu_up()
    1126  static inline void __zs_cpu_down(struct mapping_area *area)  in __zs_cpu_down() argument
    1128  kfree(area->vm_buf);  in __zs_cpu_down()
    1129  area->vm_buf = NULL;  in __zs_cpu_down()
    1132  static void *__zs_map_object(struct mapping_area *area,  in __zs_map_object() argument
    1137  char *buf = area->vm_buf;  in __zs_map_object()
    1143  if (area->vm_mm == ZS_MM_WO)  in __zs_map_object()
    [all …]
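The mapping_area code above backs zs_map_object()/zs_unmap_object() for objects that straddle pages. A sketch of the client side (zram is the in-tree user); the exact zs_* signatures drift slightly between kernel versions, so treat this as illustrative:

#include <linux/zsmalloc.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/errno.h>

static int zs_demo(void)
{
	struct zs_pool *pool = zs_create_pool("zs_demo");
	unsigned long handle;
	void *dst;

	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, 1000, GFP_KERNEL);
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/*
	 * ZS_MM_WO: __zs_map_object() skips copying the old contents into the
	 * per-CPU vm_buf (the "if (area->vm_mm == ZS_MM_WO)" test above).
	 * The mapping is per-CPU, so do not sleep before unmapping.
	 */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memset(dst, 0xa5, 1000);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}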
ioremap.c
    257  struct vm_struct *area;  in ioremap_prot() local
    269  area = get_vm_area_caller(size, VM_IOREMAP,  in ioremap_prot()
    271  if (!area)  in ioremap_prot()
    273  vaddr = (unsigned long)area->addr;  in ioremap_prot()
    276  free_vm_area(area);  in ioremap_prot()
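ioremap_prot() above grabs a VM_IOREMAP area with get_vm_area_caller() and releases it with free_vm_area() on failure. From a driver's point of view that is just ioremap()/iounmap(); a sketch with a made-up base address and register offset:

#include <linux/io.h>
#include <linux/errno.h>

#define DEMO_MMIO_BASE	0xfed00000UL	/* hypothetical device base */
#define DEMO_MMIO_SIZE	0x1000
#define DEMO_REG_CTRL	0x10		/* hypothetical register offset */

static int demo_map_regs(void)
{
	void __iomem *regs = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + DEMO_REG_CTRL);	/* MMIO access through the VM_IOREMAP mapping */
	iounmap(regs);				/* tears the vm area back down */
	return 0;
}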
page_reporting.c
    113  struct free_area *area = &zone->free_area[order];  in page_reporting_cycle() local
    114  struct list_head *list = &area->free_list[mt];  in page_reporting_cycle()
    143  budget = DIV_ROUND_UP(area->nr_free, PAGE_REPORTING_CAPACITY * 16);  in page_reporting_cycle()
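Line 143 above sizes the reporting budget from the order's nr_free. A worked example, assuming PAGE_REPORTING_CAPACITY is 32 as in mm/page_reporting.h at the time of this listing:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

#define PAGE_REPORTING_CAPACITY	32	/* assumed value, see mm/page_reporting.h */

static unsigned long reporting_budget(unsigned long nr_free)
{
	/* e.g. nr_free = 10000  ->  DIV_ROUND_UP(10000, 512) = 20 passes */
	return DIV_ROUND_UP(nr_free, PAGE_REPORTING_CAPACITY * 16);
}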
compaction.c
    1371  struct free_area *area = &cc->zone->free_area[order];  in fast_isolate_freepages() local
    1378  if (!area->nr_free)  in fast_isolate_freepages()
    1382  freelist = &area->free_list[MIGRATE_MOVABLE];  in fast_isolate_freepages()
    1738  struct free_area *area = &cc->zone->free_area[order];  in fast_find_migrateblock() local
    1743  if (!area->nr_free)  in fast_find_migrateblock()
    1747  freelist = &area->free_list[MIGRATE_MOVABLE];  in fast_find_migrateblock()
    2048  struct free_area *area = &cc->zone->free_area[order];  in __compact_finished() local
    2052  if (!free_area_empty(area, migratetype))  in __compact_finished()
    2058  !free_area_empty(area, MIGRATE_CMA))  in __compact_finished()
    2065  if (find_suitable_fallback(area, order, migratetype,  in __compact_finished()
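fast_isolate_freepages() and fast_find_migrateblock() above share one pattern: check area->nr_free, then walk one order's MIGRATE_MOVABLE free list under zone->lock. A sketch of that walk, using the field names of the kernel version in this listing (free pages are threaded through page->lru here):

#include <linux/mmzone.h>
#include <linux/mm.h>

static unsigned long count_movable_free(struct zone *zone, unsigned int order)
{
	struct free_area *area = &zone->free_area[order];
	struct list_head *freelist = &area->free_list[MIGRATE_MOVABLE];
	unsigned long flags, nr = 0;
	struct page *page;

	if (!area->nr_free)			/* cheap check before taking the lock */
		return 0;

	spin_lock_irqsave(&zone->lock, flags);
	list_for_each_entry(page, freelist, lru)
		nr++;
	spin_unlock_irqrestore(&zone->lock, flags);

	return nr;
}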
page_alloc.c
    953  struct free_area *area = &zone->free_area[order];  in add_to_free_list() local
    955  list_add(&page->lru, &area->free_list[migratetype]);  in add_to_free_list()
    956  area->nr_free++;  in add_to_free_list()
    963  struct free_area *area = &zone->free_area[order];  in add_to_free_list_tail() local
    965  list_add_tail(&page->lru, &area->free_list[migratetype]);  in add_to_free_list_tail()
    966  area->nr_free++;  in add_to_free_list_tail()
    977  struct free_area *area = &zone->free_area[order];  in move_to_free_list() local
    979  list_move_tail(&page->lru, &area->free_list[migratetype]);  in move_to_free_list()
    2424  struct free_area *area;  in __rmqueue_smallest() local
    2429  area = &(zone->free_area[current_order]);  in __rmqueue_smallest()
    [all …]
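add_to_free_list*() and __rmqueue_smallest() above are the two ends of the buddy allocator's free lists. The ordinary way to exercise them is alloc_pages()/__free_pages(); a small sketch:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>

static int buddy_demo(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 3);	/* 8 contiguous pages via __rmqueue_smallest() */
	void *va;

	if (!page)
		return -ENOMEM;

	va = page_address(page);	/* GFP_KERNEL pages sit in the direct map */
	memset(va, 0, 8 * PAGE_SIZE);

	__free_pages(page, 3);		/* back onto zone->free_area[3] via add_to_free_list*() */
	return 0;
}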
mmap.c
    1225  struct vm_area_struct *area, *next;  in __vma_merge() local
    1236  area = next;  in __vma_merge()
    1237  if (area && area->vm_end == end) /* cases 6, 7, 8 */  in __vma_merge()
    1242  VM_WARN_ON(area && end > area->vm_end);  in __vma_merge()
    1294  err = __vma_adjust(area, addr, next->vm_end,  in __vma_merge()
    1302  area = next;  in __vma_merge()
    1306  khugepaged_enter_vma_merge(area, vm_flags);  in __vma_merge()
    1307  return area;  in __vma_merge()
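__vma_merge() above decides whether an adjusted range can be merged with its neighbours (the "cases 6, 7, 8" comment). The effect is visible from userspace: splitting a mapping with mprotect() and then restoring the protection collapses the VMAs back into one (compare /proc/self/maps before and after). A minimal sketch:

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 4 * (size_t)page;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return EXIT_FAILURE;

	/* Split: the middle pages become a separate read-only VMA (3 VMAs total). */
	mprotect(p + page, 2 * page, PROT_READ);

	/* Restore: flags match on both sides again, so the kernel merges back to 1 VMA. */
	mprotect(p + page, 2 * page, PROT_READ | PROT_WRITE);

	munmap(p, len);
	return EXIT_SUCCESS;
}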
vmstat.c
    1449  struct free_area *area;  in pagetypeinfo_showfree_print() local
    1453  area = &(zone->free_area[order]);  in pagetypeinfo_showfree_print()
    1455  list_for_each(curr, &area->free_list[mtype]) {  in pagetypeinfo_showfree_print()
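pagetypeinfo_showfree_print() above walks each order's free lists to produce /proc/pagetypeinfo (readable only by root on recent kernels). A trivial userspace dump of that file:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/pagetypeinfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* free counts per zone, order and migratetype */
	fclose(f);
	return 0;
}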
Kconfig
    318  until a program has madvised that an area is MADV_MERGEABLE, and
    496  pagecache and when a subsystem requests for contiguous area, the
    530  used as device private area. This parameter sets the maximum
    531  number of CMA area in the system.
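Line 318 above is from the KSM help text: pages only become candidates for merging after a program opts in with madvise(MADV_MERGEABLE), and only while /sys/kernel/mm/ksm/run is 1. A userspace sketch of that opt-in (madvise() fails if CONFIG_KSM is not enabled):

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * (size_t)sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	memset(p, 0x42, len);			/* identical pages: ideal KSM candidates */
	if (madvise(p, len, MADV_MERGEABLE))	/* opt this range into KSM scanning */
		return 1;

	pause();	/* let ksmd run; watch /sys/kernel/mm/ksm/pages_sharing grow */
	return 0;
}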
internal.h
    282  int find_suitable_fallback(struct free_area *area, unsigned int order,
nommu.c
    357  void free_vm_area(struct vm_struct *area)  in free_vm_area() argument