Searched refs:free_pages (Results 1 – 8 of 8) sorted by relevance
/mm/

Kconfig.debug
    17  Unmap pages from the kernel linear mapping after free_pages().
    29  fill the pages with poison patterns after free_pages() and verify
    69  Fill the pages with poison patterns after free_pages() and verify
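
Matches 29 and 69 describe the page-poisoning debug options: pages are filled with a poison pattern when they are freed and the pattern is re-checked when they are handed out again, so stray writes to free memory show up as pattern corruption. A minimal userspace sketch of that idea follows; the names poison_fill()/poison_verify() and the 0xaa pattern are illustrative stand-ins, not the kernel's implementation.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define POISON_BYTE 0xaa   /* illustrative pattern, not the kernel's poison value */
    #define PAGE_BYTES  4096

    /* "Free": fill the page with the poison pattern. */
    static void poison_fill(void *page)
    {
        memset(page, POISON_BYTE, PAGE_BYTES);
    }

    /* "Alloc": verify nothing wrote to the page while it was free. */
    static int poison_verify(const void *page)
    {
        const uint8_t *p = page;

        for (size_t i = 0; i < PAGE_BYTES; i++)
            if (p[i] != POISON_BYTE) {
                fprintf(stderr, "poison overwritten at offset %zu\n", i);
                return -1;
            }
        return 0;
    }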

vmstat.c
    1007  unsigned long free_pages;    member
    1026  info->free_pages = 0;    in fill_contig_page_info()
    1038  info->free_pages += blocks << order;    in fill_contig_page_info()
    1074  …return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_…    in __fragmentation_index()
    2011  if (info->free_pages == 0)    in unusable_free_index()
    2021  …return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pa…    in unusable_free_index()
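
fill_contig_page_info() tallies free_pages across the buddy free lists, and __fragmentation_index() (line 1074) turns the tally into a 0–1000 score: low values mean an allocation failure would be due to lack of memory, high values mean the pages exist but are fragmented into blocks too small for the request. The sketch below mirrors that arithmetic in plain C; the struct fields follow the snippets above, and the truncated divisor is assumed here to be free_blocks_total.

    #include <stdio.h>

    struct contig_page_info {               /* mirrors the fields referenced above (sketch) */
        unsigned long free_pages;           /* total free pages, all orders */
        unsigned long free_blocks_total;    /* total free blocks, all orders */
        unsigned long free_blocks_suitable; /* free blocks large enough for the request */
    };

    static long fragmentation_index(unsigned int order, const struct contig_page_info *info)
    {
        unsigned long requested = 1UL << order;

        if (!info->free_blocks_total)
            return 0;                       /* no free memory at all */
        if (info->free_blocks_suitable)
            return -1000;                   /* a request of this order would succeed */

        /* Same shape as the line-1074 snippet, scaled to three decimal places. */
        return 1000 - (1000 + info->free_pages * 1000UL / requested) / info->free_blocks_total;
    }

    int main(void)
    {
        /* 512 free pages scattered over 400 small blocks, none order-4 or larger. */
        struct contig_page_info info = { 512, 400, 0 };

        printf("order-4 fragmentation index: %ld\n", fragmentation_index(4, &info)); /* prints 918 */
        return 0;
    }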

page_alloc.c
    2399  int free_pages, movable_pages, alike_pages;    in steal_suitable_fallback()  local
    2430  free_pages = move_freepages_block(zone, page, start_type,    in steal_suitable_fallback()
    2449  - (free_pages + movable_pages);    in steal_suitable_fallback()
    2455  if (!free_pages)    in steal_suitable_fallback()
    2462  if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||    in steal_suitable_fallback()
    3404  long free_pages)    in __zone_watermark_ok()  argument
    3411  free_pages -= (1 << order) - 1;    in __zone_watermark_ok()
    3422  free_pages -= z->nr_reserved_highatomic;    in __zone_watermark_ok()
    3440  free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);    in __zone_watermark_ok()
    3448  if (free_pages <= min + z->lowmem_reserve[classzone_idx])    in __zone_watermark_ok()
    [all …]
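
The __zone_watermark_ok() matches show the shape of the watermark test: first discount pages this request cannot actually use (the rest of the high-order block it would consume, the highatomic reserve, CMA pages for a non-movable request), then require what remains to exceed the watermark plus the lowmem reserve for the allocation's class zone. A simplified stand-alone sketch, with stand-in fields for the real struct zone members:

    #include <stdbool.h>

    struct zone_sketch {                    /* stand-ins for the struct zone fields used above */
        long nr_reserved_highatomic;        /* pages held back for high-order atomic allocations */
        long nr_free_cma_pages;             /* free CMA pages, unusable for a non-movable request */
        long lowmem_reserve[4];             /* per-class-zone reserve */
    };

    static bool zone_watermark_ok_sketch(const struct zone_sketch *z, unsigned int order,
                                         long mark, int classzone_idx, long free_pages)
    {
        long min = mark;

        free_pages -= (1 << order) - 1;          /* pages this request itself would consume (all but one) */
        free_pages -= z->nr_reserved_highatomic; /* don't dip into the highatomic reserve */
        free_pages -= z->nr_free_cma_pages;      /* assume the caller cannot use CMA pages */

        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
            return false;
        return true;
    }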

vmscan.c
    1127  LIST_HEAD(free_pages);    in shrink_page_list()
    1493  list_add(&page->lru, &free_pages);    in shrink_page_list()
    1526  mem_cgroup_uncharge_list(&free_pages);    in shrink_page_list()
    1528  free_unref_page_list(&free_pages);    in shrink_page_list()
    3133  unsigned long free_pages = 0;    in allow_direct_reclaim()  local
    3149  free_pages += zone_page_state(zone, NR_FREE_PAGES);    in allow_direct_reclaim()
    3156  wmark_ok = free_pages > pfmemalloc_reserve / 2;    in allow_direct_reclaim()
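
The allow_direct_reclaim() matches (lines 3133–3156) sum NR_FREE_PAGES over the node's zones and only let a task proceed with direct reclaim unthrottled when that sum exceeds half of the pfmemalloc reserve. A hedged sketch of the test, with stand-in fields for zone_page_state() and the per-zone min watermark:

    #include <stdbool.h>
    #include <stddef.h>

    struct zone_sketch {
        unsigned long nr_free_pages;   /* stand-in for zone_page_state(zone, NR_FREE_PAGES) */
        unsigned long min_wmark;       /* stand-in for the zone's min watermark */
    };

    /* wmark_ok as in the line-3156 snippet: free pages must beat half the reserve. */
    static bool allow_direct_reclaim_sketch(const struct zone_sketch *zones, size_t nr_zones)
    {
        unsigned long pfmemalloc_reserve = 0, free_pages = 0;

        for (size_t i = 0; i < nr_zones; i++) {
            pfmemalloc_reserve += zones[i].min_wmark;
            free_pages += zones[i].nr_free_pages;
        }

        return free_pages > pfmemalloc_reserve / 2;
    }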

mmu_gather.c
    61  free_pages((unsigned long)batch, 0);    in tlb_batch_list_free()
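
This match (and the sparse.c and slub.c ones below) calls the page allocator API itself: free_pages(addr, order) releases pages that were handed out as a kernel virtual address, typically from __get_free_pages(). A minimal kernel-context sketch of the pairing; demo_free_pages() is a hypothetical helper, not code from the files above.

    #include <linux/errno.h>
    #include <linux/gfp.h>

    /* Hypothetical helper: allocate and release one order-0 page through the
     * address-based API, the same free_pages(addr, order) call matched above. */
    static int demo_free_pages(void)
    {
        unsigned long addr = __get_free_pages(GFP_KERNEL, 0);

        if (!addr)
            return -ENOMEM;

        /* ... use the page ... */

        free_pages(addr, 0);
        return 0;
    }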

zsmalloc.c
    2195  LIST_HEAD(free_pages);    in async_free_zspage()
    2205  list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);    in async_free_zspage()
    2210  list_for_each_entry_safe(zspage, tmp, &free_pages, list) {    in async_free_zspage()
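
async_free_zspage() shows a common draining pattern: splice the shared list onto a local free_pages list, then walk it with the _safe iterator so entries can be unlinked and freed while iterating. A kernel-context sketch of that pattern with a hypothetical element type; struct item and drain_and_free() are illustrative, not zsmalloc code.

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct item {                      /* hypothetical element type */
        struct list_head list;
    };

    static void drain_and_free(struct list_head *shared, spinlock_t *lock)
    {
        LIST_HEAD(to_free);
        struct item *it, *tmp;

        /* Move everything off the shared list while holding its lock ... */
        spin_lock(lock);
        list_splice_init(shared, &to_free);
        spin_unlock(lock);

        /* ... then free at leisure with the iterator that tolerates removal. */
        list_for_each_entry_safe(it, tmp, &to_free, list) {
            list_del(&it->list);
            kfree(it);
        }
    }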

sparse.c
    702  free_pages((unsigned long)memmap,    in depopulate_section_memmap()

slub.c
    4496  free_pages((unsigned long)t->loc,    in free_loc_track()