Lines matching refs:page — cross-reference listing of mm/page_alloc.c (an Android common kernel tree, judging by the trace_android_vh_* hooks). Each entry gives the source line number, the matched line, and the enclosing function; `argument`/`local` marks how `page` is bound at that line.
213 static inline int get_pcppage_migratetype(struct page *page) in get_pcppage_migratetype() argument
215 return page->index; in get_pcppage_migratetype()
218 static inline void set_pcppage_migratetype(struct page *page, int migratetype) in set_pcppage_migratetype() argument
220 page->index = migratetype; in set_pcppage_migratetype()
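The two helpers above cache a page's migratetype in the otherwise-unused page->index field while the page sits on a per-CPU (pcp) free list, so the hot free/alloc path avoids a pageblock-bitmap lookup. A minimal userspace sketch of the idea; the struct and enum are illustrative stand-ins, not the kernel's definitions:

#include <assert.h>

/* Hypothetical stand-ins for the kernel types. */
enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

struct page {
	unsigned long index;	/* reused as a migratetype cache */
};

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;	/* cache it while on a pcp list */
}

static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;		/* read the cached value back */
}

int main(void)
{
	struct page p;

	set_pcppage_migratetype(&p, MIGRATE_MOVABLE);
	assert(get_pcppage_migratetype(&p) == MIGRATE_MOVABLE);
	return 0;
}

Reusing page->index is safe here because a page queued on a pcp list is not part of any file mapping, so the field is free for allocator bookkeeping.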
265 static void __free_pages_ok(struct page *page, unsigned int order,
411 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) in should_skip_kasan_poison() argument
416 PageSkipKASanPoison(page); in should_skip_kasan_poison()
467 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) in should_skip_kasan_poison() argument
471 PageSkipKASanPoison(page); in should_skip_kasan_poison()
486 static inline unsigned long *get_pageblock_bitmap(struct page *page, in get_pageblock_bitmap() argument
492 return page_zone(page)->pageblock_flags; in get_pageblock_bitmap()
496 static inline int pfn_to_bitidx(struct page *page, unsigned long pfn) in pfn_to_bitidx() argument
501 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages); in pfn_to_bitidx()
515 unsigned long __get_pfnblock_flags_mask(struct page *page, in __get_pfnblock_flags_mask() argument
523 bitmap = get_pageblock_bitmap(page, pfn); in __get_pfnblock_flags_mask()
524 bitidx = pfn_to_bitidx(page, pfn); in __get_pfnblock_flags_mask()
532 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, in get_pfnblock_flags_mask() argument
535 return __get_pfnblock_flags_mask(page, pfn, mask); in get_pfnblock_flags_mask()
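pfn_to_bitidx() and __get_pfnblock_flags_mask() above implement the per-pageblock flag store: a few bits per pageblock packed into zone->pageblock_flags, read word-at-a-time and masked. A standalone sketch of that arithmetic, assuming 4 bits per pageblock and a pageblock order of 10 (both are config-dependent in the kernel):

#include <stdio.h>

/* Illustrative constants; the kernel derives all of these from the config. */
#define PAGEBLOCK_ORDER		10	/* pages per pageblock = 1 << 10 */
#define NR_PAGEBLOCK_BITS	4	/* flag bits stored per pageblock */
#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define MIGRATETYPE_MASK	((1UL << 3) - 1)	/* low 3 bits: migratetype */

/* Map a pfn (relative to the zone start) to its bit offset in the bitmap. */
static unsigned long pfn_to_bitidx(unsigned long pfn)
{
	return (pfn >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;
}

static unsigned long get_pfnblock_flags_mask(const unsigned long *bitmap,
					     unsigned long pfn, unsigned long mask)
{
	unsigned long bitidx = pfn_to_bitidx(pfn);
	unsigned long word = bitmap[bitidx / BITS_PER_LONG];

	return (word >> (bitidx & (BITS_PER_LONG - 1))) & mask;
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };
	unsigned long pfn = 3UL << PAGEBLOCK_ORDER;	/* a pfn in pageblock #3 */

	bitmap[0] |= 2UL << pfn_to_bitidx(pfn);		/* store migratetype 2 */
	printf("%lu\n", get_pfnblock_flags_mask(bitmap, pfn, MIGRATETYPE_MASK));
	return 0;
}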
539 int isolate_anon_lru_page(struct page *page) in isolate_anon_lru_page() argument
543 if (!PageLRU(page) || !PageAnon(page)) in isolate_anon_lru_page()
546 if (!get_page_unless_zero(page)) in isolate_anon_lru_page()
549 ret = isolate_lru_page(page); in isolate_anon_lru_page()
550 put_page(page); in isolate_anon_lru_page()
556 static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn) in get_pfnblock_migratetype() argument
558 return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); in get_pfnblock_migratetype()
568 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, in set_pfnblock_flags_mask() argument
579 bitmap = get_pageblock_bitmap(page, pfn); in set_pfnblock_flags_mask()
580 bitidx = pfn_to_bitidx(page, pfn); in set_pfnblock_flags_mask()
584 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); in set_pfnblock_flags_mask()
598 void set_pageblock_migratetype(struct page *page, int migratetype) in set_pageblock_migratetype() argument
604 set_pfnblock_flags_mask(page, (unsigned long)migratetype, in set_pageblock_migratetype()
605 page_to_pfn(page), MIGRATETYPE_MASK); in set_pageblock_migratetype()
609 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
613 unsigned long pfn = page_to_pfn(page); in page_outside_zone_boundaries()
632 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
634 if (!pfn_valid_within(page_to_pfn(page))) in page_is_consistent()
636 if (zone != page_zone(page)) in page_is_consistent()
644 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
646 if (page_outside_zone_boundaries(zone, page)) in bad_range()
648 if (!page_is_consistent(zone, page)) in bad_range()
654 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
660 static void bad_page(struct page *page, const char *reason) in bad_page() argument
687 current->comm, page_to_pfn(page)); in bad_page()
688 __dump_page(page, reason); in bad_page()
689 dump_page_owner(page); in bad_page()
695 page_mapcount_reset(page); /* remove PageBuddy */ in bad_page()
714 void free_compound_page(struct page *page) in free_compound_page() argument
716 mem_cgroup_uncharge(page); in free_compound_page()
717 __free_pages_ok(page, compound_order(page), FPI_NONE); in free_compound_page()
720 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
725 __SetPageHead(page); in prep_compound_page()
727 struct page *p = page + i; in prep_compound_page()
730 set_compound_head(p, page); in prep_compound_page()
733 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); in prep_compound_page()
734 set_compound_order(page, order); in prep_compound_page()
735 atomic_set(compound_mapcount_ptr(page), -1); in prep_compound_page()
736 if (hpage_pincount_available(page)) in prep_compound_page()
737 atomic_set(compound_pincount_ptr(page), 0); in prep_compound_page()
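prep_compound_page() above assembles a 2^order block into one compound page: the head gets PageHead plus the metadata (destructor, order, mapcount), and every tail page is pointed back at the head. A sketch of that head/tail linkage, with a deliberately simplified stand-in for struct page:

#include <assert.h>
#include <stdbool.h>

/* Simplified stand-in for struct page: just the compound linkage. */
struct page {
	bool head;			/* PageHead */
	struct page *compound_head;	/* tail pages point back at the head */
	unsigned int order;		/* stored on the head only */
};

static void prep_compound_page(struct page *page, unsigned int order)
{
	unsigned int i, nr_pages = 1u << order;

	page->head = true;
	for (i = 1; i < nr_pages; i++)
		page[i].compound_head = page;	/* link every tail to the head */
	page->order = order;
}

int main(void)
{
	struct page pages[8] = { 0 };

	prep_compound_page(pages, 3);	/* order-3 block: 1 head + 7 tails */
	assert(pages[0].head && pages[7].compound_head == &pages[0]);
	return 0;
}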
771 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
780 __SetPageGuard(page); in set_page_guard()
781 INIT_LIST_HEAD(&page->lru); in set_page_guard()
782 set_page_private(page, order); in set_page_guard()
789 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
795 __ClearPageGuard(page); in clear_page_guard()
797 set_page_private(page, 0); in clear_page_guard()
802 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
804 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
859 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
861 set_page_private(page, order); in set_buddy_order()
862 __SetPageBuddy(page); in set_buddy_order()
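set_buddy_order() records a free block's order in page_private of its first page and sets PageBuddy; del_page_from_free_list() above undoes both. A stand-in sketch with the field names simplified:

#include <assert.h>
#include <stdbool.h>

struct page {			/* illustrative subset of struct page */
	unsigned long private;
	bool buddy;
};

static void set_buddy_order(struct page *page, unsigned int order)
{
	page->private = order;	/* order is stashed in page_private */
	page->buddy = true;	/* PageBuddy marks a free buddy block */
}

static unsigned int buddy_order(const struct page *page)
{
	return page->private;
}

int main(void)
{
	struct page p = { 0 };

	set_buddy_order(&p, 5);
	assert(p.buddy && buddy_order(&p) == 5);
	return 0;
}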
878 static inline bool page_is_buddy(struct page *page, struct page *buddy, in page_is_buddy() argument
891 if (page_zone_id(page) != page_zone_id(buddy)) in page_is_buddy()
906 !capc->page && in task_capc()
911 compaction_capture(struct capture_control *capc, struct page *page, in compaction_capture() argument
931 capc->page = page; in compaction_capture()
942 compaction_capture(struct capture_control *capc, struct page *page, in compaction_capture() argument
950 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
955 list_add(&page->lru, &area->free_list[migratetype]); in add_to_free_list()
960 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
965 list_add_tail(&page->lru, &area->free_list[migratetype]); in add_to_free_list_tail()
974 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
979 list_move_tail(&page->lru, &area->free_list[migratetype]); in move_to_free_list()
982 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
986 if (page_reported(page)) in del_page_from_free_list()
987 __ClearPageReported(page); in del_page_from_free_list()
989 list_del(&page->lru); in del_page_from_free_list()
990 __ClearPageBuddy(page); in del_page_from_free_list()
991 set_page_private(page, 0); in del_page_from_free_list()
1005 struct page *page, unsigned int order) in buddy_merge_likely() argument
1007 struct page *higher_page, *higher_buddy; in buddy_merge_likely()
1017 higher_page = page + (combined_pfn - pfn); in buddy_merge_likely()
1049 static inline void __free_one_page(struct page *page, in __free_one_page() argument
1058 struct page *buddy; in __free_one_page()
1064 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); in __free_one_page()
1070 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
1071 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1075 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
1081 buddy = page + (buddy_pfn - pfn); in __free_one_page()
1085 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
1096 page = page + (combined_pfn - pfn); in __free_one_page()
1113 buddy = page + (buddy_pfn - pfn); in __free_one_page()
1126 set_buddy_order(page, order); in __free_one_page()
1133 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
1136 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1138 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
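The merge loop in __free_one_page() above rests on the buddy identity: for a 2^order-aligned pfn, the buddy is pfn ^ (1 << order) (this is what the kernel's __find_buddy_pfn() computes), and the merged block starts at pfn & buddy_pfn. A standalone check of that arithmetic:

#include <stdio.h>

static unsigned long __find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);	/* flip the order-th bit */
}

int main(void)
{
	unsigned long pfn = 0x1234;	/* an order-2-aligned pfn */
	unsigned int order;

	for (order = 2; order < 5; order++) {
		unsigned long buddy_pfn = __find_buddy_pfn(pfn, order);
		unsigned long combined_pfn = pfn & buddy_pfn;	/* merged block start */

		printf("order %u: buddy %#lx, merged block at %#lx\n",
		       order, buddy_pfn, combined_pfn);
		pfn = combined_pfn;	/* keep merging upward, as the kernel loop does */
	}
	return 0;
}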
1150 static inline bool page_expected_state(struct page *page, in page_expected_state() argument
1153 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_expected_state()
1156 if (unlikely((unsigned long)page->mapping | in page_expected_state()
1157 page_ref_count(page) | in page_expected_state()
1159 (unsigned long)page->mem_cgroup | in page_expected_state()
1161 (page->flags & check_flags))) in page_expected_state()
1167 static const char *page_bad_reason(struct page *page, unsigned long flags) in page_bad_reason() argument
1171 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_bad_reason()
1173 if (unlikely(page->mapping != NULL)) in page_bad_reason()
1175 if (unlikely(page_ref_count(page) != 0)) in page_bad_reason()
1177 if (unlikely(page->flags & flags)) { in page_bad_reason()
1184 if (unlikely(page->mem_cgroup)) in page_bad_reason()
1190 static void check_free_page_bad(struct page *page) in check_free_page_bad() argument
1192 bad_page(page, in check_free_page_bad()
1193 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); in check_free_page_bad()
1196 static inline int check_free_page(struct page *page) in check_free_page() argument
1198 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) in check_free_page()
1202 check_free_page_bad(page); in check_free_page()
1206 static int free_tail_pages_check(struct page *head_page, struct page *page) in free_tail_pages_check() argument
1220 switch (page - head_page) { in free_tail_pages_check()
1223 if (unlikely(compound_mapcount(page))) { in free_tail_pages_check()
1224 bad_page(page, "nonzero compound_mapcount"); in free_tail_pages_check()
1235 if (page->mapping != TAIL_MAPPING) { in free_tail_pages_check()
1236 bad_page(page, "corrupted mapping in tail page"); in free_tail_pages_check()
1241 if (unlikely(!PageTail(page))) { in free_tail_pages_check()
1242 bad_page(page, "PageTail not set"); in free_tail_pages_check()
1245 if (unlikely(compound_head(page) != head_page)) { in free_tail_pages_check()
1246 bad_page(page, "compound_head not consistent"); in free_tail_pages_check()
1251 page->mapping = NULL; in free_tail_pages_check()
1252 clear_compound_head(page); in free_tail_pages_check()
1256 static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags) in kernel_init_free_pages() argument
1262 tag_clear_highpage(page + i); in kernel_init_free_pages()
1269 u8 tag = page_kasan_tag(page + i); in kernel_init_free_pages()
1270 page_kasan_tag_reset(page + i); in kernel_init_free_pages()
1271 clear_highpage(page + i); in kernel_init_free_pages()
1272 page_kasan_tag_set(page + i, tag); in kernel_init_free_pages()
1277 static __always_inline bool free_pages_prepare(struct page *page, in free_pages_prepare() argument
1281 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); in free_pages_prepare()
1283 VM_BUG_ON_PAGE(PageTail(page), page); in free_pages_prepare()
1285 trace_mm_page_free(page, order); in free_pages_prepare()
1287 if (unlikely(PageHWPoison(page)) && !order) { in free_pages_prepare()
1292 if (memcg_kmem_enabled() && PageKmemcg(page)) in free_pages_prepare()
1293 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1294 reset_page_owner(page, order); in free_pages_prepare()
1295 free_page_pinner(page, order); in free_pages_prepare()
1304 bool compound = PageCompound(page); in free_pages_prepare()
1307 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1310 ClearPageDoubleMap(page); in free_pages_prepare()
1313 bad += free_tail_pages_check(page, page + i); in free_pages_prepare()
1314 if (unlikely(check_free_page(page + i))) { in free_pages_prepare()
1318 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_prepare()
1321 if (PageMappingFlags(page)) in free_pages_prepare()
1322 page->mapping = NULL; in free_pages_prepare()
1323 if (memcg_kmem_enabled() && PageKmemcg(page)) in free_pages_prepare()
1324 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1326 bad += check_free_page(page); in free_pages_prepare()
1330 page_cpupid_reset_last(page); in free_pages_prepare()
1331 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_prepare()
1332 reset_page_owner(page, order); in free_pages_prepare()
1333 free_page_pinner(page, order); in free_pages_prepare()
1335 if (!PageHighMem(page)) { in free_pages_prepare()
1336 debug_check_no_locks_freed(page_address(page), in free_pages_prepare()
1338 debug_check_no_obj_freed(page_address(page), in free_pages_prepare()
1342 kernel_poison_pages(page, 1 << order); in free_pages_prepare()
1354 kasan_free_pages(page, order); in free_pages_prepare()
1359 kernel_init_free_pages(page, 1 << order, false); in free_pages_prepare()
1361 kasan_poison_pages(page, order, init); in free_pages_prepare()
1369 arch_free_page(page, order); in free_pages_prepare()
1371 debug_pagealloc_unmap_pages(page, 1 << order); in free_pages_prepare()
1382 static bool free_pcp_prepare(struct page *page) in free_pcp_prepare() argument
1384 return free_pages_prepare(page, 0, true, FPI_NONE); in free_pcp_prepare()
1387 static bool bulkfree_pcp_prepare(struct page *page) in bulkfree_pcp_prepare() argument
1390 return check_free_page(page); in bulkfree_pcp_prepare()
1401 static bool free_pcp_prepare(struct page *page) in free_pcp_prepare() argument
1404 return free_pages_prepare(page, 0, true, FPI_NONE); in free_pcp_prepare()
1406 return free_pages_prepare(page, 0, false, FPI_NONE); in free_pcp_prepare()
1409 static bool bulkfree_pcp_prepare(struct page *page) in bulkfree_pcp_prepare() argument
1411 return check_free_page(page); in bulkfree_pcp_prepare()
1415 static inline void prefetch_buddy(struct page *page) in prefetch_buddy() argument
1417 unsigned long pfn = page_to_pfn(page); in prefetch_buddy()
1419 struct page *buddy = page + (buddy_pfn - pfn); in prefetch_buddy()
1442 struct page *page, *tmp; in free_pcppages_bulk() local
1472 page = list_last_entry(list, struct page, lru); in free_pcppages_bulk()
1474 list_del(&page->lru); in free_pcppages_bulk()
1477 if (bulkfree_pcp_prepare(page)) in free_pcppages_bulk()
1480 list_add_tail(&page->lru, &head); in free_pcppages_bulk()
1492 prefetch_buddy(page); in free_pcppages_bulk()
1503 list_for_each_entry_safe(page, tmp, &head, lru) { in free_pcppages_bulk()
1504 int mt = get_pcppage_migratetype(page); in free_pcppages_bulk()
1506 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); in free_pcppages_bulk()
1509 mt = get_pageblock_migratetype(page); in free_pcppages_bulk()
1511 __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); in free_pcppages_bulk()
1512 trace_mm_page_pcpu_drain(page, 0, mt); in free_pcppages_bulk()
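free_pcppages_bulk() above drains pages from the per-CPU lists round-robin across migratetypes, growing the batch taken from a list by the number of empty lists skipped on the way; the actual buddy frees happen in a second pass, as the lines above show. A simplified model of that selection loop (the kernel also caps the batch; this sketch assumes to_free never exceeds the total queued):

#include <stdio.h>

#define MIGRATE_PCPTYPES 3	/* illustrative count of pcp list types */

int main(void)
{
	int counts[MIGRATE_PCPTYPES] = { 5, 0, 2 };	/* pages queued per list */
	int migratetype = 0, to_free = 6;

	while (to_free > 0) {
		int batch_free = 0, take;

		/* advance to the next non-empty list, growing the batch */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
		} while (!counts[migratetype]);

		take = batch_free < counts[migratetype] ?
					batch_free : counts[migratetype];
		if (take > to_free)
			take = to_free;
		counts[migratetype] -= take;
		to_free -= take;
		printf("freed %d page(s) from pcp list %d\n", take, migratetype);
	}
	return 0;
}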
1518 struct page *page, unsigned long pfn, in free_one_page() argument
1525 migratetype = get_pfnblock_migratetype(page, pfn); in free_one_page()
1527 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1531 static void __meminit __init_single_page(struct page *page, unsigned long pfn, in __init_single_page() argument
1534 mm_zero_struct_page(page); in __init_single_page()
1535 set_page_links(page, zone, nid, pfn); in __init_single_page()
1536 init_page_count(page); in __init_single_page()
1537 page_mapcount_reset(page); in __init_single_page()
1538 page_cpupid_reset_last(page); in __init_single_page()
1539 page_kasan_tag_reset(page); in __init_single_page()
1541 INIT_LIST_HEAD(&page->lru); in __init_single_page()
1545 set_page_address(page, __va(pfn << PAGE_SHIFT)); in __init_single_page()
1588 struct page *page = pfn_to_page(start_pfn); in reserve_bootmem_region() local
1593 INIT_LIST_HEAD(&page->lru); in reserve_bootmem_region()
1600 __SetPageReserved(page); in reserve_bootmem_region()
1605 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1610 unsigned long pfn = page_to_pfn(page); in __free_pages_ok()
1613 if (!free_pages_prepare(page, order, true, fpi_flags)) in __free_pages_ok()
1616 migratetype = get_pfnblock_migratetype(page, pfn); in __free_pages_ok()
1617 trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page); in __free_pages_ok()
1623 free_one_page(page_zone(page), page, pfn, order, migratetype, in __free_pages_ok()
1628 void __free_pages_core(struct page *page, unsigned int order) in __free_pages_core() argument
1631 struct page *p = page; in __free_pages_core()
1648 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); in __free_pages_core()
1654 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON); in __free_pages_core()
1701 void __init memblock_free_pages(struct page *page, unsigned long pfn, in memblock_free_pages() argument
1706 __free_pages_core(page, order); in memblock_free_pages()
1726 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, in __pageblock_pfn_to_page()
1729 struct page *start_page; in __pageblock_pfn_to_page()
1730 struct page *end_page; in __pageblock_pfn_to_page()
1785 struct page *page; in deferred_free_range() local
1791 page = pfn_to_page(pfn); in deferred_free_range()
1796 set_pageblock_migratetype(page, MIGRATE_MOVABLE); in deferred_free_range()
1797 __free_pages_core(page, pageblock_order); in deferred_free_range()
1801 for (i = 0; i < nr_pages; i++, page++, pfn++) { in deferred_free_range()
1803 set_pageblock_migratetype(page, MIGRATE_MOVABLE); in deferred_free_range()
1804 __free_pages_core(page, 0); in deferred_free_range()
1875 struct page *page = NULL; in deferred_init_pages() local
1879 page = NULL; in deferred_init_pages()
1881 } else if (!page || !(pfn & nr_pgmask)) { in deferred_init_pages()
1882 page = pfn_to_page(pfn); in deferred_init_pages()
1884 page++; in deferred_init_pages()
1886 __init_single_page(page, pfn, zid, nid); in deferred_init_pages()
2216 void __init init_cma_reserved_pageblock(struct page *page) in init_cma_reserved_pageblock() argument
2219 struct page *p = page; in init_cma_reserved_pageblock()
2226 set_pageblock_migratetype(page, MIGRATE_CMA); in init_cma_reserved_pageblock()
2230 p = page; in init_cma_reserved_pageblock()
2237 set_page_refcounted(page); in init_cma_reserved_pageblock()
2238 __free_pages(page, pageblock_order); in init_cma_reserved_pageblock()
2241 adjust_managed_page_count(page, pageblock_nr_pages); in init_cma_reserved_pageblock()
2242 page_zone(page)->cma_pages += pageblock_nr_pages; in init_cma_reserved_pageblock()
2260 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2268 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2276 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2279 add_to_free_list(&page[size], zone, high, migratetype); in expand()
2280 set_buddy_order(&page[size], high); in expand()
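expand() above splits a block of order `high` down to the requested order, returning the upper half of each split to the free list of the shrinking order. A sketch of the halving loop, with the free-list bookkeeping replaced by a printf:

#include <stdio.h>

/*
 * Model of expand(): split a 1 << high block down to 1 << low pages. At
 * each step the upper half (starting at offset `size`) goes back on the
 * free list of the current order; here that is just printed.
 */
static void expand(unsigned long block_pfn, unsigned int high, unsigned int low)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		printf("free half at pfn %#lx, order %u\n", block_pfn + size, high);
	}
	/* pages [block_pfn, block_pfn + (1 << low)) go to the caller */
}

int main(void)
{
	expand(0x1000, 4, 1);	/* split an order-4 block for an order-1 request */
	return 0;
}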
2284 static void check_new_page_bad(struct page *page) in check_new_page_bad() argument
2286 if (unlikely(page->flags & __PG_HWPOISON)) { in check_new_page_bad()
2288 page_mapcount_reset(page); /* remove PageBuddy */ in check_new_page_bad()
2292 bad_page(page, in check_new_page_bad()
2293 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); in check_new_page_bad()
2299 static inline int check_new_page(struct page *page) in check_new_page() argument
2301 if (likely(page_expected_state(page, in check_new_page()
2305 check_new_page_bad(page); in check_new_page()
2315 static inline bool check_pcp_refill(struct page *page) in check_pcp_refill() argument
2318 return check_new_page(page); in check_pcp_refill()
2323 static inline bool check_new_pcp(struct page *page) in check_new_pcp() argument
2325 return check_new_page(page); in check_new_pcp()
2333 static inline bool check_pcp_refill(struct page *page) in check_pcp_refill() argument
2335 return check_new_page(page); in check_pcp_refill()
2337 static inline bool check_new_pcp(struct page *page) in check_new_pcp() argument
2340 return check_new_page(page); in check_new_pcp()
2346 static bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
2350 struct page *p = page + i; in check_new_pages()
2359 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
2362 set_page_private(page, 0); in post_alloc_hook()
2363 set_page_refcounted(page); in post_alloc_hook()
2365 arch_alloc_page(page, order); in post_alloc_hook()
2366 debug_pagealloc_map_pages(page, 1 << order); in post_alloc_hook()
2373 kernel_unpoison_pages(page, 1 << order); in post_alloc_hook()
2381 kasan_alloc_pages(page, order, gfp_flags); in post_alloc_hook()
2385 kasan_unpoison_pages(page, order, init); in post_alloc_hook()
2387 kernel_init_free_pages(page, 1 << order, in post_alloc_hook()
2391 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
2394 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
2397 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
2400 prep_compound_page(page, order); in prep_new_page()
2409 set_page_pfmemalloc(page); in prep_new_page()
2411 clear_page_pfmemalloc(page); in prep_new_page()
2412 trace_android_vh_test_clear_look_around_ref(page); in prep_new_page()
2420 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest()
2425 struct page *page; in __rmqueue_smallest() local
2430 page = get_page_from_free_area(area, migratetype); in __rmqueue_smallest()
2431 if (!page) in __rmqueue_smallest()
2433 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2434 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
2435 set_pcppage_migratetype(page, migratetype); in __rmqueue_smallest()
2436 return page; in __rmqueue_smallest()
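__rmqueue_smallest() above walks free_area[] upward from the requested order, takes the first non-empty list of the wanted migratetype, and lets expand() split the remainder back down. A sketch of the scan; MAX_ORDER and the array layout here are stand-ins, not the kernel's:

#include <stdio.h>

#define MAX_ORDER 11	/* illustrative; the kernel's value is config-dependent */

/* Return the first order >= `order` that has a free block, else -1. */
static int find_smallest(const int nr_free[MAX_ORDER], unsigned int order)
{
	unsigned int current_order;

	for (current_order = order; current_order < MAX_ORDER; ++current_order)
		if (nr_free[current_order])
			return (int)current_order;
	return -1;	/* nothing large enough: the caller falls back or fails */
}

int main(void)
{
	int nr_free[MAX_ORDER] = { [5] = 2 };	/* only order-5 blocks are free */

	printf("order-3 request served from order %d\n", find_smallest(nr_free, 3));
	return 0;
}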
2460 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
2466 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
2476 struct page *start_page, struct page *end_page, in move_freepages()
2479 struct page *page; in move_freepages() local
2483 for (page = start_page; page <= end_page;) { in move_freepages()
2484 if (!pfn_valid_within(page_to_pfn(page))) { in move_freepages()
2485 page++; in move_freepages()
2489 if (!PageBuddy(page)) { in move_freepages()
2496 (PageLRU(page) || __PageMovable(page))) in move_freepages()
2499 page++; in move_freepages()
2504 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2505 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2507 order = buddy_order(page); in move_freepages()
2508 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2509 page += 1 << order; in move_freepages()
2516 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2520 struct page *start_page, *end_page; in move_freepages_block()
2525 start_pfn = page_to_pfn(page); in move_freepages_block()
2533 start_page = page; in move_freepages_block()
2541 static void change_pageblock_range(struct page *pageblock_page, in change_pageblock_range()
2630 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2633 unsigned int current_order = buddy_order(page); in steal_suitable_fallback()
2637 old_block_type = get_pageblock_migratetype(page); in steal_suitable_fallback()
2648 change_pageblock_range(page, current_order, start_type); in steal_suitable_fallback()
2664 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2698 set_pageblock_migratetype(page, start_type); in steal_suitable_fallback()
2703 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2747 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2768 mt = get_pageblock_migratetype(page); in reserve_highatomic_pageblock()
2772 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); in reserve_highatomic_pageblock()
2773 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2796 struct page *page; in unreserve_highatomic_pageblock() local
2820 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); in unreserve_highatomic_pageblock()
2821 if (!page) in unreserve_highatomic_pageblock()
2831 if (is_migrate_highatomic_page(page)) { in unreserve_highatomic_pageblock()
2853 set_pageblock_migratetype(page, ac->migratetype); in unreserve_highatomic_pageblock()
2854 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2884 struct page *page; in __rmqueue_fallback() local
2943 page = get_page_from_free_area(area, fallback_mt); in __rmqueue_fallback()
2945 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2948 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
2959 static __always_inline struct page *
2963 struct page *page; in __rmqueue() local
2966 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2968 if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
2972 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
2973 return page; in __rmqueue()
2977 static struct page *__rmqueue_cma(struct zone *zone, unsigned int order, in __rmqueue_cma()
2981 struct page *page = __rmqueue_cma_fallback(zone, order); in __rmqueue_cma() local
2982 trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA); in __rmqueue_cma()
2983 return page; in __rmqueue_cma()
2986 static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order, in __rmqueue_cma()
3007 struct page *page; in rmqueue_bulk() local
3010 page = __rmqueue_cma(zone, order, migratetype, in rmqueue_bulk()
3013 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue_bulk()
3015 if (unlikely(page == NULL)) in rmqueue_bulk()
3018 if (unlikely(check_pcp_refill(page))) in rmqueue_bulk()
3031 list_add_tail(&page->lru, list); in rmqueue_bulk()
3033 if (is_migrate_cma(get_pcppage_migratetype(page))) in rmqueue_bulk()
3261 struct page *page; in mark_free_pages() local
3271 page = pfn_to_page(pfn); in mark_free_pages()
3278 if (page_zone(page) != zone) in mark_free_pages()
3281 if (!swsusp_page_is_forbidden(page)) in mark_free_pages()
3282 swsusp_unset_page_free(page); in mark_free_pages()
3286 list_for_each_entry(page, in mark_free_pages()
3290 pfn = page_to_pfn(page); in mark_free_pages()
3304 static bool free_unref_page_prepare(struct page *page, unsigned long pfn) in free_unref_page_prepare() argument
3308 if (!free_pcp_prepare(page)) in free_unref_page_prepare()
3311 migratetype = get_pfnblock_migratetype(page, pfn); in free_unref_page_prepare()
3312 set_pcppage_migratetype(page, migratetype); in free_unref_page_prepare()
3316 static void free_unref_page_commit(struct page *page, unsigned long pfn) in free_unref_page_commit() argument
3318 struct zone *zone = page_zone(page); in free_unref_page_commit()
3323 migratetype = get_pcppage_migratetype(page); in free_unref_page_commit()
3338 free_one_page(zone, page, pfn, 0, migratetype, in free_unref_page_commit()
3346 list_add(&page->lru, &pcp->lists[migratetype]); in free_unref_page_commit()
3357 void free_unref_page(struct page *page) in free_unref_page() argument
3360 unsigned long pfn = page_to_pfn(page); in free_unref_page()
3364 if (!free_unref_page_prepare(page, pfn)) in free_unref_page()
3367 migratetype = get_pfnblock_migratetype(page, pfn); in free_unref_page()
3368 trace_android_vh_free_unref_page_bypass(page, 0, migratetype, &skip_free_unref_page); in free_unref_page()
3373 free_unref_page_commit(page, pfn); in free_unref_page()
3382 struct page *page, *next; in free_unref_page_list() local
3387 list_for_each_entry_safe(page, next, list, lru) { in free_unref_page_list()
3388 pfn = page_to_pfn(page); in free_unref_page_list()
3389 if (!free_unref_page_prepare(page, pfn)) in free_unref_page_list()
3390 list_del(&page->lru); in free_unref_page_list()
3391 set_page_private(page, pfn); in free_unref_page_list()
3395 list_for_each_entry_safe(page, next, list, lru) { in free_unref_page_list()
3396 unsigned long pfn = page_private(page); in free_unref_page_list()
3398 set_page_private(page, 0); in free_unref_page_list()
3399 trace_mm_page_free_batched(page); in free_unref_page_list()
3400 free_unref_page_commit(page, pfn); in free_unref_page_list()
3423 void split_page(struct page *page, unsigned int order) in split_page() argument
3427 VM_BUG_ON_PAGE(PageCompound(page), page); in split_page()
3428 VM_BUG_ON_PAGE(!page_count(page), page); in split_page()
3431 set_page_refcounted(page + i); in split_page()
3432 split_page_owner(page, 1 << order); in split_page()
3433 split_page_memcg(page, 1 << order); in split_page()
3437 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
3443 BUG_ON(!PageBuddy(page)); in __isolate_free_page()
3445 zone = page_zone(page); in __isolate_free_page()
3446 mt = get_pageblock_migratetype(page); in __isolate_free_page()
3464 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3471 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
3472 for (; page < endpage; page += pageblock_nr_pages) { in __isolate_free_page()
3473 int mt = get_pageblock_migratetype(page); in __isolate_free_page()
3476 set_pageblock_migratetype(page, in __isolate_free_page()
3494 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
3496 struct zone *zone = page_zone(page); in __putback_isolated_page()
3502 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3534 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist()
3539 struct page *page = NULL; in __rmqueue_pcplist() local
3562 page = list_first_entry(list, struct page, lru); in __rmqueue_pcplist()
3563 list_del(&page->lru); in __rmqueue_pcplist()
3565 } while (check_new_pcp(page)); in __rmqueue_pcplist()
3567 return page; in __rmqueue_pcplist()
3571 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3576 struct page *page; in rmqueue_pcplist() local
3581 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, in rmqueue_pcplist()
3583 if (page) { in rmqueue_pcplist()
3584 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); in rmqueue_pcplist()
3588 return page; in rmqueue_pcplist()
3595 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3601 struct page *page; in rmqueue() local
3604 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, in rmqueue()
3617 page = NULL; in rmqueue()
3625 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3626 if (page) in rmqueue()
3627 trace_mm_page_alloc_zone_locked(page, order, migratetype); in rmqueue()
3629 if (!page) { in rmqueue()
3632 page = __rmqueue_cma(zone, order, migratetype, in rmqueue()
3634 if (!page) in rmqueue()
3635 page = __rmqueue(zone, order, migratetype, in rmqueue()
3638 } while (page && check_new_pages(page, order)); in rmqueue()
3640 if (!page) in rmqueue()
3643 get_pcppage_migratetype(page)); in rmqueue()
3645 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue()
3658 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3659 return page; in rmqueue()
3978 static struct page *
3996 struct page *page; in get_page_from_freelist() local
4092 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
4094 if (page) { in get_page_from_freelist()
4095 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
4102 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
4104 return page; in get_page_from_freelist()
4172 static inline struct page *
4177 struct page *page; in __alloc_pages_cpuset_fallback() local
4179 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4185 if (!page) in __alloc_pages_cpuset_fallback()
4186 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4189 return page; in __alloc_pages_cpuset_fallback()
4192 static inline struct page *
4203 struct page *page; in __alloc_pages_may_oom() local
4224 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & in __alloc_pages_may_oom()
4227 if (page) in __alloc_pages_may_oom()
4270 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
4275 return page; in __alloc_pages_may_oom()
4286 static struct page *
4291 struct page *page = NULL; in __alloc_pages_direct_compact() local
4302 prio, &page); in __alloc_pages_direct_compact()
4314 if (page) in __alloc_pages_direct_compact()
4315 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
4318 if (!page) in __alloc_pages_direct_compact()
4319 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
4321 if (page) { in __alloc_pages_direct_compact()
4322 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact()
4327 return page; in __alloc_pages_direct_compact()
4419 static inline struct page *
4558 static inline struct page *
4563 struct page *page = NULL; in __alloc_pages_direct_reclaim() local
4574 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4581 if (!page && !drained) { in __alloc_pages_direct_reclaim()
4593 return page; in __alloc_pages_direct_reclaim()
4830 static inline struct page *
4836 struct page *page = NULL; in __alloc_pages_slowpath() local
4890 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4891 if (page) in __alloc_pages_slowpath()
4907 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4911 if (page) in __alloc_pages_slowpath()
4970 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4971 if (page) in __alloc_pages_slowpath()
4983 alloc_flags, ac->migratetype, &page); in __alloc_pages_slowpath()
4985 if (page) in __alloc_pages_slowpath()
4990 &page, &should_alloc_retry); in __alloc_pages_slowpath()
4995 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4997 if (page) in __alloc_pages_slowpath()
5001 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
5003 if (page) in __alloc_pages_slowpath()
5043 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
5044 if (page) in __alloc_pages_slowpath()
5101 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
5102 if (page) in __alloc_pages_slowpath()
5110 alloc_flags, ac->migratetype, &page); in __alloc_pages_slowpath()
5111 if (page) in __alloc_pages_slowpath()
5118 return page; in __alloc_pages_slowpath()
5170 struct page *
5174 struct page *page; in __alloc_pages_nodemask() local
5200 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
5201 if (likely(page)) in __alloc_pages_nodemask()
5219 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
5222 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && in __alloc_pages_nodemask()
5223 unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) { in __alloc_pages_nodemask()
5224 __free_pages(page, order); in __alloc_pages_nodemask()
5225 page = NULL; in __alloc_pages_nodemask()
5228 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
5230 return page; in __alloc_pages_nodemask()
5241 struct page *page; in __get_free_pages() local
5243 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
5244 if (!page) in __get_free_pages()
5246 return (unsigned long) page_address(page); in __get_free_pages()
5256 static inline void free_the_page(struct page *page, unsigned int order) in free_the_page() argument
5259 free_unref_page(page); in free_the_page()
5261 __free_pages_ok(page, order, FPI_NONE); in free_the_page()
5264 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
5267 int head = PageHead(page); in __free_pages()
5269 trace_android_vh_free_pages(page, order); in __free_pages()
5270 if (put_page_testzero(page)) in __free_pages()
5271 free_the_page(page, order); in __free_pages()
5274 free_the_page(page + (1 << order), order); in __free_pages()
5299 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, in __page_frag_cache_refill()
5302 struct page *page = NULL; in __page_frag_cache_refill() local
5308 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, in __page_frag_cache_refill()
5310 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; in __page_frag_cache_refill()
5312 if (unlikely(!page)) in __page_frag_cache_refill()
5313 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); in __page_frag_cache_refill()
5315 nc->va = page ? page_address(page) : NULL; in __page_frag_cache_refill()
5317 return page; in __page_frag_cache_refill()
5320 void __page_frag_cache_drain(struct page *page, unsigned int count) in __page_frag_cache_drain() argument
5322 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); in __page_frag_cache_drain()
5324 if (page_ref_sub_and_test(page, count)) in __page_frag_cache_drain()
5325 free_the_page(page, compound_order(page)); in __page_frag_cache_drain()
5333 struct page *page; in page_frag_alloc() local
5338 page = __page_frag_cache_refill(nc, gfp_mask); in page_frag_alloc()
5339 if (!page) in page_frag_alloc()
5349 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); in page_frag_alloc()
5352 nc->pfmemalloc = page_is_pfmemalloc(page); in page_frag_alloc()
5359 page = virt_to_page(nc->va); in page_frag_alloc()
5361 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) in page_frag_alloc()
5365 free_the_page(page, compound_order(page)); in page_frag_alloc()
5374 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); in page_frag_alloc()
5405 struct page *page = virt_to_head_page(addr); in page_frag_free() local
5407 if (unlikely(put_page_testzero(page))) in page_frag_free()
5408 free_the_page(page, compound_order(page)); in page_frag_free()
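The page_frag functions above carve small fragments out of one cached (possibly compound) page, moving an offset downward from the top and using a biased refcount (PAGE_FRAG_CACHE_MAX_SIZE + 1) so page_frag_free() is a cheap sub-and-test. A toy model of the offset bump, with the refcount bias and refill omitted:

#include <stdio.h>

/* Toy model of the page_frag cache: carve fragments out of one buffer. */
struct frag_cache {
	char buf[4096];
	int offset;	/* next free byte, counting down from the top */
};

static void *frag_alloc(struct frag_cache *nc, int size, int align_mask)
{
	int offset = nc->offset - size;

	if (offset < 0)
		return NULL;		/* the kernel refills with a fresh page here */
	offset &= ~align_mask;		/* align the fragment start downward */
	nc->offset = offset;
	return nc->buf + offset;
}

int main(void)
{
	struct frag_cache nc = { .offset = sizeof(nc.buf) };

	printf("%p\n", frag_alloc(&nc, 100, 63));	/* 100 bytes, 64-aligned */
	printf("%p\n", frag_alloc(&nc, 100, 63));
	return 0;
}

In the kernel, a refill prefers a PAGE_FRAG_CACHE_MAX_SIZE compound page and falls back to a single page, as __page_frag_cache_refill() above shows.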
5471 struct page *p; in alloc_pages_exact_nid()
6343 struct page *page; in memmap_init_zone() local
6378 page = pfn_to_page(pfn); in memmap_init_zone()
6379 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
6381 __SetPageReserved(page); in memmap_init_zone()
6389 set_pageblock_migratetype(page, migratetype); in memmap_init_zone()
6423 struct page *page = pfn_to_page(pfn); in memmap_init_zone_device() local
6425 __init_single_page(page, pfn, zone_idx, nid); in memmap_init_zone_device()
6434 __SetPageReserved(page); in memmap_init_zone_device()
6441 page->pgmap = pgmap; in memmap_init_zone_device()
6442 page->zone_device_data = NULL; in memmap_init_zone_device()
6455 set_pageblock_migratetype(page, MIGRATE_MOVABLE); in memmap_init_zone_device()
7143 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; in calc_memmap_size()
7303 struct page *map; in alloc_node_mem_map()
7312 size = (end - start) * sizeof(struct page); in alloc_node_mem_map()
7877 void adjust_managed_page_count(struct page *page, long count) in adjust_managed_page_count() argument
7879 atomic_long_add(count, &page_zone(page)->managed_pages); in adjust_managed_page_count()
7882 if (PageHighMem(page)) in adjust_managed_page_count()
7896 struct page *page = virt_to_page(pos); in free_reserved_area() local
7906 direct_map_addr = page_address(page); in free_reserved_area()
7915 free_reserved_page(page); in free_reserved_area()
7926 void free_highmem_page(struct page *page) in free_highmem_page() argument
7928 __free_reserved_page(page); in free_highmem_page()
7930 atomic_long_inc(&page_zone(page)->managed_pages); in free_highmem_page()
8587 struct page *has_unmovable_pages(struct zone *zone, struct page *page, in has_unmovable_pages() argument
8591 unsigned long pfn = page_to_pfn(page); in has_unmovable_pages()
8594 if (is_migrate_cma_page(page)) { in has_unmovable_pages()
8603 return page; in has_unmovable_pages()
8610 page = pfn_to_page(pfn + iter); in has_unmovable_pages()
8618 if (PageReserved(page)) in has_unmovable_pages()
8619 return page; in has_unmovable_pages()
8635 if (PageHuge(page) || PageTransCompound(page)) { in has_unmovable_pages()
8636 struct page *head = compound_head(page); in has_unmovable_pages()
8639 if (PageHuge(page)) { in has_unmovable_pages()
8641 return page; in has_unmovable_pages()
8643 return page; in has_unmovable_pages()
8646 skip_pages = compound_nr(head) - (page - head); in has_unmovable_pages()
8657 if (!page_ref_count(page)) { in has_unmovable_pages()
8658 if (PageBuddy(page)) in has_unmovable_pages()
8659 iter += (1 << buddy_order(page)) - 1; in has_unmovable_pages()
8667 if ((flags & MEMORY_OFFLINE) && PageHWPoison(page)) in has_unmovable_pages()
8680 if ((flags & MEMORY_OFFLINE) && PageOffline(page)) in has_unmovable_pages()
8683 if (__PageMovable(page) || PageLRU(page)) in has_unmovable_pages()
8691 return page; in has_unmovable_pages()
8717 struct page *page; in alloc_contig_dump_pages() local
8722 list_for_each_entry(page, page_list, lru) { in alloc_contig_dump_pages()
8725 if (page_count(page) == 1) { in alloc_contig_dump_pages()
8729 dump_page(page, "migration failure"); in alloc_contig_dump_pages()
8751 struct page *page; in __alloc_contig_migrate_range() local
8786 list_for_each_entry(page, &cc->migratepages, lru) in __alloc_contig_migrate_range()
8787 info->nr_mapped += page_mapcount(page); in __alloc_contig_migrate_range()
8803 page = list_first_entry(&cc->migratepages, struct page, lru); in __alloc_contig_migrate_range()
8804 info->failed_pfn = page_to_pfn(page); in __alloc_contig_migrate_range()
8991 struct page *page; in pfn_range_valid_contig() local
8994 page = pfn_to_online_page(i); in pfn_range_valid_contig()
8995 if (!page) in pfn_range_valid_contig()
8998 if (page_zone(page) != z) in pfn_range_valid_contig()
9001 if (PageReserved(page)) in pfn_range_valid_contig()
9004 if (page_count(page) > 0) in pfn_range_valid_contig()
9007 if (PageHuge(page)) in pfn_range_valid_contig()
9042 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, in alloc_contig_pages()
9085 struct page *page = pfn_to_page(pfn); in free_contig_range() local
9087 count += page_count(page) != 1; in free_contig_range()
9088 __free_page(page); in free_contig_range()
9132 struct page *page; in __offline_isolated_pages() local
9141 page = pfn_to_page(pfn); in __offline_isolated_pages()
9146 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { in __offline_isolated_pages()
9154 if (PageOffline(page)) { in __offline_isolated_pages()
9155 BUG_ON(page_count(page)); in __offline_isolated_pages()
9156 BUG_ON(PageBuddy(page)); in __offline_isolated_pages()
9161 BUG_ON(page_count(page)); in __offline_isolated_pages()
9162 BUG_ON(!PageBuddy(page)); in __offline_isolated_pages()
9163 order = buddy_order(page); in __offline_isolated_pages()
9164 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
9171 bool is_free_buddy_page(struct page *page) in is_free_buddy_page() argument
9173 struct zone *zone = page_zone(page); in is_free_buddy_page()
9174 unsigned long pfn = page_to_pfn(page); in is_free_buddy_page()
9180 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
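is_free_buddy_page() above (and take_page_off_buddy() below) locate the candidate buddy block containing a page with page - (pfn & ((1 << order) - 1)), i.e. by clearing the low `order` bits of the pfn. A standalone check of that alignment arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x12f7;
	unsigned int order;

	/*
	 * For each order, the start of the 2^order block containing pfn is
	 * found by clearing the low `order` bits, exactly what the
	 * page - (pfn & ((1 << order) - 1)) struct-page arithmetic does.
	 */
	for (order = 0; order < 5; order++)
		printf("order %u: block head pfn %#lx\n",
		       order, pfn & ~((1UL << order) - 1));
	return 0;
}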
9195 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
9196 struct page *target, int low, int high, in break_down_buddy_pages()
9200 struct page *current_buddy, *next_page; in break_down_buddy_pages()
9206 if (target >= &page[size]) { in break_down_buddy_pages()
9207 next_page = page + size; in break_down_buddy_pages()
9208 current_buddy = page; in break_down_buddy_pages()
9210 next_page = page; in break_down_buddy_pages()
9211 current_buddy = page + size; in break_down_buddy_pages()
9213 page = next_page; in break_down_buddy_pages()
9228 bool take_page_off_buddy(struct page *page) in take_page_off_buddy() argument
9230 struct zone *zone = page_zone(page); in take_page_off_buddy()
9231 unsigned long pfn = page_to_pfn(page); in take_page_off_buddy()
9238 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
9247 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()