Lines Matching full:order

233 static void __free_pages_ok(struct page *page, unsigned int order,
310 static bool page_contains_unaccepted(struct page *page, unsigned int order);
311 static void accept_page(struct page *page, unsigned int order);
312 static bool cond_accept_memory(struct zone *zone, unsigned int order);
337 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
339 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
526 static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument
531 if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex()
532 VM_BUG_ON(order != pageblock_order); in order_to_pindex()
539 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex()
542 return (MIGRATE_PCPTYPES * order) + migratetype; in order_to_pindex()
547 int order = pindex / MIGRATE_PCPTYPES; in pindex_to_order() local
551 order = pageblock_order; in pindex_to_order()
553 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in pindex_to_order()
556 return order; in pindex_to_order()
559 static inline bool pcp_allowed_order(unsigned int order) in pcp_allowed_order() argument
561 if (order <= PAGE_ALLOC_COSTLY_ORDER) in pcp_allowed_order()
564 if (order == pageblock_order) in pcp_allowed_order()
570 static inline void free_the_page(struct page *page, unsigned int order) in free_the_page() argument
572 if (pcp_allowed_order(order)) /* Via pcp? */ in free_the_page()
573 free_unref_page(page, order); in free_the_page()
575 __free_pages_ok(page, order, FPI_NONE); in free_the_page()
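The order_to_pindex()/pindex_to_order() and pcp_allowed_order() hits above cover the per-CPU pagelist (PCP) fast path: orders up to PAGE_ALLOC_COSTLY_ORDER (plus the pageblock-order THP case, which the sketch below leaves out) are routed to per-CPU lists indexed by a combination of order and migratetype. A minimal userspace sketch of that index round trip; the constant values are assumptions for illustration, not a particular kernel config.

```c
/* Userspace sketch of the (order, migratetype) <-> pcp list index
 * round trip visible in the hits above. Constants are assumptions. */
#include <assert.h>
#include <stdio.h>

#define MIGRATE_PCPTYPES        3   /* assumed number of pcp migratetypes */
#define PAGE_ALLOC_COSTLY_ORDER 3   /* orders above this skip the pcp lists */

static int order_to_pindex(int migratetype, int order)
{
    return MIGRATE_PCPTYPES * order + migratetype;
}

static int pindex_to_order(int pindex)
{
    return pindex / MIGRATE_PCPTYPES;
}

int main(void)
{
    for (int order = 0; order <= PAGE_ALLOC_COSTLY_ORDER; order++) {
        for (int mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
            int pindex = order_to_pindex(mt, order);

            assert(pindex_to_order(pindex) == order);
            printf("order %d, migratetype %d -> pindex %d\n",
                   order, mt, pindex);
        }
    }
    return 0;
}
```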
579 * Higher-order pages are called "compound pages". They are structured thusly:
586 * The first tail page's ->compound_order holds the order of allocation.
587 * This usage means that zero-order pages may not be compound.
590 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
593 int nr_pages = 1 << order; in prep_compound_page()
599 prep_compound_head(page, order); in prep_compound_page()
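The prep_compound_page() hits and the comment above them describe how a higher-order allocation becomes a compound page: every tail page points back at the head, and the order is kept in the first tail page, which is why an order-0 page cannot be compound. A toy userspace model of that layout (struct toy_page and its fields are invented for illustration, not the real struct page):

```c
/* Toy model of the compound-page layout described above. */
#include <stdio.h>

struct toy_page {
    struct toy_page *head;        /* tails point back at the head page */
    unsigned int compound_order;  /* meaningful on the first tail only */
};

static void prep_compound_sketch(struct toy_page *pages, unsigned int order)
{
    unsigned int nr_pages = 1u << order;

    for (unsigned int i = 1; i < nr_pages; i++)
        pages[i].head = &pages[0];
    /* The order lives in the first tail page, which is why an order-0
     * page (it has no tail pages at all) can never be compound. */
    pages[1].compound_order = order;
}

int main(void)
{
    struct toy_page pages[8] = { { 0 } };

    prep_compound_sketch(pages, 3);
    printf("order %u, tail 5 points at head? %d\n",
           pages[1].compound_order, pages[5].head == &pages[0]);
    return 0;
}
```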
614 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
616 set_page_private(page, order); in set_buddy_order()
633 int order, int migratetype) in compaction_capture() argument
635 if (!capc || order != capc->cc->order) in compaction_capture()
644 * Do not let lower order allocations pollute a movable pageblock. in compaction_capture()
647 * have trouble finding a high-order free page. in compaction_capture()
649 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) in compaction_capture()
664 int order, int migratetype) in compaction_capture() argument
672 unsigned int order, int migratetype) in add_to_free_list() argument
674 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
682 unsigned int order, int migratetype) in add_to_free_list_tail() argument
684 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
696 unsigned int order, int migratetype) in move_to_free_list() argument
698 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
704 unsigned int order) in del_page_from_free_list() argument
713 zone->free_area[order].nr_free--; in del_page_from_free_list()
725 * of the next-highest order is free. If it is, it's possible
729 * as a higher order page
733 struct page *page, unsigned int order) in buddy_merge_likely() argument
738 if (order >= MAX_ORDER - 1) in buddy_merge_likely()
744 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, in buddy_merge_likely()
761 * free pages of length of (1 << order) and marked with PageBuddy.
762 * Page's order is recorded in page_private(page) field.
774 struct zone *zone, unsigned int order, in __free_one_page() argument
788 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
790 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
793 while (order < MAX_ORDER) { in __free_one_page()
794 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
795 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
800 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); in __free_one_page()
804 if (unlikely(order >= pageblock_order)) { in __free_one_page()
821 * merge with it and move up one order. in __free_one_page()
824 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
826 del_page_from_free_list(buddy, zone, order); in __free_one_page()
830 order++; in __free_one_page()
834 set_buddy_order(page, order); in __free_one_page()
838 else if (is_shuffle_order(order)) in __free_one_page()
841 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
844 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
846 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
850 page_reporting_notify_free(order); in __free_one_page()
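The __free_one_page() hits trace the buddy-coalescing loop: the buddy of a block at pfn with a given order sits at pfn ^ (1 << order); while that buddy is itself a free block of the same order, the two are merged, the combined block starts at the lower pfn, and the order is bumped. A minimal userspace model of just that arithmetic; free_at_order() is an invented stand-in for the PageBuddy/buddy_order checks, and the MAX_ORDER value is an assumption.

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 10   /* assumed ceiling for this sketch */

static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
    return pfn ^ (1UL << order);
}

/* Invented stand-in for "the buddy is a free block of this order". */
static bool free_at_order(unsigned long pfn, unsigned int order)
{
    return pfn < 64 && order < 3;   /* pretend a small region is free */
}

int main(void)
{
    unsigned long pfn = 44;   /* freeing a single order-0 page at pfn 44 */
    unsigned int order = 0;

    while (order < MAX_ORDER) {
        unsigned long buddy_pfn = find_buddy_pfn(pfn, order);

        if (!free_at_order(buddy_pfn, order))
            break;
        pfn &= buddy_pfn;   /* merged block starts at the lower pfn */
        order++;
    }
    printf("coalesced into an order-%u block at pfn %lu\n", order, pfn);
    return 0;
}
```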
856 * @order: the order of the page
867 unsigned int order, unsigned long split_pfn_offset) in split_free_page() argument
882 if (!PageBuddy(free_page) || buddy_order(free_page) != order) { in split_free_page()
889 __mod_zone_freepage_state(zone, -(1UL << order), mt); in split_free_page()
891 del_page_from_free_list(free_page, zone, order); in split_free_page()
893 pfn < free_page_pfn + (1UL << order);) { in split_free_page()
897 pfn ? __ffs(pfn) : order, in split_free_page()
905 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); in split_free_page()
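Inside split_free_page(), the hits show the freed range being handed back in chunks whose order is bounded by min(__ffs(pfn), order): a block re-added at pfn can be no larger than pfn's own alignment allows. A sketch of just that chunking rule (it ignores the split_pfn_offset bookkeeping); __builtin_ctzl() is used as a userspace stand-in for the kernel's __ffs(), and the pfn range is made up.

```c
#include <stdio.h>

/* Largest order allowed for a block starting at pfn: bounded by the
 * pfn's alignment (lowest set bit index) and by the order budget. */
static unsigned int chunk_order(unsigned long pfn, unsigned int order)
{
    unsigned int align_order = pfn ? (unsigned int)__builtin_ctzl(pfn) : order;

    return align_order < order ? align_order : order;
}

int main(void)
{
    unsigned long start = 44, end = 64;   /* arbitrary example range */
    unsigned int max_order = 4;

    for (unsigned long pfn = start; pfn < end; ) {
        unsigned int o = chunk_order(pfn, max_order);

        printf("re-add order-%u block at pfn %lu\n", o, pfn);
        pfn += 1UL << o;
    }
    return 0;
}
```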
1086 unsigned int order, fpi_t fpi_flags) in free_pages_prepare() argument
1095 trace_mm_page_free(page, order); in free_pages_prepare()
1096 kmsan_free_page(page, order); in free_pages_prepare()
1112 if (unlikely(PageHWPoison(page)) && !order) { in free_pages_prepare()
1118 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1119 reset_page_owner(page, order); in free_pages_prepare()
1120 page_table_check_free(page, order); in free_pages_prepare()
1126 * avoid checking PageCompound for order-0 pages. in free_pages_prepare()
1128 if (unlikely(order)) { in free_pages_prepare()
1132 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1136 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1151 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1161 reset_page_owner(page, order); in free_pages_prepare()
1162 page_table_check_free(page, order); in free_pages_prepare()
1166 PAGE_SIZE << order); in free_pages_prepare()
1168 PAGE_SIZE << order); in free_pages_prepare()
1171 kernel_poison_pages(page, 1 << order); in free_pages_prepare()
1182 kasan_poison_pages(page, order, init); in free_pages_prepare()
1189 kernel_init_pages(page, 1 << order); in free_pages_prepare()
1196 arch_free_page(page, order); in free_pages_prepare()
1198 debug_pagealloc_unmap_pages(page, 1 << order); in free_pages_prepare()
1213 unsigned int order; in free_pcppages_bulk() local
1240 order = pindex_to_order(pindex); in free_pcppages_bulk()
1241 nr_pages = 1 << order; in free_pcppages_bulk()
1259 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1260 trace_mm_page_pcpu_drain(page, order, mt); in free_pcppages_bulk()
1269 unsigned int order, in free_one_page() argument
1279 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1283 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1291 if (!free_pages_prepare(page, order, fpi_flags)) in __free_pages_ok()
1306 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in __free_pages_ok()
1309 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
1312 void __free_pages_core(struct page *page, unsigned int order) in __free_pages_core() argument
1314 unsigned int nr_pages = 1 << order; in __free_pages_core()
1334 if (page_contains_unaccepted(page, order)) { in __free_pages_core()
1335 if (order == MAX_ORDER && __free_unaccepted(page)) in __free_pages_core()
1338 accept_page(page, order); in __free_pages_core()
1345 __free_pages_ok(page, order, FPI_TO_TAIL); in __free_pages_core()
1365 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
1401 * The order of subdivision here is critical for the IO subsystem.
1402 * Please do not alter this order without good reasons and regression
1404 * the order in which smaller blocks are delivered depends on the order
1406 * influencing the order in which pages are delivered to the IO
1463 static inline bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
1466 for (int i = 0; i < (1 << order); i++) { in check_new_pages()
1505 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
1516 arch_alloc_page(page, order); in post_alloc_hook()
1517 debug_pagealloc_map_pages(page, 1 << order); in post_alloc_hook()
1524 kernel_unpoison_pages(page, 1 << order); in post_alloc_hook()
1538 for (i = 0; i != 1 << order; ++i) in post_alloc_hook()
1545 kasan_unpoison_pages(page, order, init)) { in post_alloc_hook()
1554 for (i = 0; i != 1 << order; ++i) in post_alloc_hook()
1559 kernel_init_pages(page, 1 << order); in post_alloc_hook()
1561 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
1562 page_table_check_alloc(page, order); in post_alloc_hook()
1565 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1568 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
1570 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
1571 prep_compound_page(page, order); in prep_new_page()
1590 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1598 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { in __rmqueue_smallest()
1604 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
1606 trace_mm_page_alloc_zone_locked(page, order, migratetype, in __rmqueue_smallest()
1607 pcp_allowed_order(order) && in __rmqueue_smallest()
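The __rmqueue_smallest() hits show the allocation-side counterpart of buddy merging: starting at the requested order, the first non-empty free list is used, and expand() hands the unused upper halves of that block back to progressively smaller free lists. A small sketch of how a found block is split down to the requested order; the order values are just an example.

```c
/* Sketch of the split performed after __rmqueue_smallest() finds a
 * block larger than requested: put back one half per order step. */
#include <stdio.h>

int main(void)
{
    unsigned int order = 1;          /* requested order */
    unsigned int current_order = 4;  /* smallest free block actually found */
    unsigned long size = 1UL << current_order;

    while (current_order > order) {
        current_order--;
        size >>= 1;
        printf("put back order-%u block at page offset %lu\n",
               current_order, size);
    }
    printf("allocation gets the order-%u block at offset 0\n", order);
    return 0;
}
```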
1617 * This array describes the order lists are fallen back to when
1630 unsigned int order) in __rmqueue_cma_fallback() argument
1632 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1636 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
1650 unsigned int order; in move_freepages() local
1672 order = buddy_order(page); in move_freepages()
1673 move_to_free_list(page, zone, order, migratetype); in move_freepages()
1674 pfn += 1 << order; in move_freepages()
1675 pages_moved += 1 << order; in move_freepages()
1726 static bool can_steal_fallback(unsigned int order, int start_mt) in can_steal_fallback() argument
1729 * Leaving this order check is intended, although there is in can_steal_fallback()
1730 * relaxed order check in next check. The reason is that in can_steal_fallback()
1735 if (order >= pageblock_order) in can_steal_fallback()
1738 if (order >= pageblock_order / 2 || in can_steal_fallback()
1785 * This function implements actual steal behaviour. If order is large enough,
1868 * Check whether there is a suitable fallback freepage with requested order.
1873 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
1888 if (can_steal_fallback(order, migratetype)) in find_suitable_fallback()
1902 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1903 * there are no empty page blocks that contain a page with a suitable order
1939 * potentially hurts the reliability of high-order allocations when under
1954 int order; in unreserve_highatomic_pageblock() local
1968 for (order = 0; order < NR_PAGE_ORDERS; order++) { in unreserve_highatomic_pageblock()
1969 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2024 * The use of signed ints for order and current_order is a deliberate
2029 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2034 int min_order = order; in __rmqueue_fallback()
2044 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) in __rmqueue_fallback()
2069 && current_order > order) in __rmqueue_fallback()
2078 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { in __rmqueue_fallback()
2098 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
2106 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order, in __rmqueue_with_cma_reuse() argument
2111 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2116 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2120 __rmqueue_fallback(zone, order, migratetype, alloc_flags)) in __rmqueue_with_cma_reuse()
2131 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2137 page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags); in __rmqueue()
2151 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2157 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2160 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2162 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
2174 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2183 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2190 * physical page order. The page is added to the tail of in rmqueue_bulk()
2194 * head, thus also in the physical page order. This is useful in rmqueue_bulk()
2201 -(1 << order)); in rmqueue_bulk()
2204 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2366 unsigned int order) in free_unref_page_prepare() argument
2370 if (!free_pages_prepare(page, order, FPI_NONE)) in free_unref_page_prepare()
2383 /* Free everything if batch freeing high-order pages. */ in nr_pcp_free()
2427 unsigned int order) in free_unref_page_commit() argument
2433 __count_vm_events(PGFREE, 1 << order); in free_unref_page_commit()
2434 pindex = order_to_pindex(migratetype, order); in free_unref_page_commit()
2436 pcp->count += 1 << order; in free_unref_page_commit()
2439 * As high-order pages other than THP's stored on PCP can contribute in free_unref_page_commit()
2444 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); in free_unref_page_commit()
2455 void free_unref_page(struct page *page, unsigned int order) in free_unref_page() argument
2463 if (!free_unref_page_prepare(page, pfn, order)) in free_unref_page()
2476 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); in free_unref_page()
2486 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order); in free_unref_page()
2489 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); in free_unref_page()
2495 * Free a list of 0-order pages
2580 * split_page takes a non-compound higher-order page, and splits it into
2581 * n (1<<order) sub-pages: page[0..n]
2587 void split_page(struct page *page, unsigned int order) in split_page() argument
2594 for (i = 1; i < (1 << order); i++) in split_page()
2596 split_page_owner(page, 1 << order); in split_page()
2597 split_page_memcg(page, 1 << order); in split_page()
2601 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
2610 * emulate a high-order watermark check with a raised order-0 in __isolate_free_page()
2611 * watermark, because we already know our high-order page in __isolate_free_page()
2614 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
2618 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
2621 del_page_from_free_list(page, zone, order); in __isolate_free_page()
2627 if (order >= pageblock_order - 1) { in __isolate_free_page()
2628 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
2641 return 1UL << order; in __isolate_free_page()
2647 * @order: Order of the isolated page
2653 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
2661 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
2693 unsigned int order, unsigned int alloc_flags, in rmqueue_buddy() argument
2703 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
2705 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue_buddy()
2709 * order-0 (atomic) allocs access to HIGHATOMIC in rmqueue_buddy()
2711 * high-order atomic allocation in the future. in rmqueue_buddy()
2714 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
2721 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue_buddy()
2724 } while (check_new_pages(page, order)); in rmqueue_buddy()
2726 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_buddy()
2734 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist() argument
2748 * Scale batch relative to order if batch implies in __rmqueue_pcplist()
2755 batch = max(batch >> order, 2); in __rmqueue_pcplist()
2756 alloced = rmqueue_bulk(zone, order, in __rmqueue_pcplist()
2760 pcp->count += alloced << order; in __rmqueue_pcplist()
2767 pcp->count -= 1 << order; in __rmqueue_pcplist()
2768 } while (check_new_pages(page, order)); in __rmqueue_pcplist()
2775 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
2797 list = &pcp->lists[order_to_pindex(migratetype, order)]; in rmqueue_pcplist()
2798 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
2802 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_pcplist()
2810 * Use pcplists for THP or "cheap" high-order allocations.
2822 struct zone *zone, unsigned int order, in rmqueue() argument
2830 * allocate greater than order-1 page units with __GFP_NOFAIL. in rmqueue()
2832 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); in rmqueue()
2834 if (likely(pcp_allowed_order(order))) { in rmqueue()
2835 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
2841 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, in rmqueue()
2856 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
2858 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
2863 unsigned int order, unsigned int alloc_flags) in __zone_watermark_unusable_free() argument
2865 long unusable_free = (1 << order) - 1; in __zone_watermark_unusable_free()
2885 * Return true if free base pages are above 'mark'. For high-order checks it
2886 * will return true if the order-0 watermark is reached and there is at least
2890 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok() argument
2898 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); in __zone_watermark_ok()
2930 * Check watermarks for an order-0 allocation request. If these in __zone_watermark_ok()
2931 * are not met, then a high-order request also cannot go ahead in __zone_watermark_ok()
2937 /* If this is an order-0 request then the watermark is fine */ in __zone_watermark_ok()
2938 if (!order) in __zone_watermark_ok()
2941 /* For a high-order request, check at least one suitable page is free */ in __zone_watermark_ok()
2942 for (o = order; o < NR_PAGE_ORDERS; o++) { in __zone_watermark_ok()
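The __zone_watermark_ok() hits spell out the two-stage check: free base pages, minus what an order-N request cannot itself use (roughly (1 << order) - 1), must clear the mark plus the lowmem reserve, and a high-order request additionally needs at least one free block of sufficient order. A simplified userspace sketch, ignoring migratetype/highatomic/CMA handling; the toy_free_area array and NR_PAGE_ORDERS value are assumptions for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGE_ORDERS 11   /* orders 0..10, assumed */

struct toy_free_area { unsigned long nr_free; };

static bool zone_watermark_ok_sketch(unsigned int order, long free_pages,
                                     long mark, long lowmem_reserve,
                                     const struct toy_free_area *area)
{
    /* Discount the "change" an order-N request cannot use. */
    free_pages -= (1L << order) - 1;

    if (free_pages <= mark + lowmem_reserve)
        return false;                    /* order-0 watermark not met */

    if (!order)
        return true;                     /* order-0: the watermark is enough */

    /* High-order: at least one free block of order >= requested order. */
    for (unsigned int o = order; o < NR_PAGE_ORDERS; o++)
        if (area[o].nr_free)
            return true;
    return false;
}

int main(void)
{
    struct toy_free_area area[NR_PAGE_ORDERS] = { { .nr_free = 500 } };

    area[2].nr_free = 1;                 /* one order-2 block available */
    printf("order-3 ok? %d\n",
           zone_watermark_ok_sketch(3, 520, 128, 64, area));   /* no block */
    printf("order-2 ok? %d\n",
           zone_watermark_ok_sketch(2, 520, 128, 64, area));
    return 0;
}
```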
2968 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
2971 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_ok()
2975 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast() argument
2984 * Fast check for order-0 only. If this fails then the reserves in zone_watermark_fast()
2987 if (!order) { in zone_watermark_fast()
3000 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_fast()
3005 * Ignore watermark boosting for __GFP_HIGH order-0 allocations in zone_watermark_fast()
3010 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost in zone_watermark_fast()
3013 return __zone_watermark_ok(z, order, mark, highest_zoneidx, in zone_watermark_fast()
3020 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
3028 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, in zone_watermark_ok_safe()
3103 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3173 cond_accept_memory(zone, order); in get_page_from_freelist()
3176 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3181 if (cond_accept_memory(zone, order)) in get_page_from_freelist()
3190 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3203 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3213 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3222 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3225 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3228 * If this is a high-order atomic allocation then check in get_page_from_freelist()
3236 if (cond_accept_memory(zone, order)) in get_page_from_freelist()
3242 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3306 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
3312 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3319 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3326 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
3334 .order = order, in __alloc_pages_may_oom()
3358 ~__GFP_DIRECT_RECLAIM, order, in __alloc_pages_may_oom()
3366 /* The OOM killer will not help higher order allocs */ in __alloc_pages_may_oom()
3367 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
3404 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
3419 /* Try memory compaction for high-order allocations before reclaim */
3421 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3429 if (!order) in __alloc_pages_direct_compact()
3436 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
3453 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
3457 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
3463 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
3480 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
3491 if (!order) in should_compact_retry()
3498 * Compaction was skipped due to a lack of free order-0 in should_compact_retry()
3502 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
3520 if (order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
3532 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? in should_compact_retry()
3541 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); in should_compact_retry()
3546 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3555 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, in should_compact_retry() argument
3563 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
3569 * Let's give them a good hope and keep retrying while the order-0 in should_compact_retry()
3667 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
3680 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
3693 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
3702 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
3707 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
3732 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
3745 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
3752 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) in gfp_to_alloc_flags() argument
3781 if (order > 0) in gfp_to_alloc_flags()
3853 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
3863 * their order will become available due to high fragmentation so in should_reclaim_retry()
3866 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_retry()
3895 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
3897 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
3958 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
3963 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; in __alloc_pages_slowpath()
3989 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); in __alloc_pages_slowpath()
4016 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4022 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4029 * movable high-order allocations, do that as well, as compaction will in __alloc_pages_slowpath()
4037 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
4039 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4055 * order, fail immediately unless the allocator has in __alloc_pages_slowpath()
4061 * bursty high order allocations, in __alloc_pages_slowpath()
4092 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4111 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4127 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4136 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4146 * Do not retry costly high order allocations unless they are in __alloc_pages_slowpath()
4153 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4158 * It doesn't make any sense to retry for the compaction if the order-0 in __alloc_pages_slowpath()
4164 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
4179 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4238 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); in __alloc_pages_slowpath()
4247 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
4252 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
4281 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
4301 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
4473 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, in __alloc_pages() argument
4482 * There are several places where we assume that the order value is sane in __alloc_pages()
4485 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp)) in __alloc_pages()
4498 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, in __alloc_pages()
4509 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in __alloc_pages()
4522 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); in __alloc_pages()
4526 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { in __alloc_pages()
4527 __free_pages(page, order); in __alloc_pages()
4531 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in __alloc_pages()
4532 kmsan_alloc_page(page, order, alloc_gfp); in __alloc_pages()
4538 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, in __folio_alloc() argument
4541 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, in __folio_alloc()
4552 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
4556 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
4572 * @order: The order of the allocation.
4575 * pages. It does not check that the @order passed in matches that of
4589 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
4595 free_the_page(page, order); in __free_pages()
4597 while (order-- > 0) in __free_pages()
4598 free_the_page(page + (1 << order), order); in __free_pages()
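The __free_pages() hits show what happens when the caller's reference was not the last one on the head page of a non-compound higher-order allocation: the head is left to the remaining reference holder and the tail is released as one block of each smaller order. A tiny sketch of the offsets produced by that while (order-- > 0) loop:

```c
/* A 2^order block minus its head page decomposes into one block of
 * each smaller order; offsets are page indices within the block. */
#include <stdio.h>

int main(void)
{
    unsigned int order = 3;   /* an order-3 (8-page) allocation */

    while (order-- > 0)
        printf("free order-%u block at page offset %u\n", order, 1u << order);
    return 0;
}
```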
4602 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
4606 __free_pages(virt_to_page((void *)addr), order); in free_pages()
4615 * within a 0 or higher order page. Multiple fragments within that page
4727 * Frees a page fragment allocated out of either a compound or order 0 page.
4738 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
4746 split_page_owner(page, 1 << order); in make_alloc_exact()
4747 split_page_memcg(page, 1 << order); in make_alloc_exact()
4751 last = page + (1UL << order); in make_alloc_exact()
4775 unsigned int order = get_order(size); in alloc_pages_exact() local
4781 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
4782 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
4800 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
4806 p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
4809 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
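make_alloc_exact()/alloc_pages_exact(), per the hits above, round the request up to a whole order, then split the block and free the pages past the requested size. A back-of-the-envelope sketch of that rounding and trimming; PAGE_SHIFT and get_order_sketch() are userspace stand-ins, not the kernel helpers.

```c
#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for get_order(): smallest order whose block covers size. */
static unsigned int get_order_sketch(unsigned long size)
{
    unsigned int order = 0;

    while ((PAGE_SIZE << order) < size)
        order++;
    return order;
}

int main(void)
{
    unsigned long size = 5 * PAGE_SIZE + 123;   /* needs 6 pages */
    unsigned int order = get_order_sketch(size);
    unsigned long nr_alloc = 1UL << order;
    unsigned long nr_used = (size + PAGE_SIZE - 1) / PAGE_SIZE;

    printf("order %u (%lu pages), keep %lu, free back %lu tail pages\n",
           order, nr_alloc, nr_used, nr_alloc - nr_used);
    return 0;
}
```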
5074 pr_info("Fallback order for Node %d: ", local_node); in build_zonelists()
5239 * needs the percpu allocator in order to allocate its pagesets in build_all_zonelists_init()
5330 * fragmented and becoming unavailable for high-order allocations. in zone_batchsize()
6192 int order; in alloc_contig_range() local
6197 .order = -1, in alloc_contig_range()
6209 * MIGRATE_ISOLATE. Because pageblock and max order pages may in alloc_contig_range()
6258 * page allocator holds, ie. they can be part of higher order in alloc_contig_range()
6266 order = 0; in alloc_contig_range()
6269 if (++order > MAX_ORDER) { in alloc_contig_range()
6273 outer_start &= ~0UL << order; in alloc_contig_range()
6277 order = buddy_order(pfn_to_page(outer_start)); in alloc_contig_range()
6280 * outer_start page could be small order buddy page and in alloc_contig_range()
6285 if (outer_start + (1UL << order) <= start) in alloc_contig_range()
6477 unsigned int order; in __offline_isolated_pages() local
6506 order = buddy_order(page); in __offline_isolated_pages()
6507 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
6508 pfn += (1 << order); in __offline_isolated_pages()
6520 unsigned int order; in is_free_buddy_page() local
6522 for (order = 0; order < NR_PAGE_ORDERS; order++) { in is_free_buddy_page()
6523 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
6526 buddy_order_unsafe(page_head) >= order) in is_free_buddy_page()
6530 return order <= MAX_ORDER; in is_free_buddy_page()
6536 * Break down a higher-order page in sub-pages, and keep our target out of
6577 unsigned int order; in take_page_off_buddy() local
6581 for (order = 0; order < NR_PAGE_ORDERS; order++) { in take_page_off_buddy()
6582 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
6585 if (PageBuddy(page_head) && page_order >= order) { in take_page_off_buddy()
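Both is_free_buddy_page() and take_page_off_buddy() locate the candidate block head for each order as page - (pfn & ((1 << order) - 1)), i.e. the pfn rounded down to that order's alignment, and then test whether a free buddy of at least that order starts there. A one-line demonstration of the rounding; the pfn and order range below are arbitrary.

```c
#include <stdio.h>

int main(void)
{
    unsigned long pfn = 1234567;

    for (unsigned int order = 0; order <= 10; order++)
        printf("order %2u: enclosing block starts at pfn %lu\n",
               order, pfn & ~((1UL << order) - 1));
    return 0;
}
```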
6664 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
6667 phys_addr_t end = start + (PAGE_SIZE << order); in page_contains_unaccepted()
6672 static void accept_page(struct page *page, unsigned int order) in accept_page() argument
6676 accept_memory(start, start + (PAGE_SIZE << order)); in accept_page()
6705 static bool cond_accept_memory(struct zone *zone, unsigned int order) in cond_accept_memory() argument
6726 __zone_watermark_unusable_free(zone, order, 0) - in cond_accept_memory()
6758 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
6763 static void accept_page(struct page *page, unsigned int order) in accept_page() argument
6767 static bool cond_accept_memory(struct zone *zone, unsigned int order) in cond_accept_memory() argument