Lines matching refs: order (cross-reference hits for the identifier 'order' in the kernel page allocator, mm/page_alloc.c)
193 static void __free_pages_ok(struct page *page, unsigned int order);
615 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
618 int nr_pages = 1 << order; in prep_compound_page()
621 set_compound_order(page, order); in prep_compound_page()
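
Throughout this listing, 'order' is the log2 of an allocation's size in pages: an order-n block covers 1 << n contiguous pages, that is PAGE_SIZE << n bytes, which is why expressions like 1 << order and PAGE_SIZE << order recur in almost every hit below. The standalone sketch that follows only illustrates that arithmetic; the helper names are made up and are not kernel functions.

    #include <stdio.h>

    #define PAGE_SHIFT 12                     /* assuming 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Hypothetical helpers (not in the kernel), showing the order arithmetic. */
    static unsigned long order_to_pages(unsigned int order)
    {
        return 1UL << order;                  /* e.g. order 3 -> 8 pages */
    }

    static unsigned long order_to_bytes(unsigned int order)
    {
        return PAGE_SIZE << order;            /* e.g. order 3 -> 32 KiB */
    }

    int main(void)
    {
        for (unsigned int order = 0; order <= 10; order++)
            printf("order %2u = %4lu pages = %8lu bytes\n",
                   order, order_to_pages(order), order_to_bytes(order));
        return 0;
    }
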
690 unsigned int order, int migratetype) in set_page_guard() argument
697 if (order >= debug_guardpage_minorder()) in set_page_guard()
707 set_page_private(page, order); in set_page_guard()
709 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
715 unsigned int order, int migratetype) in clear_page_guard() argument
730 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
735 unsigned int order, int migratetype) { return false; } in set_page_guard() argument
737 unsigned int order, int migratetype) {} in clear_page_guard() argument
740 static inline void set_page_order(struct page *page, unsigned int order) in set_page_order() argument
742 set_page_private(page, order); in set_page_order()
768 unsigned int order) in page_is_buddy() argument
770 if (page_is_guard(buddy) && page_order(buddy) == order) { in page_is_buddy()
779 if (PageBuddy(buddy) && page_order(buddy) == order) { in page_is_buddy()
822 struct zone *zone, unsigned int order, in __free_one_page() argument
837 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
839 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
843 while (order < max_order - 1) { in __free_one_page()
844 buddy_pfn = __find_buddy_pfn(pfn, order); in __free_one_page()
849 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
856 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
859 zone->free_area[order].nr_free--; in __free_one_page()
865 order++; in __free_one_page()
879 buddy_pfn = __find_buddy_pfn(pfn, order); in __free_one_page()
893 set_page_order(page, order); in __free_one_page()
903 if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) { in __free_one_page()
907 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); in __free_one_page()
910 page_is_buddy(higher_page, higher_buddy, order + 1)) { in __free_one_page()
912 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
917 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
919 zone->free_area[order].nr_free++; in __free_one_page()
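
The __free_one_page() hits above are the buddy-merge loop: the buddy of a block at a given pfn and order is found by flipping bit 'order' of the PFN (__find_buddy_pfn), and merging repeats at the next higher order for as long as that buddy is itself a free block of the same order (page_is_buddy). The standalone sketch below models only the PFN arithmetic; the function names and the worked example are mine, not the kernel's.

    #include <assert.h>
    #include <stdio.h>

    /* Buddy of the block at 'pfn': flip bit 'order' of the page frame number. */
    static unsigned long buddy_of(unsigned long pfn, unsigned int order)
    {
        return pfn ^ (1UL << order);
    }

    /* Start of the merged block one order up: clear bit 'order'. */
    static unsigned long merged_pfn(unsigned long pfn, unsigned int order)
    {
        return pfn & ~(1UL << order);
    }

    int main(void)
    {
        /* Freeing the order-0 page at PFN 5: its buddy is PFN 4; if PFN 4 is
         * free too, they merge into an order-1 block at PFN 4, whose order-1
         * buddy is the block at PFN 6, and so on up the orders. */
        assert(buddy_of(5, 0) == 4);
        assert(merged_pfn(5, 0) == 4);
        assert(buddy_of(4, 1) == 6);
        printf("buddy arithmetic checks out\n");
        return 0;
    }
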
1030 unsigned int order, bool check_free) in free_pages_prepare() argument
1036 trace_mm_page_free(page, order); in free_pages_prepare()
1042 if (unlikely(order)) { in free_pages_prepare()
1046 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1050 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1063 memcg_kmem_uncharge(page, order); in free_pages_prepare()
1071 reset_page_owner(page, order); in free_pages_prepare()
1075 PAGE_SIZE << order); in free_pages_prepare()
1077 PAGE_SIZE << order); in free_pages_prepare()
1079 arch_free_page(page, order); in free_pages_prepare()
1080 kernel_poison_pages(page, 1 << order, 0); in free_pages_prepare()
1081 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
1082 kasan_free_pages(page, order); in free_pages_prepare()
1178 unsigned int order, in free_one_page() argument
1186 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
1263 static void __free_pages_ok(struct page *page, unsigned int order) in __free_pages_ok() argument
1269 if (!free_pages_prepare(page, order, true)) in __free_pages_ok()
1274 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
1275 free_one_page(page_zone(page), page, pfn, order, migratetype); in __free_pages_ok()
1279 static void __init __free_pages_boot_core(struct page *page, unsigned int order) in __free_pages_boot_core() argument
1281 unsigned int nr_pages = 1 << order; in __free_pages_boot_core()
1296 __free_pages(page, order); in __free_pages_boot_core()
1354 unsigned int order) in __free_pages_bootmem() argument
1358 return __free_pages_boot_core(page, order); in __free_pages_bootmem()
1759 static bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
1762 for (i = 0; i < (1 << order); i++) { in check_new_pages()
1772 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
1778 arch_alloc_page(page, order); in post_alloc_hook()
1779 kernel_map_pages(page, 1 << order, 1); in post_alloc_hook()
1780 kasan_alloc_pages(page, order); in post_alloc_hook()
1781 kernel_poison_pages(page, 1 << order, 1); in post_alloc_hook()
1782 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
1785 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1790 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
1793 for (i = 0; i < (1 << order); i++) in prep_new_page()
1796 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
1797 prep_compound_page(page, order); in prep_new_page()
1816 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1824 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
1833 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
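
__rmqueue_smallest() scans free_area[] from the requested order upward and, when it has to take a larger block, expand() splits it back down: each halving puts the upper half on the free list of the next lower order until only a block of the requested order is left. Below is a toy, self-contained model of that split; the real expand() instead links each half into zone->free_area[high].free_list and tags it with set_page_order().

    #include <stdio.h>

    /* Toy expand(): split a free block of 2^high pages down to 2^low pages,
     * "returning" (here: printing) each split-off upper half. */
    static void expand_sketch(unsigned long base_pfn, unsigned int low, unsigned int high)
    {
        unsigned long size = 1UL << high;

        while (high > low) {
            high--;
            size >>= 1;
            printf("free list gets pfn %lu as an order-%u block\n",
                   base_pfn + size, high);
        }
        printf("caller gets pfn %lu as an order-%u block\n", base_pfn, low);
    }

    int main(void)
    {
        /* An order-2 request satisfied from an order-4 free block at PFN 0:
         * the order-3 half at PFN 8 and the order-2 half at PFN 4 go back. */
        expand_sketch(0, 2, 4);
        return 0;
    }
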
1860 unsigned int order) in __rmqueue_cma_fallback() argument
1862 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1866 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
1879 unsigned int order; in move_freepages() local
1919 order = page_order(page); in move_freepages()
1921 &zone->free_area[order].free_list[migratetype]); in move_freepages()
1922 page += 1 << order; in move_freepages()
1923 pages_moved += 1 << order; in move_freepages()
1974 static bool can_steal_fallback(unsigned int order, int start_mt) in can_steal_fallback() argument
1983 if (order >= pageblock_order) in can_steal_fallback()
1986 if (order >= pageblock_order / 2 || in can_steal_fallback()
2079 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
2097 if (can_steal_fallback(order, migratetype)) in find_suitable_fallback()
2164 int order; in unreserve_highatomic_pageblock() local
2178 for (order = 0; order < MAX_ORDER; order++) { in unreserve_highatomic_pageblock()
2179 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2241 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) in __rmqueue_fallback() argument
2254 for (current_order = MAX_ORDER - 1; current_order >= order; in __rmqueue_fallback()
2271 && current_order > order) in __rmqueue_fallback()
2280 for (current_order = order; current_order < MAX_ORDER; in __rmqueue_fallback()
2301 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
2312 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue() argument
2318 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2321 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2323 if (!page && __rmqueue_fallback(zone, order, migratetype)) in __rmqueue()
2327 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
2336 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2344 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk()
2368 -(1 << order)); in rmqueue_bulk()
2377 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2565 unsigned int order, t; in mark_free_pages() local
2590 for_each_migratetype_order(order, t) { in mark_free_pages()
2592 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
2596 for (i = 0; i < (1UL << order); i++) { in mark_free_pages()
2681 void split_page(struct page *page, unsigned int order) in split_page() argument
2688 for (i = 1; i < (1 << order); i++) in split_page()
2690 split_page_owner(page, order); in split_page()
2694 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
2712 watermark = min_wmark_pages(zone) + (1UL << order); in __isolate_free_page()
2716 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
2721 zone->free_area[order].nr_free--; in __isolate_free_page()
2728 if (order >= pageblock_order - 1) { in __isolate_free_page()
2729 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
2740 return 1UL << order; in __isolate_free_page()
2796 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
2810 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_pcplist()
2822 struct zone *zone, unsigned int order, in rmqueue() argument
2829 if (likely(order == 0)) { in rmqueue()
2830 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
2839 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); in rmqueue()
2845 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
2847 trace_mm_page_alloc_zone_locked(page, order, migratetype); in rmqueue()
2850 page = __rmqueue(zone, order, migratetype); in rmqueue()
2851 } while (page && check_new_pages(page, order)); in rmqueue()
2855 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
2858 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue()
2892 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
2894 if (order < fail_page_alloc.min_order) in should_fail_alloc_page()
2904 return should_fail(&fail_page_alloc.attr, 1 << order); in should_fail_alloc_page()
2942 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
2955 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok() argument
2964 free_pages -= (1 << order) - 1; in __zone_watermark_ok()
3005 if (!order) in __zone_watermark_ok()
3009 for (o = order; o < MAX_ORDER; o++) { in __zone_watermark_ok()
3034 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
3037 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok()
3041 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast() argument
3060 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx]) in zone_watermark_fast()
3063 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_fast()
3067 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
3075 return __zone_watermark_ok(z, order, mark, classzone_idx, 0, in zone_watermark_ok_safe()
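
For order-0 requests __zone_watermark_ok() is essentially a free-page count compared against the watermark plus lowmem reserves (the early return when order is 0), but for order > 0 it also walks free_area[order..MAX_ORDER-1] to make sure a block of at least the requested order exists at all, since plenty of free order-0 pages are no help to a high-order allocation. A minimal sketch of that second check with made-up types; the real loop additionally looks at per-migratetype lists and the ALLOC_HARDER/CMA cases.

    #include <stdbool.h>

    #define MAX_ORDER 11                      /* typical default */

    struct toy_free_area {
        unsigned long nr_free;                /* free blocks of this order */
    };

    /* After the raw free-page count passes, a high-order request still needs
     * at least one free block of order >= 'order' somewhere in the zone. */
    static bool high_order_block_available(const struct toy_free_area area[MAX_ORDER],
                                           unsigned int order)
    {
        for (unsigned int o = order; o < MAX_ORDER; o++)
            if (area[o].nr_free)
                return true;
        return false;
    }

    int main(void)
    {
        struct toy_free_area area[MAX_ORDER] = { [3] = { .nr_free = 1 } };

        /* An order-2 request is fine: an order-3 block can be split for it. */
        return high_order_block_available(area, 2) ? 0 : 1;
    }
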
3097 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3147 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3160 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3170 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3179 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3182 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3188 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) in get_page_from_freelist()
3189 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3266 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
3272 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3279 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3286 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
3294 .order = order, in __alloc_pages_may_oom()
3318 ~__GFP_DIRECT_RECLAIM, order, in __alloc_pages_may_oom()
3327 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
3365 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
3382 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3390 if (!order) in __alloc_pages_direct_compact()
3396 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
3411 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
3417 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
3434 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
3445 if (!order) in should_compact_retry()
3466 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
3478 if (order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
3490 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? in should_compact_retry()
3499 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); in should_compact_retry()
3504 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3513 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, in should_compact_retry() argument
3521 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
3583 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
3601 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
3616 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
3623 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
3628 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
3645 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) in wake_all_kswapds() argument
3654 wakeup_kswapd(zone, order, ac->high_zoneidx); in wake_all_kswapds()
3750 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
3762 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_retry()
3796 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
3798 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
3874 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
3878 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; in __alloc_pages_slowpath()
3922 wake_all_kswapds(order, ac); in __alloc_pages_slowpath()
3928 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
3943 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
3945 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
3980 wake_all_kswapds(order, ac); in __alloc_pages_slowpath()
3997 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4010 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4016 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4032 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4043 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
4054 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4100 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); in __alloc_pages_slowpath()
4108 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
4117 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
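
In the slowpath, costly_order (order > PAGE_ALLOC_COSTLY_ORDER) controls how persistent the allocator is: the OOM path and the retry logic both back off for costly orders (see the __alloc_pages_may_oom and should_compact_retry hits above), because very large contiguous blocks may simply never become available. PAGE_ALLOC_COSTLY_ORDER is 3, so costly means more than 8 contiguous pages; a tiny worked illustration, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                     /* 4 KiB pages assumed */
    #define PAGE_ALLOC_COSTLY_ORDER 3         /* kernel threshold: > 8 pages is costly */

    int main(void)
    {
        for (unsigned int order = 0; order <= 4; order++)
            printf("order %u (%3lu KiB): %s\n", order,
                   (1UL << (order + PAGE_SHIFT)) >> 10,
                   order > PAGE_ALLOC_COSTLY_ORDER ? "costly" : "not costly");
        return 0;
    }
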
4122 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
4145 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
4156 unsigned int order, struct alloc_context *ac) in finalise_ac() argument
4174 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, in __alloc_pages_nodemask() argument
4186 if (unlikely(order >= MAX_ORDER)) { in __alloc_pages_nodemask()
4193 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) in __alloc_pages_nodemask()
4196 finalise_ac(gfp_mask, order, &ac); in __alloc_pages_nodemask()
4199 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
4219 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
4223 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) { in __alloc_pages_nodemask()
4224 __free_pages(page, order); in __alloc_pages_nodemask()
4228 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
4237 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
4247 page = alloc_pages(gfp_mask, order); in __get_free_pages()
4260 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
4263 if (order == 0) in __free_pages()
4266 __free_pages_ok(page, order); in __free_pages()
4272 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
4276 __free_pages(virt_to_page((void *)addr), order); in free_pages()
4319 unsigned int order = compound_order(page); in __page_frag_cache_drain() local
4321 if (order == 0) in __page_frag_cache_drain()
4324 __free_pages_ok(page, order); in __page_frag_cache_drain()
4395 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
4399 unsigned long alloc_end = addr + (PAGE_SIZE << order); in make_alloc_exact()
4402 split_page(virt_to_page((void *)addr), order); in make_alloc_exact()
4426 unsigned int order = get_order(size); in alloc_pages_exact() local
4429 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
4430 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
4446 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
4447 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
4450 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
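
alloc_pages_exact() has to round the request up to a whole power-of-two order, so make_alloc_exact() splits the resulting block (split_page above) and frees every page past PAGE_ALIGN(size) again. The sketch below reproduces only the order and waste arithmetic; get_order_sketch is a stand-in for the kernel's get_order(), and the example size is made up.

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Smallest order whose block covers 'size' bytes (cf. the kernel's get_order()). */
    static unsigned int get_order_sketch(unsigned long size)
    {
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long size = 5 * PAGE_SIZE;            /* 20 KiB request */
        unsigned int order = get_order_sketch(size);   /* -> order 3, a 32 KiB block */
        unsigned long returned = (PAGE_SIZE << order) - PAGE_ALIGN(size);

        /* alloc_pages_exact() would split_page() the order-3 block and hand
         * the trailing 'returned' bytes (3 pages here) back to the allocator. */
        printf("order %u, %lu bytes freed back\n", order, returned);
        return 0;
    }
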
4836 unsigned int order; in show_free_areas() local
4846 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
4847 struct free_area *area = &zone->free_area[order]; in show_free_areas()
4850 nr[order] = area->nr_free; in show_free_areas()
4851 total += nr[order] << order; in show_free_areas()
4853 types[order] = 0; in show_free_areas()
4856 types[order] |= 1 << type; in show_free_areas()
4860 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
4862 nr[order], K(1UL) << order); in show_free_areas()
4863 if (nr[order]) in show_free_areas()
4864 show_migration_types(types[order]); in show_free_areas()
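
show_free_areas() reports each zone's free memory as per-order block counts, multiplying nr[order] by the block size in kB (K(1UL) << order) and accumulating total += nr[order] << order pages, the familiar N*4kB N*8kB ... lines of show_mem()/OOM reports. A self-contained rendering of that arithmetic with made-up counts:

    #include <stdio.h>

    #define MAX_ORDER 11
    #define K(pages) ((pages) << 2)           /* pages -> kB, assuming 4 KiB pages */

    int main(void)
    {
        /* Made-up free block counts for one zone, orders 0..3, rest empty. */
        unsigned long nr[MAX_ORDER] = { 10, 4, 2, 1 };
        unsigned long total = 0;

        for (unsigned int order = 0; order < MAX_ORDER; order++) {
            total += nr[order] << order;
            if (nr[order])
                printf("%lu*%lukB ", nr[order], K(1UL) << order);
        }
        printf("= %lukB\n", K(total));        /* 10*4kB 4*8kB 2*16kB 1*32kB = 136kB */
        return 0;
    }
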
5376 unsigned int order, t; in zone_init_free_lists() local
5377 for_each_migratetype_order(order, t) { in zone_init_free_lists()
5378 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
5379 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
5978 unsigned int order; in set_pageblock_order() local
5985 order = HUGETLB_PAGE_ORDER; in set_pageblock_order()
5987 order = MAX_ORDER - 1; in set_pageblock_order()
5994 pageblock_order = order; in set_pageblock_order()
7547 unsigned int order; in alloc_contig_range() local
7552 .order = -1, in alloc_contig_range()
7625 order = 0; in alloc_contig_range()
7628 if (++order >= MAX_ORDER) { in alloc_contig_range()
7632 outer_start &= ~0UL << order; in alloc_contig_range()
7636 order = page_order(pfn_to_page(outer_start)); in alloc_contig_range()
7644 if (outer_start + (1UL << order) <= start) in alloc_contig_range()
7734 unsigned int order, i; in __offline_isolated_pages() local
7765 order = page_order(page); in __offline_isolated_pages()
7768 pfn, 1 << order, end_pfn); in __offline_isolated_pages()
7772 zone->free_area[order].nr_free--; in __offline_isolated_pages()
7773 for (i = 0; i < (1 << order); i++) in __offline_isolated_pages()
7775 pfn += (1 << order); in __offline_isolated_pages()
7786 unsigned int order; in is_free_buddy_page() local
7789 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
7790 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
7792 if (PageBuddy(page_head) && page_order(page_head) >= order) in is_free_buddy_page()
7797 return order < MAX_ORDER; in is_free_buddy_page()
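
is_free_buddy_page() walks every order and aligns the page's PFN down to the start of the block that would contain it at that order (page - (pfn & ((1 << order) - 1))), then checks whether that head is a free buddy of at least that order. The sketch below shows the alignment arithmetic and the MAX_ORDER bound that caps any single buddy allocation; with the default MAX_ORDER of 11 and 4 KiB pages the largest block is order 10, 1024 pages, 4 MiB.

    #include <assert.h>

    #define MAX_ORDER 11                      /* typical default; largest block is order 10 */

    /* PFN of the order-'order' block that would contain 'pfn'. */
    static unsigned long block_start_pfn(unsigned long pfn, unsigned int order)
    {
        return pfn & ~((1UL << order) - 1);
    }

    int main(void)
    {
        /* PFN 1029 lies inside the order-3 block that starts at PFN 1024. */
        assert(block_start_pfn(1029, 3) == 1024);

        /* Largest single buddy block: order MAX_ORDER - 1 = 10 -> 1024 pages. */
        assert((1UL << (MAX_ORDER - 1)) == 1024);
        return 0;
    }
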