Lines matching refs: order — cross-reference listing of every line that uses the identifier `order` in the kernel page allocator (mm/page_alloc.c). A short illustrative sketch of the order/buddy-PFN arithmetic follows the listing.
282 static void __free_pages_ok(struct page *page, unsigned int order,
419 static inline void kasan_free_nondeferred_pages(struct page *page, int order) in kasan_free_nondeferred_pages() argument
422 kasan_free_pages(page, order); in kasan_free_nondeferred_pages()
703 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
706 int nr_pages = 1 << order; in prep_compound_page()
717 set_compound_order(page, order); in prep_compound_page()
768 unsigned int order, int migratetype) in set_page_guard() argument
773 if (order >= debug_guardpage_minorder()) in set_page_guard()
778 set_page_private(page, order); in set_page_guard()
780 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
786 unsigned int order, int migratetype) in clear_page_guard() argument
795 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
799 unsigned int order, int migratetype) { return false; } in set_page_guard() argument
801 unsigned int order, int migratetype) {} in clear_page_guard() argument
804 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
806 set_page_private(page, order); in set_buddy_order()
824 unsigned int order) in page_is_buddy() argument
829 if (buddy_order(buddy) != order) in page_is_buddy()
857 int order, int migratetype) in compaction_capture() argument
859 if (!capc || order != capc->cc->order) in compaction_capture()
873 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) in compaction_capture()
888 int order, int migratetype) in compaction_capture() argument
896 unsigned int order, int migratetype) in add_to_free_list() argument
898 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
906 unsigned int order, int migratetype) in add_to_free_list_tail() argument
908 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
920 unsigned int order, int migratetype) in move_to_free_list() argument
922 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
928 unsigned int order) in del_page_from_free_list() argument
937 zone->free_area[order].nr_free--; in del_page_from_free_list()
950 struct page *page, unsigned int order) in buddy_merge_likely() argument
955 if (order >= MAX_ORDER - 2) in buddy_merge_likely()
963 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); in buddy_merge_likely()
967 page_is_buddy(higher_page, higher_buddy, order + 1); in buddy_merge_likely()
996 struct zone *zone, unsigned int order, in __free_one_page() argument
1013 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1015 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
1019 while (order < max_order) { in __free_one_page()
1020 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
1021 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1025 buddy_pfn = __find_buddy_pfn(pfn, order); in __free_one_page()
1030 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
1037 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1039 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1043 order++; in __free_one_page()
1045 if (order < MAX_ORDER - 1) { in __free_one_page()
1057 buddy_pfn = __find_buddy_pfn(pfn, order); in __free_one_page()
1066 max_order = order + 1; in __free_one_page()
1071 set_buddy_order(page, order); in __free_one_page()
1075 else if (is_shuffle_order(order)) in __free_one_page()
1078 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
1081 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1083 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
1087 page_reporting_notify_free(order); in __free_one_page()
1213 unsigned int order, bool check_free) in free_pages_prepare() argument
1219 trace_mm_page_free(page, order); in free_pages_prepare()
1221 if (unlikely(PageHWPoison(page)) && !order) { in free_pages_prepare()
1227 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1228 reset_page_owner(page, order); in free_pages_prepare()
1236 if (unlikely(order)) { in free_pages_prepare()
1240 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1244 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1257 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1265 reset_page_owner(page, order); in free_pages_prepare()
1269 PAGE_SIZE << order); in free_pages_prepare()
1271 PAGE_SIZE << order); in free_pages_prepare()
1274 kernel_init_free_pages(page, 1 << order); in free_pages_prepare()
1276 kernel_poison_pages(page, 1 << order, 0); in free_pages_prepare()
1282 arch_free_page(page, order); in free_pages_prepare()
1285 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
1287 kasan_free_nondeferred_pages(page, order); in free_pages_prepare()
1435 unsigned int order, in free_one_page() argument
1443 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1521 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1528 if (!free_pages_prepare(page, order, true)) in __free_pages_ok()
1533 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
1534 free_one_page(page_zone(page), page, pfn, order, migratetype, in __free_pages_ok()
1539 void __free_pages_core(struct page *page, unsigned int order) in __free_pages_core() argument
1541 unsigned int nr_pages = 1 << order; in __free_pages_core()
1565 __free_pages_ok(page, order, FPI_TO_TAIL); in __free_pages_core()
1613 unsigned int order) in memblock_free_pages() argument
1617 __free_pages_core(page, order); in memblock_free_pages()
2007 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2009 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); in deferred_grow_zone()
2074 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2076 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2262 static bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
2265 for (i = 0; i < (1 << order); i++) { in check_new_pages()
2275 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
2281 arch_alloc_page(page, order); in post_alloc_hook()
2283 kernel_map_pages(page, 1 << order, 1); in post_alloc_hook()
2284 kasan_alloc_pages(page, order); in post_alloc_hook()
2285 kernel_poison_pages(page, 1 << order, 1); in post_alloc_hook()
2286 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
2289 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
2292 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
2295 kernel_init_free_pages(page, 1 << order); in prep_new_page()
2297 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
2298 prep_compound_page(page, order); in prep_new_page()
2317 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2325 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
2331 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
2358 unsigned int order) in __rmqueue_cma_fallback() argument
2360 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2364 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
2378 unsigned int order; in move_freepages() local
2405 order = buddy_order(page); in move_freepages()
2406 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2407 pfn += 1 << order; in move_freepages()
2408 pages_moved += 1 << order; in move_freepages()
2459 static bool can_steal_fallback(unsigned int order, int start_mt) in can_steal_fallback() argument
2468 if (order >= pageblock_order) in can_steal_fallback()
2471 if (order >= pageblock_order / 2 || in can_steal_fallback()
2607 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
2625 if (can_steal_fallback(order, migratetype)) in find_suitable_fallback()
2692 int order; in unreserve_highatomic_pageblock() local
2706 for (order = 0; order < MAX_ORDER; order++) { in unreserve_highatomic_pageblock()
2707 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2767 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2772 int min_order = order; in __rmqueue_fallback()
2807 && current_order > order) in __rmqueue_fallback()
2816 for (current_order = order; current_order < MAX_ORDER; in __rmqueue_fallback()
2837 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
2845 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order, in __rmqueue_with_cma_reuse() argument
2850 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2855 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue_with_cma_reuse()
2859 __rmqueue_fallback(zone, order, migratetype, alloc_flags)) in __rmqueue_with_cma_reuse()
2870 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2876 page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags); in __rmqueue()
2889 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2895 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2898 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2900 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
2906 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
2915 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2923 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2945 -(1 << order)); in rmqueue_bulk()
2954 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
3144 unsigned int order, t; in mark_free_pages() local
3169 for_each_migratetype_order(order, t) { in mark_free_pages()
3171 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3175 for (i = 0; i < (1UL << order); i++) { in mark_free_pages()
3296 void split_page(struct page *page, unsigned int order) in split_page() argument
3303 for (i = 1; i < (1 << order); i++) in split_page()
3305 split_page_owner(page, 1 << order); in split_page()
3306 split_page_memcg(page, 1 << order); in split_page()
3310 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
3328 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3332 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3337 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3343 if (order >= pageblock_order - 1) { in __isolate_free_page()
3344 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
3355 return 1UL << order; in __isolate_free_page()
3367 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
3375 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3458 struct zone *zone, unsigned int order, in rmqueue() argument
3465 if (likely(order == 0)) { in rmqueue()
3483 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); in rmqueue()
3494 if (order > 0 && alloc_flags & ALLOC_HARDER) { in rmqueue()
3495 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3497 trace_mm_page_alloc_zone_locked(page, order, migratetype); in rmqueue()
3500 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue()
3501 } while (page && check_new_pages(page, order)); in rmqueue()
3505 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3508 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue()
3548 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3550 if (order < fail_page_alloc.min_order) in __should_fail_alloc_page()
3560 return should_fail(&fail_page_alloc.attr, 1 << order); in __should_fail_alloc_page()
3588 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3595 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3597 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
3602 unsigned int order, unsigned int alloc_flags) in __zone_watermark_unusable_free() argument
3605 long unusable_free = (1 << order) - 1; in __zone_watermark_unusable_free()
3630 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok() argument
3639 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); in __zone_watermark_ok()
3666 if (!order) in __zone_watermark_ok()
3670 for (o = order; o < MAX_ORDER; o++) { in __zone_watermark_ok()
3694 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
3697 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_ok()
3701 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast() argument
3713 if (!order) { in zone_watermark_fast()
3726 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_fast()
3735 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost in zone_watermark_fast()
3738 return __zone_watermark_ok(z, order, mark, highest_zoneidx, in zone_watermark_fast()
3745 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
3753 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, in zone_watermark_ok_safe()
3829 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3899 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3910 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3923 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3933 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3942 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3945 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3951 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) in get_page_from_freelist()
3952 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3959 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4023 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
4029 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4036 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4043 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
4051 .order = order, in __alloc_pages_may_oom()
4075 ~__GFP_DIRECT_RECLAIM, order, in __alloc_pages_may_oom()
4084 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
4120 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
4137 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4145 if (!order) in __alloc_pages_direct_compact()
4151 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
4165 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
4169 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
4175 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4192 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
4203 if (!order) in should_compact_retry()
4222 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
4244 if (order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4256 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? in should_compact_retry()
4265 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); in should_compact_retry()
4270 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4279 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, in should_compact_retry() argument
4287 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4383 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
4397 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4411 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
4418 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
4423 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4446 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
4457 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4557 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
4570 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_retry()
4604 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4606 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
4681 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
4685 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; in __alloc_pages_slowpath()
4734 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4740 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4755 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
4757 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4802 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4820 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4836 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4845 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4861 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4872 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
4887 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4937 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); in __alloc_pages_slowpath()
4945 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
4954 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
4959 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
4991 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
5014 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, in __alloc_pages_nodemask() argument
5026 if (unlikely(order >= MAX_ORDER)) { in __alloc_pages_nodemask()
5033 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) in __alloc_pages_nodemask()
5043 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
5062 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
5066 unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) { in __alloc_pages_nodemask()
5067 __free_pages(page, order); in __alloc_pages_nodemask()
5071 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
5082 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
5086 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
5099 static inline void free_the_page(struct page *page, unsigned int order) in free_the_page() argument
5101 if (order == 0) /* Via pcp? */ in free_the_page()
5104 __free_pages_ok(page, order, FPI_NONE); in free_the_page()
5107 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
5110 free_the_page(page, order); in __free_pages()
5112 while (order-- > 0) in __free_pages()
5113 free_the_page(page + (1 << order), order); in __free_pages()
5117 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
5121 __free_pages(virt_to_page((void *)addr), order); in free_pages()
5158 int order = get_order(nc->size); in __page_frag_cache_refill() local
5161 unsigned int deta = 1U << (unsigned int)order; in __page_frag_cache_refill()
5163 for (i = 0; i < (1 << order); i++) { in __page_frag_cache_refill()
5284 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
5288 unsigned long alloc_end = addr + (PAGE_SIZE << order); in make_alloc_exact()
5291 split_page(virt_to_page((void *)addr), order); in make_alloc_exact()
5317 unsigned int order = get_order(size); in alloc_pages_exact() local
5323 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
5324 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
5342 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
5348 p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
5351 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
5731 unsigned int order; in show_free_areas() local
5741 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
5742 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5745 nr[order] = area->nr_free; in show_free_areas()
5746 total += nr[order] << order; in show_free_areas()
5748 types[order] = 0; in show_free_areas()
5751 types[order] |= 1 << type; in show_free_areas()
5755 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
5757 nr[order], K(1UL) << order); in show_free_areas()
5758 if (nr[order]) in show_free_areas()
5759 show_migration_types(types[order]); in show_free_areas()
6322 unsigned int order, t; in zone_init_free_lists() local
6323 for_each_migratetype_order(order, t) { in zone_init_free_lists()
6324 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6325 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6956 unsigned int order; in set_pageblock_order() local
6963 order = HUGETLB_PAGE_ORDER; in set_pageblock_order()
6965 order = MAX_ORDER - 1; in set_pageblock_order()
6972 pageblock_order = order; in set_pageblock_order()
8643 unsigned int order; in alloc_contig_range() local
8648 .order = -1, in alloc_contig_range()
8721 order = 0; in alloc_contig_range()
8724 if (++order >= MAX_ORDER) { in alloc_contig_range()
8728 outer_start &= ~0UL << order; in alloc_contig_range()
8732 order = buddy_order(pfn_to_page(outer_start)); in alloc_contig_range()
8740 if (outer_start + (1UL << order) <= start) in alloc_contig_range()
8928 unsigned int order; in __offline_isolated_pages() local
8957 order = buddy_order(page); in __offline_isolated_pages()
8958 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
8959 pfn += (1 << order); in __offline_isolated_pages()
8970 unsigned int order; in is_free_buddy_page() local
8973 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
8974 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
8976 if (PageBuddy(page_head) && buddy_order(page_head) >= order) in is_free_buddy_page()
8981 return order < MAX_ORDER; in is_free_buddy_page()
9027 unsigned int order; in take_page_off_buddy() local
9031 for (order = 0; order < MAX_ORDER; order++) { in take_page_off_buddy()
9032 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
9035 if (PageBuddy(page_head) && page_order >= order) { in take_page_off_buddy()
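The listing above keeps returning to the same order arithmetic: an order-n block covers 1 << order pages (prep_compound_page(), rmqueue_bulk()), the buddy of an order-n block is the block whose PFN differs only in bit n (__find_buddy_pfn(), used throughout __free_one_page()), and the merged order n+1 block starts at buddy_pfn & pfn. The user-space sketch below only illustrates that arithmetic; it is not kernel code, and the helper names and sample PFN are made up for the example.

/*
 * User-space sketch (not kernel code) of the order arithmetic the
 * listing revolves around. find_buddy_pfn() mirrors the kernel's
 * __find_buddy_pfn(); everything else is illustrative.
 */
#include <stdio.h>

/* An order-n block covers 2^n contiguous pages. */
static unsigned long block_pages(unsigned int order)
{
	return 1UL << order;
}

/*
 * Buddy of an order-n block: flip bit n of the PFN, i.e. pick the
 * other half of the enclosing order n+1 block.
 */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x1000;	/* hypothetical, suitably aligned PFN */
	unsigned int order;

	for (order = 0; order < 4; order++) {
		unsigned long buddy_pfn = find_buddy_pfn(pfn, order);
		/* Start of the merged order+1 block, as in __free_one_page(). */
		unsigned long combined_pfn = buddy_pfn & pfn;

		printf("order %u: %lu pages, buddy of pfn 0x%lx is 0x%lx, merged block at 0x%lx\n",
		       order, block_pages(order), pfn, buddy_pfn, combined_pfn);
	}
	return 0;
}

This is roughly why __free_one_page() can coalesce by walking order upward: each merge step needs only the XOR/AND above plus the page_is_buddy() check and the free-list bookkeeping shown in the listing.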