Lines matching refs:order (every line that references the identifier 'order' in the kernel page allocator)
166 static void __free_pages_ok(struct page *page, unsigned int order);
378 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
381 int nr_pages = 1 << order; in prep_compound_page()
384 set_compound_order(page, order); in prep_compound_page()
397 static int destroy_compound_page(struct page *page, unsigned long order) in destroy_compound_page() argument
400 int nr_pages = 1 << order; in destroy_compound_page()
403 if (unlikely(compound_order(page) != order)) { in destroy_compound_page()
426 static inline void prep_zero_page(struct page *page, unsigned int order, in prep_zero_page() argument
436 for (i = 0; i < (1 << order); i++) in prep_zero_page()
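The prep_compound_page(), destroy_compound_page() and prep_zero_page() lines above all lean on the convention that runs through the rest of this listing: 'order' is the base-2 logarithm of the allocation size in pages, so an order-n block covers 1 << n pages, or PAGE_SIZE << n bytes. A throwaway sketch of that arithmetic, assuming 4 KiB pages (PAGE_SIZE is really architecture-dependent) and the default MAX_ORDER of 11, i.e. orders 0 through 10:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumed; the real value depends on the architecture */

    int main(void)
    {
        unsigned int order;

        /* Orders 0..10, the range covered by the default MAX_ORDER of 11. */
        for (order = 0; order < 11; order++)
            printf("order %2u: %4lu pages, %6lu KiB\n",
                   order, 1UL << order, (PAGE_SIZE << order) / 1024);
        return 0;
    }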
471 static inline void set_page_order(struct page *page, unsigned int order) in set_page_order() argument
473 set_page_private(page, order); in set_page_order()
499 unsigned int order) in page_is_buddy() argument
504 if (page_is_guard(buddy) && page_order(buddy) == order) { in page_is_buddy()
513 if (PageBuddy(buddy) && page_order(buddy) == order) { in page_is_buddy()
556 struct zone *zone, unsigned int order, in __free_one_page() argument
570 if (unlikely(destroy_compound_page(page, order))) in __free_one_page()
575 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
579 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); in __free_one_page()
583 while (order < max_order - 1) { in __free_one_page()
584 buddy_idx = __find_buddy_index(page_idx, order); in __free_one_page()
586 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
596 __mod_zone_freepage_state(zone, 1 << order, in __free_one_page()
601 zone->free_area[order].nr_free--; in __free_one_page()
607 order++; in __free_one_page()
621 buddy_idx = __find_buddy_index(page_idx, order); in __free_one_page()
635 set_page_order(page, order); in __free_one_page()
645 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { in __free_one_page()
649 buddy_idx = __find_buddy_index(combined_idx, order + 1); in __free_one_page()
651 if (page_is_buddy(higher_page, higher_buddy, order + 1)) { in __free_one_page()
653 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
658 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
660 zone->free_area[order].nr_free++; in __free_one_page()
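The __free_one_page() lines above are the merging half of the buddy allocator: at each step the buddy of a 2^order block is located by flipping bit 'order' of the block's page index (that is what the __find_buddy_index() calls compute), and the merged block starts at the bitwise AND of the two indices. A minimal userspace sketch of just that index arithmetic; the helper name mirrors the kernel's, but the program is only illustrative:

    #include <stdio.h>

    /* Same arithmetic as the kernel's __find_buddy_index(): flip bit 'order'. */
    static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
    {
        return page_idx ^ (1UL << order);
    }

    int main(void)
    {
        unsigned long page_idx = 8;     /* index of a free order-3 block */
        unsigned int order;

        for (order = 3; order < 6; order++) {
            unsigned long buddy_idx = find_buddy_index(page_idx, order);
            unsigned long combined_idx = buddy_idx & page_idx;

            printf("order %u: block %lu, buddy %lu -> merged block starts at %lu\n",
                   order, page_idx, buddy_idx, combined_idx);
            page_idx = combined_idx;    /* keep merging upward */
        }
        return 0;
    }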
756 unsigned int order, in free_one_page() argument
769 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
773 static bool free_pages_prepare(struct page *page, unsigned int order) in free_pages_prepare() argument
778 trace_mm_page_free(page, order); in free_pages_prepare()
779 kmemcheck_free_shadow(page, order); in free_pages_prepare()
783 for (i = 0; i < (1 << order); i++) in free_pages_prepare()
790 PAGE_SIZE << order); in free_pages_prepare()
792 PAGE_SIZE << order); in free_pages_prepare()
794 arch_free_page(page, order); in free_pages_prepare()
795 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
800 static void __free_pages_ok(struct page *page, unsigned int order) in __free_pages_ok() argument
806 if (!free_pages_prepare(page, order)) in __free_pages_ok()
811 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
813 free_one_page(page_zone(page), page, pfn, order, migratetype); in __free_pages_ok()
818 unsigned int order) in __free_pages_bootmem() argument
820 unsigned int nr_pages = 1 << order; in __free_pages_bootmem()
835 __free_pages(page, order); in __free_pages_bootmem()
945 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags) in prep_new_page() argument
949 for (i = 0; i < (1 << order); i++) { in prep_new_page()
958 arch_alloc_page(page, order); in prep_new_page()
959 kernel_map_pages(page, 1 << order, 1); in prep_new_page()
962 prep_zero_page(page, order, gfp_flags); in prep_new_page()
964 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
965 prep_compound_page(page, order); in prep_new_page()
975 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
983 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
993 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
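__rmqueue_smallest() scans the free lists from the requested order upward and, when it has to take a larger block, calls expand() to hand the unused halves back, one per intermediate order. A rough userspace model of that halving loop, printing the bookkeeping instead of touching real free lists (the function and variable names here are mine, not the kernel's):

    #include <stdio.h>

    /* Model of expand(): split a 2^high block until a 2^low piece remains,
     * returning each unused upper half to the free list of its order. */
    static void expand_model(unsigned long base, unsigned int high, unsigned int low)
    {
        unsigned long size = 1UL << high;

        while (high > low) {
            high--;
            size >>= 1;
            printf("put pages [%lu..%lu] back on the order-%u free list\n",
                   base + size, base + 2 * size - 1, high);
        }
        printf("hand out pages [%lu..%lu] (order %u)\n", base, base + size - 1, low);
    }

    int main(void)
    {
        expand_model(0, 4, 1);  /* a 16-page block satisfying an order-1 request */
        return 0;
    }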
1031 unsigned int order; in move_freepages() local
1059 order = page_order(page); in move_freepages()
1061 &zone->free_area[order].free_list[migratetype]); in move_freepages()
1063 page += 1 << order; in move_freepages()
1064 pages_moved += 1 << order; in move_freepages()
1145 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) in __rmqueue_fallback() argument
1153 current_order >= order && current_order <= MAX_ORDER-1; in __rmqueue_fallback()
1190 expand(zone, page, order, current_order, area, in __rmqueue_fallback()
1203 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
1217 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue() argument
1223 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
1226 page = __rmqueue_fallback(zone, order, migratetype); in __rmqueue()
1239 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
1248 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
1256 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk()
1276 -(1 << order)); in rmqueue_bulk()
1278 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
1394 unsigned int order, t; in mark_free_pages() local
1411 for_each_migratetype_order(order, t) { in mark_free_pages()
1412 list_for_each(curr, &zone->free_area[order].free_list[t]) { in mark_free_pages()
1416 for (i = 0; i < (1UL << order); i++) in mark_free_pages()
1496 void split_page(struct page *page, unsigned int order) in split_page() argument
1509 split_page(virt_to_page(page[0].shadow), order); in split_page()
1512 for (i = 1; i < (1 << order); i++) in split_page()
1517 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
1530 watermark = low_wmark_pages(zone) + (1 << order); in __isolate_free_page()
1534 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
1539 zone->free_area[order].nr_free--; in __isolate_free_page()
1543 if (order >= pageblock_order - 1) { in __isolate_free_page()
1544 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
1553 return 1UL << order; in __isolate_free_page()
1568 unsigned int order; in split_free_page() local
1571 order = page_order(page); in split_free_page()
1573 nr_pages = __isolate_free_page(page, order); in split_free_page()
1579 split_page(page, order); in split_free_page()
1590 struct zone *zone, unsigned int order, in buffered_rmqueue() argument
1598 if (likely(order == 0)) { in buffered_rmqueue()
1632 WARN_ON_ONCE(order > 1); in buffered_rmqueue()
1635 page = __rmqueue(zone, order, migratetype); in buffered_rmqueue()
1639 __mod_zone_freepage_state(zone, -(1 << order), in buffered_rmqueue()
1643 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); in buffered_rmqueue()
1648 __count_zone_vm_events(PGALLOC, zone, 1 << order); in buffered_rmqueue()
1653 if (prep_new_page(page, order, gfp_flags)) in buffered_rmqueue()
1683 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1685 if (order < fail_page_alloc.min_order) in should_fail_alloc_page()
1694 return should_fail(&fail_page_alloc.attr, 1 << order); in should_fail_alloc_page()
1732 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1743 static bool __zone_watermark_ok(struct zone *z, unsigned int order, in __zone_watermark_ok() argument
1752 free_pages -= (1 << order) - 1; in __zone_watermark_ok()
1765 for (o = 0; o < order; o++) { in __zone_watermark_ok()
1778 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
1781 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok()
1785 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
1793 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok_safe()
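The __zone_watermark_ok() lines show why the check takes an order at all: it is not enough for the zone to hold enough free pages in total, they must also sit in blocks large enough to satisfy the request. The test therefore deducts the pages the request would consume, then walks every order below it, discounting blocks that are too small and halving the required margin each step. A hedged standalone sketch of that loop, with a made-up nr_free[] array standing in for zone->free_area[] and the lowmem-reserve, CMA and ALLOC_HIGH adjustments left out:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER 11

    /* Illustrative order-aware watermark test; nr_free[o] plays the role of
     * zone->free_area[o].nr_free. */
    static bool watermark_ok(unsigned int order, unsigned long mark,
                             unsigned long free_pages,
                             const unsigned long nr_free[MAX_ORDER])
    {
        unsigned long min = mark;
        unsigned int o;

        free_pages -= (1UL << order) - 1;       /* pages the request will take */
        if (free_pages <= min)
            return false;

        for (o = 0; o < order; o++) {
            /* Blocks smaller than the request cannot satisfy it... */
            free_pages -= nr_free[o] << o;
            /* ...and the margin we insist on shrinks with each order. */
            min >>= 1;
            if (free_pages <= min)
                return false;
        }
        return true;
    }

    int main(void)
    {
        unsigned long nr_free[MAX_ORDER] = { 512, 128, 16, 2, 0 };
        unsigned long total = 512 + 128 * 2 + 16 * 4 + 2 * 8;

        /* Plenty of order-0 pages, but too few large blocks for an order-3 request. */
        printf("order-0 ok: %d\n", watermark_ok(0, 256, total, nr_free));
        printf("order-3 ok: %d\n", watermark_ok(3, 256, total, nr_free));
        return 0;
    }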
1975 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, in get_page_from_freelist() argument
2052 if (!zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2085 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
2095 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2117 page = buffered_rmqueue(preferred_zone, zone, order, in get_page_from_freelist()
2186 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) in warn_alloc_failed() argument
2221 current->comm, order, gfp_mask); in warn_alloc_failed()
2229 should_alloc_retry(gfp_t gfp_mask, unsigned int order, in should_alloc_retry() argument
2254 if (order <= PAGE_ALLOC_COSTLY_ORDER) in should_alloc_retry()
2264 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) in should_alloc_retry()
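should_alloc_retry() treats order as a cost estimate: requests at or below PAGE_ALLOC_COSTLY_ORDER keep being retried, while larger ones (with __GFP_REPEAT) stop once roughly 1 << order pages have been reclaimed. A simplified model of that decision; PAGE_ALLOC_COSTLY_ORDER is 3 in current kernels, and the gfp-flag special cases such as __GFP_NORETRY and __GFP_NOFAIL are deliberately omitted here:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3   /* kernel value; assumed here */

    /* Illustrative retry policy: cheap orders retry, costly orders only retry
     * while reclaim has not yet freed an order's worth of pages. */
    static bool should_retry(unsigned int order, unsigned long pages_reclaimed)
    {
        if (order <= PAGE_ALLOC_COSTLY_ORDER)
            return true;
        return pages_reclaimed < (1UL << order);
    }

    int main(void)
    {
        printf("order 2 after 0 pages reclaimed:  %d\n", should_retry(2, 0));
        printf("order 5 after 10 pages reclaimed: %d\n", should_retry(5, 10));
        printf("order 5 after 40 pages reclaimed: %d\n", should_retry(5, 40));
        return 0;
    }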
2271 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
2298 order, zonelist, high_zoneidx, in __alloc_pages_may_oom()
2306 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
2322 out_of_memory(zonelist, gfp_mask, order, nodemask, false); in __alloc_pages_may_oom()
2332 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2342 if (!order) in __alloc_pages_direct_compact()
2346 compact_result = try_to_compact_pages(zonelist, order, gfp_mask, in __alloc_pages_direct_compact()
2373 order, zonelist, high_zoneidx, in __alloc_pages_direct_compact()
2381 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
2392 defer_compaction(last_compact_zone, order); in __alloc_pages_direct_compact()
2406 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2418 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, in __perform_reclaim() argument
2433 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); in __perform_reclaim()
2446 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
2454 *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist, in __alloc_pages_direct_reclaim()
2464 page = get_page_from_freelist(gfp_mask, nodemask, order, in __alloc_pages_direct_reclaim()
2488 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, in __alloc_pages_high_priority() argument
2496 page = get_page_from_freelist(gfp_mask, nodemask, order, in __alloc_pages_high_priority()
2507 static void wake_all_kswapds(unsigned int order, in wake_all_kswapds() argument
2518 wakeup_kswapd(zone, order, zone_idx(preferred_zone)); in wake_all_kswapds()
2576 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
2596 if (order >= MAX_ORDER) { in __alloc_pages_slowpath()
2615 wake_all_kswapds(order, zonelist, high_zoneidx, in __alloc_pages_slowpath()
2638 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, in __alloc_pages_slowpath()
2653 page = __alloc_pages_high_priority(gfp_mask, order, in __alloc_pages_slowpath()
2684 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, in __alloc_pages_slowpath()
2734 page = __alloc_pages_direct_reclaim(gfp_mask, order, in __alloc_pages_slowpath()
2755 page = __alloc_pages_may_oom(gfp_mask, order, in __alloc_pages_slowpath()
2769 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_slowpath()
2786 if (should_alloc_retry(gfp_mask, order, did_some_progress, in __alloc_pages_slowpath()
2797 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, in __alloc_pages_slowpath()
2808 warn_alloc_failed(gfp_mask, order, NULL); in __alloc_pages_slowpath()
2812 kmemcheck_pagealloc_alloc(page, order, gfp_mask); in __alloc_pages_slowpath()
2821 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, in __alloc_pages_nodemask() argument
2839 if (should_fail_alloc_page(gfp_mask, order)) in __alloc_pages_nodemask()
2865 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, in __alloc_pages_nodemask()
2875 page = __alloc_pages_slowpath(gfp_mask, order, in __alloc_pages_nodemask()
2880 trace_mm_page_alloc(page, order, gfp_mask, migratetype); in __alloc_pages_nodemask()
2899 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
2909 page = alloc_pages(gfp_mask, order); in __get_free_pages()
2922 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
2925 if (order == 0) in __free_pages()
2928 __free_pages_ok(page, order); in __free_pages()
2934 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
2938 __free_pages(virt_to_page((void *)addr), order); in free_pages()
2951 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages() argument
2956 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) in alloc_kmem_pages()
2958 page = alloc_pages(gfp_mask, order); in alloc_kmem_pages()
2959 memcg_kmem_commit_charge(page, memcg, order); in alloc_kmem_pages()
2963 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages_node() argument
2968 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) in alloc_kmem_pages_node()
2970 page = alloc_pages_node(nid, gfp_mask, order); in alloc_kmem_pages_node()
2971 memcg_kmem_commit_charge(page, memcg, order); in alloc_kmem_pages_node()
2979 void __free_kmem_pages(struct page *page, unsigned int order) in __free_kmem_pages() argument
2981 memcg_kmem_uncharge_pages(page, order); in __free_kmem_pages()
2982 __free_pages(page, order); in __free_kmem_pages()
2985 void free_kmem_pages(unsigned long addr, unsigned int order) in free_kmem_pages() argument
2989 __free_kmem_pages(virt_to_page((void *)addr), order); in free_kmem_pages()
2993 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
2997 unsigned long alloc_end = addr + (PAGE_SIZE << order); in make_alloc_exact()
3000 split_page(virt_to_page((void *)addr), order); in make_alloc_exact()
3024 unsigned int order = get_order(size); in alloc_pages_exact() local
3027 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
3028 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
3046 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
3047 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
3050 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
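alloc_pages_exact() rounds the request up to a whole 2^order block, and make_alloc_exact() then split_page()s it and frees the pages beyond PAGE_ALIGN(size), so only the pages actually covering the request stay allocated. The arithmetic by itself, as a sketch; 4 KiB pages are assumed and order_for() is merely a stand-in for the kernel's get_order():

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Smallest order whose block covers 'size' bytes (illustrative get_order()). */
    static unsigned int order_for(unsigned long size)
    {
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long size = 5 * PAGE_SIZE + 100;       /* an awkward size */
        unsigned int order = order_for(size);
        unsigned long alloc_pages = 1UL << order;
        unsigned long used_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

        printf("request %lu bytes -> order %u (%lu pages), keep %lu, free %lu back\n",
               size, order, alloc_pages, used_pages, alloc_pages - used_pages);
        return 0;
    }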
3345 unsigned int order; in show_free_areas() local
3355 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
3356 struct free_area *area = &zone->free_area[order]; in show_free_areas()
3359 nr[order] = area->nr_free; in show_free_areas()
3360 total += nr[order] << order; in show_free_areas()
3362 types[order] = 0; in show_free_areas()
3365 types[order] |= 1 << type; in show_free_areas()
3369 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
3370 printk("%lu*%lukB ", nr[order], K(1UL) << order); in show_free_areas()
3371 if (nr[order]) in show_free_areas()
3372 show_migration_types(types[order]); in show_free_areas()
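The show_free_areas() lines print one '%lu*%lukB' pair per order, i.e. how many free blocks of each size the zone holds, and accumulate nr[order] << order into a page total. The same accounting with invented per-order counts, assuming 4 KiB pages so that one page prints as 4 kB:

    #include <stdio.h>

    #define MAX_ORDER 11

    int main(void)
    {
        /* Invented per-order free-block counts, standing in for what
         * show_free_areas() reads out of zone->free_area[]. */
        unsigned long nr[MAX_ORDER] = { 300, 150, 40, 10, 4, 1 };
        unsigned long total = 0;
        unsigned int order;

        for (order = 0; order < MAX_ORDER; order++) {
            total += nr[order] << order;
            if (nr[order])
                printf("%lu*%lukB ", nr[order], (4096UL / 1024) << order);
        }
        printf("= %lukB\n", total * (4096UL / 1024));
        return 0;
    }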
3695 unsigned int order = current_zonelist_order; in build_zonelists() local
3725 if (order == ZONELIST_ORDER_NODE) in build_zonelists()
3731 if (order == ZONELIST_ORDER_ZONE) { in build_zonelists()
4191 unsigned int order, t; in zone_init_free_lists() local
4192 for_each_migratetype_order(order, t) { in zone_init_free_lists()
4193 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
4194 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
4799 unsigned int order; in set_pageblock_order() local
4806 order = HUGETLB_PAGE_ORDER; in set_pageblock_order()
4808 order = MAX_ORDER - 1; in set_pageblock_order()
4815 pageblock_order = order; in set_pageblock_order()
6376 unsigned int order; in alloc_contig_range() local
6381 .order = -1, in alloc_contig_range()
6442 order = 0; in alloc_contig_range()
6445 if (++order >= MAX_ORDER) { in alloc_contig_range()
6449 outer_start &= ~0UL << order; in alloc_contig_range()
6537 unsigned int order, i; in __offline_isolated_pages() local
6567 order = page_order(page); in __offline_isolated_pages()
6570 pfn, 1 << order, end_pfn); in __offline_isolated_pages()
6574 zone->free_area[order].nr_free--; in __offline_isolated_pages()
6575 for (i = 0; i < (1 << order); i++) in __offline_isolated_pages()
6577 pfn += (1 << order); in __offline_isolated_pages()
6589 unsigned int order; in is_free_buddy_page() local
6592 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
6593 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
6595 if (PageBuddy(page_head) && page_order(page_head) >= order) in is_free_buddy_page()
6600 return order < MAX_ORDER; in is_free_buddy_page()
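Finally, is_free_buddy_page() cannot know up front which order of free block a pfn might sit inside, so it tries each order in turn: it masks the low bits of the pfn to find the candidate block head and accepts the page once that head is marked PageBuddy with at least that order. The pfn masking on its own, with an arbitrary example pfn:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pfn = 0x12345;    /* arbitrary page frame number */
        unsigned int order;

        for (order = 0; order < 11; order++) {
            /* Head of the order-'order' block that would contain this pfn. */
            unsigned long head_pfn = pfn - (pfn & ((1UL << order) - 1));

            printf("order %2u: candidate block head pfn 0x%lx\n", order, head_pfn);
        }
        return 0;
    }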