Lines Matching full:order
47 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
48 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
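Both helpers are plain power-of-two alignment on a page frame number: block_start_pfn() rounds pfn down to the start of its order-sized block, block_end_pfn() rounds up to the block's exclusive end. A minimal userspace sketch of the same arithmetic, using home-grown stand-ins for round_down()/ALIGN() rather than the kernel macros:

#include <stdio.h>

/* Simplified stand-ins for the kernel's round_down()/ALIGN();
   both assume the alignment is a power of two. */
#define RND_DOWN(x, a)  ((x) & ~((a) - 1))
#define RND_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

#define blk_start_pfn(pfn, order)  RND_DOWN((pfn), 1UL << (order))
#define blk_end_pfn(pfn, order)    RND_UP((pfn) + 1, 1UL << (order))

int main(void)
{
        unsigned long pfn = 1000;
        unsigned int order = 9;         /* an order-9 block spans 512 PFNs */

        /* pfn 1000 lies in the block [512, 1024). */
        printf("start=%lu end=%lu\n",
               blk_start_pfn(pfn, order), blk_end_pfn(pfn, order));
        return 0;
}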
70 unsigned int i, order, nr_pages; in map_pages() local
77 order = page_private(page); in map_pages()
78 nr_pages = 1 << order; in map_pages()
80 post_alloc_hook(page, order, __GFP_MOVABLE); in map_pages()
81 if (order) in map_pages()
82 split_page(page, order); in map_pages()
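map_pages() reads back the order that the isolation step stashed in page_private and splits each high-order free page into 1 << order order-0 pages. A self-contained sketch of that split-by-stored-order handoff; the struct and helper below are simplified assumptions, not the kernel's struct page or split_page():

#include <stdio.h>

struct fake_page {
        unsigned long private;          /* order recorded at isolation time */
};

/* Hypothetical stand-in for split_page(): one order-N page -> 2^N order-0 pages. */
static unsigned int split_to_order0(struct fake_page *page)
{
        unsigned int order = (unsigned int)page->private;
        unsigned int nr_pages = 1U << order;

        page->private = 0;              /* the resulting pages are all order-0 */
        return nr_pages;
}

int main(void)
{
        struct fake_page p = { .private = 3 };  /* an order-3 (8-page) free chunk */

        printf("split into %u order-0 pages\n", split_to_order0(&p));
        return 0;
}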
141 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
146 if (order < zone->compact_order_failed) in defer_compaction()
147 zone->compact_order_failed = order; in defer_compaction()
152 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
156 bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
160 if (order < zone->compact_order_failed) in compaction_deferred()
170 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
176 * Update defer tracking counters after successful compaction of given order,
180 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
187 if (order >= zone->compact_order_failed) in compaction_defer_reset()
188 zone->compact_order_failed = order + 1; in compaction_defer_reset()
190 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
194 bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
196 if (order < zone->compact_order_failed) in compaction_restarting()
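Together, defer_compaction(), compaction_deferred(), compaction_defer_reset() and compaction_restarting() form a per-zone exponential backoff keyed on the order that failed: failures raise compact_defer_shift and lower compact_order_failed, and a later success at or above that order resets the backoff. A minimal single-threaded model of that bookkeeping; the struct, the MAX_DEFER_SHIFT cap and the initial values are assumptions, and the tracepoints shown above are omitted:

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEFER_SHIFT 6               /* assumed cap on the backoff */

struct zone_model {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int          compact_order_failed;
};

/* Record a failure: back off harder, remember the lowest failing order. */
static void defer(struct zone_model *z, int order)
{
        z->compact_considered = 0;
        if (z->compact_defer_shift < MAX_DEFER_SHIFT)
                z->compact_defer_shift++;
        if (order < z->compact_order_failed)
                z->compact_order_failed = order;
}

/* Should this attempt be skipped? Only orders >= the failed order defer. */
static bool deferred(struct zone_model *z, int order)
{
        unsigned long limit = 1UL << z->compact_defer_shift;

        if (order < z->compact_order_failed)
                return false;
        if (++z->compact_considered >= limit)
                return false;           /* waited long enough, try again */
        return true;
}

/* A success at this order (or higher) clears the backoff. */
static void defer_reset(struct zone_model *z, int order)
{
        z->compact_considered = 0;
        z->compact_defer_shift = 0;
        if (order >= z->compact_order_failed)
                z->compact_order_failed = order + 1;
}

int main(void)
{
        struct zone_model z = { 0, 0, 100 };    /* 100: no order has failed yet */

        defer(&z, 4);
        printf("order 4 deferred: %d, order 2 deferred: %d\n",
               deferred(&z, 4), deferred(&z, 2));
        defer_reset(&z, 4);
        printf("after reset, order 4 deferred: %d\n", deferred(&z, 4));
        return 0;
}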
223 * released. It is always pointless to compact pages of such order (if they are
444 unsigned int order; in isolate_freepages_block() local
477 const unsigned int order = compound_order(page); in isolate_freepages_block() local
479 if (likely(order < MAX_ORDER)) { in isolate_freepages_block()
480 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
481 cursor += (1UL << order) - 1; in isolate_freepages_block()
515 /* Found a free page, will break it into order-0 pages */ in isolate_freepages_block()
516 order = page_order(page); in isolate_freepages_block()
517 isolated = __isolate_free_page(page, order); in isolate_freepages_block()
520 set_page_private(page, order); in isolate_freepages_block()
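Within isolate_freepages_block(), order is used two ways: a compound (already allocated) page lets the free scanner jump ahead by 1 << order PFNs in one step, and a successfully isolated free page has its order recorded in page_private for map_pages() above to consume. A small sketch of the skip-ahead arithmetic; MAX_ORDER_MODEL and the page fields are assumptions standing in for MAX_ORDER and struct page:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER_MODEL 11              /* assumed cap, mirroring the MAX_ORDER check */

struct pfn_page {
        bool         compound;          /* part of an already-allocated high-order page? */
        unsigned int order;
};

/* Advance the scan cursor past a compound page in one step. */
static unsigned long skip_compound(unsigned long pfn, const struct pfn_page *page)
{
        if (page->compound && page->order < MAX_ORDER_MODEL)
                return pfn + (1UL << page->order) - 1;  /* the loop's ++ adds the last step */
        return pfn;
}

int main(void)
{
        struct pfn_page thp = { .compound = true, .order = 9 };

        /* An order-9 compound page lets the scanner skip 511 PFNs at once. */
        printf("next pfn: %lu\n", skip_compound(1024, &thp) + 1);
        return 0;
}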
614 * is more than pageblock order. In this case, we adjust in isolate_freepages_range()
640 * pageblock_nr_pages for some non-negative n. (Max order in isolate_freepages_range()
726 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
735 * previous order-aligned block, and did not skip it due in isolate_migratepages_block()
743 * We failed to isolate in the previous order-aligned in isolate_migratepages_block()
746 * next_skip_pfn by 1 << order, as low_pfn might have in isolate_migratepages_block()
748 * a compound or a high-order buddy page in the in isolate_migratepages_block()
751 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
774 * Skip if free. We read page order here without zone lock in isolate_migratepages_block()
784 * a valid page order. Consider only values in the in isolate_migratepages_block()
785 * valid order range to prevent low_pfn overflow. in isolate_migratepages_block()
800 const unsigned int order = compound_order(page); in isolate_migratepages_block() local
802 if (likely(order < MAX_ORDER)) in isolate_migratepages_block()
803 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
861 * and it's on LRU. It can only be a THP so the order in isolate_migratepages_block()
910 * instead of migrating, as we cannot form the cc->order buddy in isolate_migratepages_block()
930 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
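In isolate_migratepages_block(), once isolation fails inside a cc->order-aligned block there is little point scanning the rest of that block: the requested buddy can no longer be assembled from it, so low_pfn fast-forwards to next_skip_pfn at the next aligned boundary. A sketch of that boundary computation; the helper name is an assumption, the math matches block_end_pfn() above:

#include <stdio.h>

/* Exclusive end of the order-aligned block containing pfn (power-of-two math). */
static unsigned long block_end(unsigned long pfn, unsigned int order)
{
        unsigned long nr = 1UL << order;

        return (pfn + nr) & ~(nr - 1);
}

int main(void)
{
        unsigned long low_pfn = 1000;
        unsigned int cc_order = 4;      /* trying to assemble an order-4 page */

        /* Isolation failed at pfn 1000: skip ahead to the next order-4 boundary. */
        printf("resume scanning at pfn %lu\n", block_end(low_pfn, cc_order));
        return 0;
}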
1035 * pageblock, so it's not worth to check order for valid range. in suitable_migration_target()
1304 * order == -1 is expected when compacting via
1307 static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument
1309 return order == -1; in is_via_compact_memory()
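The -1 value is a sentinel for explicit whole-zone compaction (see the .order = -1 initializer in compact_node() further down) as opposed to compaction on behalf of a concrete allocation, and the order-based cutoffs below are bypassed for it. A trivial illustration of the sentinel guard; the helper names are assumptions:

#include <stdbool.h>
#include <stdio.h>

static bool via_compact_memory(int order)
{
        return order == -1;             /* whole-zone request, no target order */
}

/* Order-based early exits do not apply to explicit whole-zone compaction. */
static bool should_check_order_watermarks(int order)
{
        return !via_compact_memory(order);
}

int main(void)
{
        printf("order -1 checked: %d, order 9 checked: %d\n",
               should_check_order_watermarks(-1), should_check_order_watermarks(9));
        return 0;
}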
1315 unsigned int order; in __compact_finished() local
1341 if (is_via_compact_memory(cc->order)) in __compact_finished()
1356 for (order = cc->order; order < MAX_ORDER; order++) { in __compact_finished()
1357 struct free_area *area = &zone->free_area[order]; in __compact_finished()
1374 if (find_suitable_fallback(area, order, migratetype, in __compact_finished()
1409 trace_mm_compaction_finished(zone, cc->order, ret); in compact_finished()
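__compact_finished() declares success once any free list from cc->order up to MAX_ORDER can satisfy the request, either directly for the desired migratetype or through a suitable fallback. A stripped-down model of that scan, ignoring the migratetype/fallback handling visible at the matched lines; the array and MAX_ORDER_MODEL are assumptions:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER_MODEL 11

/* Free pages per order for the desired migratetype (a toy snapshot). */
static unsigned long nr_free[MAX_ORDER_MODEL];

/* Compaction can stop once any order >= the requested one has a free page. */
static bool compaction_done(unsigned int order)
{
        for (; order < MAX_ORDER_MODEL; order++)
                if (nr_free[order] > 0)
                        return true;
        return false;
}

int main(void)
{
        nr_free[5] = 1;                 /* one free order-5 block */

        printf("order 4 satisfied: %d, order 6 satisfied: %d\n",
               compaction_done(4), compaction_done(6));
        return 0;
}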
1423 static enum compact_result __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
1430 if (is_via_compact_memory(order)) in __compaction_suitable()
1435 * If watermarks for high-order allocation are already met, there in __compaction_suitable()
1438 if (zone_watermark_ok(zone, order, watermark, classzone_idx, in __compaction_suitable()
1443 * Watermarks for order-0 must be met for compaction to be able to in __compaction_suitable()
1456 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? in __compaction_suitable()
1458 watermark += compact_gap(order); in __compaction_suitable()
1466 enum compact_result compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
1473 ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, in compaction_suitable()
1480 * watermarks, but we already failed the high-order watermark check in compaction_suitable()
1491 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { in compaction_suitable()
1492 fragindex = fragmentation_index(zone, order); in compaction_suitable()
1497 trace_mm_compaction_suitable(zone, order, ret); in compaction_suitable()
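__compaction_suitable() asks two things: are the high-order watermarks already met (nothing to do), and is there enough order-0 headroom, the watermark plus compact_gap(order), for migration to find free target pages at all; compaction_suitable() then also consults the fragmentation index for costly orders. A simplified model of the headroom test, ignoring classzone_idx, CMA pages and the fragmentation index; the constants and watermark values are assumptions:

#include <stdbool.h>
#include <stdio.h>

#define COSTLY_ORDER 3                  /* stand-in for PAGE_ALLOC_COSTLY_ORDER */

/* Roughly: room for both the pages being migrated and their destinations. */
static unsigned long gap(unsigned int order)
{
        return 2UL << order;
}

/* Is there enough order-0 headroom above the watermark to even try? */
static bool headroom_ok(unsigned long free_pages, unsigned long min_wmark,
                        unsigned long low_wmark, unsigned int order)
{
        unsigned long wmark = (order > COSTLY_ORDER) ? low_wmark : min_wmark;

        return free_pages >= wmark + gap(order);
}

int main(void)
{
        /* 5000 free pages, min/low watermarks of 1000/1500, order-9 request. */
        printf("worth compacting: %d\n", headroom_ok(5000, 1000, 1500, 9));
        return 0;
}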
1504 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, in compaction_zonelist_suitable() argument
1521 * want to trash just for a single high order allocation which in compaction_zonelist_suitable()
1525 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
1527 compact_result = __compaction_suitable(zone, order, alloc_flags, in compaction_zonelist_suitable()
1555 ret = compaction_suitable(zone, cc->order, cc->alloc_flags, in compact_zone()
1568 if (compaction_restarting(zone, cc->order)) in compact_zone()
1617 * previous cc->order aligned block. in compact_zone()
1645 * order-aligned block, so skip the rest of it. in compact_zone()
1650 cc->migrate_pfn - 1, cc->order); in compact_zone()
1660 * cc->order aligned block where we migrated from? If yes, in compact_zone()
1665 if (cc->order > 0 && cc->last_migrated_pfn) { in compact_zone()
1668 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
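compact_zone() also tracks whether the migration scanner has moved past the cc->order-aligned block it last migrated pages out of; once it has, per-cpu lists are drained so the freed pages can merge into the buddy the caller is waiting for. A model of that boundary test; the helper and the sample values are assumptions:

#include <stdbool.h>
#include <stdio.h>

/* Start of the order-aligned block containing pfn. */
static unsigned long block_start(unsigned long pfn, unsigned int order)
{
        return pfn & ~((1UL << order) - 1);
}

/* Worth flushing per-cpu lists once the scanner has left the last migrated block. */
static bool left_last_block(unsigned long migrate_pfn,
                            unsigned long last_migrated_pfn, unsigned int order)
{
        return last_migrated_pfn < block_start(migrate_pfn, order);
}

int main(void)
{
        /* order-4 blocks are 16 pages wide: pfn 990 lies before the block at 1008. */
        printf("flush: %d\n", left_last_block(1008, 990, 4));
        return 0;
}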
1711 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
1717 .order = order, in compact_zone_order()
1741 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1743 * @order: The order of the current allocation
1750 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument
1766 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); in try_to_compact_pages()
1774 && compaction_deferred(zone, order)) { in try_to_compact_pages()
1779 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
1791 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
1803 defer_compaction(zone, order); in try_to_compact_pages()
1826 .order = -1, in compact_node()
1943 * order is allocatable. in kcompactd_do_work()
1948 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
1954 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
1965 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
1968 if (compaction_suitable(zone, cc.order, 0, zoneid) != in kcompactd_do_work()
1979 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
1984 * order >= cc.order. This is ratelimited by the in kcompactd_do_work()
1993 defer_compaction(zone, cc.order); in kcompactd_do_work()
2007 * the requested order/classzone_idx in case it was higher/tighter than in kcompactd_do_work()
2010 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
2016 void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) in wakeup_kcompactd() argument
2018 if (!order) in wakeup_kcompactd()
2021 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
2022 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
2037 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
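wakeup_kcompactd() only ever raises the node's pending kcompactd_max_order, and kcompactd_do_work() clears it after compacting for that order unless a higher request arrived in the meantime. A single-threaded model of that handshake; the names and the driver in main() are assumptions:

#include <stdio.h>

/* Per-node record of the highest order a waker has asked kcompactd for. */
static int pending_max_order;

/* Wakers only ever raise the pending order; order-0 wakeups are ignored. */
static void wakeup(int order)
{
        if (!order)
                return;
        if (order > pending_max_order)
                pending_max_order = order;
}

/* The worker consumes the request, keeping it only if a higher one raced in
   (impossible in this single-threaded toy, but it mirrors the matched check). */
static void do_work(void)
{
        int order = pending_max_order;

        printf("compacting for order %d\n", order);
        if (pending_max_order <= order)
                pending_max_order = 0;
}

int main(void)
{
        wakeup(3);
        wakeup(5);
        do_work();
        printf("pending order now %d\n", pending_max_order);
        return 0;
}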