
Lines matching refs:order (uses of the identifier order in the kernel's mm/compaction.c)

53 #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))  argument
54 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
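The two macros above (compaction.c lines 53-54) round a pfn down to the start, and up to the end, of an order-sized block of pages. A minimal userspace sketch of the same arithmetic, with round_down()/ALIGN() re-implemented here purely for illustration (these are not the kernel definitions):

    #include <stdio.h>

    /* Stand-ins for the kernel's round_down()/ALIGN(); valid for power-of-two y. */
    #define ROUND_DOWN(x, y)  ((x) & ~((y) - 1))
    #define ALIGN_UP(x, y)    (((x) + (y) - 1) & ~((y) - 1))

    #define BLOCK_START_PFN(pfn, order)  ROUND_DOWN((pfn), 1UL << (order))
    #define BLOCK_END_PFN(pfn, order)    ALIGN_UP((pfn) + 1, 1UL << (order))

    int main(void)
    {
        unsigned long pfn = 1000005UL;
        unsigned int order = 9;   /* e.g. a 2 MiB pageblock with 4 KiB pages */

        /* An order-9 block spans 512 pfns: 1000005 falls in [999936, 1000448). */
        printf("start=%lu end=%lu\n",
               BLOCK_START_PFN(pfn, order), BLOCK_END_PFN(pfn, order));
        return 0;
    }

Note that block_end_pfn() aligns pfn + 1, so a pfn already sitting on a block boundary still maps to the end of its own block rather than to itself.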
94 unsigned int i, order, nr_pages; in split_map_pages() local
101 order = page_private(page); in split_map_pages()
102 nr_pages = 1 << order; in split_map_pages()
104 post_alloc_hook(page, order, __GFP_MOVABLE); in split_map_pages()
105 if (order) in split_map_pages()
106 split_page(page, order); in split_map_pages()
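split_map_pages() (lines 94-106 above) runs after free pages have been isolated: the isolation paths further down the listing (lines 637, 786 and 1426) stash the buddy order of each isolated chunk in page_private(), and this function reads it back, reruns the allocation hook, and splits anything larger than order 0 into single pages. A rough standalone model of that split, using a toy struct in place of struct page (all names here are hypothetical):

    #include <stdio.h>

    /* Toy stand-in for struct page, carrying only what this sketch needs. */
    struct toy_page {
        unsigned long private;   /* the isolation path stored the buddy order here */
        unsigned long pfn;
    };

    /* One order-N chunk becomes 1 << N order-0 pages. */
    static unsigned long split_chunk(const struct toy_page *chunk, struct toy_page *out)
    {
        unsigned int order = (unsigned int)chunk->private;
        unsigned long nr_pages = 1UL << order;

        for (unsigned long i = 0; i < nr_pages; i++) {
            out[i].pfn = chunk->pfn + i;
            out[i].private = 0;          /* each resulting page is order-0 */
        }
        return nr_pages;
    }

    int main(void)
    {
        struct toy_page chunk = { .private = 2, .pfn = 4096 };   /* an order-2 chunk */
        struct toy_page pages[1 << 2];
        unsigned long n = split_chunk(&chunk, pages);

        printf("split into %lu pages, pfns %lu..%lu\n", n, pages[0].pfn, pages[n - 1].pfn);
        return 0;
    }

The if (order) guard at line 105 reflects the same idea: an order-0 page needs no split at all.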
165 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
170 if (order < zone->compact_order_failed) in defer_compaction()
171 zone->compact_order_failed = order; in defer_compaction()
176 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
180 bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
184 if (order < zone->compact_order_failed) in compaction_deferred()
193 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
203 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
210 if (order >= zone->compact_order_failed) in compaction_defer_reset()
211 zone->compact_order_failed = order + 1; in compaction_defer_reset()
213 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
217 bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
219 if (order < zone->compact_order_failed) in compaction_restarting()
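The four helpers above (defer_compaction(), compaction_deferred(), compaction_defer_reset() and compaction_restarting()) all pivot on zone->compact_order_failed: a failure defers future attempts at that order and above, while requests below the failed order are never deferred (lines 170-171, 184, 210-211, 219). A simplified userspace model of the first two, with field names borrowed from struct zone and the exact backoff policy abbreviated (the kernel caps the shift at COMPACT_MAX_DEFER_SHIFT, which is 6):

    #include <stdbool.h>
    #include <stdio.h>

    #define COMPACT_MAX_DEFER_SHIFT 6

    /* Toy zone carrying only the deferral bookkeeping referenced above. */
    struct toy_zone {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
    };

    /* Record a failure: back off and remember the lowest failing order. */
    static void defer_compaction(struct toy_zone *z, int order)
    {
        z->compact_considered = 0;
        if (z->compact_defer_shift < COMPACT_MAX_DEFER_SHIFT)
            z->compact_defer_shift++;
        if (order < z->compact_order_failed)
            z->compact_order_failed = order;
    }

    /* Should this request skip compaction? Orders below the last failure never defer. */
    static bool compaction_deferred(struct toy_zone *z, int order)
    {
        unsigned int limit = 1U << z->compact_defer_shift;

        if (order < z->compact_order_failed)
            return false;
        if (++z->compact_considered >= limit) {
            z->compact_considered = limit;
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct toy_zone z = { .compact_order_failed = 10 };

        defer_compaction(&z, 4);                                        /* order-4 attempt failed */
        printf("order 4 deferred: %d\n", compaction_deferred(&z, 4));   /* 1: backed off */
        printf("order 2 deferred: %d\n", compaction_deferred(&z, 2));   /* 0: below failed order */
        return 0;
    }

compaction_defer_reset() undoes the backoff once an allocation at the given order succeeds and bumps compact_order_failed past it (line 211), so only a fresh failure can re-arm the deferral at that order.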
570 unsigned int order; in isolate_freepages_block() local
604 const unsigned int order = compound_order(page); in isolate_freepages_block() local
606 if (likely(order < MAX_ORDER)) { in isolate_freepages_block()
607 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
608 cursor += (1UL << order) - 1; in isolate_freepages_block()
633 order = buddy_order(page); in isolate_freepages_block()
634 isolated = __isolate_free_page(page, order); in isolate_freepages_block()
637 set_page_private(page, order); in isolate_freepages_block()
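Two details of isolate_freepages_block() show up above: when the scanner hits a compound page it jumps blockpfn and cursor forward by (1UL << order) - 1 (lines 607-608), relying on the loop's own increment for the final step, and when it isolates a buddy it records the order in page_private() (line 637) for split_map_pages() to consume. The same skip-by-order pattern reappears in isolate_migratepages_block() at lines 958-959. A tiny sketch of the skip arithmetic (standalone loop, made-up layout):

    #include <stdio.h>

    int main(void)
    {
        unsigned long pfn = 0, end = 16;
        unsigned long compound_at = 4;
        unsigned int compound_order = 2;   /* an order-2 page occupying pfns 4..7 */

        for (; pfn < end; pfn++) {
            if (pfn == compound_at) {
                /* Skip the compound page's tail; the for-loop's ++ adds the last step. */
                pfn += (1UL << compound_order) - 1;
                continue;
            }
            printf("scan pfn %lu\n", pfn);
        }
        return 0;
    }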
776 unsigned int order; in isolate_and_split_free_page() local
781 order = buddy_order(page); in isolate_and_split_free_page()
782 isolated = __isolate_free_page(page, order); in isolate_and_split_free_page()
786 set_page_private(page, order); in isolate_and_split_free_page()
868 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
893 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
956 const unsigned int order = compound_order(page); in isolate_migratepages_block() local
958 if (likely(order < MAX_ORDER)) in isolate_migratepages_block()
959 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
1090 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
1309 static int next_search_order(struct compact_control *cc, int order) in next_search_order() argument
1311 order--; in next_search_order()
1312 if (order < 0) in next_search_order()
1313 order = cc->order - 1; in next_search_order()
1316 if (order == cc->search_order) { in next_search_order()
1319 cc->search_order = cc->order - 1; in next_search_order()
1323 return order; in next_search_order()
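next_search_order() lets fast_isolate_freepages() (the next group of lines) resume its scan of the free_area orders where the previous call left off: the order decrements on each step and wraps around below zero to cc->order - 1, and the walk stops once it has cycled back to the remembered cc->search_order. A minimal sketch of that wrapping descent, decoupled from struct compact_control:

    #include <stdio.h>

    /* Visit each order in 0..cc_order-1 exactly once, starting from start_order
     * and wrapping past 0 back to cc_order - 1, mirroring the cyclic search.
     * Assumes 0 <= start_order < cc_order. */
    static void walk_orders(int cc_order, int start_order)
    {
        int order = start_order;
        int visited = 0;

        while (visited < cc_order) {
            printf("searching free_area[%d]\n", order);
            visited++;
            order--;
            if (order < 0)
                order = cc_order - 1;      /* wrap around */
        }
    }

    int main(void)
    {
        walk_orders(5, 2);   /* visits orders 2, 1, 0, 4, 3 */
        return 0;
    }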
1336 int order; in fast_isolate_freepages() local
1339 if (cc->order <= 0) in fast_isolate_freepages()
1366 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1368 for (order = cc->search_order; in fast_isolate_freepages()
1369 !page && order >= 0; in fast_isolate_freepages()
1370 order = next_search_order(cc, order)) { in fast_isolate_freepages()
1371 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1396 cc->search_order = order; in fast_isolate_freepages()
1425 if (__isolate_free_page(page, order)) { in fast_isolate_freepages()
1426 set_page_private(page, order); in fast_isolate_freepages()
1427 nr_isolated = 1 << order; in fast_isolate_freepages()
1433 order = cc->search_order + 1; in fast_isolate_freepages()
1692 int order; in fast_find_migrateblock() local
1712 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
1735 for (order = cc->order - 1; in fast_find_migrateblock()
1736 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; in fast_find_migrateblock()
1737 order--) { in fast_find_migrateblock()
1738 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
1904 static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument
1906 return order == -1; in is_via_compact_memory()
1989 unsigned int order; in __compact_finished() local
2033 if (is_via_compact_memory(cc->order)) in __compact_finished()
2047 for (order = cc->order; order < MAX_ORDER; order++) { in __compact_finished()
2048 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2065 if (find_suitable_fallback(area, order, migratetype, in __compact_finished()
2104 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
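__compact_finished() (lines 1989-2065) decides when compaction can stop: a run triggered via /proc/sys/vm/compact_memory (order == -1, see is_via_compact_memory() above) never finishes early, otherwise the loop at line 2047 walks every free_area from cc->order upward and declares success as soon as a free page of the wanted migratetype, or a suitable fallback (line 2065), is available. A schematic of that loop, with the free lists reduced to per-order counters and the fallback check omitted (all data here is invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER 11   /* the usual default in this era of the kernel */

    /* Per-order count of free blocks of the wanted migratetype (toy data). */
    static unsigned long free_blocks[MAX_ORDER];

    /* True once any order >= request_order can satisfy the allocation. */
    static bool compact_finished(int request_order)
    {
        for (int order = request_order; order < MAX_ORDER; order++) {
            if (free_blocks[order] > 0)
                return true;
        }
        return false;
    }

    int main(void)
    {
        free_blocks[5] = 1;                                   /* one order-5 block free */
        printf("order 4 done: %d\n", compact_finished(4));    /* 1 */
        printf("order 6 done: %d\n", compact_finished(6));    /* 0 */
        return 0;
    }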
2118 static enum compact_result __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
2125 if (is_via_compact_memory(order)) in __compaction_suitable()
2133 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, in __compaction_suitable()
2151 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? in __compaction_suitable()
2153 watermark += compact_gap(order); in __compaction_suitable()
2161 enum compact_result compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
2168 ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, in compaction_suitable()
2186 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { in compaction_suitable()
2187 fragindex = fragmentation_index(zone, order); in compaction_suitable()
2192 trace_mm_compaction_suitable(zone, order, ret); in compaction_suitable()
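__compaction_suitable()/compaction_suitable() (lines 2118-2192) gate whether running compaction is worthwhile: a sysctl-triggered run (order == -1) always proceeds, an order that the zone watermark already satisfies needs no compaction, and otherwise there must be enough order-0 pages above the watermark, padded by compact_gap() (line 2153), to serve as migration targets; for costly orders the fragmentation index adds a further veto (lines 2186-2187). A simplified sketch of the first three checks, with invented numbers and plain counters standing in for the real watermark tests (compact_gap() is twice the request size, 2UL << order):

    #include <stdio.h>

    enum compact_result { COMPACT_SKIPPED, COMPACT_SUCCESS, COMPACT_CONTINUE };

    #define PAGE_ALLOC_COSTLY_ORDER 3

    /* Head-room compaction needs above the watermark: migration sources plus targets. */
    static unsigned long compact_gap(unsigned int order)
    {
        return 2UL << order;
    }

    static enum compact_result suitable(unsigned long free_pages,
                                        unsigned long free_blocks_at_order,
                                        unsigned long min_wmark,
                                        unsigned long low_wmark,
                                        unsigned int order)
    {
        /* The zone can already satisfy the request: no compaction needed. */
        if (free_blocks_at_order > 0)
            return COMPACT_SUCCESS;

        /* Costly orders are held to the stricter low watermark (line 2151). */
        unsigned long wmark = (order > PAGE_ALLOC_COSTLY_ORDER) ? low_wmark : min_wmark;

        /* Not enough order-0 pages for migration targets: compaction cannot help. */
        if (free_pages < wmark + compact_gap(order))
            return COMPACT_SKIPPED;

        return COMPACT_CONTINUE;
    }

    int main(void)
    {
        /* Order 4 is costly: needs low_wmark (300) + gap (32) = 332 free pages. */
        printf("%d\n", suitable(1024, 0, 256, 300, 4));   /* 2 = COMPACT_CONTINUE */
        printf("%d\n", suitable(200,  0, 256, 300, 4));   /* 0 = COMPACT_SKIPPED */
        return 0;
    }

The fragmentation-index check then filters out zones where the allocation failure stems from a plain shortage of memory rather than from fragmentation, since in that case reclaim, not compaction, is the right tool.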
2199 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, in compaction_zonelist_suitable() argument
2220 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
2222 compact_result = __compaction_suitable(zone, order, alloc_flags, in compaction_zonelist_suitable()
2253 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, in compact_zone()
2266 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2383 cc->migrate_pfn - 1, cc->order); in compact_zone()
2397 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2399 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
2444 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
2451 .order = order, in compact_zone_order()
2452 .search_order = order, in compact_zone_order()
2506 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument
2522 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); in try_to_compact_pages()
2530 && compaction_deferred(zone, order)) { in try_to_compact_pages()
2535 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2547 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
2559 defer_compaction(zone, order); in try_to_compact_pages()
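try_to_compact_pages() (lines 2506-2559) is the direct-compaction entry point: it walks the zonelist, skips zones whose deferral state says not to bother (line 2530), runs compact_zone_order() on the rest, and feeds the outcome back into the deferral machinery, compaction_defer_reset() on success (line 2547) and defer_compaction() on a complete failure (line 2559). A schematic of that control flow, with zone iteration reduced to an array and the deferral state collapsed to a flag (everything here is simulated):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_zone {
        const char *name;
        bool deferred;       /* stands in for compaction_deferred() */
        bool will_succeed;   /* stands in for the real compaction outcome */
    };

    /* Skeleton of the per-zone loop: skip deferred zones, try the rest,
     * then either reset or arm the deferral depending on the result. */
    static void try_to_compact(struct toy_zone *zones, int nr, int order)
    {
        for (int i = 0; i < nr; i++) {
            struct toy_zone *z = &zones[i];

            if (z->deferred) {
                printf("%s: skipped (deferred at order %d)\n", z->name, order);
                continue;
            }
            if (z->will_succeed) {
                z->deferred = false;     /* compaction_defer_reset() */
                printf("%s: success, deferral reset\n", z->name);
                return;                  /* a suitable page should now exist, stop */
            }
            z->deferred = true;          /* defer_compaction() */
            printf("%s: failed, deferring future attempts\n", z->name);
        }
    }

    int main(void)
    {
        struct toy_zone zones[] = {
            { "DMA32",  true,  false },
            { "Normal", false, true  },
        };
        try_to_compact(zones, 2, 4);
        return 0;
    }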
2588 .order = -1, in proactive_compact_node()
2617 .order = -1, in compact_node()
2763 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
2770 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
2781 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
2784 if (compaction_suitable(zone, cc.order, 0, zoneid) != in kcompactd_do_work()
2795 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
2809 defer_compaction(zone, cc.order); in kcompactd_do_work()
2826 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
2832 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) in wakeup_kcompactd() argument
2834 if (!order) in wakeup_kcompactd()
2837 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
2838 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
2853 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
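wakeup_kcompactd() (lines 2832-2853) and kcompactd_do_work() (lines 2763-2826) cooperate through pgdat->kcompactd_max_order: order-0 wakeups are ignored (line 2834), each real wakeup raises the latch to the highest order requested so far (lines 2837-2838), and once the worker has handled that order or higher the latch is dropped back to zero so the next wakeup starts fresh (the comparison at line 2826 guards that reset). A small single-threaded model of the latch (toy names, no locking):

    #include <stdio.h>

    /* Toy node: just the wakeup latch shared between producers and kcompactd. */
    struct toy_node {
        int kcompactd_max_order;
    };

    /* Callers record the largest order they failed to allocate; order 0 never wakes. */
    static void wakeup(struct toy_node *n, int order)
    {
        if (!order)
            return;
        if (n->kcompactd_max_order < order)
            n->kcompactd_max_order = order;
    }

    /* Worker side: compact for the latched order, then clear the latch
     * unless a higher request arrived in the meantime. */
    static void do_work(struct toy_node *n)
    {
        int order = n->kcompactd_max_order;

        printf("kcompactd compacting for order %d\n", order);
        if (n->kcompactd_max_order <= order)
            n->kcompactd_max_order = 0;
    }

    int main(void)
    {
        struct toy_node node = { 0 };

        wakeup(&node, 3);
        wakeup(&node, 5);   /* a higher request raises the latch */
        wakeup(&node, 2);   /* a lower one does not */
        do_work(&node);     /* compacts for order 5, then clears the latch */
        return 0;
    }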