1 // SPDX-License-Identifier: GPL-2.0
9 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
17 #include <linux/backing-dev.h>
20 #include <linux/page-isolation.h>
59 * Page order with respect to which proactive compaction
68 #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
78 list_del(&page->lru); in release_freepages()
94 list_del(&page->lru); in split_map_pages()
104 list_add(&page->lru, &tmp_list); in split_map_pages()
123 if (mapping && mapping->a_ops && mapping->a_ops->isolate_page) in PageMovable()
134 page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE); in __SetPageMovable()
147 page->mapping = (void *)((unsigned long)page->mapping & in __ClearPageMovable()
162 zone->compact_considered = 0; in defer_compaction()
163 zone->compact_defer_shift++; in defer_compaction()
165 if (order < zone->compact_order_failed) in defer_compaction()
166 zone->compact_order_failed = order; in defer_compaction()
168 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
169 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
177 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
179 if (order < zone->compact_order_failed) in compaction_deferred()
183 if (++zone->compact_considered >= defer_limit) { in compaction_deferred()
184 zone->compact_considered = defer_limit; in compaction_deferred()
202 zone->compact_considered = 0; in compaction_defer_reset()
203 zone->compact_defer_shift = 0; in compaction_defer_reset()
205 if (order >= zone->compact_order_failed) in compaction_defer_reset()
206 zone->compact_order_failed = order + 1; in compaction_defer_reset()
214 if (order < zone->compact_order_failed) in compaction_restarting()
217 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && in compaction_restarting()
218 zone->compact_considered >= 1UL << zone->compact_defer_shift; in compaction_restarting()
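The defer_compaction()/compaction_deferred() fragments above amount to an exponential back-off: each failure zeroes the attempt counter and bumps compact_defer_shift (capped at COMPACT_MAX_DEFER_SHIFT), and further attempts are skipped until compact_considered reaches 1UL << compact_defer_shift; compaction_defer_reset() clears the state on success. A minimal standalone sketch of the same back-off (the field names and the cap value are assumptions for illustration, not the kernel API):

#include <stdbool.h>

#define MAX_DEFER_SHIFT 6	/* assumed cap: skip at most 1 << 6 = 64 attempts */

struct defer_state {
	unsigned int considered;	/* attempts skipped since the last failure */
	unsigned int defer_shift;	/* log2 of how many attempts to skip */
};

/* Record a compaction failure: double the back-off window, up to the cap. */
static void note_failure(struct defer_state *d)
{
	d->considered = 0;
	if (++d->defer_shift > MAX_DEFER_SHIFT)
		d->defer_shift = MAX_DEFER_SHIFT;
}

/* Before a new attempt: true means "still backing off, skip this one". */
static bool should_defer(struct defer_state *d)
{
	unsigned long limit = 1UL << d->defer_shift;

	if (++d->considered >= limit) {
		d->considered = limit;	/* avoid counter overflow, as above */
		return false;		/* enough attempts skipped, try again */
	}
	return true;
}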
225 if (cc->ignore_skip_hint) in isolation_suitable()
233 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; in reset_cached_positions()
234 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; in reset_cached_positions()
235 zone->compact_cached_free_pfn = in reset_cached_positions()
236 pageblock_start_pfn(zone_end_pfn(zone) - 1); in reset_cached_positions()
282 * non-movable pageblock as the starting point. in __reset_isolation_pfn()
290 block_pfn = max(block_pfn, zone->zone_start_pfn); in __reset_isolation_pfn()
298 block_pfn = pageblock_end_pfn(pfn) - 1; in __reset_isolation_pfn()
299 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); in __reset_isolation_pfn()
336 unsigned long migrate_pfn = zone->zone_start_pfn; in __reset_isolation_suitable()
337 unsigned long free_pfn = zone_end_pfn(zone) - 1; in __reset_isolation_suitable()
343 if (!zone->compact_blockskip_flush) in __reset_isolation_suitable()
346 zone->compact_blockskip_flush = false; in __reset_isolation_suitable()
355 free_pfn -= pageblock_nr_pages) { in __reset_isolation_suitable()
363 zone->compact_init_migrate_pfn = reset_migrate; in __reset_isolation_suitable()
364 zone->compact_cached_migrate_pfn[0] = reset_migrate; in __reset_isolation_suitable()
365 zone->compact_cached_migrate_pfn[1] = reset_migrate; in __reset_isolation_suitable()
373 zone->compact_init_free_pfn = reset_free; in __reset_isolation_suitable()
374 zone->compact_cached_free_pfn = reset_free; in __reset_isolation_suitable()
380 zone->compact_cached_migrate_pfn[0] = migrate_pfn; in __reset_isolation_suitable()
381 zone->compact_cached_migrate_pfn[1] = migrate_pfn; in __reset_isolation_suitable()
382 zone->compact_cached_free_pfn = free_pfn; in __reset_isolation_suitable()
391 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable()
396 if (zone->compact_blockskip_flush) in reset_isolation_suitable()
411 if (cc->ignore_skip_hint) in test_and_set_skip()
418 if (!skip && !cc->no_set_skip_hint) in test_and_set_skip()
426 struct zone *zone = cc->zone; in update_cached_migrate()
431 if (cc->no_set_skip_hint) in update_cached_migrate()
434 if (pfn > zone->compact_cached_migrate_pfn[0]) in update_cached_migrate()
435 zone->compact_cached_migrate_pfn[0] = pfn; in update_cached_migrate()
436 if (cc->mode != MIGRATE_ASYNC && in update_cached_migrate()
437 pfn > zone->compact_cached_migrate_pfn[1]) in update_cached_migrate()
438 zone->compact_cached_migrate_pfn[1] = pfn; in update_cached_migrate()
448 struct zone *zone = cc->zone; in update_pageblock_skip()
450 if (cc->no_set_skip_hint) in update_pageblock_skip()
458 /* Update where async and sync compaction should restart */ in update_pageblock_skip()
459 if (pfn < zone->compact_cached_free_pfn) in update_pageblock_skip()
460 zone->compact_cached_free_pfn = pfn; in update_pageblock_skip()
495 * Sync compaction acquires the lock.
504 if (cc->mode == MIGRATE_ASYNC && !cc->contended) { in compact_lock_irqsave()
508 cc->contended = true; in compact_lock_irqsave()
521 * aborts. Sync compaction schedules.
527 * Returns false when compaction can continue (sync compaction might have
539 cc->contended = true; in compact_unlock_should_abort()
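Taken together, the compact_lock_irqsave()/compact_unlock_should_abort() fragments describe the two lock disciplines: async compaction only trylocks and, on contention, records cc->contended so the scan can be abandoned, while sync compaction is prepared to wait (and, per the comments above, schedules rather than aborting when it needs to yield). A rough sketch of the acquire side of that pattern, with the struct layout assumed for illustration:

#include <linux/spinlock.h>
#include <linux/types.h>

struct cc_like {		/* assumed stand-in for compact_control */
	bool async;		/* cc->mode == MIGRATE_ASYNC */
	bool contended;
};

/*
 * Mirrors the compact_lock_irqsave() fragment: the lock is always taken,
 * but an async caller that had to contend is flagged so later checks can
 * abort the scan instead of keeping the lock held for long.
 */
static bool lock_for_compaction(spinlock_t *lock, unsigned long *flags,
				struct cc_like *cc)
{
	if (cc->async && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;
		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}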
550 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
584 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
602 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
603 cursor += (1UL << order) - 1; in isolate_freepages_block()
619 locked = compact_lock_irqsave(&cc->zone->lock, in isolate_freepages_block()
627 /* Found a free page, will break it into order-0 pages */ in isolate_freepages_block()
635 cc->nr_freepages += isolated; in isolate_freepages_block()
636 list_add_tail(&page->lru, freelist); in isolate_freepages_block()
638 if (!strict && cc->nr_migratepages <= cc->nr_freepages) { in isolate_freepages_block()
643 blockpfn += isolated - 1; in isolate_freepages_block()
644 cursor += isolated - 1; in isolate_freepages_block()
656 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
679 cc->total_free_scanned += nr_scanned; in isolate_freepages_block()
686 * isolate_freepages_range() - isolate free pages.
689 * @end_pfn: The one-past-last PFN.
691 * Non-free pages, invalid PFNs, or zone boundaries within the
695 * Otherwise, function returns one-past-the-last PFN of isolated page
708 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_freepages_range()
709 block_start_pfn = cc->zone->zone_start_pfn; in isolate_freepages_range()
732 block_end_pfn, cc->zone)) in isolate_freepages_range()
741 * non-free pages). in isolate_freepages_range()
748 * pageblock_nr_pages for some non-negative n. (Max order in isolate_freepages_range()
782 * isolate_migratepages_block() - isolate all migrate-able pages within
786 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
795 * The pages are isolated on cc->migratepages list (not required to be empty),
796 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
803 pg_data_t *pgdat = cc->zone->zone_pgdat; in isolate_migratepages_block()
821 if (cc->nr_migratepages) in isolate_migratepages_block()
825 if (cc->mode == MIGRATE_ASYNC) in isolate_migratepages_block()
836 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { in isolate_migratepages_block()
838 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
847 * previous order-aligned block, and did not skip it due in isolate_migratepages_block()
855 * We failed to isolate in the previous order-aligned in isolate_migratepages_block()
860 * a compound or a high-order buddy page in the in isolate_migratepages_block()
863 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
872 && compact_unlock_should_abort(&pgdat->lru_lock, in isolate_migratepages_block()
891 if (!cc->ignore_skip_hint && get_pageblock_skip(page)) { in isolate_migratepages_block()
913 low_pfn += (1UL << freepage_order) - 1; in isolate_migratepages_block()
925 if (PageCompound(page) && !cc->alloc_contig) { in isolate_migratepages_block()
929 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
935 * It's possible to migrate LRU and non-lru movable pages. in isolate_migratepages_block()
946 spin_unlock_irqrestore(&pgdat->lru_lock, in isolate_migratepages_block()
971 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) in isolate_migratepages_block()
976 locked = compact_lock_irqsave(&pgdat->lru_lock, in isolate_migratepages_block()
991 * Page became compound since the non-locked check, in isolate_migratepages_block()
995 if (unlikely(PageCompound(page) && !cc->alloc_contig)) { in isolate_migratepages_block()
996 low_pfn += compound_nr(page) - 1; in isolate_migratepages_block()
1009 low_pfn += compound_nr(page) - 1; in isolate_migratepages_block()
1018 list_add(&page->lru, &cc->migratepages); in isolate_migratepages_block()
1019 cc->nr_migratepages += compound_nr(page); in isolate_migratepages_block()
1028 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX && in isolate_migratepages_block()
1029 !cc->rescan && !cc->contended) { in isolate_migratepages_block()
1041 * instead of migrating, as we cannot form the cc->order buddy in isolate_migratepages_block()
1046 spin_unlock_irqrestore(&pgdat->lru_lock, flags); in isolate_migratepages_block()
1049 putback_movable_pages(&cc->migratepages); in isolate_migratepages_block()
1050 cc->nr_migratepages = 0; in isolate_migratepages_block()
1055 low_pfn = next_skip_pfn - 1; in isolate_migratepages_block()
1060 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
1073 spin_unlock_irqrestore(&pgdat->lru_lock, flags); in isolate_migratepages_block()
1083 if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) { in isolate_migratepages_block()
1093 cc->total_migrate_scanned += nr_scanned; in isolate_migratepages_block()
1101 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1104 * @end_pfn: The one-past-last PFN.
1107 * Otherwise, function returns one-past-the-last PFN of isolated page
1119 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages_range()
1120 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages_range()
1130 block_end_pfn, cc->zone)) in isolate_migratepages_range()
1139 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) in isolate_migratepages_range()
1157 if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) in suitable_migration_source()
1162 if (cc->migratetype == MIGRATE_MOVABLE) in suitable_migration_source()
1165 return block_mt == cc->migratetype; in suitable_migration_source()
1175 * We are checking page_order without zone->lock taken. But in suitable_migration_target()
1183 if (cc->ignore_block_suitable) in suitable_migration_target()
1197 unsigned short shift = BITS_PER_LONG - 1; in freelist_scan_limit()
1199 return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; in freelist_scan_limit()
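freelist_scan_limit() shrinks the fast free-page search geometrically with each failure: the limit is (COMPACT_CLUSTER_MAX >> fast_search_fail) + 1, with the shift clamped to BITS_PER_LONG - 1 so it never underflows. Assuming the usual COMPACT_CLUSTER_MAX of 32, that works out to 33 freelist entries examined on the first attempt, then 17, 9, 5, 3, 2, and finally a floor of 1 once the fast search has failed repeatedly.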
1208 return (cc->free_pfn >> pageblock_order) in compact_scanners_met()
1209 <= (cc->migrate_pfn >> pageblock_order); in compact_scanners_met()
1222 if (!list_is_last(freelist, &freepage->lru)) { in move_freelist_head()
1223 list_cut_before(&sublist, freelist, &freepage->lru); in move_freelist_head()
1240 if (!list_is_first(freelist, &freepage->lru)) { in move_freelist_tail()
1241 list_cut_position(&sublist, freelist, &freepage->lru); in move_freelist_tail()
1254 if (cc->nr_freepages >= cc->nr_migratepages) in fast_isolate_around()
1258 if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) in fast_isolate_around()
1262 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); in fast_isolate_around()
1263 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); in fast_isolate_around()
1265 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); in fast_isolate_around()
1271 isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false); in fast_isolate_around()
1272 if (cc->nr_freepages >= cc->nr_migratepages) in fast_isolate_around()
1279 isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); in fast_isolate_around()
1282 if (cc->nr_freepages < cc->nr_migratepages) in fast_isolate_around()
1286 /* Search orders in round-robin fashion */
1289 order--; in next_search_order()
1291 order = cc->order - 1; in next_search_order()
1294 if (order == cc->search_order) { in next_search_order()
1295 cc->search_order--; in next_search_order()
1296 if (cc->search_order < 0) in next_search_order()
1297 cc->search_order = cc->order - 1; in next_search_order()
1298 return -1; in next_search_order()
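next_search_order() is the round-robin walk its comment announces: the fast free scanner starts at cc->search_order, counts down, wraps to cc->order - 1, and returns -1 once it comes back to the order it started from (nudging cc->search_order down by one so the next invocation starts lower). As a worked example, with cc->order = 9 and cc->search_order = 3 the orders are visited as 3, 2, 1, 0, 8, 7, 6, 5, 4, after which the search gives up and cc->search_order becomes 2.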
1317 if (cc->order <= 0) in fast_isolate_freepages()
1318 return cc->free_pfn; in fast_isolate_freepages()
1324 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { in fast_isolate_freepages()
1333 distance = (cc->free_pfn - cc->migrate_pfn); in fast_isolate_freepages()
1334 low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); in fast_isolate_freepages()
1335 min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); in fast_isolate_freepages()
1344 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1346 for (order = cc->search_order; in fast_isolate_freepages()
1349 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1356 if (!area->nr_free) in fast_isolate_freepages()
1359 spin_lock_irqsave(&cc->zone->lock, flags); in fast_isolate_freepages()
1360 freelist = &area->free_list[MIGRATE_MOVABLE]; in fast_isolate_freepages()
1370 cc->zone->zone_start_pfn); in fast_isolate_freepages()
1373 cc->fast_search_fail = 0; in fast_isolate_freepages()
1374 cc->search_order = order; in fast_isolate_freepages()
1406 cc->nr_freepages += nr_isolated; in fast_isolate_freepages()
1407 list_add_tail(&page->lru, &cc->freepages); in fast_isolate_freepages()
1411 order = cc->search_order + 1; in fast_isolate_freepages()
1416 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_isolate_freepages()
1427 cc->fast_search_fail++; in fast_isolate_freepages()
1436 cc->free_pfn = highest; in fast_isolate_freepages()
1438 if (cc->direct_compaction && pfn_valid(min_pfn)) { in fast_isolate_freepages()
1441 zone_end_pfn(cc->zone)), in fast_isolate_freepages()
1442 cc->zone); in fast_isolate_freepages()
1443 cc->free_pfn = min_pfn; in fast_isolate_freepages()
1449 if (highest && highest >= cc->zone->compact_cached_free_pfn) { in fast_isolate_freepages()
1450 highest -= pageblock_nr_pages; in fast_isolate_freepages()
1451 cc->zone->compact_cached_free_pfn = highest; in fast_isolate_freepages()
1454 cc->total_free_scanned += nr_scanned; in fast_isolate_freepages()
1456 return cc->free_pfn; in fast_isolate_freepages()
1469 struct zone *zone = cc->zone; in isolate_freepages()
1475 struct list_head *freelist = &cc->freepages; in isolate_freepages()
1480 if (cc->nr_freepages) in isolate_freepages()
1485 * successfully isolated from, zone-cached value, or the end of the in isolate_freepages()
1488 * block_start_pfn -= pageblock_nr_pages in the for loop. in isolate_freepages()
1494 isolate_start_pfn = cc->free_pfn; in isolate_freepages()
1498 low_pfn = pageblock_end_pfn(cc->migrate_pfn); in isolate_freepages()
1499 stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; in isolate_freepages()
1503 * pages on cc->migratepages. We stop searching if the migrate in isolate_freepages()
1508 block_start_pfn -= pageblock_nr_pages, in isolate_freepages()
1541 if (cc->nr_freepages >= cc->nr_migratepages) { in isolate_freepages()
1548 block_start_pfn - pageblock_nr_pages; in isolate_freepages()
1573 cc->free_pfn = isolate_start_pfn; in isolate_freepages()
1581 * This is a migrate-callback that "allocates" freepages by taking pages
1590 if (list_empty(&cc->freepages)) { in compaction_alloc()
1593 if (list_empty(&cc->freepages)) in compaction_alloc()
1597 freepage = list_entry(cc->freepages.next, struct page, lru); in compaction_alloc()
1598 list_del(&freepage->lru); in compaction_alloc()
1599 cc->nr_freepages--; in compaction_alloc()
1605 * This is a migrate-callback that "frees" freepages back to the isolated
1613 list_add(&page->lru, &cc->freepages); in compaction_free()
1614 cc->nr_freepages++; in compaction_free()
1637 if (cc->fast_start_pfn == ULONG_MAX) in update_fast_start_pfn()
1640 if (!cc->fast_start_pfn) in update_fast_start_pfn()
1641 cc->fast_start_pfn = pfn; in update_fast_start_pfn()
1643 cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); in update_fast_start_pfn()
1649 if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) in reinit_migrate_pfn()
1650 return cc->migrate_pfn; in reinit_migrate_pfn()
1652 cc->migrate_pfn = cc->fast_start_pfn; in reinit_migrate_pfn()
1653 cc->fast_start_pfn = ULONG_MAX; in reinit_migrate_pfn()
1655 return cc->migrate_pfn; in reinit_migrate_pfn()
1668 unsigned long pfn = cc->migrate_pfn; in fast_find_migrateblock()
1674 if (cc->ignore_skip_hint) in fast_find_migrateblock()
1682 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) in fast_find_migrateblock()
1690 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
1699 if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) in fast_find_migrateblock()
1708 distance = (cc->free_pfn - cc->migrate_pfn) >> 1; in fast_find_migrateblock()
1709 if (cc->migrate_pfn != cc->zone->zone_start_pfn) in fast_find_migrateblock()
1711 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); in fast_find_migrateblock()
1713 for (order = cc->order - 1; in fast_find_migrateblock()
1715 order--) { in fast_find_migrateblock()
1716 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
1721 if (!area->nr_free) in fast_find_migrateblock()
1724 spin_lock_irqsave(&cc->zone->lock, flags); in fast_find_migrateblock()
1725 freelist = &area->free_list[MIGRATE_MOVABLE]; in fast_find_migrateblock()
1750 cc->fast_search_fail = 0; in fast_find_migrateblock()
1756 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_find_migrateblock()
1759 cc->total_migrate_scanned += nr_scanned; in fast_find_migrateblock()
1766 cc->fast_search_fail++; in fast_find_migrateblock()
1785 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); in isolate_migratepages()
1795 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages()
1796 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages()
1803 fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; in isolate_migratepages()
1812 for (; block_end_pfn <= cc->free_pfn; in isolate_migratepages()
1827 block_end_pfn, cc->zone); in isolate_migratepages()
1871 cc->migrate_pfn = low_pfn; in isolate_migratepages()
1873 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; in isolate_migratepages()
1877 * order == -1 is expected when compacting via
1882 return order == -1; in is_via_compact_memory()
1887 return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING); in kswapd_is_running()
1892 * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value
1904 score = zone->present_pages * in fragmentation_score_zone()
1906 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); in fragmentation_score_zone()
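Per its comment, fragmentation_score_zone() is the zone's external fragmentation for COMPACTION_HPAGE_ORDER (the factor elided from this listing) scaled by the zone's share of the node: score = present_pages * frag / (node_present_pages + 1). Worked through, a zone holding one quarter of the node's present pages with a fragmentation value of 80 for the huge-page order contributes roughly 80 / 4 = 20; fragmentation_score_node() below sums these per-zone contributions into the node-wide score that proactive compaction compares against its watermarks.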
1910 * The per-node proactive (background) compaction process is started by its
1913 * the node's score falls below the low threshold, or one of the back-off
1924 zone = &pgdat->node_zones[zoneid]; in fragmentation_score_node()
1940 wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); in fragmentation_score_wmark()
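fragmentation_score_wmark() ties the proactive threshold directly to the sysctl: wmark_low = max(100 - sysctl_compaction_proactiveness, 5). With proactiveness set to 20, the node score therefore has to fall below 80 before proactive compaction stops; at 95 or above, the max() floor keeps the threshold at 5 rather than letting it reach 0, so background compaction still has a stopping point.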
1958 const int migratetype = cc->migratetype; in __compact_finished()
1964 reset_cached_positions(cc->zone); in __compact_finished()
1972 if (cc->direct_compaction) in __compact_finished()
1973 cc->zone->compact_blockskip_flush = true; in __compact_finished()
1975 if (cc->whole_zone) in __compact_finished()
1981 if (cc->proactive_compaction) { in __compact_finished()
1985 pgdat = cc->zone->zone_pgdat; in __compact_finished()
1989 score = fragmentation_score_zone(cc->zone); in __compact_finished()
2000 if (is_via_compact_memory(cc->order)) in __compact_finished()
2009 if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) in __compact_finished()
2014 for (order = cc->order; order < MAX_ORDER; order++) { in __compact_finished()
2015 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2033 true, &can_steal) != -1) { in __compact_finished()
2040 * We are stealing for a non-movable allocation. Make in __compact_finished()
2044 * to sync compaction, as async compaction operates in __compact_finished()
2047 if (cc->mode == MIGRATE_ASYNC || in __compact_finished()
2048 IS_ALIGNED(cc->migrate_pfn, in __compact_finished()
2059 if (cc->contended || fatal_signal_pending(current)) in __compact_finished()
2070 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2080 * COMPACT_SKIPPED - If there are too few free pages for compaction
2081 * COMPACT_SUCCESS - If the allocation would succeed without compaction
2082 * COMPACT_CONTINUE - If compaction should run now
2096 * If watermarks for high-order allocation are already met, there in __compaction_suitable()
2104 * Watermarks for order-0 must be met for compaction to be able to in __compaction_suitable()
2140 * index of -1000 would imply allocations might succeed depending on in compaction_suitable()
2141 * watermarks, but we already failed the high-order watermark check in compaction_suitable()
2146 * ignore fragindex for non-costly orders where the alternative to in compaction_suitable()
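The "-1000" in the compaction_suitable() comment refers to the fragmentation index computed in mm/vmstat.c, which is not part of this listing; paraphrasing that definition as an assumption: the index is -1000 when a suitably large free block already exists (the allocation could succeed, only watermarks decide), and otherwise roughly index = 1000 - (1000 + 1000 * free_pages / requested) / free_blocks_total, so values near 0 mean a failure would be due to a plain lack of memory while values near 1000 mean it would be due to fragmentation, the case compaction can actually help with. For example, an order-9 request (512 pages) against 10000 free pages scattered across 300 free blocks, none large enough, gives 1000 - (1000 + 19531) / 300 ≈ 932: plenty of memory, badly fragmented, so compaction is worthwhile.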
2175 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in compaction_zonelist_suitable()
2176 ac->highest_zoneidx, ac->nodemask) { in compaction_zonelist_suitable()
2189 ac->highest_zoneidx, available); in compaction_zonelist_suitable()
2201 unsigned long start_pfn = cc->zone->zone_start_pfn; in compact_zone()
2202 unsigned long end_pfn = zone_end_pfn(cc->zone); in compact_zone()
2204 const bool sync = cc->mode != MIGRATE_ASYNC; in compact_zone() local
2211 cc->total_migrate_scanned = 0; in compact_zone()
2212 cc->total_free_scanned = 0; in compact_zone()
2213 cc->nr_migratepages = 0; in compact_zone()
2214 cc->nr_freepages = 0; in compact_zone()
2215 INIT_LIST_HEAD(&cc->freepages); in compact_zone()
2216 INIT_LIST_HEAD(&cc->migratepages); in compact_zone()
2218 cc->migratetype = gfp_migratetype(cc->gfp_mask); in compact_zone()
2219 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, in compact_zone()
2220 cc->highest_zoneidx); in compact_zone()
2232 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2233 __reset_isolation_suitable(cc->zone); in compact_zone()
2241 cc->fast_start_pfn = 0; in compact_zone()
2242 if (cc->whole_zone) { in compact_zone()
2243 cc->migrate_pfn = start_pfn; in compact_zone()
2244 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); in compact_zone()
2246 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; in compact_zone()
2247 cc->free_pfn = cc->zone->compact_cached_free_pfn; in compact_zone()
2248 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { in compact_zone()
2249 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); in compact_zone()
2250 cc->zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
2252 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { in compact_zone()
2253 cc->migrate_pfn = start_pfn; in compact_zone()
2254 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
2255 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
2258 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) in compact_zone()
2259 cc->whole_zone = true; in compact_zone()
2265 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on in compact_zone()
2268 * no isolation candidates, then the sync state does not matter. in compact_zone()
2270 * cached PFNs in sync to avoid revisiting the same blocks. in compact_zone()
2272 update_cached = !sync && in compact_zone()
2273 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; in compact_zone()
2275 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, in compact_zone()
2276 cc->free_pfn, end_pfn, sync); in compact_zone()
2282 unsigned long start_pfn = cc->migrate_pfn; in compact_zone()
2292 cc->rescan = false; in compact_zone()
2295 cc->rescan = true; in compact_zone()
2301 putback_movable_pages(&cc->migratepages); in compact_zone()
2302 cc->nr_migratepages = 0; in compact_zone()
2306 cc->zone->compact_cached_migrate_pfn[1] = in compact_zone()
2307 cc->zone->compact_cached_migrate_pfn[0]; in compact_zone()
2313 * previous cc->order aligned block. in compact_zone()
2322 err = migrate_pages(&cc->migratepages, compaction_alloc, in compact_zone()
2323 compaction_free, (unsigned long)cc, cc->mode, in compact_zone()
2326 trace_mm_compaction_migratepages(cc->nr_migratepages, err, in compact_zone()
2327 &cc->migratepages); in compact_zone()
2330 cc->nr_migratepages = 0; in compact_zone()
2332 putback_movable_pages(&cc->migratepages); in compact_zone()
2334 * migrate_pages() may return -ENOMEM when scanners meet in compact_zone()
2337 if (err == -ENOMEM && !compact_scanners_met(cc)) { in compact_zone()
2343 * order-aligned block, so skip the rest of it. in compact_zone()
2345 if (cc->direct_compaction && in compact_zone()
2346 (cc->mode == MIGRATE_ASYNC)) { in compact_zone()
2347 cc->migrate_pfn = block_end_pfn( in compact_zone()
2348 cc->migrate_pfn - 1, cc->order); in compact_zone()
2357 * cc->order aligned block where we migrated from? If yes, in compact_zone()
2362 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2364 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
2367 lru_add_drain_cpu_zone(cc->zone); in compact_zone()
2374 if (capc && capc->page) { in compact_zone()
2385 if (cc->nr_freepages > 0) { in compact_zone()
2386 unsigned long free_pfn = release_freepages(&cc->freepages); in compact_zone()
2388 cc->nr_freepages = 0; in compact_zone()
2396 if (free_pfn > cc->zone->compact_cached_free_pfn) in compact_zone()
2397 cc->zone->compact_cached_free_pfn = free_pfn; in compact_zone()
2400 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); in compact_zone()
2401 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); in compact_zone()
2403 trace_mm_compaction_end(start_pfn, cc->migrate_pfn, in compact_zone()
2404 cc->free_pfn, end_pfn, sync, ret); in compact_zone()
2440 WRITE_ONCE(current->capture_control, &capc); in compact_zone_order()
2452 WRITE_ONCE(current->capture_control, NULL); in compact_zone_order()
2461 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2481 * Check if the GFP flags allow compaction - GFP_NOIO is really in try_to_compact_pages()
2490 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in try_to_compact_pages()
2491 ac->highest_zoneidx, ac->nodemask) { in try_to_compact_pages()
2501 alloc_flags, ac->highest_zoneidx, capture); in try_to_compact_pages()
2545 * due to various back-off conditions, such as contention on per-node or
2546 * per-zone locks.
2553 .order = -1, in proactive_compact_node()
2562 zone = &pgdat->node_zones[zoneid]; in proactive_compact_node()
2582 .order = -1, in compact_node()
2592 zone = &pgdat->node_zones[zoneid]; in compact_node()
2645 int nid = dev->id; in sysfs_compact_node()
2660 return device_create_file(&node->dev, &dev_attr_compact); in compaction_register_node()
2665 return device_remove_file(&node->dev, &dev_attr_compact); in compaction_unregister_node()
2671 return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); in kcompactd_work_requested()
2678 enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; in kcompactd_node_suitable()
2681 zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
2686 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, in kcompactd_node_suitable()
2703 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
2704 .search_order = pgdat->kcompactd_max_order, in kcompactd_do_work()
2705 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, in kcompactd_do_work()
2710 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
2717 zone = &pgdat->node_zones[zoneid]; in kcompactd_do_work()
2746 * We use sync migration mode here, so we defer like in kcompactd_do_work()
2747 * sync direct compaction does. in kcompactd_do_work()
2766 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
2767 pgdat->kcompactd_max_order = 0; in kcompactd_do_work()
2768 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) in kcompactd_do_work()
2769 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; in kcompactd_do_work()
2777 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
2778 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
2780 if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) in wakeup_kcompactd()
2781 pgdat->kcompactd_highest_zoneidx = highest_zoneidx; in wakeup_kcompactd()
2787 if (!wq_has_sleeper(&pgdat->kcompactd_wait)) in wakeup_kcompactd()
2793 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
2795 wake_up_interruptible(&pgdat->kcompactd_wait); in wakeup_kcompactd()
2808 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kcompactd()
2815 pgdat->kcompactd_max_order = 0; in kcompactd()
2816 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; in kcompactd()
2821 trace_mm_compaction_kcompactd_sleep(pgdat->node_id); in kcompactd()
2822 if (wait_event_freezable_timeout(pgdat->kcompactd_wait, in kcompactd()
2837 proactive_defer--; in kcompactd()
2856 * This kcompactd start function will be called by init and node-hot-add.
2857 * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
2864 if (pgdat->kcompactd) in kcompactd_run()
2867 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); in kcompactd_run()
2868 if (IS_ERR(pgdat->kcompactd)) { in kcompactd_run()
2870 ret = PTR_ERR(pgdat->kcompactd); in kcompactd_run()
2871 pgdat->kcompactd = NULL; in kcompactd_run()
2882 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; in kcompactd_stop()
2886 NODE_DATA(nid)->kcompactd = NULL; in kcompactd_stop()
2904 mask = cpumask_of_node(pgdat->node_id); in kcompactd_cpu_online()
2908 set_cpus_allowed_ptr(pgdat->kcompactd, mask); in kcompactd_cpu_online()