Lines Matching +full:set +full:- +full:io +full:- +full:isolation

1 // SPDX-License-Identifier: GPL-2.0
9 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
17 #include <linux/backing-dev.h>
20 #include <linux/page-isolation.h>
57 * Page order with respect to which proactive compaction
66 #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
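
The define above derives the huge-page order directly from the page-table geometry. A worked example, assuming the common x86-64 constants (PMD_SHIFT and PAGE_SHIFT are architecture-defined; the values below are assumptions for illustration, not taken from this file):

    /* Sketch: with PMD_SHIFT = 21 and PAGE_SHIFT = 12 (x86-64),
     * COMPACTION_HPAGE_ORDER = 21 - 12 = 9, so proactive compaction
     * targets blocks of 1 << 9 = 512 base pages = 2 MiB, the PMD
     * huge-page size. */
    #define PMD_SHIFT	21	/* assumed x86-64 value */
    #define PAGE_SHIFT	12	/* assumed x86-64 value */
    #define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)	/* == 9 */
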
76 list_del(&page->lru); in release_freepages()
92 list_del(&page->lru); in split_map_pages()
102 list_add(&page->lru, &tmp_list); in split_map_pages()
130 page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE); in __SetPageMovable()
141 page->mapping = (void *)PAGE_MAPPING_MOVABLE; in __ClearPageMovable()
155 zone->compact_considered = 0; in defer_compaction()
156 zone->compact_defer_shift++; in defer_compaction()
158 if (order < zone->compact_order_failed) in defer_compaction()
159 zone->compact_order_failed = order; in defer_compaction()
161 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
162 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
170 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
172 if (order < zone->compact_order_failed) in compaction_deferred()
176 if (++zone->compact_considered >= defer_limit) { in compaction_deferred()
177 zone->compact_considered = defer_limit; in compaction_deferred()
195 zone->compact_considered = 0; in compaction_defer_reset()
196 zone->compact_defer_shift = 0; in compaction_defer_reset()
198 if (order >= zone->compact_order_failed) in compaction_defer_reset()
199 zone->compact_order_failed = order + 1; in compaction_defer_reset()
207 if (order < zone->compact_order_failed) in compaction_restarting()
210 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && in compaction_restarting()
211 zone->compact_considered >= 1UL << zone->compact_defer_shift; in compaction_restarting()
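
Taken together, defer_compaction(), compaction_deferred() and compaction_defer_reset() implement exponential backoff: each failure doubles the number of allocation attempts that skip compaction, capped at 1 << COMPACT_MAX_DEFER_SHIFT (64 with the in-tree shift of 6). A minimal standalone model of the counting side (a sketch, not kernel code):

    #include <stdbool.h>

    #define COMPACT_MAX_DEFER_SHIFT 6	/* matches include/linux/compaction.h */

    struct zone_model {
        unsigned long compact_considered;
        unsigned int compact_defer_shift;
    };

    /* Called on failure: widen the window, capped at 1 << 6 attempts. */
    static void defer(struct zone_model *z)
    {
        z->compact_considered = 0;
        if (++z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
            z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
    }

    /* Called per attempt: true means skip compaction this time. */
    static bool deferred(struct zone_model *z)
    {
        unsigned long limit = 1UL << z->compact_defer_shift;

        if (++z->compact_considered >= limit) {
            z->compact_considered = limit;
            return false;	/* window elapsed: try compacting again */
        }
        return true;
    }
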
218 if (cc->ignore_skip_hint) in isolation_suitable()
226 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; in reset_cached_positions()
227 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; in reset_cached_positions()
228 zone->compact_cached_free_pfn = in reset_cached_positions()
229 pageblock_start_pfn(zone_end_pfn(zone) - 1); in reset_cached_positions()
265 while (start_nr-- > 0) { in skip_offline_sections_reverse()
320 * restart points have been set. in __reset_isolation_pfn()
327 * non-movable pageblock as the starting point. in __reset_isolation_pfn()
335 block_pfn = max(block_pfn, zone->zone_start_pfn); in __reset_isolation_pfn()
343 block_pfn = pageblock_end_pfn(pfn) - 1; in __reset_isolation_pfn()
344 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); in __reset_isolation_pfn()
373 * should be skipped for page isolation when the migrate and free page scanner
378 unsigned long migrate_pfn = zone->zone_start_pfn; in __reset_isolation_suitable()
379 unsigned long free_pfn = zone_end_pfn(zone) - 1; in __reset_isolation_suitable()
385 if (!zone->compact_blockskip_flush) in __reset_isolation_suitable()
388 zone->compact_blockskip_flush = false; in __reset_isolation_suitable()
397 free_pfn -= pageblock_nr_pages) { in __reset_isolation_suitable()
405 zone->compact_init_migrate_pfn = reset_migrate; in __reset_isolation_suitable()
406 zone->compact_cached_migrate_pfn[0] = reset_migrate; in __reset_isolation_suitable()
407 zone->compact_cached_migrate_pfn[1] = reset_migrate; in __reset_isolation_suitable()
415 zone->compact_init_free_pfn = reset_free; in __reset_isolation_suitable()
416 zone->compact_cached_free_pfn = reset_free; in __reset_isolation_suitable()
422 zone->compact_cached_migrate_pfn[0] = migrate_pfn; in __reset_isolation_suitable()
423 zone->compact_cached_migrate_pfn[1] = migrate_pfn; in __reset_isolation_suitable()
424 zone->compact_cached_free_pfn = free_pfn; in __reset_isolation_suitable()
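
The shape of the reset walk in __reset_isolation_suitable() is easier to see condensed: the two cursors start at opposite zone ends and converge, clearing skip hints and remembering the first suitable block seen from each end as the new scanner starting positions. A heavily condensed sketch (the per-block suitability tests are elided):

    /* Condensed model of the reset walk, not the full function. */
    static void reset_walk(unsigned long zone_start_pfn,
                           unsigned long zone_end_pfn,
                           unsigned long pageblock_nr_pages)
    {
        unsigned long migrate_pfn = zone_start_pfn;	/* walks upward */
        unsigned long free_pfn = zone_end_pfn - 1;	/* walks downward */

        for (; migrate_pfn < free_pfn;
               migrate_pfn += pageblock_nr_pages,
               free_pfn -= pageblock_nr_pages) {
            /* clear skip hints at both cursors; remember the first
             * suitable block from each end as reset_migrate/reset_free */
        }
    }
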
433 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable()
438 if (zone->compact_blockskip_flush) in reset_isolation_suitable()
445 * locks are not required for readers/writers. Returns true if it was already set. in test_and_set_skip()
452 if (cc->ignore_skip_hint) in test_and_set_skip()
456 if (!skip && !cc->no_set_skip_hint) in test_and_set_skip()
464 struct zone *zone = cc->zone; in update_cached_migrate()
466 /* Set for isolation rather than compaction */ in update_cached_migrate()
467 if (cc->no_set_skip_hint) in update_cached_migrate()
473 if (pfn > zone->compact_cached_migrate_pfn[0]) in update_cached_migrate()
474 zone->compact_cached_migrate_pfn[0] = pfn; in update_cached_migrate()
475 if (cc->mode != MIGRATE_ASYNC && in update_cached_migrate()
476 pfn > zone->compact_cached_migrate_pfn[1]) in update_cached_migrate()
477 zone->compact_cached_migrate_pfn[1] = pfn; in update_cached_migrate()
487 struct zone *zone = cc->zone; in update_pageblock_skip()
489 if (cc->no_set_skip_hint) in update_pageblock_skip()
494 if (pfn < zone->compact_cached_free_pfn) in update_pageblock_skip()
495 zone->compact_cached_free_pfn = pfn; in update_pageblock_skip()
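
Note the opposite comparison directions in update_cached_migrate() and update_pageblock_skip(): the cached migrate pfn only ever advances and the cached free pfn only ever retreats, so a restarted scan resumes where the previous one left off instead of rescanning covered ground. A minimal model (a sketch, not kernel code):

    struct cached_pfns {
        unsigned long migrate_pfn;	/* grows toward the zone end */
        unsigned long free_pfn;	/* shrinks toward the zone start */
    };

    static void cache_migrate(struct cached_pfns *c, unsigned long pfn)
    {
        if (pfn > c->migrate_pfn)
            c->migrate_pfn = pfn;	/* only ever advance */
    }

    static void cache_free(struct cached_pfns *c, unsigned long pfn)
    {
        if (pfn < c->free_pfn)
            c->free_pfn = pfn;	/* only ever retreat */
    }
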
538 if (cc->mode == MIGRATE_ASYNC && !cc->contended) { in compact_lock_irqsave()
542 cc->contended = true; in compact_lock_irqsave()
570 cc->contended = true; in compact_unlock_should_abort()
581 * returning 0 on any invalid PFNs or non-free pages inside the pageblock
598 /* Strict mode is for isolation, speed is secondary */ in isolate_freepages_block()
614 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
630 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
631 page += (1UL << order) - 1; in isolate_freepages_block()
632 nr_scanned += (1UL << order) - 1; in isolate_freepages_block()
642 locked = compact_lock_irqsave(&cc->zone->lock, in isolate_freepages_block()
650 /* Found a free page, will break it into order-0 pages */ in isolate_freepages_block()
657 nr_scanned += isolated - 1; in isolate_freepages_block()
659 cc->nr_freepages += isolated; in isolate_freepages_block()
660 list_add_tail(&page->lru, freelist); in isolate_freepages_block()
662 if (!strict && cc->nr_migratepages <= cc->nr_freepages) { in isolate_freepages_block()
667 blockpfn += isolated - 1; in isolate_freepages_block()
668 page += isolated - 1; in isolate_freepages_block()
678 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
694 * If strict isolation is requested by CMA then check that all the in isolate_freepages_block()
701 cc->total_free_scanned += nr_scanned; in isolate_freepages_block()
708 * isolate_freepages_range() - isolate free pages.
711 * @end_pfn: The one-past-last PFN.
713 * Non-free pages, invalid PFNs, or zone boundaries within the
717 * Otherwise, function returns one-past-the-last PFN of isolated page
730 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_freepages_range()
731 block_start_pfn = cc->zone->zone_start_pfn; in isolate_freepages_range()
753 block_end_pfn, cc->zone)) in isolate_freepages_range()
762 * non-free pages). in isolate_freepages_range()
769 * pageblock_nr_pages for some non-negative n. (Max order in isolate_freepages_range()
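
The all-or-nothing return convention above is what CMA relies on. A hedged sketch of the caller side, loosely modeled on alloc_contig_range() in mm/page_alloc.c (simplified, shown as a commented fragment rather than compilable code):

    /* Caller-side convention (loosely modeled on alloc_contig_range()):
     *
     *   pfn = isolate_freepages_range(&cc, outer_start, end);
     *   if (!pfn)
     *       return -EBUSY;   // some page in the range was not free
     *   // success: pfn == end, and every page in [outer_start, end)
     *   // is now an isolated order-0 page owned by the caller
     */
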
790 pg_data_t *pgdat = cc->zone->zone_pgdat; in too_many_isolated()
803 * Allow GFP_NOFS to isolate past the limit set for regular in too_many_isolated()
808 if (cc->gfp_mask & __GFP_FS) { in too_many_isolated()
821 * isolate_migratepages_block() - isolate all migrate-able pages within
825 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
826 * @mode: Isolation mode to be used.
830 * Returns errno, like -EAGAIN or -EINTR in case of e.g. a pending signal or congestion,
831 * -ENOMEM in case we could not allocate a page, or 0.
832 * cc->migrate_pfn will contain the next pfn to scan.
834 * The pages are isolated on cc->migratepages list (not required to be empty),
835 * and cc->nr_migratepages is updated accordingly.
841 pg_data_t *pgdat = cc->zone->zone_pgdat; in isolate_migratepages_block()
855 cc->migrate_pfn = low_pfn; in isolate_migratepages_block()
863 /* stop isolation if there are still pages not migrated */ in isolate_migratepages_block()
864 if (cc->nr_migratepages) in isolate_migratepages_block()
865 return -EAGAIN; in isolate_migratepages_block()
868 if (cc->mode == MIGRATE_ASYNC) in isolate_migratepages_block()
869 return -EAGAIN; in isolate_migratepages_block()
874 return -EINTR; in isolate_migratepages_block()
879 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { in isolate_migratepages_block()
881 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
890 * previous order-aligned block, and did not skip it due in isolate_migratepages_block()
898 * We failed to isolate in the previous order-aligned in isolate_migratepages_block()
899 * block. Set the new boundary to the end of the in isolate_migratepages_block()
903 * a compound or a high-order buddy page in the in isolate_migratepages_block()
906 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
921 cc->contended = true; in isolate_migratepages_block()
922 ret = -EINTR; in isolate_migratepages_block()
941 low_pfn == cc->zone->zone_start_pfn)) { in isolate_migratepages_block()
950 if (PageHuge(page) && cc->alloc_contig) { in isolate_migratepages_block()
956 ret = isolate_or_dissolve_huge_page(page, &cc->migratepages); in isolate_migratepages_block()
959 * Fail isolation in case isolate_or_dissolve_huge_page() in isolate_migratepages_block()
960 * reports an error. In case of -ENOMEM, abort right away. in isolate_migratepages_block()
963 /* Do not report -EBUSY down the chain */ in isolate_migratepages_block()
964 if (ret == -EBUSY) in isolate_migratepages_block()
966 low_pfn += compound_nr(page) - 1; in isolate_migratepages_block()
967 nr_scanned += compound_nr(page) - 1; in isolate_migratepages_block()
974 * on the cc->migratepages list. in isolate_migratepages_block()
977 low_pfn += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
983 * Buddy and cannot be re-allocated because they are in isolate_migratepages_block()
984 * isolated. Fall-through as the check below handles in isolate_migratepages_block()
993 * potential isolation targets. in isolate_migratepages_block()
1004 low_pfn += (1UL << freepage_order) - 1; in isolate_migratepages_block()
1005 nr_scanned += (1UL << freepage_order) - 1; in isolate_migratepages_block()
1018 if (PageCompound(page) && !cc->alloc_contig) { in isolate_migratepages_block()
1022 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
1023 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1030 * It's possible to migrate LRU and non-lru movable pages. in isolate_migratepages_block()
1056 * sure the page is not being freed elsewhere -- the in isolate_migratepages_block()
1069 if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio)) in isolate_migratepages_block()
1076 if (!(cc->gfp_mask & __GFP_FS) && mapping) in isolate_migratepages_block()
1090 * it will be able to migrate without blocking - clean pages in isolate_migratepages_block()
1101 * a ->migrate_folio callback are possible to in isolate_migratepages_block()
1113 mapping->a_ops->migrate_folio; in isolate_migratepages_block()
1130 compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); in isolate_migratepages_block()
1143 !cc->finish_pageblock) { in isolate_migratepages_block()
1150 * folio became large since the non-locked check, in isolate_migratepages_block()
1153 if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) { in isolate_migratepages_block()
1154 low_pfn += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
1155 nr_scanned += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
1163 low_pfn += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
1172 list_add(&folio->lru, &cc->migratepages); in isolate_migratepages_block()
1174 cc->nr_migratepages += folio_nr_pages(folio); in isolate_migratepages_block()
1176 nr_scanned += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
1184 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX && in isolate_migratepages_block()
1185 !cc->finish_pageblock && !cc->contended) { in isolate_migratepages_block()
1201 if (!skip_on_failure && ret != -ENOMEM) in isolate_migratepages_block()
1206 * instead of migrating, as we cannot form the cc->order buddy in isolate_migratepages_block()
1214 putback_movable_pages(&cc->migratepages); in isolate_migratepages_block()
1215 cc->nr_migratepages = 0; in isolate_migratepages_block()
1220 low_pfn = next_skip_pfn - 1; in isolate_migratepages_block()
1225 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
1228 if (ret == -ENOMEM) in isolate_migratepages_block()
1257 if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { in isolate_migratepages_block()
1258 if (!cc->no_set_skip_hint && valid_page && !skip_updated) in isolate_migratepages_block()
1267 cc->total_migrate_scanned += nr_scanned; in isolate_migratepages_block()
1271 cc->migrate_pfn = low_pfn; in isolate_migratepages_block()
1277 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1280 * @end_pfn: The one-past-last PFN.
1282 * Returns -EAGAIN when contended, -EINTR in case of a signal pending, -ENOMEM
1295 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages_range()
1296 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages_range()
1306 block_end_pfn, cc->zone)) in isolate_migratepages_range()
1315 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) in isolate_migratepages_range()
1333 if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) in suitable_migration_source()
1338 if (cc->migratetype == MIGRATE_MOVABLE) in suitable_migration_source()
1341 return block_mt == cc->migratetype; in suitable_migration_source()
1351 * We are checking page_order without zone->lock taken. But in suitable_migration_target()
1359 if (cc->ignore_block_suitable) in suitable_migration_target()
1373 unsigned short shift = BITS_PER_LONG - 1; in freelist_scan_limit()
1375 return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; in freelist_scan_limit()
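
With COMPACT_CLUSTER_MAX equal to 32 (it aliases SWAP_CLUSTER_MAX in-tree; treat that value as an assumption here), the scan limit computed above roughly halves on every fast-search failure:

    /* fast_search_fail  ->  limit = (32 >> min(fail, 63)) + 1
     *        0          ->  33
     *        1          ->  17
     *        2          ->   9
     *        3          ->   5
     *        4          ->   3
     *        5          ->   2
     *      >= 6         ->   1   (the fast path barely scans at all)
     */
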
1384 return (cc->free_pfn >> pageblock_order) in compact_scanners_met()
1385 <= (cc->migrate_pfn >> pageblock_order); in compact_scanners_met()
1398 if (!list_is_last(freelist, &freepage->lru)) { in move_freelist_head()
1399 list_cut_before(&sublist, freelist, &freepage->lru); in move_freelist_head()
1415 if (!list_is_first(freelist, &freepage->lru)) { in move_freelist_tail()
1416 list_cut_position(&sublist, freelist, &freepage->lru); in move_freelist_tail()
1428 if (cc->nr_freepages >= cc->nr_migratepages) in fast_isolate_around()
1432 if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) in fast_isolate_around()
1436 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); in fast_isolate_around()
1437 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); in fast_isolate_around()
1439 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); in fast_isolate_around()
1443 isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); in fast_isolate_around()
1446 if (start_pfn == end_pfn && !cc->no_set_skip_hint) in fast_isolate_around()
1450 /* Search orders in round-robin fashion */
1453 order--; in next_search_order()
1455 order = cc->order - 1; in next_search_order()
1458 if (order == cc->search_order) { in next_search_order()
1459 cc->search_order--; in next_search_order()
1460 if (cc->search_order < 0) in next_search_order()
1461 cc->search_order = cc->order - 1; in next_search_order()
1462 return -1; in next_search_order()
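
A concrete trace helps here. Assuming cc->order == 4 and cc->search_order == 2, the caller's loop in fast_isolate_freepages() visits the free-list orders in the sequence below before next_search_order() signals a full wrap with -1:

    /* Sketch of one full round-robin cycle (cc->order = 4):
     *
     *   search_order = 2:  visit 2 -> 1 -> 0 -> wrap to 3 -> back at 2: stop (-1)
     *
     * On the wrap-around exit cc->search_order is also decremented, so the
     * next fast search starts one order lower and the cycle rotates. */
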
1480 if (cc->order <= 0) in fast_isolate_freepages()
1487 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { in fast_isolate_freepages()
1496 distance = (cc->free_pfn - cc->migrate_pfn); in fast_isolate_freepages()
1497 low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); in fast_isolate_freepages()
1498 min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); in fast_isolate_freepages()
1504 * Search starts from the last successful isolation order or the next in fast_isolate_freepages()
1507 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1509 for (order = cc->search_order; in fast_isolate_freepages()
1512 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1519 if (!area->nr_free) in fast_isolate_freepages()
1522 spin_lock_irqsave(&cc->zone->lock, flags); in fast_isolate_freepages()
1523 freelist = &area->free_list[MIGRATE_MOVABLE]; in fast_isolate_freepages()
1533 cc->zone->zone_start_pfn); in fast_isolate_freepages()
1536 cc->fast_search_fail = 0; in fast_isolate_freepages()
1537 cc->search_order = order; in fast_isolate_freepages()
1569 nr_scanned += nr_isolated - 1; in fast_isolate_freepages()
1571 cc->nr_freepages += nr_isolated; in fast_isolate_freepages()
1572 list_add_tail(&page->lru, &cc->freepages); in fast_isolate_freepages()
1575 /* If isolation fails, abort the search */ in fast_isolate_freepages()
1576 order = cc->search_order + 1; in fast_isolate_freepages()
1581 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_isolate_freepages()
1584 if (cc->nr_freepages >= cc->nr_migratepages) in fast_isolate_freepages()
1595 trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn, in fast_isolate_freepages()
1599 cc->fast_search_fail++; in fast_isolate_freepages()
1608 cc->free_pfn = highest; in fast_isolate_freepages()
1610 if (cc->direct_compaction && pfn_valid(min_pfn)) { in fast_isolate_freepages()
1613 zone_end_pfn(cc->zone)), in fast_isolate_freepages()
1614 cc->zone); in fast_isolate_freepages()
1615 cc->free_pfn = min_pfn; in fast_isolate_freepages()
1621 if (highest && highest >= cc->zone->compact_cached_free_pfn) { in fast_isolate_freepages()
1622 highest -= pageblock_nr_pages; in fast_isolate_freepages()
1623 cc->zone->compact_cached_free_pfn = highest; in fast_isolate_freepages()
1626 cc->total_free_scanned += nr_scanned; in fast_isolate_freepages()
1640 struct zone *zone = cc->zone; in isolate_freepages()
1646 struct list_head *freelist = &cc->freepages; in isolate_freepages()
1651 if (cc->nr_freepages) in isolate_freepages()
1656 * successfully isolated from, zone-cached value, or the end of the in isolate_freepages()
1659 * block_start_pfn -= pageblock_nr_pages in the for loop. in isolate_freepages()
1665 isolate_start_pfn = cc->free_pfn; in isolate_freepages()
1669 low_pfn = pageblock_end_pfn(cc->migrate_pfn); in isolate_freepages()
1670 stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; in isolate_freepages()
1674 * pages on cc->migratepages. We stop searching if the migrate in isolate_freepages()
1679 block_start_pfn -= pageblock_nr_pages, in isolate_freepages()
1706 /* If isolation recently failed, do not retry */ in isolate_freepages()
1716 update_pageblock_skip(cc, page, block_start_pfn - in isolate_freepages()
1720 if (cc->nr_freepages >= cc->nr_migratepages) { in isolate_freepages()
1727 block_start_pfn - pageblock_nr_pages; in isolate_freepages()
1732 * If isolation failed early, do not continue in isolate_freepages()
1738 /* Adjust stride depending on isolation */ in isolate_freepages()
1748 * broke from the loop and set isolate_start_pfn based on the last in isolate_freepages()
1752 cc->free_pfn = isolate_start_pfn; in isolate_freepages()
1760 * This is a migrate-callback that "allocates" freepages by taking pages
1768 if (list_empty(&cc->freepages)) { in compaction_alloc()
1771 if (list_empty(&cc->freepages)) in compaction_alloc()
1775 dst = list_entry(cc->freepages.next, struct folio, lru); in compaction_alloc()
1776 list_del(&dst->lru); in compaction_alloc()
1777 cc->nr_freepages--; in compaction_alloc()
1783 * This is a migrate-callback that "frees" freepages back to the isolated
1791 list_add(&dst->lru, &cc->freepages); in compaction_free()
1792 cc->nr_freepages++; in compaction_free()
1819 if (cc->fast_start_pfn == ULONG_MAX) in update_fast_start_pfn()
1822 if (!cc->fast_start_pfn) in update_fast_start_pfn()
1823 cc->fast_start_pfn = pfn; in update_fast_start_pfn()
1825 cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); in update_fast_start_pfn()
1831 if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) in reinit_migrate_pfn()
1832 return cc->migrate_pfn; in reinit_migrate_pfn()
1834 cc->migrate_pfn = cc->fast_start_pfn; in reinit_migrate_pfn()
1835 cc->fast_start_pfn = ULONG_MAX; in reinit_migrate_pfn()
1837 return cc->migrate_pfn; in reinit_migrate_pfn()
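
update_fast_start_pfn() and reinit_migrate_pfn() form a one-shot rewind pair: the fast search records the lowest pfn it ever handed out, and when the migrate scanner later needs a restart point it rewinds to that pfn and disarms the record by setting it to ULONG_MAX. A condensed model (a sketch, not kernel code):

    #include <limits.h>

    static unsigned long fast_start_pfn;	/* 0 = unset, ULONG_MAX = disarmed */
    static unsigned long migrate_pfn;

    static void record_fast_pfn(unsigned long pfn)
    {
        if (fast_start_pfn == ULONG_MAX)
            return;			/* rewind already consumed */
        if (!fast_start_pfn || pfn < fast_start_pfn)
            fast_start_pfn = pfn;	/* keep the lowest hit */
    }

    static unsigned long rewind_migrate_pfn(void)
    {
        if (fast_start_pfn && fast_start_pfn != ULONG_MAX) {
            migrate_pfn = fast_start_pfn;
            fast_start_pfn = ULONG_MAX;	/* one-shot */
        }
        return migrate_pfn;
    }
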
1850 unsigned long pfn = cc->migrate_pfn; in fast_find_migrateblock()
1856 if (cc->ignore_skip_hint) in fast_find_migrateblock()
1863 if (cc->finish_pageblock) in fast_find_migrateblock()
1871 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) in fast_find_migrateblock()
1879 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
1888 if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) in fast_find_migrateblock()
1897 distance = (cc->free_pfn - cc->migrate_pfn) >> 1; in fast_find_migrateblock()
1898 if (cc->migrate_pfn != cc->zone->zone_start_pfn) in fast_find_migrateblock()
1900 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); in fast_find_migrateblock()
1902 for (order = cc->order - 1; in fast_find_migrateblock()
1904 order--) { in fast_find_migrateblock()
1905 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
1910 if (!area->nr_free) in fast_find_migrateblock()
1913 spin_lock_irqsave(&cc->zone->lock, flags); in fast_find_migrateblock()
1914 freelist = &area->free_list[MIGRATE_MOVABLE]; in fast_find_migrateblock()
1939 if (pfn < cc->zone->zone_start_pfn) in fast_find_migrateblock()
1940 pfn = cc->zone->zone_start_pfn; in fast_find_migrateblock()
1941 cc->fast_search_fail = 0; in fast_find_migrateblock()
1946 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_find_migrateblock()
1949 cc->total_migrate_scanned += nr_scanned; in fast_find_migrateblock()
1956 cc->fast_search_fail++; in fast_find_migrateblock()
1975 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); in isolate_migratepages()
1985 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages()
1986 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages()
1990 * set with a skipped flag, so to avoid the isolation_suitable check in isolate_migratepages()
1993 fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; in isolate_migratepages()
2002 for (; block_end_pfn <= cc->free_pfn; in isolate_migratepages()
2004 cc->migrate_pfn = low_pfn = block_end_pfn, in isolate_migratepages()
2017 block_end_pfn, cc->zone); in isolate_migratepages()
2023 block_end_pfn = min(next_pfn, cc->free_pfn); in isolate_migratepages()
2028 * If isolation recently failed, do not retry. Only check the in isolate_migratepages()
2035 low_pfn == cc->zone->zone_start_pfn) && in isolate_migratepages()
2052 /* Perform the isolation */ in isolate_migratepages()
2065 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; in isolate_migratepages()
2069 * order == -1 is expected when compacting via
2074 return order == -1; in is_via_compact_memory()
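
The order == -1 case corresponds to the manual trigger: writing to /proc/sys/vm/compact_memory runs full compaction on every node (compare the .order = -1 initializers in proactive_compact_node() and compact_node() further down). A hedged userspace illustration:

    /* Userspace sketch: trigger whole-system compaction by hand.
     * Requires root; this kernel version accepts only the value 1 and
     * rejects anything else with -EINVAL (see sysctl_compaction_handler). */
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);

        if (fd < 0)
            return 1;
        (void)write(fd, "1", 1);	/* kicks compact_node() with order == -1 */
        close(fd);
        return 0;
    }
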
2080 * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
2088 running = pgdat->kswapd && task_is_running(pgdat->kswapd); in kswapd_is_running()
2117 score = zone->present_pages * fragmentation_score_zone(zone); in fragmentation_score_zone_weighted()
2118 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); in fragmentation_score_zone_weighted()
2122 * The per-node proactive (background) compaction process is started by its
2125 * the node's score falls below the low threshold, or one of the back-off
2136 zone = &pgdat->node_zones[zoneid]; in fragmentation_score_node()
2154 wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); in fragmentation_score_wmark()
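
The proactiveness-to-threshold mapping is simple arithmetic. A worked sketch, assuming the in-tree default vm.compaction_proactiveness of 20 and the +10 high-watermark offset used by fragmentation_score_wmark() (both labeled assumptions; the offset is an implementation detail):

    /* Sketch of the threshold arithmetic. */
    static unsigned int score_wmark(unsigned int proactiveness, int low)
    {
        unsigned int wmark_low = 100U - proactiveness;

        if (wmark_low < 5U)
            wmark_low = 5U;
        /* default proactiveness 20: low threshold 80, high threshold 90;
         * proactive work starts above the high mark, stops below the low */
        return low ? wmark_low
                   : (wmark_low + 10U > 100U ? 100U : wmark_low + 10U);
    }
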
2172 const int migratetype = cc->migratetype; in __compact_finished()
2178 reset_cached_positions(cc->zone); in __compact_finished()
2182 * by kswapd when it goes to sleep. kcompactd does not set the in __compact_finished()
2186 if (cc->direct_compaction) in __compact_finished()
2187 cc->zone->compact_blockskip_flush = true; in __compact_finished()
2189 if (cc->whole_zone) in __compact_finished()
2195 if (cc->proactive_compaction) { in __compact_finished()
2199 pgdat = cc->zone->zone_pgdat; in __compact_finished()
2203 score = fragmentation_score_zone(cc->zone); in __compact_finished()
2214 if (is_via_compact_memory(cc->order)) in __compact_finished()
2223 if (!pageblock_aligned(cc->migrate_pfn)) in __compact_finished()
2228 for (order = cc->order; order <= MAX_ORDER; order++) { in __compact_finished()
2229 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2247 true, &can_steal) != -1) in __compact_finished()
2250 * stealing for a non-movable allocation, make sure in __compact_finished()
2260 if (cc->contended || fatal_signal_pending(current)) in __compact_finished()
2271 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2284 * Watermarks for order-0 must be met for compaction to be able to in __compaction_suitable()
2289 * isolation. We however do use the direct compactor's highest_zoneidx in __compaction_suitable()
2318 * index of -1000 would imply allocations might succeed depending on in compaction_suitable()
2319 * watermarks, but we already failed the high-order watermark check in compaction_suitable()
2324 * ignore fragindex for non-costly orders where the alternative to in compaction_suitable()
2360 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in compaction_zonelist_suitable()
2361 ac->highest_zoneidx, ac->nodemask) { in compaction_zonelist_suitable()
2372 if (__compaction_suitable(zone, order, ac->highest_zoneidx, in compaction_zonelist_suitable()
2384 unsigned long start_pfn = cc->zone->zone_start_pfn; in compact_zone()
2385 unsigned long end_pfn = zone_end_pfn(cc->zone); in compact_zone()
2387 const bool sync = cc->mode != MIGRATE_ASYNC; in compact_zone()
2395 cc->total_migrate_scanned = 0; in compact_zone()
2396 cc->total_free_scanned = 0; in compact_zone()
2397 cc->nr_migratepages = 0; in compact_zone()
2398 cc->nr_freepages = 0; in compact_zone()
2399 INIT_LIST_HEAD(&cc->freepages); in compact_zone()
2400 INIT_LIST_HEAD(&cc->migratepages); in compact_zone()
2402 cc->migratetype = gfp_migratetype(cc->gfp_mask); in compact_zone()
2404 if (!is_via_compact_memory(cc->order)) { in compact_zone()
2408 watermark = wmark_pages(cc->zone, in compact_zone()
2409 cc->alloc_flags & ALLOC_WMARK_MASK); in compact_zone()
2410 if (zone_watermark_ok(cc->zone, cc->order, watermark, in compact_zone()
2411 cc->highest_zoneidx, cc->alloc_flags)) in compact_zone()
2415 if (!compaction_suitable(cc->zone, cc->order, in compact_zone()
2416 cc->highest_zoneidx)) in compact_zone()
2424 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2425 __reset_isolation_suitable(cc->zone); in compact_zone()
2433 cc->fast_start_pfn = 0; in compact_zone()
2434 if (cc->whole_zone) { in compact_zone()
2435 cc->migrate_pfn = start_pfn; in compact_zone()
2436 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); in compact_zone()
2438 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; in compact_zone()
2439 cc->free_pfn = cc->zone->compact_cached_free_pfn; in compact_zone()
2440 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { in compact_zone()
2441 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); in compact_zone()
2442 cc->zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
2444 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { in compact_zone()
2445 cc->migrate_pfn = start_pfn; in compact_zone()
2446 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
2447 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
2450 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) in compact_zone()
2451 cc->whole_zone = true; in compact_zone()
2460 * no isolation candidates, then the sync state does not matter. in compact_zone()
2461 * Until a pageblock with isolation candidates is found, keep the in compact_zone()
2465 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; in compact_zone()
2474 unsigned long iteration_start_pfn = cc->migrate_pfn; in compact_zone()
2484 cc->finish_pageblock = false; in compact_zone()
2487 cc->finish_pageblock = true; in compact_zone()
2494 putback_movable_pages(&cc->migratepages); in compact_zone()
2495 cc->nr_migratepages = 0; in compact_zone()
2499 cc->zone->compact_cached_migrate_pfn[1] = in compact_zone()
2500 cc->zone->compact_cached_migrate_pfn[0]; in compact_zone()
2506 * previous cc->order aligned block. in compact_zone()
2511 last_migrated_pfn = max(cc->zone->zone_start_pfn, in compact_zone()
2512 pageblock_start_pfn(cc->migrate_pfn - 1)); in compact_zone()
2515 err = migrate_pages(&cc->migratepages, compaction_alloc, in compact_zone()
2516 compaction_free, (unsigned long)cc, cc->mode, in compact_zone()
2522 cc->nr_migratepages = 0; in compact_zone()
2524 putback_movable_pages(&cc->migratepages); in compact_zone()
2526 * migrate_pages() may return -ENOMEM when scanners meet in compact_zone()
2529 if (err == -ENOMEM && !compact_scanners_met(cc)) { in compact_zone()
2535 * within the pageblock_order-aligned block and in compact_zone()
2544 if (!pageblock_aligned(cc->migrate_pfn) && in compact_zone()
2545 !cc->ignore_skip_hint && !cc->finish_pageblock && in compact_zone()
2546 (cc->mode < MIGRATE_SYNC)) { in compact_zone()
2547 cc->finish_pageblock = true; in compact_zone()
2554 if (cc->order == COMPACTION_HPAGE_ORDER) in compact_zone()
2562 if (capc && capc->page) { in compact_zone()
2570 * cc->order aligned block where we migrated from? If yes, in compact_zone()
2575 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2577 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
2580 lru_add_drain_cpu_zone(cc->zone); in compact_zone()
2592 if (cc->nr_freepages > 0) { in compact_zone()
2593 unsigned long free_pfn = release_freepages(&cc->freepages); in compact_zone()
2595 cc->nr_freepages = 0; in compact_zone()
2603 if (free_pfn > cc->zone->compact_cached_free_pfn) in compact_zone()
2604 cc->zone->compact_cached_free_pfn = free_pfn; in compact_zone()
2607 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); in compact_zone()
2608 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); in compact_zone()
2612 VM_BUG_ON(!list_empty(&cc->freepages)); in compact_zone()
2613 VM_BUG_ON(!list_empty(&cc->migratepages)); in compact_zone()
2649 WRITE_ONCE(current->capture_control, &capc); in compact_zone_order()
2658 WRITE_ONCE(current->capture_control, NULL); in compact_zone_order()
2673 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2693 * Check if the GFP flags allow compaction - GFP_NOIO is really in try_to_compact_pages()
2694 * tricky context because the migration might require IO in try_to_compact_pages()
2702 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in try_to_compact_pages()
2703 ac->highest_zoneidx, ac->nodemask) { in try_to_compact_pages()
2713 alloc_flags, ac->highest_zoneidx, capture); in try_to_compact_pages()
2757 * due to various back-off conditions, such as contention on per-node or
2758 * per-zone locks.
2765 .order = -1, in proactive_compact_node()
2774 zone = &pgdat->node_zones[zoneid]; in proactive_compact_node()
2796 .order = -1, in compact_node()
2806 zone = &pgdat->node_zones[zoneid]; in compact_node()
2841 if (pgdat->proactive_compact_trigger) in compaction_proactiveness_sysctl_handler()
2844 pgdat->proactive_compact_trigger = true; in compaction_proactiveness_sysctl_handler()
2845 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1, in compaction_proactiveness_sysctl_handler()
2846 pgdat->nr_zones - 1); in compaction_proactiveness_sysctl_handler()
2847 wake_up_interruptible(&pgdat->kcompactd_wait); in compaction_proactiveness_sysctl_handler()
2868 return -EINVAL; in sysctl_compaction_handler()
2881 int nid = dev->id; in compact_store()
2896 return device_create_file(&node->dev, &dev_attr_compact); in compaction_register_node()
2901 device_remove_file(&node->dev, &dev_attr_compact); in compaction_unregister_node()
2907 return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || in kcompactd_work_requested()
2908 pgdat->proactive_compact_trigger; in kcompactd_work_requested()
2915 enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; in kcompactd_node_suitable()
2918 zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
2924 if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, in kcompactd_node_suitable()
2929 if (compaction_suitable(zone, pgdat->kcompactd_max_order, in kcompactd_node_suitable()
2946 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
2947 .search_order = pgdat->kcompactd_max_order, in kcompactd_do_work()
2948 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, in kcompactd_do_work()
2953 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
2960 zone = &pgdat->node_zones[zoneid]; in kcompactd_do_work()
3010 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
3011 pgdat->kcompactd_max_order = 0; in kcompactd_do_work()
3012 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) in kcompactd_do_work()
3013 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; in kcompactd_do_work()
3021 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
3022 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
3024 if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) in wakeup_kcompactd()
3025 pgdat->kcompactd_highest_zoneidx = highest_zoneidx; in wakeup_kcompactd()
3031 if (!wq_has_sleeper(&pgdat->kcompactd_wait)) in wakeup_kcompactd()
3037 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
3039 wake_up_interruptible(&pgdat->kcompactd_wait); in wakeup_kcompactd()
3053 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kcompactd()
3060 pgdat->kcompactd_max_order = 0; in kcompactd()
3061 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; in kcompactd()
3072 trace_mm_compaction_kcompactd_sleep(pgdat->node_id); in kcompactd()
3073 if (wait_event_freezable_timeout(pgdat->kcompactd_wait, in kcompactd()
3075 !pgdat->proactive_compact_trigger) { in kcompactd()
3110 if (unlikely(pgdat->proactive_compact_trigger)) in kcompactd()
3111 pgdat->proactive_compact_trigger = false; in kcompactd()
3118 * This kcompactd start function will be called by init and node hot-add.
3119 * On node hot-add, kcompactd will be moved to the proper CPUs if CPUs are hot-added.
3125 if (pgdat->kcompactd) in kcompactd_run()
3128 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); in kcompactd_run()
3129 if (IS_ERR(pgdat->kcompactd)) { in kcompactd_run()
3131 pgdat->kcompactd = NULL; in kcompactd_run()
3141 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; in kcompactd_stop()
3145 NODE_DATA(nid)->kcompactd = NULL; in kcompactd_stop()
3163 mask = cpumask_of_node(pgdat->node_id); in kcompactd_cpu_online()
3167 if (pgdat->kcompactd) in kcompactd_cpu_online()
3168 set_cpus_allowed_ptr(pgdat->kcompactd, mask); in kcompactd_cpu_online()
3181 old = *(int *)table->data; in proc_dointvec_minmax_warn_RT_change()
3185 if (old != *(int *)table->data) in proc_dointvec_minmax_warn_RT_change()
3187 table->procname, current->comm, in proc_dointvec_minmax_warn_RT_change()