Lines matching full:zone in mm/compaction.c
141 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
143 zone->compact_considered = 0; in defer_compaction()
144 zone->compact_defer_shift++; in defer_compaction()
146 if (order < zone->compact_order_failed) in defer_compaction()
147 zone->compact_order_failed = order; in defer_compaction()
149 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
150 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
152 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
156 bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
158 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
160 if (order < zone->compact_order_failed) in compaction_deferred()
164 if (++zone->compact_considered > defer_limit) in compaction_deferred()
165 zone->compact_considered = defer_limit; in compaction_deferred()
167 if (zone->compact_considered >= defer_limit) in compaction_deferred()
170 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
180 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
184 zone->compact_considered = 0; in compaction_defer_reset()
185 zone->compact_defer_shift = 0; in compaction_defer_reset()
187 if (order >= zone->compact_order_failed) in compaction_defer_reset()
188 zone->compact_order_failed = order + 1; in compaction_defer_reset()
190 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
194 bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
196 if (order < zone->compact_order_failed) in compaction_restarting()
199 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && in compaction_restarting()
200 zone->compact_considered >= 1UL << zone->compact_defer_shift; in compaction_restarting()
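The four fragments above form the exponential back-off that keeps the allocator from hammering compaction after repeated failures. Below is a minimal userspace model of that state machine, assuming COMPACT_MAX_DEFER_SHIFT is 6; the tracepoints, and the third compaction_defer_reset() parameter that gates the counter reset on allocation success, are left out, and the return values the matched lines do not show follow my reading of the upstream functions.

/* Minimal model of the compaction deferral back-off; the field names
 * mirror struct zone in the listing, COMPACT_MAX_DEFER_SHIFT is assumed
 * to be 6 and the tracepoints are omitted.
 */
#include <stdbool.h>

#define COMPACT_MAX_DEFER_SHIFT	6

struct zone_model {
	unsigned int compact_considered;	/* attempts skipped so far */
	unsigned int compact_defer_shift;	/* log2 of attempts to skip */
	int compact_order_failed;		/* lowest order that failed */
};

/* Compaction failed to help an allocation of this order: widen the window. */
static void defer_compaction(struct zone_model *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true while the current attempt should still be skipped. */
static bool compaction_deferred(struct zone_model *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	/* Smaller orders than the one that failed are still worth trying. */
	if (order < zone->compact_order_failed)
		return false;

	/* Cap the counter so it cannot run past the limit. */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;	/* window exhausted: compact again */

	return true;
}

/* Compaction (or the allocation it served) succeeded: clear the back-off. */
static void compaction_defer_reset(struct zone_model *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift = 0;

	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}

/* True once the back-off has saturated and its full window has elapsed. */
static bool compaction_restarting(struct zone_model *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

With compact_defer_shift saturated at 6, up to 64 attempts can be skipped between runs; compaction_restarting() firing at that point is what later triggers __reset_isolation_suitable() in compact_zone().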
213 static void reset_cached_positions(struct zone *zone) in reset_cached_positions() argument
215 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; in reset_cached_positions()
216 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; in reset_cached_positions()
217 zone->compact_cached_free_pfn = in reset_cached_positions()
218 pageblock_start_pfn(zone_end_pfn(zone) - 1); in reset_cached_positions()
244 static void __reset_isolation_suitable(struct zone *zone) in __reset_isolation_suitable() argument
246 unsigned long start_pfn = zone->zone_start_pfn; in __reset_isolation_suitable()
247 unsigned long end_pfn = zone_end_pfn(zone); in __reset_isolation_suitable()
250 zone->compact_blockskip_flush = false; in __reset_isolation_suitable()
252 /* Walk the zone and mark every pageblock as suitable for isolation */ in __reset_isolation_suitable()
261 if (zone != page_zone(page)) in __reset_isolation_suitable()
269 reset_cached_positions(zone); in __reset_isolation_suitable()
277 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable() local
278 if (!populated_zone(zone)) in reset_isolation_suitable()
282 if (zone->compact_blockskip_flush) in reset_isolation_suitable()
283 __reset_isolation_suitable(zone); in reset_isolation_suitable()
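Both reset paths above rewind the per-zone scanner caches so the next compaction run rescans everything. A standalone sketch of what reset_cached_positions() computes, assuming order-9 pageblocks (2 MiB with 4 KiB pages); the pageblock macros are simplified stand-ins for the kernel's.

/* Sketch of reset_cached_positions(): the migrate scanner caches go back
 * to the first PFN of the zone and the free scanner cache to the start of
 * the zone's last pageblock.  The pageblock size is an assumption.
 */
#define PAGEBLOCK_ORDER		9
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)
#define pageblock_start_pfn(pfn)	((pfn) & ~(PAGEBLOCK_NR_PAGES - 1))

struct zone_span_model {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
	unsigned long compact_cached_migrate_pfn[2];	/* [0]=async, [1]=sync */
	unsigned long compact_cached_free_pfn;
};

static unsigned long zone_end_pfn(const struct zone_span_model *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static void reset_cached_positions(struct zone_span_model *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
			pageblock_start_pfn(zone_end_pfn(zone) - 1);
}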
295 struct zone *zone = cc->zone; in update_pageblock_skip() local
313 if (pfn > zone->compact_cached_migrate_pfn[0]) in update_pageblock_skip()
314 zone->compact_cached_migrate_pfn[0] = pfn; in update_pageblock_skip()
316 pfn > zone->compact_cached_migrate_pfn[1]) in update_pageblock_skip()
317 zone->compact_cached_migrate_pfn[1] = pfn; in update_pageblock_skip()
319 if (pfn < zone->compact_cached_free_pfn) in update_pageblock_skip()
320 zone->compact_cached_free_pfn = pfn; in update_pageblock_skip()
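update_pageblock_skip() also nudges those caches as pageblocks turn out to be unprofitable. The fragment below, reusing struct zone_span_model from the previous sketch, models just the cache update: the migrate caches only ever advance and the free cache only ever retreats, so repeated scans converge instead of rescanning the whole zone. The skip bit itself and the early-exit conditions are not modelled.

#include <stdbool.h>

/* Model of the cache update in update_pageblock_skip(); 'sync' selects
 * whether the synchronous migrate cache is updated as well.
 */
static void update_cached_pfns(struct zone_span_model *zone, unsigned long pfn,
			       bool migrate_scanner, bool sync)
{
	if (migrate_scanner) {
		/* Migrate scanner walks upward: remember the furthest point. */
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (sync && pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		/* Free scanner walks downward: remember the lowest point. */
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}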
459 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
498 * The zone lock must be held to isolate freepages. in isolate_freepages_block()
505 locked = compact_trylock_irqsave(&cc->zone->lock, in isolate_freepages_block()
544 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
583 * Non-free pages, invalid PFNs, or zone boundaries within the
600 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_freepages_range()
601 block_start_pfn = cc->zone->zone_start_pfn; in isolate_freepages_range()
624 block_end_pfn, cc->zone)) in isolate_freepages_range()
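isolate_freepages_range() (and isolate_migratepages_range() further down) walks an arbitrary PFN range one pageblock at a time: the first block is clamped to the zone start, and blocks that are not entirely valid within the zone are skipped. A rough sketch of that walk, reusing the pageblock macros and zone_span_model from the reset sketch; block_fully_in_zone() is a hypothetical stand-in for the pageblock_pfn_to_page() check.

#define pageblock_end_pfn(pfn)	(pageblock_start_pfn(pfn) + PAGEBLOCK_NR_PAGES)

/* Hypothetical stand-in for pageblock_pfn_to_page(): is the span inside
 * this zone at all?  The real helper also checks for holes.
 */
static int block_fully_in_zone(unsigned long start, unsigned long end,
			       const struct zone_span_model *zone)
{
	return start >= zone->zone_start_pfn && end <= zone_end_pfn(zone);
}

/* Visit every pageblock overlapping [start_pfn, end_pfn); returns how
 * many blocks were eligible for isolation.
 */
static unsigned long walk_pageblocks(const struct zone_span_model *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long block_start_pfn = pageblock_start_pfn(start_pfn);
	unsigned long block_end_pfn = pageblock_end_pfn(start_pfn);
	unsigned long eligible = 0;

	/* The range may start below the zone if the zone begins mid-block. */
	if (block_start_pfn < zone->zone_start_pfn)
		block_start_pfn = zone->zone_start_pfn;

	for (; block_start_pfn < end_pfn;
	     block_start_pfn = block_end_pfn,
	     block_end_pfn += PAGEBLOCK_NR_PAGES) {

		unsigned long limit = block_end_pfn < end_pfn ?
					block_end_pfn : end_pfn;

		if (!block_fully_in_zone(block_start_pfn, limit, zone))
			continue;

		eligible++;	/* isolation of this block would go here */
	}
	return eligible;
}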
659 static bool too_many_isolated(struct zone *zone) in too_many_isolated() argument
663 inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) + in too_many_isolated()
664 node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON); in too_many_isolated()
665 active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) + in too_many_isolated()
666 node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON); in too_many_isolated()
667 isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) + in too_many_isolated()
668 node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON); in too_many_isolated()
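too_many_isolated() throttles the migrate scanner when too much of the node's LRU memory is already sitting on isolation lists. The sketch below models that check with plain counters; the specific threshold (isolated pages exceeding half of the remaining LRU pages) is how I read the upstream function, since the comparison itself is not among the matched lines.

#include <stdbool.h>

struct node_lru_counters {
	unsigned long nr_inactive_file, nr_inactive_anon;
	unsigned long nr_active_file, nr_active_anon;
	unsigned long nr_isolated_file, nr_isolated_anon;
};

/* Pause compaction while the isolated pages dominate the LRU lists. */
static bool too_many_isolated(const struct node_lru_counters *node)
{
	unsigned long inactive, active, isolated;

	inactive = node->nr_inactive_file + node->nr_inactive_anon;
	active = node->nr_active_file + node->nr_active_anon;
	isolated = node->nr_isolated_file + node->nr_isolated_anon;

	return isolated > (inactive + active) / 2;
}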
695 struct zone *zone = cc->zone; in isolate_migratepages_block() local
710 while (unlikely(too_many_isolated(zone))) { in isolate_migratepages_block()
760 && compact_unlock_should_abort(zone_lru_lock(zone), flags, in isolate_migratepages_block()
774 * Skip if free. We read page order here without zone lock in isolate_migratepages_block()
820 spin_unlock_irqrestore(zone_lru_lock(zone), in isolate_migratepages_block()
850 locked = compact_trylock_irqsave(zone_lru_lock(zone), in isolate_migratepages_block()
870 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); in isolate_migratepages_block()
915 spin_unlock_irqrestore(zone_lru_lock(zone), flags); in isolate_migratepages_block()
942 spin_unlock_irqrestore(zone_lru_lock(zone), flags); in isolate_migratepages_block()
980 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages_range()
981 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages_range()
991 block_end_pfn, cc->zone)) in isolate_migratepages_range()
1033 * We are checking page_order without zone->lock taken. But in suitable_migration_target()
1068 struct zone *zone = cc->zone; in isolate_freepages() local
1078 * successfully isolated from, zone-cached value, or the end of the in isolate_freepages()
1079 * zone when isolating for the first time. For looping we also need in isolate_freepages()
1083 * a zone which ends in the middle of a pageblock. in isolate_freepages()
1090 zone_end_pfn(zone)); in isolate_freepages()
1103 * This can iterate a massively long zone without finding any in isolate_freepages()
1112 zone); in isolate_freepages()
1195 * freelist. All pages on the freelist are from the same zone, so there is no
1224 static isolate_migrate_t isolate_migratepages(struct zone *zone, in isolate_migratepages() argument
1236 * Start at where we last stopped, or beginning of the zone as in isolate_migratepages()
1241 if (block_start_pfn < zone->zone_start_pfn) in isolate_migratepages()
1242 block_start_pfn = zone->zone_start_pfn; in isolate_migratepages()
1257 * This can potentially iterate a massively long zone with in isolate_migratepages()
1266 zone); in isolate_migratepages()
1312 static enum compact_result __compact_finished(struct zone *zone, in __compact_finished() argument
1324 reset_cached_positions(zone); in __compact_finished()
1333 zone->compact_blockskip_flush = true; in __compact_finished()
1357 struct free_area *area = &zone->free_area[order]; in __compact_finished()
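__compact_finished() decides when to stop: once the two scanners meet it rewinds the cached positions and arms the block-skip flush, and before that it declares success as soon as some free list at the requested order or above can satisfy the allocation. A simplified model of that success test, which ignores migratetype-specific lists and assumes a MAX_ORDER-style bound of 11:

#include <stdbool.h>

#define MAX_ORDER_MODEL	11

struct free_area_model {
	unsigned long nr_free;	/* pages queued at this order */
};

/* True when some free list at 'order' or above already holds a block. */
static bool compaction_goal_met(const struct free_area_model *free_area,
				int order)
{
	int o;

	for (o = order; o < MAX_ORDER_MODEL; o++)
		if (free_area[o].nr_free)
			return true;	/* a block of the needed size exists */
	return false;
}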
1403 static enum compact_result compact_finished(struct zone *zone, in compact_finished() argument
1408 ret = __compact_finished(zone, cc); in compact_finished()
1409 trace_mm_compaction_finished(zone, cc->order, ret); in compact_finished()
1417 * compaction_suitable: Is this suitable to run compaction on this zone now?
1423 static enum compact_result __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
1433 watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; in __compaction_suitable()
1438 if (zone_watermark_ok(zone, order, watermark, classzone_idx, in __compaction_suitable()
1457 low_wmark_pages(zone) : min_wmark_pages(zone); in __compaction_suitable()
1459 if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, in __compaction_suitable()
1466 enum compact_result compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
1473 ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, in compaction_suitable()
1474 zone_page_state(zone, NR_FREE_PAGES)); in compaction_suitable()
1492 fragindex = fragmentation_index(zone, order); in compaction_suitable()
1497 trace_mm_compaction_suitable(zone, order, ret); in compaction_suitable()
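The suitability check above has two gates: if the watermark for the requested order is already met there is nothing to compact for, and otherwise the zone must hold enough order-0 pages above its watermark to give the free scanner something to isolate. On top of that, the compaction_suitable() wrapper consults fragmentation_index() for costly orders to reject cases where the real problem is lack of memory rather than fragmentation. A reduced model of the two watermark gates, with compact_gap() assumed to be 2 << order as in the upstream helper:

#include <stdbool.h>

enum compact_result { COMPACT_SKIPPED, COMPACT_SUCCESS, COMPACT_CONTINUE };

/* Headroom the two scanners need to make progress (assumed 2 << order). */
static unsigned long compact_gap(int order)
{
	return 2UL << order;
}

static enum compact_result suitability_gate(bool hiorder_watermark_ok,
					    unsigned long free_pages,
					    unsigned long order0_watermark,
					    int order)
{
	/* The requested order is already allocatable: nothing to do. */
	if (hiorder_watermark_ok)
		return COMPACT_SUCCESS;

	/* Too few base pages to serve as migration targets; compaction
	 * would likely thrash, so let reclaim run first. */
	if (free_pages < order0_watermark + compact_gap(order))
		return COMPACT_SKIPPED;

	return COMPACT_CONTINUE;
}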
1507 struct zone *zone; in compaction_zonelist_suitable() local
1511 * Make sure at least one zone would pass __compaction_suitable if we continue in compaction_zonelist_suitable()
1514 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in compaction_zonelist_suitable()
1525 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
1526 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in compaction_zonelist_suitable()
1527 compact_result = __compaction_suitable(zone, order, alloc_flags, in compaction_zonelist_suitable()
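compaction_zonelist_suitable() deliberately counts only a 1/order slice of each zone's reclaimable pages on top of its free pages before asking whether compaction would have enough room, so a single huge allocation does not justify draining a whole zone. A small sketch of that estimate, feeding the gate from the previous sketch; the watermark argument stands in for the classzone-aware check in the real code.

#include <stdbool.h>

/* Would compacting this zone plausibly help an allocation of 'order',
 * counting only part of its reclaimable memory?  Reuses compact_gap()
 * and suitability_gate() from the sketch above.
 */
static bool zone_worth_compacting(unsigned long reclaimable_pages,
				  unsigned long free_pages,
				  unsigned long order0_watermark,
				  int order)
{
	unsigned long available = reclaimable_pages / order;

	available += free_pages;
	return suitability_gate(false, available, order0_watermark, order)
			!= COMPACT_SKIPPED;
}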
1536 static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc) in compact_zone() argument
1539 unsigned long start_pfn = zone->zone_start_pfn; in compact_zone()
1540 unsigned long end_pfn = zone_end_pfn(zone); in compact_zone()
1544 * These counters track activities during zone compaction. Initialize in compact_zone()
1545 * them before compacting a new zone. in compact_zone()
1555 ret = compaction_suitable(zone, cc->order, cc->alloc_flags, in compact_zone()
1568 if (compaction_restarting(zone, cc->order)) in compact_zone()
1569 __reset_isolation_suitable(zone); in compact_zone()
1572 * Setup to move all movable pages to the end of the zone. Used cached in compact_zone()
1574 * want to compact the whole zone), but check that it is initialised in compact_zone()
1575 * by ensuring the values are within zone boundaries. in compact_zone()
1581 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; in compact_zone()
1582 cc->free_pfn = zone->compact_cached_free_pfn; in compact_zone()
1585 zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
1589 zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
1590 zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
1604 while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { in compact_zone()
1607 switch (isolate_migratepages(zone, cc)) { in compact_zone()
1673 drain_local_pages(zone); in compact_zone()
1696 * already reset to zone end in compact_finished() in compact_zone()
1698 if (free_pfn > zone->compact_cached_free_pfn) in compact_zone()
1699 zone->compact_cached_free_pfn = free_pfn; in compact_zone()
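compact_zone() seeds its two scanners from the cached per-zone positions and falls back to the zone boundaries when a cached value is stale; on an abort it writes the free scanner position back so the next run resumes there. A sketch of the seeding step, reusing zone_span_model and the pageblock helpers from the reset sketch; the exact out-of-range conditions follow my reading of the upstream code rather than the matched lines.

#include <stdbool.h>

struct compact_control_model {
	unsigned long migrate_pfn;	/* scans upward for pages to move */
	unsigned long free_pfn;		/* scans downward for free targets */
};

/* Seed the scanners from the cached positions, validating them against
 * the zone boundaries; 'sync' selects the synchronous migrate cache.
 */
static void seed_scanners(struct zone_span_model *zone,
			  struct compact_control_model *cc, bool sync)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);

	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;

	if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}
}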
1711 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
1719 .zone = zone, in compact_zone_order()
1730 ret = compact_zone(zone, &cc); in compact_zone_order()
1756 struct zone *zone; in try_to_compact_pages() local
1768 /* Compact each zone in the list */ in try_to_compact_pages()
1769 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in try_to_compact_pages()
1774 && compaction_deferred(zone, order)) { in try_to_compact_pages()
1779 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
1786 * We think the allocation will succeed in this zone, in try_to_compact_pages()
1789 * succeeds in this zone. in try_to_compact_pages()
1791 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
1799 * We think that allocation won't succeed in this zone in try_to_compact_pages()
1803 defer_compaction(zone, order); in try_to_compact_pages()
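The fragments from try_to_compact_pages() show the policy that ties direct compaction to the deferral machinery: a zone still in back-off is skipped, a zone where the allocation now looks likely gets its back-off reset, and a zone that was scanned to completion without producing the page gets deferred harder. A condensed sketch of that loop, reusing the deferral model from the first sketch; compact_one_zone and the outcome values are simplified stand-ins for compact_zone_order() and its result codes.

#include <stdbool.h>
#include <stddef.h>

enum outcome { OUTCOME_SUCCESS, OUTCOME_COMPLETE_NO_PAGE, OUTCOME_ABORTED };

/* One pass over the candidate zones for a direct-compaction request;
 * compact_one_zone is a caller-supplied stand-in for compact_zone_order().
 */
static bool try_to_compact_zones(struct zone_model **zones, size_t nr_zones,
				 int order, bool async,
				 enum outcome (*compact_one_zone)(struct zone_model *, int))
{
	size_t i;

	for (i = 0; i < nr_zones; i++) {
		struct zone_model *zone = zones[i];

		/* Zone is still in back-off from an earlier failure. */
		if (compaction_deferred(zone, order))
			continue;

		switch (compact_one_zone(zone, order)) {
		case OUTCOME_SUCCESS:
			/* The allocation should succeed now; arm a fresh
			 * deferral window for the next failure here. */
			compaction_defer_reset(zone, order);
			return true;
		case OUTCOME_COMPLETE_NO_PAGE:
			/* Whole zone scanned and still no page: back off
			 * (only for non-async attempts in the real code). */
			if (!async)
				defer_compaction(zone, order);
			break;
		case OUTCOME_ABORTED:
			/* Gave up early (contention, need_resched): leave
			 * the deferral state untouched. */
			break;
		}
	}
	return false;
}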
1824 struct zone *zone; in compact_node() local
1836 zone = &pgdat->node_zones[zoneid]; in compact_node()
1837 if (!populated_zone(zone)) in compact_node()
1840 cc.zone = zone; in compact_node()
1842 compact_zone(zone, &cc); in compact_node()
1922 struct zone *zone; in kcompactd_node_suitable() local
1926 zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
1928 if (!populated_zone(zone)) in kcompactd_node_suitable()
1931 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, in kcompactd_node_suitable()
1946 struct zone *zone; in kcompactd_do_work() local
1961 zone = &pgdat->node_zones[zoneid]; in kcompactd_do_work()
1962 if (!populated_zone(zone)) in kcompactd_do_work()
1965 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
1968 if (compaction_suitable(zone, cc.order, 0, zoneid) != in kcompactd_do_work()
1975 cc.zone = zone; in kcompactd_do_work()
1976 status = compact_zone(zone, &cc); in kcompactd_do_work()
1979 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
1983 * otherwise coalesce on the zone's free area for in kcompactd_do_work()
1987 drain_all_pages(zone); in kcompactd_do_work()
1993 defer_compaction(zone, cc.order); in kcompactd_do_work()