Lines matching refs: zone in mm/compaction.c (a ~v5.4-era kernel), grouped by function. Each group header gives the file line range of its matches; "..." marks lines the search elided.

defer_compaction(), lines 142-153:

    void defer_compaction(struct zone *zone, int order)
    {
        zone->compact_considered = 0;
        zone->compact_defer_shift++;
        if (order < zone->compact_order_failed)
            zone->compact_order_failed = order;
        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
            zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
        trace_mm_compaction_defer_compaction(zone, order);
    }

compaction_deferred(), lines 157-171:

    bool compaction_deferred(struct zone *zone, int order)
    {
        unsigned long defer_limit = 1UL << zone->compact_defer_shift;
        if (order < zone->compact_order_failed)
            return false;
        /* avoid possible overflow of the considered counter */
        if (++zone->compact_considered > defer_limit)
            zone->compact_considered = defer_limit;
        if (zone->compact_considered >= defer_limit)
            return false;
        trace_mm_compaction_deferred(zone, order);
        return true;
    }

compaction_defer_reset(), lines 181-191:

    void compaction_defer_reset(struct zone *zone, int order,
                                bool alloc_success)
    {
        if (alloc_success) {
            zone->compact_considered = 0;
            zone->compact_defer_shift = 0;
        }
        if (order >= zone->compact_order_failed)
            zone->compact_order_failed = order + 1;
        trace_mm_compaction_defer_reset(zone, order);
    }

compaction_restarting(), lines 195-201:

    bool compaction_restarting(struct zone *zone, int order)
    {
        if (order < zone->compact_order_failed)
            return false;
        return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
               zone->compact_considered >= 1UL << zone->compact_defer_shift;
    }
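
Taken together, these four helpers implement an exponential backoff: each failed compaction doubles the number of allocation attempts that must be "considered" before compaction is retried, capped at 1 << COMPACT_MAX_DEFER_SHIFT attempts (64 with the cap of 6 from include/linux/compaction.h). Below is a minimal userspace sketch of that arithmetic; struct zone_sim and the helper names are invented for the demo, and only the counter logic mirrors the kernel.

    #include <stdbool.h>
    #include <stdio.h>

    #define COMPACT_MAX_DEFER_SHIFT 6      /* cap used by these kernels */

    struct zone_sim {                      /* stand-in for the struct zone fields */
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int          compact_order_failed;
    };

    static void defer(struct zone_sim *z, int order)
    {
        z->compact_considered = 0;
        z->compact_defer_shift++;
        if (order < z->compact_order_failed)
            z->compact_order_failed = order;
        if (z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
            z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
    }

    static bool deferred(struct zone_sim *z, int order)
    {
        unsigned long defer_limit = 1UL << z->compact_defer_shift;

        if (order < z->compact_order_failed)
            return false;
        if (++z->compact_considered > defer_limit)
            z->compact_considered = defer_limit;
        if (z->compact_considered >= defer_limit)
            return false;                  /* limit reached: retry compaction */
        return true;                       /* still backing off */
    }

    int main(void)
    {
        struct zone_sim z = { 0, 0, 0 };

        /* each failed round doubles the attempts skipped before the next try */
        for (int round = 0; round < 4; round++) {
            int skipped = 0;

            defer(&z, 3);
            while (deferred(&z, 3))
                skipped++;
            printf("round %d: skipped %d attempts\n", round, skipped);
        }
        return 0;
    }

Running it prints 1, 3, 7 and 15 skipped attempts for successive failures, which is the doubling that compact_defer_shift encodes.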

reset_cached_positions(), lines 214-219:

    static void reset_cached_positions(struct zone *zone)
    {
        zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
        zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
        zone->compact_cached_free_pfn =
                    pageblock_start_pfn(zone_end_pfn(zone) - 1);
    }
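
This re-arms the two scanners at opposite ends of the zone: migration candidates are scanned upward from the zone start, free target pages downward from the last pageblock. pageblock_start_pfn() rounds a pfn down to its pageblock boundary; the sketch below shows the arithmetic, assuming a pageblock_order of 9 (2 MiB blocks with 4 KiB pages); the kernel derives the real value per architecture.

    #include <stdio.h>

    #define PAGEBLOCK_ORDER    9                /* assumption for this sketch */
    #define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)

    /* mirrors the kernel's pageblock_start_pfn(): round down to a block start */
    static unsigned long pageblock_start_pfn(unsigned long pfn)
    {
        return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
    }

    int main(void)
    {
        unsigned long zone_start_pfn = 0x1000;  /* hypothetical zone span */
        unsigned long zone_end = 0x7f765;

        printf("migrate scanners start at pfn 0x%lx\n", zone_start_pfn);
        printf("free scanner starts at pfn 0x%lx\n",
               pageblock_start_pfn(zone_end - 1));
        return 0;
    }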

__reset_isolation_pfn(), lines 241-282:

    static bool __reset_isolation_pfn(struct zone *zone, unsigned long pfn,
                                      bool check_source, bool check_target)
    {
        ...
        /* the pfn must belong to this zone for its skip bit to be cleared */
        if (zone != page_zone(page))
        ...
        /* clamp the pageblock walk to the zone boundaries */
        block_pfn = max(block_pfn, zone->zone_start_pfn);
        ...
        block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
        ...
    }

__reset_isolation_suitable(), lines 317-365:

    static void __reset_isolation_suitable(struct zone *zone)
    {
        unsigned long migrate_pfn = zone->zone_start_pfn;
        unsigned long free_pfn = zone_end_pfn(zone) - 1;
        ...
        /* only flush the skip hints if a full compaction finished recently */
        if (!zone->compact_blockskip_flush)
            return;
        zone->compact_blockskip_flush = false;
        ...
        /* walk inward from both ends; the first pageblock usable as a
           migration source re-arms the migrate scanner caches */
        if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && ...) {
            ...
            zone->compact_init_migrate_pfn = reset_migrate;
            zone->compact_cached_migrate_pfn[0] = reset_migrate;
            zone->compact_cached_migrate_pfn[1] = reset_migrate;
        }
        /* likewise for the first pageblock usable as a free target */
        if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && ...) {
            ...
            zone->compact_init_free_pfn = reset_free;
            zone->compact_cached_free_pfn = reset_free;
        }
        ...
        /* fallback if no suitable pageblock was found on either side */
        zone->compact_cached_migrate_pfn[0] = migrate_pfn;
        zone->compact_cached_migrate_pfn[1] = migrate_pfn;
        zone->compact_cached_free_pfn = free_pfn;
    }

reset_isolation_suitable(), lines 374-380:

    for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
        struct zone *zone = &pgdat->node_zones[zoneid];
        if (!populated_zone(zone))
            continue;
        /* only flush if a full compaction finished recently */
        if (zone->compact_blockskip_flush)
            __reset_isolation_suitable(zone);
    }

update_cached_migrate(), lines 409-421:

    struct zone *zone = cc->zone;
    ...
    /* index 0 caches the async scanner's position, index 1 the sync one's */
    if (pfn > zone->compact_cached_migrate_pfn[0])
        zone->compact_cached_migrate_pfn[0] = pfn;
    if (cc->mode != MIGRATE_ASYNC &&
        pfn > zone->compact_cached_migrate_pfn[1])
        zone->compact_cached_migrate_pfn[1] = pfn;

update_pageblock_skip(), lines 431-443:

    struct zone *zone = cc->zone;
    ...
    /* the free scanner walks downward, so remember the lowest pfn reached */
    if (pfn < zone->compact_cached_free_pfn)
        zone->compact_cached_free_pfn = pfn;
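
The cached pfns give later passes a warm start: the migrate-scanner cache is only ever raised and the free-scanner cache only ever lowered, so repeated passes converge on the unscanned middle of the zone. A toy model of that invariant (the struct and pfn values are made up; only the min/max behaviour mirrors the kernel):

    #include <stdio.h>

    struct cache_sim {
        unsigned long migrate_pfn;   /* only ever raised */
        unsigned long free_pfn;      /* only ever lowered */
    };

    static void cache_migrate(struct cache_sim *c, unsigned long pfn)
    {
        if (pfn > c->migrate_pfn)
            c->migrate_pfn = pfn;
    }

    static void cache_free(struct cache_sim *c, unsigned long pfn)
    {
        if (pfn < c->free_pfn)
            c->free_pfn = pfn;
    }

    int main(void)
    {
        struct cache_sim c = { 0x1000, 0x8000 };  /* hypothetical zone span */

        cache_migrate(&c, 0x2200);  /* migrate scanner progressed upward */
        cache_free(&c, 0x7400);     /* free scanner progressed downward */
        cache_migrate(&c, 0x1800);  /* stale, lower update: ignored */

        printf("next pass: migrate from 0x%lx, free from 0x%lx\n",
               c.migrate_pfn, c.free_pfn);
        return 0;
    }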

isolate_freepages_block(), lines 566-638:

    ...
    /* periodically drop the zone lock on contention or need_resched() */
    && compact_unlock_should_abort(&cc->zone->lock, flags,
                                   &locked, cc))
    ...
    /* take the zone lock only once a candidate free page is found */
    locked = compact_lock_irqsave(&cc->zone->lock,
                                  &flags, cc);
    ...
    spin_unlock_irqrestore(&cc->zone->lock, flags);

isolate_freepages_range(), lines 690-714:

    /* clamp the requested range to the zone */
    if (block_start_pfn < cc->zone->zone_start_pfn)
        block_start_pfn = cc->zone->zone_start_pfn;
    ...
    if (!pageblock_pfn_to_page(block_start_pfn,
                               block_end_pfn, cc->zone))

isolate_migratepages_block(), line 785:

    pg_data_t *pgdat = cc->zone->zone_pgdat;

isolate_migratepages_range(), lines 1093-1104:

    if (block_start_pfn < cc->zone->zone_start_pfn)
        block_start_pfn = cc->zone->zone_start_pfn;
    ...
    if (!pageblock_pfn_to_page(block_start_pfn,
                               block_end_pfn, cc->zone))

fast_isolate_around(), line 1237:

    /* never scan past the end of the zone */
    end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;

fast_isolate_freepages(), lines 1294-1416:

    /* when the scan is just starting, search the free lists more deeply */
    if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
    ...
    /* probe the buddy free lists instead of scanning pfns linearly */
    struct free_area *area = &cc->zone->free_area[order];
    ...
    spin_lock_irqsave(&cc->zone->lock, flags);
    ...
    spin_unlock_irqrestore(&cc->zone->lock, flags);
    ...
    /* record the highest candidate so the linear scanner can skip ahead */
    if (highest && highest >= cc->zone->compact_cached_free_pfn) {
        ...
        cc->zone->compact_cached_free_pfn = highest;

isolate_freepages(), lines 1434-1485:

    struct zone *zone = cc->zone;
    ...
    block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
                        zone_end_pfn(zone));
    ...
    page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
                                 zone);

fast_find_migrateblock(), lines 1642-1721:

    /* a pfn inside a pageblock means a scan is already in progress: keep it */
    if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
    ...
    if (cc->migrate_pfn != cc->zone->zone_start_pfn)
    ...
    /* as in the free fast path, probe the buddy lists under the zone lock */
    struct free_area *area = &cc->zone->free_area[order];
    ...
    spin_lock_irqsave(&cc->zone->lock, flags);
    ...
    spin_unlock_irqrestore(&cc->zone->lock, flags);

isolate_migratepages(), lines 1759-1791:

    if (block_start_pfn < cc->zone->zone_start_pfn)
        block_start_pfn = cc->zone->zone_start_pfn;
    ...
    page = pageblock_pfn_to_page(block_start_pfn,
                                 block_end_pfn, cc->zone);

__compact_finished(), lines 1858-1890:

    /* the scanners met: the whole zone has been compacted */
    reset_cached_positions(cc->zone);
    ...
    /* a full direct-compaction pass allows the skip hints to be flushed */
    cc->zone->compact_blockskip_flush = true;
    ...
    /* success test: check the free lists for a page of the requested order */
    struct free_area *area = &cc->zone->free_area[order];
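
__compact_finished() declares success as soon as a free page of the requested order, or any higher order, appears in the zone's free lists. A toy version of that test, ignoring the migratetype checks the kernel also performs; the free counts are made up:

    #include <stdio.h>

    #define MAX_ORDER 11

    /* toy free_area: just a count of free blocks per order */
    static unsigned long free_count[MAX_ORDER] = { 40, 12, 3, 0, 0, 1 };

    int main(void)
    {
        int want = 4;   /* hypothetical allocation order */

        for (int order = want; order < MAX_ORDER; order++) {
            if (free_count[order]) {
                printf("free block at order %d satisfies order-%d: success\n",
                       order, want);
                return 0;
            }
        }
        printf("no block at order >= %d: keep compacting\n", want);
        return 0;
    }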

compact_finished(), line 1944:

    trace_mm_compaction_finished(cc->zone, cc->order, ret);

__compaction_suitable(), lines 1958-1994:

    static enum compact_result __compaction_suitable(struct zone *zone, int order,
                            unsigned int alloc_flags, int classzone_idx,
                            unsigned long wmark_target)
    ...
    /* already above the order watermark: no compaction needed */
    watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
    if (zone_watermark_ok(zone, order, watermark, classzone_idx,
                          alloc_flags))
    ...
    /* otherwise the order-0 watermark, plus compact_gap(), must hold */
    watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
                low_wmark_pages(zone) : min_wmark_pages(zone);
    ...
    if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
                             ALLOC_CMA, wmark_target))
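
The order-0 check adds compact_gap(order) of headroom so there are free pages both for the allocation and for the migration targets compaction needs while it works; in these kernels compact_gap() is simply 2UL << order, i.e. twice the requested allocation. A quick userspace check of that arithmetic:

    #include <stdio.h>

    /* mirrors include/linux/compaction.h: the allocation itself plus an
       equal amount of room for migration destinations */
    static unsigned long compact_gap(unsigned int order)
    {
        return 2UL << order;
    }

    int main(void)
    {
        for (unsigned int order = 0; order <= 9; order++)
            printf("order %u: gap of %lu pages\n", order, compact_gap(order));
        return 0;
    }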

compaction_suitable(), lines 2001-2032:

    enum compact_result compaction_suitable(struct zone *zone, int order,
                                            unsigned int alloc_flags,
                                            int classzone_idx)
    ...
    ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
                                zone_page_state(zone, NR_FREE_PAGES));
    ...
    /* a low fragmentation index means failure is due to lack of memory,
       not fragmentation, so reclaim rather than compact */
    fragindex = fragmentation_index(zone, order);
    ...
    trace_mm_compaction_suitable(zone, order, ret);

compaction_zonelist_suitable(), lines 2042-2062:

    struct zone *zone;
    ...
    for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
                                    ac->nodemask) {
        ...
        /* count a fraction of reclaimable pages as potentially free */
        available = zone_reclaimable_pages(zone) / order;
        available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
        compact_result = __compaction_suitable(zone, order, alloc_flags,
                                               ac_classzone_idx(ac), available);
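
The availability estimate is deliberately rough: dividing the reclaimable pages by the order assumes only a fraction of them would actually help this request. For example, with 12,000 reclaimable pages, 300 free pages and an order-4 request, compaction is judged against an estimate of 12000/4 + 300 = 3,300 available pages.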

compact_zone(), lines 2075-2276:

    unsigned long start_pfn = cc->zone->zone_start_pfn;
    unsigned long end_pfn = zone_end_pfn(cc->zone);
    ...
    ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
                              cc->classzone_idx);
    ...
    /* clear the skip hints when retrying after the defer limit was hit */
    if (compaction_restarting(cc->zone, cc->order))
        __reset_isolation_suitable(cc->zone);
    ...
    /* start from the cached positions, re-clamping stale values to the zone */
    cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
    cc->free_pfn = cc->zone->compact_cached_free_pfn;
    ...
        cc->zone->compact_cached_free_pfn = cc->free_pfn;
    ...
        cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
        cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
    ...
    if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
        cc->whole_zone = true;
    ...
    update_cached = !sync &&
        cc->zone->compact_cached_migrate_pfn[0] ==
        cc->zone->compact_cached_migrate_pfn[1];
    ...
    /* an async pass that isolated nothing lets the sync cache catch up */
    cc->zone->compact_cached_migrate_pfn[1] =
        cc->zone->compact_cached_migrate_pfn[0];
    ...
    drain_local_pages(cc->zone);
    ...
    /* when returning unused free pages, only move the cached free pfn back */
    if (free_pfn > cc->zone->compact_cached_free_pfn)
        cc->zone->compact_cached_free_pfn = free_pfn;
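
compact_zone() is the per-zone driver: the migrate scanner walks up from the zone start isolating movable pages, the free scanner walks down from the end collecting targets, and the pass finishes when they meet. A schematic userspace rendering of that convergence (the pfn values and step sizes are invented; the meeting test mirrors the kernel's pageblock comparison):

    #include <stdio.h>

    #define PAGEBLOCK_ORDER 9                  /* assumption for the sketch */
    #define PAGEBLOCK_NR    (1UL << PAGEBLOCK_ORDER)

    int main(void)
    {
        unsigned long migrate_pfn = 0x1000;    /* scans upward   */
        unsigned long free_pfn    = 0x8000;    /* scans downward */
        unsigned long steps = 0;

        /* each step the scanners each consume one pageblock; the real loop
           migrates pages between them as it goes */
        while ((free_pfn >> PAGEBLOCK_ORDER) > (migrate_pfn >> PAGEBLOCK_ORDER)) {
            migrate_pfn += PAGEBLOCK_NR;
            free_pfn -= PAGEBLOCK_NR;
            steps++;
        }
        printf("scanners met after %lu steps\n", steps);
        return 0;
    }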

compact_zone_order(), lines 2288-2298:

    static enum compact_result compact_zone_order(struct zone *zone, int order,
            gfp_t gfp_mask, enum compact_priority prio,
            unsigned int alloc_flags, int classzone_idx,
            struct page **capture)
    ...
    struct compact_control cc = {
        ...
        .zone = zone,
        ...
    };

try_to_compact_pages(), lines 2345-2392:

    struct zone *zone;
    ...
    for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
                                    ac->nodemask) {
        /* skip zones still inside their backoff window */
        if (prio > MIN_COMPACT_PRIORITY
                && compaction_deferred(zone, order)) {
        ...
        status = compact_zone_order(zone, order, gfp_mask, prio,
                                    alloc_flags, ac_classzone_idx(ac), capture);
        ...
        /* likely success: reset the backoff (false records that it is not
           yet certain; the caller repeats with true once the page is in hand) */
        compaction_defer_reset(zone, order, false);
        ...
        /* the zone was fully compacted and still failed: back it off */
        defer_compaction(zone, order);

compact_node(), lines 2413-2429:

    struct zone *zone;
    ...
    for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
        zone = &pgdat->node_zones[zoneid];
        if (!populated_zone(zone))
            continue;
        ...
        cc.zone = zone;

kcompactd_node_suitable(), lines 2503-2512:

    struct zone *zone;
    ...
    zone = &pgdat->node_zones[zoneid];
    if (!populated_zone(zone))
        continue;
    /* one zone where compaction can make progress is enough to wake kcompactd */
    if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
                            classzone_idx) == COMPACT_CONTINUE)

kcompactd_do_work(), lines 2527-2575:

    struct zone *zone;
    ...
    zone = &pgdat->node_zones[zoneid];
    if (!populated_zone(zone))
        continue;
    if (compaction_deferred(zone, cc.order))
        continue;
    if (compaction_suitable(zone, cc.order, 0, zoneid) !=
                            COMPACT_CONTINUE)
        continue;
    ...
    cc.zone = zone;
    ...
    /* a successful pass resets the backoff ... */
    compaction_defer_reset(zone, cc.order, false);
    ...
    /* ... while a completed-but-failed pass drains the pcp lists and defers */
    drain_all_pages(zone);
    ...
    defer_compaction(zone, cc.order);
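
kcompactd's per-zone decision chain thus mirrors the direct-compaction path: skip zones still backing off, skip zones whose watermarks say compaction cannot help, and otherwise compact and either reset or extend the backoff depending on the outcome. A condensed userspace rendering of that control flow; every type and stub here is invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    enum result { SUCCESS, COMPLETE_BUT_FAILED };

    /* stubs standing in for compaction_deferred()/compaction_suitable()/
       compact_zone(); the hardcoded answers just exercise each branch */
    static bool deferred(int zoneid)       { return zoneid == 0; }
    static bool suitable(int zoneid)       { return zoneid != 1; }
    static enum result compact(int zoneid)
    {
        return zoneid == 2 ? SUCCESS : COMPLETE_BUT_FAILED;
    }

    int main(void)
    {
        for (int zoneid = 0; zoneid < 4; zoneid++) {
            if (deferred(zoneid)) {        /* still in its backoff window */
                printf("zone %d: deferred\n", zoneid);
                continue;
            }
            if (!suitable(zoneid)) {       /* watermarks say don't bother */
                printf("zone %d: not suitable\n", zoneid);
                continue;
            }
            if (compact(zoneid) == SUCCESS)
                printf("zone %d: success, backoff reset\n", zoneid);
            else
                printf("zone %d: failed, backing off\n", zoneid);
        }
        return 0;
    }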