Lines Matching refs:zone

162 static unsigned long zone_reclaimable_pages(struct zone *zone)  in zone_reclaimable_pages()  argument
166 nr = zone_page_state(zone, NR_ACTIVE_FILE) + in zone_reclaimable_pages()
167 zone_page_state(zone, NR_INACTIVE_FILE); in zone_reclaimable_pages()
170 nr += zone_page_state(zone, NR_ACTIVE_ANON) + in zone_reclaimable_pages()
171 zone_page_state(zone, NR_INACTIVE_ANON); in zone_reclaimable_pages()
176 bool zone_reclaimable(struct zone *zone) in zone_reclaimable() argument
178 return zone_page_state(zone, NR_PAGES_SCANNED) < in zone_reclaimable()
179 zone_reclaimable_pages(zone) * 6; in zone_reclaimable()
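
The two helpers above encode the zone-level "give up" heuristic: a zone still counts as reclaimable while the number of pages scanned stays below six times the number of pages that could plausibly be reclaimed, i.e. the file LRU pages plus the anon LRU pages (the anon counters are presumably only added when swap is available; that guarding condition is not part of this listing). A minimal userspace sketch of the arithmetic, with made-up flat counters standing in for zone_page_state():

    #include <stdbool.h>

    /* Hypothetical flat counters standing in for the zone vmstat fields. */
    struct zone_counters {
        unsigned long nr_active_file, nr_inactive_file;
        unsigned long nr_active_anon, nr_inactive_anon;
        unsigned long nr_pages_scanned;
        bool swap_available;       /* assumption: anon only counts with swap */
    };

    static unsigned long reclaimable_pages(const struct zone_counters *zc)
    {
        unsigned long nr = zc->nr_active_file + zc->nr_inactive_file;

        if (zc->swap_available)
            nr += zc->nr_active_anon + zc->nr_inactive_anon;
        return nr;
    }

    /* Mirrors the "scanned < reclaimable * 6" test in zone_reclaimable(). */
    static bool still_reclaimable(const struct zone_counters *zc)
    {
        return zc->nr_pages_scanned < reclaimable_pages(zc) * 6;
    }
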
846 struct zone *zone, in shrink_page_list() argument
884 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in shrink_page_list()
962 test_bit(ZONE_WRITEBACK, &zone->flags)) { in shrink_page_list()
1044 !test_bit(ZONE_DIRTY, &zone->flags))) { in shrink_page_list()
1188 unsigned long reclaim_clean_pages_from_list(struct zone *zone, in reclaim_clean_pages_from_list() argument
1208 ret = shrink_page_list(&clean_pages, zone, &sc, in reclaim_clean_pages_from_list()
1212 mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); in reclaim_clean_pages_from_list()
1384 struct zone *zone = page_zone(page); in isolate_lru_page() local
1387 spin_lock_irq(&zone->lru_lock); in isolate_lru_page()
1388 lruvec = mem_cgroup_page_lruvec(page, zone); in isolate_lru_page()
1396 spin_unlock_irq(&zone->lru_lock); in isolate_lru_page()
1408 static int too_many_isolated(struct zone *zone, int file, in too_many_isolated() argument
1420 inactive = zone_page_state(zone, NR_INACTIVE_FILE); in too_many_isolated()
1421 isolated = zone_page_state(zone, NR_ISOLATED_FILE); in too_many_isolated()
1423 inactive = zone_page_state(zone, NR_INACTIVE_ANON); in too_many_isolated()
1424 isolated = zone_page_state(zone, NR_ISOLATED_ANON); in too_many_isolated()
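
too_many_isolated() picks the inactive/isolated counter pair for either the file or the anon LRU, depending on its file argument. The comparison itself falls outside this listing; a common reading is that reclaim throttles once isolated pages outnumber the remaining inactive pages, which the sketch below assumes:

    #include <stdbool.h>

    struct lru_counters {
        unsigned long inactive_file, isolated_file;
        unsigned long inactive_anon, isolated_anon;
    };

    /* Assumed threshold: throttle when isolated > inactive on the chosen LRU. */
    static bool too_many_isolated_sketch(const struct lru_counters *lc, bool file)
    {
        unsigned long inactive = file ? lc->inactive_file : lc->inactive_anon;
        unsigned long isolated = file ? lc->isolated_file : lc->isolated_anon;

        return isolated > inactive;
    }
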
1442 struct zone *zone = lruvec_zone(lruvec); in putback_inactive_pages() local
1455 spin_unlock_irq(&zone->lru_lock); in putback_inactive_pages()
1457 spin_lock_irq(&zone->lru_lock); in putback_inactive_pages()
1461 lruvec = mem_cgroup_page_lruvec(page, zone); in putback_inactive_pages()
1478 spin_unlock_irq(&zone->lru_lock); in putback_inactive_pages()
1481 spin_lock_irq(&zone->lru_lock); in putback_inactive_pages()
1525 struct zone *zone = lruvec_zone(lruvec); in shrink_inactive_list() local
1528 while (unlikely(too_many_isolated(zone, file, sc))) { in shrink_inactive_list()
1543 spin_lock_irq(&zone->lru_lock); in shrink_inactive_list()
1548 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); in shrink_inactive_list()
1549 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); in shrink_inactive_list()
1552 __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); in shrink_inactive_list()
1554 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned); in shrink_inactive_list()
1556 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned); in shrink_inactive_list()
1558 spin_unlock_irq(&zone->lru_lock); in shrink_inactive_list()
1563 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, in shrink_inactive_list()
1568 spin_lock_irq(&zone->lru_lock); in shrink_inactive_list()
1574 __count_zone_vm_events(PGSTEAL_KSWAPD, zone, in shrink_inactive_list()
1577 __count_zone_vm_events(PGSTEAL_DIRECT, zone, in shrink_inactive_list()
1583 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
1585 spin_unlock_irq(&zone->lru_lock); in shrink_inactive_list()
1605 set_bit(ZONE_WRITEBACK, &zone->flags); in shrink_inactive_list()
1617 set_bit(ZONE_CONGESTED, &zone->flags); in shrink_inactive_list()
1626 set_bit(ZONE_DIRTY, &zone->flags); in shrink_inactive_list()
1645 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); in shrink_inactive_list()
1647 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, in shrink_inactive_list()
1648 zone_idx(zone), in shrink_inactive_list()
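
shrink_inactive_list() sets the ZONE_WRITEBACK, ZONE_CONGESTED and ZONE_DIRTY bits in zone->flags, shrink_page_list() tests two of them earlier in the listing, and kswapd clears them again once the zone looks balanced. The flags act as per-zone hints handed between reclaim passes. A simplified, non-atomic sketch of that set/test/clear cycle (the kernel uses set_bit()/test_bit()/clear_bit() on zone->flags):

    /* Non-atomic stand-ins for the zone->flags reclaim hint bits. */
    enum reclaim_hint {
        HINT_WRITEBACK = 1u << 0,  /* LRU tail full of pages under writeback */
        HINT_CONGESTED = 1u << 1,  /* backing device appears congested       */
        HINT_DIRTY     = 1u << 2,  /* dirty pages reached the LRU tail       */
    };

    struct zone_hints { unsigned int flags; };

    static void hint_set(struct zone_hints *z, enum reclaim_hint h)
    {
        z->flags |= h;              /* set by shrink_inactive_list()          */
    }

    static int hint_test(const struct zone_hints *z, enum reclaim_hint h)
    {
        return !!(z->flags & h);    /* consulted by shrink_page_list()        */
    }

    static void hint_clear(struct zone_hints *z, enum reclaim_hint h)
    {
        z->flags &= ~h;             /* cleared once kswapd balances the zone  */
    }
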
1678 struct zone *zone = lruvec_zone(lruvec); in move_active_pages_to_lru() local
1685 lruvec = mem_cgroup_page_lruvec(page, zone); in move_active_pages_to_lru()
1701 spin_unlock_irq(&zone->lru_lock); in move_active_pages_to_lru()
1704 spin_lock_irq(&zone->lru_lock); in move_active_pages_to_lru()
1709 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); in move_active_pages_to_lru()
1730 struct zone *zone = lruvec_zone(lruvec); in shrink_active_list() local
1739 spin_lock_irq(&zone->lru_lock); in shrink_active_list()
1744 __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); in shrink_active_list()
1748 __count_zone_vm_events(PGREFILL, zone, nr_scanned); in shrink_active_list()
1749 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); in shrink_active_list()
1750 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); in shrink_active_list()
1751 spin_unlock_irq(&zone->lru_lock); in shrink_active_list()
1796 spin_lock_irq(&zone->lru_lock); in shrink_active_list()
1807 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
1808 spin_unlock_irq(&zone->lru_lock); in shrink_active_list()
1815 static int inactive_anon_is_low_global(struct zone *zone) in inactive_anon_is_low_global() argument
1819 active = zone_page_state(zone, NR_ACTIVE_ANON); in inactive_anon_is_low_global()
1820 inactive = zone_page_state(zone, NR_INACTIVE_ANON); in inactive_anon_is_low_global()
1822 if (inactive * zone->inactive_ratio < active) in inactive_anon_is_low_global()
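
The global check treats the inactive anon list as "low" when inactive * zone->inactive_ratio < active, i.e. when the active list is more than inactive_ratio times larger than the inactive one. A runnable sketch with an assumed ratio of 3 (the real value is sized per zone):

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns true when the inactive anon list is considered too small. */
    static bool inactive_anon_is_low(unsigned long active, unsigned long inactive,
                                     unsigned int inactive_ratio)
    {
        return inactive * inactive_ratio < active;
    }

    int main(void)
    {
        /* Example numbers only; ratio 3 is an assumption for illustration. */
        printf("%d\n", inactive_anon_is_low(4000, 1000, 3)); /* 1: 3000 < 4000 */
        printf("%d\n", inactive_anon_is_low(2500, 1000, 3)); /* 0: 3000 >= 2500 */
        return 0;
    }
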
1923 struct zone *zone = lruvec_zone(lruvec); in get_scan_count() local
1943 if (current_is_kswapd() && !zone_reclaimable(zone)) in get_scan_count()
1989 zonefree = zone_page_state(zone, NR_FREE_PAGES); in get_scan_count()
1990 zonefile = zone_page_state(zone, NR_ACTIVE_FILE) + in get_scan_count()
1991 zone_page_state(zone, NR_INACTIVE_FILE); in get_scan_count()
1993 if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) { in get_scan_count()
2034 spin_lock_irq(&zone->lru_lock); in get_scan_count()
2055 spin_unlock_irq(&zone->lru_lock); in get_scan_count()
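
Inside get_scan_count(), the zonefree/zonefile sums feed a safety check: if free pages plus file-backed LRU pages together no longer clear the zone's high watermark, then even reclaiming every file page could not restore the watermark, so the scan balance presumably has to lean on the anon lists instead (the branch taken after the check is not shown here). The arithmetic:

    #include <stdbool.h>

    /* True when reclaiming the whole file LRU still could not reach the
     * high watermark, so file-only reclaim would be pointless. */
    static bool file_reclaim_insufficient(unsigned long zonefree,
                                          unsigned long active_file,
                                          unsigned long inactive_file,
                                          unsigned long high_wmark)
    {
        unsigned long zonefile = active_file + inactive_file;

        return zonefile + zonefree <= high_wmark;
    }
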
2243 static inline bool should_continue_reclaim(struct zone *zone, in should_continue_reclaim() argument
2283 inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE); in should_continue_reclaim()
2285 inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON); in should_continue_reclaim()
2291 switch (compaction_suitable(zone, sc->order)) { in should_continue_reclaim()
2300 static bool shrink_zone(struct zone *zone, struct scan_control *sc) in shrink_zone() argument
2308 .zone = zone, in shrink_zone()
2321 lruvec = mem_cgroup_zone_lruvec(zone, memcg); in shrink_zone()
2351 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, in shrink_zone()
2361 static inline bool compaction_ready(struct zone *zone, int order) in compaction_ready() argument
2372 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( in compaction_ready()
2373 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); in compaction_ready()
2374 watermark = high_wmark_pages(zone) + balance_gap + (2UL << order); in compaction_ready()
2375 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0); in compaction_ready()
2381 if (compaction_deferred(zone, order)) in compaction_ready()
2388 if (compaction_suitable(zone, order) == COMPACT_SKIPPED) in compaction_ready()
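
compaction_ready() builds its watermark from three parts: a balance gap capped at the low watermark (managed_pages divided by KSWAPD_ZONE_BALANCE_GAP_RATIO, rounded up), the high watermark, and 2^(order+1) pages of slack for the allocation being compacted for. A small numeric sketch, assuming a gap ratio of 100 and example watermarks purely for illustration:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    static unsigned long compaction_watermark(unsigned long managed_pages,
                                              unsigned long low_wmark,
                                              unsigned long high_wmark,
                                              int order,
                                              unsigned long gap_ratio)
    {
        unsigned long balance_gap;

        /* Gap grows with zone size but never exceeds the low watermark. */
        balance_gap = DIV_ROUND_UP(managed_pages, gap_ratio);
        if (balance_gap > low_wmark)
            balance_gap = low_wmark;

        /* High watermark + gap + twice the allocation size, in pages. */
        return high_wmark + balance_gap + (2UL << order);
    }

    int main(void)
    {
        /* Illustrative numbers only: 1 GiB zone, order-9 allocation. */
        printf("%lu\n", compaction_watermark(262144, 1024, 3072, 9, 100));
        return 0;
    }
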
2415 struct zone *zone; in shrink_zones() local
2438 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
2440 if (!populated_zone(zone)) in shrink_zones()
2447 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) in shrink_zones()
2450 lru_pages += zone_reclaimable_pages(zone); in shrink_zones()
2451 node_set(zone_to_nid(zone), shrink.nodes_to_scan); in shrink_zones()
2454 !zone_reclaimable(zone)) in shrink_zones()
2469 compaction_ready(zone, sc->order)) { in shrink_zones()
2481 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, in shrink_zones()
2491 if (shrink_zone(zone, sc)) in shrink_zones()
2495 !reclaimable && zone_reclaimable(zone)) in shrink_zones()
2603 struct zone *zone; in pfmemalloc_watermark_ok() local
2610 zone = &pgdat->node_zones[i]; in pfmemalloc_watermark_ok()
2611 if (!populated_zone(zone)) in pfmemalloc_watermark_ok()
2614 pfmemalloc_reserve += min_wmark_pages(zone); in pfmemalloc_watermark_ok()
2615 free_pages += zone_page_state(zone, NR_FREE_PAGES); in pfmemalloc_watermark_ok()
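
pfmemalloc_watermark_ok() walks the node's populated zones, summing the min-watermark reserves on one side and free pages on the other. The final comparison is outside this listing; the sketch below assumes free pages must exceed half of the accumulated reserve, which matches the usual pfmemalloc throttling rule but is not visible here:

    #include <stdbool.h>
    #include <stddef.h>

    struct zone_info {
        bool populated;
        unsigned long min_wmark;
        unsigned long free_pages;
    };

    /* Assumed rule: OK when free pages exceed half the summed min watermarks. */
    static bool pfmemalloc_ok(const struct zone_info *zones, size_t nr_zones)
    {
        unsigned long reserve = 0, free = 0;
        size_t i;

        for (i = 0; i < nr_zones; i++) {
            if (!zones[i].populated)
                continue;
            reserve += zones[i].min_wmark;
            free += zones[i].free_pages;
        }
        return free > reserve / 2;
    }
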
2647 struct zone *zone; in throttle_direct_reclaim() local
2681 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
2683 if (zone_idx(zone) > ZONE_NORMAL) in throttle_direct_reclaim()
2687 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
2716 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
2765 struct zone *zone, in mem_cgroup_shrink_node_zone() argument
2775 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); in mem_cgroup_shrink_node_zone()
2840 static void age_active_anon(struct zone *zone, struct scan_control *sc) in age_active_anon() argument
2849 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); in age_active_anon()
2859 static bool zone_balanced(struct zone *zone, int order, in zone_balanced() argument
2862 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) + in zone_balanced()
2867 compaction_suitable(zone, order) == COMPACT_SKIPPED) in zone_balanced()
2901 struct zone *zone = pgdat->node_zones + i; in pgdat_balanced() local
2903 if (!populated_zone(zone)) in pgdat_balanced()
2906 managed_pages += zone->managed_pages; in pgdat_balanced()
2915 if (!zone_reclaimable(zone)) { in pgdat_balanced()
2916 balanced_pages += zone->managed_pages; in pgdat_balanced()
2920 if (zone_balanced(zone, order, 0, i)) in pgdat_balanced()
2921 balanced_pages += zone->managed_pages; in pgdat_balanced()
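
pgdat_balanced() counts a zone's managed pages toward balanced_pages either when the zone is no longer reclaimable (nothing more can be done for it) or when zone_balanced() says its watermark is met for the requested order. The threshold that balanced_pages is finally compared against is not part of this listing, so the sketch lets the caller supply a required percentage:

    #include <stdbool.h>
    #include <stddef.h>

    struct zone_view {
        bool populated;
        bool reclaimable;       /* result of the zone_reclaimable() heuristic */
        bool balanced;          /* result of the zone_balanced() watermark test */
        unsigned long managed_pages;
    };

    /* Counts managed pages of zones that are either balanced or hopeless,
     * then compares against a caller-supplied share of all managed pages. */
    static bool node_balanced(const struct zone_view *zones, size_t nr,
                              unsigned int required_pct /* assumption */)
    {
        unsigned long managed = 0, balanced = 0;
        size_t i;

        for (i = 0; i < nr; i++) {
            if (!zones[i].populated)
                continue;
            managed += zones[i].managed_pages;
            if (!zones[i].reclaimable || zones[i].balanced)
                balanced += zones[i].managed_pages;
        }
        return balanced * 100 >= managed * required_pct;
    }
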
2972 static bool kswapd_shrink_zone(struct zone *zone, in kswapd_shrink_zone() argument
2987 sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone)); in kswapd_shrink_zone()
2996 compaction_suitable(zone, sc->order) != in kswapd_shrink_zone()
3006 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( in kswapd_shrink_zone()
3007 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); in kswapd_shrink_zone()
3013 lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone)); in kswapd_shrink_zone()
3014 if (!lowmem_pressure && zone_balanced(zone, testorder, in kswapd_shrink_zone()
3018 shrink_zone(zone, sc); in kswapd_shrink_zone()
3020 node_set(zone_to_nid(zone), shrink.nodes_to_scan); in kswapd_shrink_zone()
3029 clear_bit(ZONE_WRITEBACK, &zone->flags); in kswapd_shrink_zone()
3037 if (zone_reclaimable(zone) && in kswapd_shrink_zone()
3038 zone_balanced(zone, testorder, 0, classzone_idx)) { in kswapd_shrink_zone()
3039 clear_bit(ZONE_CONGESTED, &zone->flags); in kswapd_shrink_zone()
3040 clear_bit(ZONE_DIRTY, &zone->flags); in kswapd_shrink_zone()
3097 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3099 if (!populated_zone(zone)) in balance_pgdat()
3103 !zone_reclaimable(zone)) in balance_pgdat()
3110 age_active_anon(zone, &sc); in balance_pgdat()
3123 if (!zone_balanced(zone, order, 0, 0)) { in balance_pgdat()
3131 clear_bit(ZONE_CONGESTED, &zone->flags); in balance_pgdat()
3132 clear_bit(ZONE_DIRTY, &zone->flags); in balance_pgdat()
3140 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3142 if (!populated_zone(zone)) in balance_pgdat()
3145 lru_pages += zone_reclaimable_pages(zone); in balance_pgdat()
3153 zone_watermark_ok(zone, order, in balance_pgdat()
3154 low_wmark_pages(zone), in balance_pgdat()
3176 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3178 if (!populated_zone(zone)) in balance_pgdat()
3182 !zone_reclaimable(zone)) in balance_pgdat()
3191 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, in balance_pgdat()
3202 if (kswapd_shrink_zone(zone, end_zone, &sc, in balance_pgdat()
3425 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) in wakeup_kswapd() argument
3429 if (!populated_zone(zone)) in wakeup_kswapd()
3432 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) in wakeup_kswapd()
3434 pgdat = zone->zone_pgdat; in wakeup_kswapd()
3441 if (zone_balanced(zone, order, 0, 0)) in wakeup_kswapd()
3444 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); in wakeup_kswapd()
3595 static inline unsigned long zone_unmapped_file_pages(struct zone *zone) in zone_unmapped_file_pages() argument
3597 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); in zone_unmapped_file_pages()
3598 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + in zone_unmapped_file_pages()
3599 zone_page_state(zone, NR_ACTIVE_FILE); in zone_unmapped_file_pages()
3610 static long zone_pagecache_reclaimable(struct zone *zone) in zone_pagecache_reclaimable() argument
3622 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); in zone_pagecache_reclaimable()
3624 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); in zone_pagecache_reclaimable()
3628 delta += zone_page_state(zone, NR_FILE_DIRTY); in zone_pagecache_reclaimable()
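
For zone_reclaim, the amount of "reclaimable page cache" starts from either all file pages or only the unmapped part of the file LRU (which of the two depends on whether mapped pages may be reclaimed; that configuration check is not shown), and dirty pages are then subtracted via the delta accumulated with NR_FILE_DIRTY when writeback is not allowed from this context. A hedged sketch of that bookkeeping, with may_unmap/may_writeback standing in for the real reclaim-mode bits:

    /* Simplified accounting; the flag arguments are illustrative only. */
    struct file_counters {
        unsigned long file_pages;     /* NR_FILE_PAGES    */
        unsigned long file_mapped;    /* NR_FILE_MAPPED   */
        unsigned long active_file;    /* NR_ACTIVE_FILE   */
        unsigned long inactive_file;  /* NR_INACTIVE_FILE */
        unsigned long file_dirty;     /* NR_FILE_DIRTY    */
    };

    static unsigned long unmapped_file_pages(const struct file_counters *fc)
    {
        unsigned long file_lru = fc->inactive_file + fc->active_file;

        /* Mapped pages can transiently exceed the LRU count; clamp to zero. */
        return file_lru > fc->file_mapped ? file_lru - fc->file_mapped : 0;
    }

    static long pagecache_reclaimable(const struct file_counters *fc,
                                      int may_unmap, int may_writeback)
    {
        unsigned long nr = may_unmap ? fc->file_pages : unmapped_file_pages(fc);
        unsigned long delta = 0;

        if (!may_writeback)
            delta += fc->file_dirty;   /* dirty pages are out of reach here */

        return nr > delta ? (long)(nr - delta) : 0;
    }
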
3640 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) in __zone_reclaim() argument
3671 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { in __zone_reclaim()
3677 shrink_zone(zone, &sc); in __zone_reclaim()
3681 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); in __zone_reclaim()
3682 if (nr_slab_pages0 > zone->min_slab_pages) { in __zone_reclaim()
3691 node_set(zone_to_nid(zone), shrink.nodes_to_scan); in __zone_reclaim()
3693 unsigned long lru_pages = zone_reclaimable_pages(zone); in __zone_reclaim()
3700 nr_slab_pages1 = zone_page_state(zone, in __zone_reclaim()
3710 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); in __zone_reclaim()
3721 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) in zone_reclaim() argument
3736 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && in zone_reclaim()
3737 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) in zone_reclaim()
3740 if (!zone_reclaimable(zone)) in zone_reclaim()
3755 node_id = zone_to_nid(zone); in zone_reclaim()
3759 if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags)) in zone_reclaim()
3762 ret = __zone_reclaim(zone, gfp_mask, order); in zone_reclaim()
3763 clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags); in zone_reclaim()
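
zone_reclaim() itself is mostly gatekeeping: it bails out when neither the page cache nor the reclaimable slab exceeds the per-zone minimums, when the zone is past the reclaimable threshold, and when another CPU already holds the per-zone reclaim "lock", taken with test_and_set_bit(ZONE_RECLAIM_LOCKED) and dropped with clear_bit() after __zone_reclaim() runs. A compressed sketch of that control flow, with the atomic bit reduced to a plain flag for illustration:

    #include <stdbool.h>

    struct zone_reclaim_state {
        long pagecache_reclaimable;
        long min_unmapped_pages;
        unsigned long slab_reclaimable;
        unsigned long min_slab_pages;
        bool reclaimable;          /* zone_reclaimable() result           */
        bool reclaim_locked;       /* stands in for ZONE_RECLAIM_LOCKED   */
    };

    static int zone_reclaim_sketch(struct zone_reclaim_state *z,
                                   int (*do_reclaim)(void))
    {
        int ret;

        /* Nothing worth reclaiming locally: neither page cache nor slab
         * is above the configured per-zone minimums. */
        if (z->pagecache_reclaimable <= z->min_unmapped_pages &&
            z->slab_reclaimable <= z->min_slab_pages)
            return 0;

        if (!z->reclaimable)
            return 0;

        /* Only one reclaimer per zone; the kernel uses test_and_set_bit(). */
        if (z->reclaim_locked)
            return 0;
        z->reclaim_locked = true;

        ret = do_reclaim();

        z->reclaim_locked = false;
        return ret;
    }
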
3802 struct zone *zone = NULL; in check_move_unevictable_pages() local
3809 struct zone *pagezone; in check_move_unevictable_pages()
3813 if (pagezone != zone) { in check_move_unevictable_pages()
3814 if (zone) in check_move_unevictable_pages()
3815 spin_unlock_irq(&zone->lru_lock); in check_move_unevictable_pages()
3816 zone = pagezone; in check_move_unevictable_pages()
3817 spin_lock_irq(&zone->lru_lock); in check_move_unevictable_pages()
3819 lruvec = mem_cgroup_page_lruvec(page, zone); in check_move_unevictable_pages()
3835 if (zone) { in check_move_unevictable_pages()
3838 spin_unlock_irq(&zone->lru_lock); in check_move_unevictable_pages()
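
check_move_unevictable_pages() shows a common LRU-lock batching pattern: zone starts out NULL, the loop only drops and retakes zone->lru_lock when the current page belongs to a different zone than the previous one, and the last lock held is released after the loop. A generic sketch of the same pattern over an array of items grouped into buckets:

    #include <pthread.h>
    #include <stddef.h>

    struct bucket {                       /* stand-in for struct zone      */
        pthread_mutex_t lock;             /* stand-in for zone->lru_lock   */
    };

    struct item {
        struct bucket *home;              /* stand-in for page_zone(page)  */
    };

    /* Take each bucket's lock once per run of consecutive items, instead of
     * locking and unlocking around every single item. */
    static void process_batched(struct item *items, size_t n,
                                void (*handle)(struct item *))
    {
        struct bucket *locked = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            struct bucket *b = items[i].home;

            if (b != locked) {
                if (locked)
                    pthread_mutex_unlock(&locked->lock);
                locked = b;
                pthread_mutex_lock(&locked->lock);
            }
            handle(&items[i]);
        }
        if (locked)
            pthread_mutex_unlock(&locked->lock);
    }
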