Lines matching refs: zone

Each entry below gives the file line number, the matched source line, and the enclosing function; "argument" marks a match in a function's parameter list, "local" a match in a local variable declaration.

333 unsigned long zone_reclaimable_pages(struct zone *zone)  in zone_reclaimable_pages()  argument
337 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + in zone_reclaimable_pages()
338 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); in zone_reclaimable_pages()
340 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + in zone_reclaimable_pages()
341 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); in zone_reclaimable_pages()
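
Lines 333-341 show zone_reclaimable_pages() summing the per-zone LRU counters: file-backed pages unconditionally, anonymous pages added on top (in the kernel source the anonymous half is guarded by a swap-availability check that this match list does not surface). A minimal userspace sketch of that arithmetic, using stand-in types rather than the kernel's structures:

    #include <stdio.h>

    enum zone_stat { INACTIVE_FILE, ACTIVE_FILE, INACTIVE_ANON, ACTIVE_ANON, NR_STATS };

    struct zone {
        unsigned long stat[NR_STATS]; /* stand-in for zone_page_state_snapshot() */
    };

    unsigned long zone_reclaimable_pages(const struct zone *zone, int swap_available)
    {
        unsigned long nr;

        /* File pages can always be written back or dropped. */
        nr = zone->stat[INACTIVE_FILE] + zone->stat[ACTIVE_FILE];
        /* Anonymous pages are reclaimable only when swap can take them. */
        if (swap_available)
            nr += zone->stat[INACTIVE_ANON] + zone->stat[ACTIVE_ANON];
        return nr;
    }

    int main(void)
    {
        struct zone z = { .stat = { 100, 50, 200, 80 } };
        printf("%lu\n", zone_reclaimable_pages(&z, 1)); /* prints 430 */
        return 0;
    }
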
364 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size() local
367 if (!managed_zone(zone)) in lruvec_lru_size()
1536 unsigned long reclaim_clean_pages_from_list(struct zone *zone, in reclaim_clean_pages_from_list() argument
1557 ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
1560 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret); in reclaim_clean_pages_from_list()
2367 struct zone *zone = &pgdat->node_zones[z]; in get_scan_count() local
2368 if (!managed_zone(zone)) in get_scan_count()
2371 total_high_wmark += high_wmark_pages(zone); in get_scan_count()
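
Lines 2367-2371 are one instance of the iteration pattern that recurs throughout this file: walk the node's zone array, skip zones that managed_zone() rejects, and accumulate a per-node total, here the sum of high watermarks. A compact model of the pattern, with simplified stand-in types:

    #define MAX_NR_ZONES 4 /* illustrative; the kernel's value is config-dependent */

    struct zone {
        unsigned long managed_pages; /* 0 means "skip", mirroring !managed_zone() */
        unsigned long high_wmark;    /* mirrors high_wmark_pages(zone) */
    };

    struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
    };

    unsigned long total_high_wmark(const struct pglist_data *pgdat)
    {
        unsigned long total = 0;
        int z;

        for (z = 0; z < MAX_NR_ZONES; z++) {
            const struct zone *zone = &pgdat->node_zones[z];

            if (!zone->managed_pages)
                continue;
            total += zone->high_wmark;
        }
        return total;
    }
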
2721 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim() local
2722 if (!managed_zone(zone)) in should_continue_reclaim()
2725 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { in should_continue_reclaim()
2915 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) in compaction_ready() argument
2920 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); in compaction_ready()
2937 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
2939 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); in compaction_ready()
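
Lines 2915-2939 show the test that lets direct reclaim stop early for a zone: once compaction is judged able to run, reclaim only needs to hold free pages above the high watermark plus a gap of headroom for compaction to migrate pages into. A sketch of the final check, where compact_gap() follows the kernel's 2UL << order definition but should be read as illustrative, and zone_watermark_ok_safe() is reduced to a plain comparison:

    unsigned long compact_gap(unsigned int order)
    {
        return 2UL << order; /* twice the allocation size as migration headroom */
    }

    int compaction_ready(unsigned long free_pages, unsigned long high_wmark,
                         unsigned int order)
    {
        unsigned long watermark = high_wmark + compact_gap(order);

        return free_pages >= watermark;
    }
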
2953 struct zone *zone; in shrink_zones() local
2970 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
2977 if (!cpuset_zone_allowed(zone, in shrink_zones()
2992 compaction_ready(zone, sc)) { in shrink_zones()
3003 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
3013 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, in shrink_zones()
3022 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
3024 last_pgdat = zone->zone_pgdat; in shrink_zones()
3025 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
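
The last_pgdat checks at lines 3003 and 3022-3024 exist because the zonelist walk visits zones while reclaim operates per node: several zones share one pgdat, and zones belonging to the same node appear consecutively in the walk, so remembering the previous node is enough to run shrink_node() once per node. A stand-in model of that deduplication:

    #include <stddef.h>

    struct pglist_data; /* opaque stand-in for the per-node structure */

    struct zone_ref {
        struct pglist_data *zone_pgdat;
    };

    /* Assumes zones of one node are adjacent, as in a zonelist walk. */
    void shrink_each_node_once(struct zone_ref *zones, int nr,
                               void (*shrink_node)(struct pglist_data *))
    {
        struct pglist_data *last_pgdat = NULL;
        int i;

        for (i = 0; i < nr; i++) {
            if (zones[i].zone_pgdat == last_pgdat)
                continue; /* this node was just shrunk */
            last_pgdat = zones[i].zone_pgdat;
            shrink_node(last_pgdat);
        }
    }
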
3072 struct zone *zone; in do_try_to_free_pages() local
3100 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
3102 if (zone->zone_pgdat == last_pgdat) in do_try_to_free_pages()
3104 last_pgdat = zone->zone_pgdat; in do_try_to_free_pages()
3105 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
3131 struct zone *zone; in allow_direct_reclaim() local
3141 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
3142 if (!managed_zone(zone)) in allow_direct_reclaim()
3145 if (!zone_reclaimable_pages(zone)) in allow_direct_reclaim()
3148 pfmemalloc_reserve += min_wmark_pages(zone); in allow_direct_reclaim()
3149 free_pages += zone_page_state(zone, NR_FREE_PAGES); in allow_direct_reclaim()
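
Lines 3141-3149 accumulate, over the node's usable zones, the min-watermark reserve (the memory kept back for PF_MEMALLOC allocations) and the current free pages; allow_direct_reclaim() then compares the two to decide whether callers must be throttled. A sketch of that accounting; the half-reserve threshold matches the kernel's check but is shown here as an assumption of the model:

    struct node_zone {
        unsigned long managed;     /* 0 mirrors !managed_zone() */
        unsigned long reclaimable; /* mirrors zone_reclaimable_pages() */
        unsigned long min_wmark;   /* mirrors min_wmark_pages() */
        unsigned long free_pages;  /* mirrors zone_page_state(zone, NR_FREE_PAGES) */
    };

    int allow_direct_reclaim(const struct node_zone *zones, int nr)
    {
        unsigned long pfmemalloc_reserve = 0, free_pages = 0;
        int i;

        for (i = 0; i < nr; i++) {
            if (!zones[i].managed || !zones[i].reclaimable)
                continue;
            pfmemalloc_reserve += zones[i].min_wmark;
            free_pages += zones[i].free_pages;
        }
        /* Throttle once free pages fall to half the reserve. */
        return free_pages > pfmemalloc_reserve / 2;
    }
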
3181 struct zone *zone; in throttle_direct_reclaim() local
3215 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
3217 if (zone_idx(zone) > ZONE_NORMAL) in throttle_direct_reclaim()
3221 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
3250 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
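
Line 3250 is where a throttled allocator actually sleeps: it blocks killably on the node's pfmemalloc_wait queue until kswapd has restored enough free pages. A userspace model of the sleep/wake pair, with a pthreads condition variable standing in for wait_event_killable() and the kswapd-side wakeup:

    #include <pthread.h>

    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t pfmemalloc_wait = PTHREAD_COND_INITIALIZER;
    int reclaim_allowed;

    void throttle_direct_reclaim(void)
    {
        pthread_mutex_lock(&lock);
        while (!reclaim_allowed) /* re-check; wakeups can be spurious */
            pthread_cond_wait(&pfmemalloc_wait, &lock);
        pthread_mutex_unlock(&lock);
    }

    void kswapd_side_wakeup(void)
    {
        pthread_mutex_lock(&lock);
        reclaim_allowed = 1;
        pthread_cond_broadcast(&pfmemalloc_wait);
        pthread_mutex_unlock(&lock);
    }
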
3418 struct zone *zone; in pgdat_watermark_boosted() local
3428 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
3429 if (!managed_zone(zone)) in pgdat_watermark_boosted()
3432 if (zone->watermark_boost) in pgdat_watermark_boosted()
3447 struct zone *zone; in pgdat_balanced() local
3454 zone = pgdat->node_zones + i; in pgdat_balanced()
3456 if (!managed_zone(zone)) in pgdat_balanced()
3459 mark = high_wmark_pages(zone); in pgdat_balanced()
3460 if (zone_watermark_ok_safe(zone, order, mark, classzone_idx)) in pgdat_balanced()
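
Lines 3447-3460 define what "balanced" means for kswapd: the node passes as soon as any managed zone up to classzone_idx clears its high watermark for the requested order. A sketch with the order-aware zone_watermark_ok_safe() collapsed to a simple comparison:

    struct bzone {
        unsigned long managed;
        unsigned long free_pages;
        unsigned long high_wmark;
    };

    int pgdat_balanced(const struct bzone *zones, int classzone_idx)
    {
        int i;

        for (i = 0; i <= classzone_idx; i++) {
            if (!zones[i].managed)
                continue;
            if (zones[i].free_pages >= zones[i].high_wmark)
                return 1; /* one balanced zone is enough */
        }
        return 0;
    }
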
3530 struct zone *zone; in kswapd_shrink_node() local
3536 zone = pgdat->node_zones + z; in kswapd_shrink_node()
3537 if (!managed_zone(zone)) in kswapd_shrink_node()
3540 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
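
Line 3540 sets kswapd's per-zone reclaim target: at least SWAP_CLUSTER_MAX pages per eligible zone, more when the zone's high watermark is larger. The constant below matches the kernel's value but should be treated as illustrative:

    #define SWAP_CLUSTER_MAX 32UL

    unsigned long zone_reclaim_target(unsigned long high_wmark)
    {
        return high_wmark > SWAP_CLUSTER_MAX ? high_wmark : SWAP_CLUSTER_MAX;
    }
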
3584 struct zone *zone; in balance_pgdat() local
3604 zone = pgdat->node_zones + i; in balance_pgdat()
3605 if (!managed_zone(zone)) in balance_pgdat()
3608 nr_boost_reclaim += zone->watermark_boost; in balance_pgdat()
3609 zone_boosts[i] = zone->watermark_boost; in balance_pgdat()
3635 zone = pgdat->node_zones + i; in balance_pgdat()
3636 if (!managed_zone(zone)) in balance_pgdat()
3756 zone = pgdat->node_zones + i; in balance_pgdat()
3757 spin_lock_irqsave(&zone->lock, flags); in balance_pgdat()
3758 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); in balance_pgdat()
3759 spin_unlock_irqrestore(&zone->lock, flags); in balance_pgdat()
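
Lines 3608-3609 snapshot each zone's watermark_boost before reclaim, and lines 3756-3759 subtract that snapshot afterwards under zone->lock; the min() matters because the boost can grow concurrently, so only the snapshotted amount may be cleared. A model with a pthread mutex standing in for the zone spinlock:

    #include <pthread.h>

    struct boosted_zone {
        pthread_mutex_t lock; /* stand-in for spin_lock_irqsave(&zone->lock) */
        unsigned long watermark_boost;
    };

    void clear_snapshotted_boost(struct boosted_zone *zone, unsigned long snapshot)
    {
        pthread_mutex_lock(&zone->lock);
        /* Never subtract more boost than is currently present. */
        if (snapshot > zone->watermark_boost)
            snapshot = zone->watermark_boost;
        zone->watermark_boost -= snapshot;
        pthread_mutex_unlock(&zone->lock);
    }
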
3973 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, in wakeup_kswapd() argument
3978 if (!managed_zone(zone)) in wakeup_kswapd()
3981 if (!cpuset_zone_allowed(zone, gfp_flags)) in wakeup_kswapd()
3983 pgdat = zone->zone_pgdat; in wakeup_kswapd()