Lines Matching refs:zone

600 unsigned long zone_reclaimable_pages(struct zone *zone)  in zone_reclaimable_pages()  argument
604 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + in zone_reclaimable_pages()
605 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); in zone_reclaimable_pages()
606 if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL)) in zone_reclaimable_pages()
607 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + in zone_reclaimable_pages()
608 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); in zone_reclaimable_pages()
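
The zone_reclaimable_pages() lines above always count a zone's file LRU pages and add the anon LRU pages only when anonymous memory can actually be reclaimed (e.g. swap is available). A minimal standalone sketch of that pattern, using simplified stand-in types rather than the kernel's API:

    enum counter { INACTIVE_FILE, ACTIVE_FILE, INACTIVE_ANON, ACTIVE_ANON, NR_COUNTERS };

    struct zone_model {
        long state[NR_COUNTERS];    /* per-zone page counters */
        int  can_reclaim_anon;      /* stand-in for can_reclaim_anon_pages() */
    };

    static unsigned long zone_reclaimable_pages_model(const struct zone_model *z)
    {
        unsigned long nr = z->state[INACTIVE_FILE] + z->state[ACTIVE_FILE];

        if (z->can_reclaim_anon)
            nr += z->state[INACTIVE_ANON] + z->state[ACTIVE_ANON];
        return nr;
    }
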
626 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size() local
628 if (!managed_zone(zone)) in lruvec_lru_size()
634 size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru); in lruvec_lru_size()
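
lruvec_lru_size() above sums one LRU list's per-zone counters up to a given zone index, skipping zones with no managed pages. A sketch under the same simplification:

    #define MAX_NR_ZONES_MODEL 4    /* illustrative bound */

    static unsigned long lruvec_lru_size_model(const long per_zone[MAX_NR_ZONES_MODEL],
                                               const int managed[MAX_NR_ZONES_MODEL],
                                               int zone_idx)
    {
        unsigned long size = 0;
        int zid;

        for (zid = 0; zid <= zone_idx; zid++) {
            if (!managed[zid])
                continue;           /* mirrors the !managed_zone(zone) check */
            size += per_zone[zid];
        }
        return size;
    }
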
1866 unsigned int reclaim_clean_pages_from_list(struct zone *zone, in reclaim_clean_pages_from_list() argument
1895 nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
1900 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1908 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, in reclaim_clean_pages_from_list()
1910 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
2674 struct zone *zone = &pgdat->node_zones[z]; in prepare_scan_count() local
2676 if (!managed_zone(zone)) in prepare_scan_count()
2679 total_high_wmark += high_wmark_pages(zone); in prepare_scan_count()
2947 #define for_each_gen_type_zone(gen, type, zone) \ argument
2950 for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
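
Only the innermost loop of for_each_gen_type_zone() appears in this listing; the outer loops over generations and page types fall outside the matches. A plausible standalone reconstruction, with assumed bounds, consistent with how reset_batch_size() and lru_gen_init_lruvec() below use the macro:

    #define MAX_NR_GENS   4    /* assumed bound */
    #define ANON_AND_FILE 2    /* anon and file page types */
    #define MAX_NR_ZONES  4    /* assumed bound */

    /* Only the zone loop is visible above; the gen and type loops are inferred. */
    #define for_each_gen_type_zone(gen, type, zone)                \
        for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)              \
            for ((type) = 0; (type) < ANON_AND_FILE; (type)++)     \
                for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)

    static long sum_all_buckets(const long nr[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES])
    {
        int gen, type, zone;
        long total = 0;

        for_each_gen_type_zone(gen, type, zone)
            total += nr[gen][type][zone];
        return total;
    }
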
3506 int zone = page_zonenum(page); in update_batch_size() local
3514 walk->nr_pages[old_gen][type][zone] -= delta; in update_batch_size()
3515 walk->nr_pages[new_gen][type][zone] += delta; in update_batch_size()
3520 int gen, type, zone; in reset_batch_size() local
3525 for_each_gen_type_zone(gen, type, zone) { in reset_batch_size()
3527 int delta = walk->nr_pages[gen][type][zone]; in reset_batch_size()
3532 walk->nr_pages[gen][type][zone] = 0; in reset_batch_size()
3533 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], in reset_batch_size()
3534 lrugen->nr_pages[gen][type][zone] + delta); in reset_batch_size()
3538 __update_lru_size(lruvec, lru, zone, delta); in reset_batch_size()
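
update_batch_size() and reset_batch_size() above implement a batching pattern: a page-table walk accumulates signed per-(generation, type, zone) deltas in a private array, then flushes every nonzero delta into the shared counters in one pass. A sketch of the flush; the kernel publishes with WRITE_ONCE() and also fixes up the classic LRU sizes, both elided here:

    #define NGENS  4
    #define NTYPES 2
    #define NZONES 4

    struct walk_model {
        int nr_pages[NGENS][NTYPES][NZONES];    /* private deltas */
    };

    static void reset_batch_size_model(struct walk_model *walk,
                                       long shared[NGENS][NTYPES][NZONES])
    {
        int gen, type, zone;

        for (gen = 0; gen < NGENS; gen++)
            for (type = 0; type < NTYPES; type++)
                for (zone = 0; zone < NZONES; zone++) {
                    int delta = walk->nr_pages[gen][type][zone];

                    if (!delta)
                        continue;
                    walk->nr_pages[gen][type][zone] = 0;
                    shared[gen][type][zone] += delta;   /* kernel: WRITE_ONCE() */
                }
    }
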
4047 int zone; in inc_min_seq() local
4056 for (zone = 0; zone < MAX_NR_ZONES; zone++) { in inc_min_seq()
4057 struct list_head *head = &lrugen->pages[old_gen][type][zone]; in inc_min_seq()
4065 VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page); in inc_min_seq()
4068 list_move_tail(&page->lru, &lrugen->pages[new_gen][type][zone]); in inc_min_seq()
4083 int gen, type, zone; in try_to_inc_min_seq() local
4095 for (zone = 0; zone < MAX_NR_ZONES; zone++) { in try_to_inc_min_seq()
4096 if (!list_empty(&lrugen->pages[gen][type][zone])) in try_to_inc_min_seq()
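
inc_min_seq() above carries any leftover pages of the retiring generation onto the next generation's lists, and try_to_inc_min_seq() advances min_seq only once the oldest generation's per-type, per-zone lists are all empty. A sketch of that emptiness check, with list lengths standing in for the kernel's list_empty():

    #include <stdbool.h>

    #define NTYPES 2
    #define NZONES 4

    static bool oldest_gen_empty_model(const int list_len[NTYPES][NZONES])
    {
        int type, zone;

        for (type = 0; type < NTYPES; type++)
            for (zone = 0; zone < NZONES; zone++)
                if (list_len[type][zone])   /* kernel: !list_empty(...) */
                    return false;
        return true;
    }
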
4127 int type, zone; in inc_max_seq() local
4158 for (zone = 0; zone < MAX_NR_ZONES; zone++) { in inc_max_seq()
4160 long delta = lrugen->nr_pages[prev][type][zone] - in inc_max_seq()
4161 lrugen->nr_pages[next][type][zone]; in inc_max_seq()
4166 __update_lru_size(lruvec, lru, zone, delta); in inc_max_seq()
4167 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
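
When inc_max_seq() above recycles a generation slot, the difference between the slot's previous and next page counts is folded into the classic inactive/active LRU sizes, keeping the non-generational counters consistent. A sketch of that per-zone transfer (the kernel applies it through __update_lru_size()):

    static void carry_over_model(long *inactive_size, long *active_size,
                                 long prev_pages, long next_pages)
    {
        long delta = prev_pages - next_pages;

        if (!delta)
            return;
        *inactive_size += delta;
        *active_size   -= delta;
    }
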
4233 int gen, type, zone; in lruvec_is_sizable() local
4247 for (zone = 0; zone < MAX_NR_ZONES; zone++) in lruvec_is_sizable()
4248 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lruvec_is_sizable()
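
lruvec_is_sizable() above sums the per-bucket counters clamped at zero; the same max(..., 0L) idiom reappears in should_run_aging() and lru_gen_seq_show() further down. Because deltas arrive in batches (see reset_batch_size() above), a counter can be transiently negative, so each read is clamped before summing. A sketch:

    static long total_pages_model(const long nr[4][2][4])
    {
        long total = 0;
        int gen, type, zone;

        for (gen = 0; gen < 4; gen++)
            for (type = 0; type < 2; type++)
                for (zone = 0; zone < 4; zone++) {
                    long n = nr[gen][type][zone];   /* kernel: READ_ONCE() */
                    total += n > 0 ? n : 0;         /* kernel: max(n, 0L) */
                }
        return total;
    }
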
4460 int zone = page_zonenum(page); in sort_page() local
4489 list_move(&page->lru, &lrugen->pages[gen][type][zone]); in sort_page()
4498 list_move_tail(&page->lru, &lrugen->pages[gen][type][zone]); in sort_page()
4506 if (zone > sc->reclaim_idx || skip_cma(page, sc)) { in sort_page()
4508 list_move_tail(&page->lru, &lrugen->pages[gen][type][zone]); in sort_page()
4516 list_move(&page->lru, &lrugen->pages[gen][type][zone]); in sort_page()
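
sort_page() above routes each page to the head or tail of its (generation, type, zone) bucket; the branch visible at line 4506 rotates a page to the tail when its zone lies above sc->reclaim_idx, or when CMA pages are being skipped, so the scan passes over it. A sketch of just that decision, with the promotion and protection branches collapsed:

    #include <stdbool.h>

    enum sort_action { SORT_ISOLATE, SORT_ROTATE_TAIL };

    static enum sort_action sort_page_model(int page_zone, int reclaim_idx,
                                            bool skipped_cma)
    {
        if (page_zone > reclaim_idx || skipped_cma)
            return SORT_ROTATE_TAIL;    /* unusable for this reclaim: skip */
        return SORT_ISOLATE;
    }
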
4580 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; in scan_pages() local
4581 struct list_head *head = &lrugen->pages[gen][type][zone]; in scan_pages()
4590 VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page); in scan_pages()
4610 __count_zid_vm_events(PGSCAN_SKIP, zone, skipped); in scan_pages()
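
scan_pages() above walks zones in round-robin order, starting at sc->reclaim_idx and wrapping modulo MAX_NR_ZONES, so each zone is visited once per scan regardless of the starting point; skipped pages are charged to PGSCAN_SKIP. A sketch of the visit order:

    static void scan_order_model(int reclaim_idx, int nr_zones,
                                 void (*visit)(int zone))
    {
        int i;

        for (i = 0; i < nr_zones; i++)
            visit((reclaim_idx + i) % nr_zones);
    }
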
4818 int gen, type, zone; in should_run_aging() local
4840 for (zone = 0; zone < MAX_NR_ZONES; zone++) in should_run_aging()
4841 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in should_run_aging()
5218 int gen, type, zone; in state_is_valid() local
5220 for_each_gen_type_zone(gen, type, zone) { in state_is_valid()
5221 if (!list_empty(&lrugen->pages[gen][type][zone])) in state_is_valid()
5262 int gen, type, zone; in drain_evictable() local
5265 for_each_gen_type_zone(gen, type, zone) { in drain_evictable()
5266 struct list_head *head = &lruvec->lrugen.pages[gen][type][zone]; in drain_evictable()
5275 VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page); in drain_evictable()
5566 int type, zone; in lru_gen_seq_show() local
5576 for (zone = 0; zone < MAX_NR_ZONES; zone++) in lru_gen_seq_show()
5577 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lru_gen_seq_show()
5793 int gen, type, zone; in lru_gen_init_lruvec() local
5802 for_each_gen_type_zone(gen, type, zone) in lru_gen_init_lruvec()
5803 INIT_LIST_HEAD(&lrugen->pages[gen][type][zone]); in lru_gen_init_lruvec()
6107 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim() local
6108 if (!managed_zone(zone)) in should_continue_reclaim()
6111 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { in should_continue_reclaim()
6312 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) in compaction_ready() argument
6317 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); in compaction_ready()
6334 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
6336 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); in compaction_ready()
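
compaction_ready() above lets reclaim back off once a zone holds enough free pages above its high watermark to give compaction working room. A sketch of that test; the gap mirrors compact_gap(order), assumed here to be 2UL << order, which this listing does not itself show:

    #include <stdbool.h>

    static bool compaction_ready_model(unsigned long free_pages,
                                       unsigned long high_wmark,
                                       unsigned int order)
    {
        unsigned long gap = 2UL << order;    /* assumed compact_gap(order) */

        return free_pages >= high_wmark + gap;
    }
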
6350 struct zone *zone; in shrink_zones() local
6367 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
6374 if (!cpuset_zone_allowed(zone, in shrink_zones()
6389 compaction_ready(zone, sc)) { in shrink_zones()
6400 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6410 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, in shrink_zones()
6419 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6421 last_pgdat = zone->zone_pgdat; in shrink_zones()
6422 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
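
The last_pgdat checks in shrink_zones() above exist because a zonelist visits a node's zones back to back, while node-wide work (soft-limit reclaim, shrink_node()) must run once per node; the loop remembers the last node processed and skips its remaining zones. The same idiom recurs in do_try_to_free_pages() below. A sketch:

    static void shrink_zones_model(const int zone_to_node[], int nr_zones,
                                   void (*shrink_node_fn)(int node))
    {
        int last_node = -1;    /* stand-in for last_pgdat */
        int i;

        for (i = 0; i < nr_zones; i++) {
            if (zone_to_node[i] == last_node)
                continue;
            last_node = zone_to_node[i];
            shrink_node_fn(last_node);
        }
    }
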
6469 struct zone *zone; in do_try_to_free_pages() local
6498 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
6500 if (zone->zone_pgdat == last_pgdat) in do_try_to_free_pages()
6502 last_pgdat = zone->zone_pgdat; in do_try_to_free_pages()
6504 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
6510 zone->zone_pgdat); in do_try_to_free_pages()
6554 struct zone *zone; in allow_direct_reclaim() local
6564 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
6565 if (!managed_zone(zone)) in allow_direct_reclaim()
6568 if (!zone_reclaimable_pages(zone)) in allow_direct_reclaim()
6571 pfmemalloc_reserve += min_wmark_pages(zone); in allow_direct_reclaim()
6572 free_pages += zone_page_state(zone, NR_FREE_PAGES); in allow_direct_reclaim()
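
allow_direct_reclaim() above builds the node's pfmemalloc reserve from the min watermarks of its managed zones that still have reclaimable pages and weighs it against their free pages. A sketch; the half-reserve threshold follows the usual pattern but is not visible in this listing, so treat it as an assumption:

    #include <stdbool.h>

    static bool allow_direct_reclaim_model(const unsigned long min_wmark[],
                                           const unsigned long free[],
                                           const bool managed[],
                                           const bool reclaimable[],
                                           int nr_zones)
    {
        unsigned long reserve = 0, free_pages = 0;
        int i;

        for (i = 0; i < nr_zones; i++) {
            if (!managed[i] || !reclaimable[i])
                continue;
            reserve += min_wmark[i];
            free_pages += free[i];
        }
        if (!reserve)
            return true;                    /* nothing left to protect */
        return free_pages > reserve / 2;    /* assumed threshold */
    }
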
6605 struct zone *zone; in throttle_direct_reclaim() local
6639 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
6641 if (zone_idx(zone) > ZONE_NORMAL) in throttle_direct_reclaim()
6645 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
6671 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6836 struct zone *zone; in pgdat_watermark_boosted() local
6846 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
6847 if (!managed_zone(zone)) in pgdat_watermark_boosted()
6850 if (zone->watermark_boost) in pgdat_watermark_boosted()
6865 struct zone *zone; in pgdat_balanced() local
6872 zone = pgdat->node_zones + i; in pgdat_balanced()
6874 if (!managed_zone(zone)) in pgdat_balanced()
6877 mark = high_wmark_pages(zone); in pgdat_balanced()
6878 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) in pgdat_balanced()
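
pgdat_balanced() above declares the node balanced as soon as any managed zone at or below the requested index clears its high watermark for the given order. A sketch with a plain comparison standing in for zone_watermark_ok_safe():

    #include <stdbool.h>

    static bool pgdat_balanced_model(const unsigned long free[],
                                     const unsigned long high_wmark[],
                                     const bool managed[],
                                     int highest_zoneidx)
    {
        int i;

        for (i = 0; i <= highest_zoneidx; i++) {
            if (!managed[i])
                continue;
            if (free[i] >= high_wmark[i])
                return true;
        }
        return false;
    }
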
6951 struct zone *zone; in kswapd_shrink_node() local
6957 zone = pgdat->node_zones + z; in kswapd_shrink_node()
6958 if (!managed_zone(zone)) in kswapd_shrink_node()
6961 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
6988 struct zone *zone; in update_reclaim_active() local
6991 zone = pgdat->node_zones + i; in update_reclaim_active()
6993 if (!managed_zone(zone)) in update_reclaim_active()
6997 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6999 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
7037 struct zone *zone; in balance_pgdat() local
7057 zone = pgdat->node_zones + i; in balance_pgdat()
7058 if (!managed_zone(zone)) in balance_pgdat()
7061 nr_boost_reclaim += zone->watermark_boost; in balance_pgdat()
7062 zone_boosts[i] = zone->watermark_boost; in balance_pgdat()
7089 zone = pgdat->node_zones + i; in balance_pgdat()
7090 if (!managed_zone(zone)) in balance_pgdat()
7211 zone = pgdat->node_zones + i; in balance_pgdat()
7212 spin_lock_irqsave(&zone->lock, flags); in balance_pgdat()
7213 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); in balance_pgdat()
7214 spin_unlock_irqrestore(&zone->lock, flags); in balance_pgdat()
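
The balance_pgdat() lines above show the watermark-boost lifecycle: kswapd snapshots each zone's watermark_boost into zone_boosts[] before reclaiming, then removes at most that snapshot afterwards, so boosts added concurrently survive. The kernel does the subtraction under zone->lock; a lock-free sketch of the arithmetic:

    static void drop_boost_model(unsigned long *watermark_boost,
                                 unsigned long snapshot)
    {
        unsigned long cut = *watermark_boost < snapshot
                          ? *watermark_boost : snapshot;    /* min() */

        *watermark_boost -= cut;
    }
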
7436 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, in wakeup_kswapd() argument
7442 if (!managed_zone(zone)) in wakeup_kswapd()
7445 if (!cpuset_zone_allowed(zone, gfp_flags)) in wakeup_kswapd()
7448 pgdat = zone->zone_pgdat; in wakeup_kswapd()