Lines matching refs: pgdat

Each entry below is a use of the per-node pglist_data ("pgdat") in the kernel's page-reclaim path: the source line number, the code fragment, and the enclosing function, with "argument" or "local" noting how pgdat enters scope.
1123 struct pglist_data *pgdat, in shrink_page_list() argument
1239 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_page_list()
1380 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_page_list()
1838 pg_data_t *pgdat = page_pgdat(page); in isolate_lru_page() local
1841 spin_lock_irq(&pgdat->lru_lock); in isolate_lru_page()
1842 lruvec = mem_cgroup_page_lruvec(page, pgdat); in isolate_lru_page()
1850 spin_unlock_irq(&pgdat->lru_lock); in isolate_lru_page()
1862 static int too_many_isolated(struct pglist_data *pgdat, int file, in too_many_isolated() argument
1874 inactive = node_page_state(pgdat, NR_INACTIVE_FILE); in too_many_isolated()
1875 isolated = node_page_state(pgdat, NR_ISOLATED_FILE); in too_many_isolated()
1877 inactive = node_page_state(pgdat, NR_INACTIVE_ANON); in too_many_isolated()
1878 isolated = node_page_state(pgdat, NR_ISOLATED_ANON); in too_many_isolated()
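
The too_many_isolated() hits above are the direct-reclaim throttle: a reclaimer stalls once more pages are isolated from the node's LRU than remain on the matching inactive list. A minimal user-space sketch of that ratio test, with inactive/isolated standing in for the node_page_state() counters and can_io_fs for the (__GFP_IO | __GFP_FS) check:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch, not kernel code: models the too_many_isolated() ratio test. */
    static bool too_many_isolated_model(unsigned long inactive,
                                        unsigned long isolated, bool can_io_fs)
    {
        /* Callers that may do IO/FS writeback get a stricter limit, so
         * GFP_NOIO/GFP_NOFS reclaimers are never queued behind them. */
        if (can_io_fs)
            inactive >>= 3;
        return isolated > inactive;
    }

    int main(void)
    {
        printf("%d\n", too_many_isolated_model(1024, 200, true));  /* 1: throttle */
        printf("%d\n", too_many_isolated_model(1024, 200, false)); /* 0: proceed  */
        return 0;
    }
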
1915 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in move_pages_to_lru() local
1926 spin_unlock_irq(&pgdat->lru_lock); in move_pages_to_lru()
1928 spin_lock_irq(&pgdat->lru_lock); in move_pages_to_lru()
1931 lruvec = mem_cgroup_page_lruvec(page, pgdat); in move_pages_to_lru()
1947 spin_unlock_irq(&pgdat->lru_lock); in move_pages_to_lru()
1949 spin_lock_irq(&pgdat->lru_lock); in move_pages_to_lru()
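
move_pages_to_lru() shows the recurring pgdat->lru_lock discipline: cheap list moves happen under the lock, but it is dropped and retaken around slow work (the putback and compound-page paths at 1926-1928 and 1947-1949; the kernel variant also disables IRQs). A hedged pthread sketch of the drop/reacquire pattern, with is_unevictable() and putback_slow() as hypothetical stand-ins:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_spinlock_t lru_lock;                 /* models pgdat->lru_lock */
    static bool is_unevictable(void *p) { return p == NULL; }  /* placeholder */
    static void putback_slow(void *p)   { (void)p; }           /* placeholder */

    /* Sketch: process a batch under the lock, releasing it around any
     * operation too heavy to run with a spinlock held. */
    static void move_batch(void **pages, int n)
    {
        pthread_spin_lock(&lru_lock);
        for (int i = 0; i < n; i++) {
            if (is_unevictable(pages[i])) {
                pthread_spin_unlock(&lru_lock);
                putback_slow(pages[i]);     /* slow path: lock dropped */
                pthread_spin_lock(&lru_lock);
                continue;
            }
            /* cheap: splice the page onto its lruvec list while locked */
        }
        pthread_spin_unlock(&lru_lock);
    }

    int main(void)
    {
        void *pages[2] = { (void *)1, NULL };
        pthread_spin_init(&lru_lock, PTHREAD_PROCESS_PRIVATE);
        move_batch(pages, 2);
        pthread_spin_destroy(&lru_lock);
        return 0;
    }
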
1995 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_inactive_list() local
1998 while (unlikely(too_many_isolated(pgdat, file, sc))) { in shrink_inactive_list()
2013 spin_lock_irq(&pgdat->lru_lock); in shrink_inactive_list()
2018 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_inactive_list()
2025 spin_unlock_irq(&pgdat->lru_lock); in shrink_inactive_list()
2030 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false); in shrink_inactive_list()
2033 spin_lock_irq(&pgdat->lru_lock); in shrink_inactive_list()
2037 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2044 spin_unlock_irq(&pgdat->lru_lock); in shrink_inactive_list()
2072 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
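
Lines 2013-2044 show shrink_inactive_list()'s accounting shape: take the lock, isolate a batch and add nr_taken to NR_ISOLATED_ANON/FILE, drop the lock, reclaim via shrink_page_list(), then retake the lock and subtract nr_taken while putting survivors back. A schematic sketch of that balanced +/- accounting; every helper here is a placeholder for its kernel namesake:

    #include <stdio.h>

    static long nr_isolated;                 /* models the NR_ISOLATED counter */
    static void lock(void)    {}
    static void unlock(void)  {}
    static long isolate(void) { return 32; } /* batch size, placeholder */
    static void reclaim(void) {}             /* shrink_page_list() analogue */
    static void putback(void) {}

    /* Sketch: the counter is raised and lowered by the same nr_taken,
     * under the lock, on either side of the unlocked reclaim pass. */
    static void shrink_inactive_model(void)
    {
        lock();
        long nr_taken = isolate();
        nr_isolated += nr_taken;         /* __mod_node_page_state(+nr_taken) */
        unlock();

        reclaim();                       /* no lock held here */

        lock();
        putback();
        nr_isolated -= nr_taken;         /* __mod_node_page_state(-nr_taken) */
        unlock();
    }

    int main(void)
    {
        shrink_inactive_model();
        printf("isolated after pass: %ld\n", nr_isolated); /* prints 0 */
        return 0;
    }
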
2092 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_active_list() local
2098 spin_lock_irq(&pgdat->lru_lock); in shrink_active_list()
2103 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_active_list()
2109 spin_unlock_irq(&pgdat->lru_lock); in shrink_active_list()
2169 spin_lock_irq(&pgdat->lru_lock); in shrink_active_list()
2179 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2180 spin_unlock_irq(&pgdat->lru_lock); in shrink_active_list()
2184 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2661 static inline bool should_continue_reclaim(struct pglist_data *pgdat, in should_continue_reclaim() argument
2688 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
2707 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); in should_continue_reclaim()
2709 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); in should_continue_reclaim()
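
should_continue_reclaim() (2661-2709) keeps reclaim running for costly allocations until enough inactive pages exist for compaction to proceed. A sketch of its final test, assuming compact_gap()'s usual definition of roughly twice the request (2UL << order); inactive_lru_pages stands in for the summed node_page_state() counters:

    #include <stdbool.h>

    /* Sketch: continue reclaiming while the node still has more inactive
     * pages than the gap compaction needs for this order. */
    static bool should_continue_model(unsigned int order,
                                      unsigned long inactive_lru_pages)
    {
        unsigned long pages_for_compaction = 2UL << order;
        return inactive_lru_pages > pages_for_compaction;
    }
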
2714 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) in shrink_node_memcgs() argument
2721 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in shrink_node_memcgs()
2765 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
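
shrink_node_memcgs() (2714-2765) walks every memcg charged against the node, reclaiming that memcg's per-node lruvec and then its slab caches. A loop-shape sketch; the list type and both workers are placeholders for mem_cgroup_iter(), shrink_lruvec() and shrink_slab():

    /* Shape sketch of shrink_node_memcgs(): visit each memcg with pages
     * on this node, reclaim its LRU lists, then its slab objects. */
    struct memcg_model { struct memcg_model *next; };

    static void shrink_lruvec_model(struct memcg_model *m) { (void)m; }
    static void shrink_slab_model(struct memcg_model *m)   { (void)m; }

    static void shrink_node_memcgs_model(struct memcg_model *head)
    {
        for (struct memcg_model *m = head; m; m = m->next) {
            shrink_lruvec_model(m);   /* per-memcg, per-node LRU reclaim */
            shrink_slab_model(m);     /* then that memcg's slab caches   */
        }
    }
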
2776 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) in shrink_node() argument
2784 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
2795 spin_lock_irq(&pgdat->lru_lock); in shrink_node()
2798 spin_unlock_irq(&pgdat->lru_lock); in shrink_node()
2855 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in shrink_node()
2856 file = node_page_state(pgdat, NR_ACTIVE_FILE) + in shrink_node()
2857 node_page_state(pgdat, NR_INACTIVE_FILE); in shrink_node()
2860 struct zone *zone = &pgdat->node_zones[z]; in shrink_node()
2872 anon = node_page_state(pgdat, NR_INACTIVE_ANON); in shrink_node()
2880 shrink_node_memcgs(pgdat, sc); in shrink_node()
2914 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
2918 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
2954 if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, in shrink_node()
2965 pgdat->kswapd_failures = 0; in shrink_node()
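
Lines 2914/2918 are the producer side of the pgdat->flags bits tested back at 1239/1380 and cleared at 3556-3557: shrink_node() marks the node writeback- or dirty-congested, shrink_page_list() throttles on the marks, and clear_pgdat_congested() drops them once the node recovers. A sketch of that shared-flag-word pattern using C11 atomics and bit masks in place of the kernel's bit-number set_bit()/test_bit()/clear_bit():

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { PGDAT_DIRTY = 1u << 0, PGDAT_WRITEBACK = 1u << 1 }; /* masks here */
    static atomic_uint node_flags;             /* models pgdat->flags */

    static void mark_congested(unsigned bit)   /* set_bit() analogue   */
    { atomic_fetch_or(&node_flags, bit); }

    static bool is_congested(unsigned bit)     /* test_bit() analogue  */
    { return atomic_load(&node_flags) & bit; }

    static void clear_congested(void)          /* clear_pgdat_congested() */
    { atomic_fetch_and(&node_flags, ~(PGDAT_DIRTY | PGDAT_WRITEBACK)); }
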
3093 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) in snapshot_refaults() argument
3098 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); in snapshot_refaults()
3210 static bool allow_direct_reclaim(pg_data_t *pgdat) in allow_direct_reclaim() argument
3218 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
3222 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
3240 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
3241 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
3242 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
3244 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
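
allow_direct_reclaim() (3210-3244) lets direct reclaim proceed only while free pages across the node's lower zones exceed half the min-watermark reserve, waking kswapd otherwise; the kswapd_failures check at 3218 short-circuits on a node kswapd has given up on. A compact model of the half-reserve test, with nr_free and pfmemalloc_reserve standing in for the per-zone sums:

    #include <stdbool.h>

    /* Sketch of allow_direct_reclaim()'s core test: throttle direct
     * reclaim once free memory falls to half the pfmemalloc reserve. */
    static bool allow_direct_reclaim_model(unsigned long nr_free,
                                           unsigned long pfmemalloc_reserve)
    {
        if (!pfmemalloc_reserve)      /* no usable zone: never throttle */
            return true;
        return nr_free > pfmemalloc_reserve / 2;
    }
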
3264 pg_data_t *pgdat = NULL; in throttle_direct_reclaim() local
3303 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
3304 if (allow_direct_reclaim(pgdat)) in throttle_direct_reclaim()
3310 if (!pgdat) in throttle_direct_reclaim()
3325 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
3326 allow_direct_reclaim(pgdat), HZ); in throttle_direct_reclaim()
3333 allow_direct_reclaim(pgdat)); in throttle_direct_reclaim()
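
throttle_direct_reclaim() (3264-3334) is the sleeping side: a throttled task waits on pgdat->pfmemalloc_wait either for up to HZ or killably until allow_direct_reclaim() passes, and balance_pgdat()/prepare_kswapd_sleep() wake the queue (3791-3793, 3582-3583). A pthread condition-variable sketch of the timed variant, with one second standing in for HZ:

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t pfmemalloc_wait = PTHREAD_COND_INITIALIZER;
    static bool reclaim_ok;     /* set by the waker before broadcasting */

    /* Sketch of wait_event_interruptible_timeout(pfmemalloc_wait,
     * allow_direct_reclaim(pgdat), HZ): sleep until woken or ~1s passes. */
    static void throttle_model(void)
    {
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;                            /* ~HZ */

        pthread_mutex_lock(&m);
        while (!reclaim_ok &&
               pthread_cond_timedwait(&pfmemalloc_wait, &m, &deadline) == 0)
            ;                       /* woken: re-check the condition */
        pthread_mutex_unlock(&m);
    }
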
3391 pg_data_t *pgdat, in mem_cgroup_shrink_node() argument
3394 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in mem_cgroup_shrink_node()
3468 static void age_active_anon(struct pglist_data *pgdat, in age_active_anon() argument
3477 lruvec = mem_cgroup_lruvec(NULL, pgdat); in age_active_anon()
3483 lruvec = mem_cgroup_lruvec(memcg, pgdat); in age_active_anon()
3490 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) in pgdat_watermark_boosted() argument
3503 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
3518 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) in pgdat_balanced() argument
3529 zone = pgdat->node_zones + i; in pgdat_balanced()
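
pgdat_balanced() (3518-3529) walks zones bottom-up and declares the node balanced as soon as any managed zone up to highest_zoneidx clears its high watermark for the requested order. A sketch over a plain array, where managed/free/high_wmark stand in for managed_zone(), zone_watermark_ok_safe() and high_wmark_pages():

    #include <stdbool.h>

    struct zone_model { bool managed; unsigned long free, high_wmark; };

    /* Sketch of pgdat_balanced(): one managed zone over its high
     * watermark is enough; a node with no managed zone in range is
     * balanced by definition. */
    static bool pgdat_balanced_model(const struct zone_model *zones,
                                     int highest_zoneidx)
    {
        bool seen_managed = false;

        for (int i = 0; i <= highest_zoneidx; i++) {
            if (!zones[i].managed)
                continue;
            seen_managed = true;
            if (zones[i].free >= zones[i].high_wmark)
                return true;
        }
        return !seen_managed;   /* nothing to balance */
    }
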
3551 static void clear_pgdat_congested(pg_data_t *pgdat) in clear_pgdat_congested() argument
3553 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); in clear_pgdat_congested()
3556 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
3557 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
3566 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, in prepare_kswapd_sleep() argument
3582 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
3583 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
3586 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
3589 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { in prepare_kswapd_sleep()
3590 clear_pgdat_congested(pgdat); in prepare_kswapd_sleep()
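
prepare_kswapd_sleep() (3566-3593) combines the pieces above: wake any pfmemalloc waiters, sleep unconditionally once kswapd_failures hits MAX_RECLAIM_RETRIES (16 in the kernel), and otherwise sleep only when pgdat_balanced() holds, clearing the congestion bits on the way out. A condensed decision sketch, with balanced standing in for the pgdat_balanced() call:

    #include <stdbool.h>

    #define MAX_RECLAIM_RETRIES 16

    /* Decision sketch for prepare_kswapd_sleep(). */
    static bool prepare_kswapd_sleep_model(int kswapd_failures, bool balanced)
    {
        if (kswapd_failures >= MAX_RECLAIM_RETRIES)
            return true;            /* hopeless: leave it to direct reclaim */
        if (balanced) {
            /* clear_pgdat_congested(pgdat) would run here */
            return true;
        }
        return false;               /* keep reclaiming */
    }
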
3605 static bool kswapd_shrink_node(pg_data_t *pgdat, in kswapd_shrink_node() argument
3614 zone = pgdat->node_zones + z; in kswapd_shrink_node()
3625 shrink_node(pgdat, sc); in kswapd_shrink_node()
3653 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) in balance_pgdat() argument
3682 zone = pgdat->node_zones + i; in balance_pgdat()
3713 zone = pgdat->node_zones + i; in balance_pgdat()
3729 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); in balance_pgdat()
3762 age_active_anon(pgdat, &sc); in balance_pgdat()
3774 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, in balance_pgdat()
3783 if (kswapd_shrink_node(pgdat, &sc)) in balance_pgdat()
3791 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
3792 allow_direct_reclaim(pgdat)) in balance_pgdat()
3793 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
3822 pgdat->kswapd_failures++; in balance_pgdat()
3834 zone = pgdat->node_zones + i; in balance_pgdat()
3844 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); in balance_pgdat()
3847 snapshot_refaults(NULL, pgdat); in balance_pgdat()
3868 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, in kswapd_highest_zoneidx() argument
3871 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
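
kswapd_highest_zoneidx() (3868-3871) reads the wakeup hint racily published by wakeup_kswapd() (4116) and falls back to the previous value when the field still holds its MAX_NR_ZONES sentinel (reset at 4000/4017). A one-liner sketch with C11 atomics in place of READ_ONCE; the sentinel value is config-dependent and merely illustrative here:

    #include <stdatomic.h>

    #define MAX_NR_ZONES 5          /* sentinel: "no request posted" */
    static atomic_int posted_zoneidx = MAX_NR_ZONES;

    /* Sketch: take the racily-published zone index if one was posted,
     * else keep the previous one. */
    static int kswapd_highest_zoneidx_model(int prev)
    {
        int curr = atomic_load_explicit(&posted_zoneidx,
                                        memory_order_relaxed);
        return curr == MAX_NR_ZONES ? prev : curr;
    }
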
3876 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, in kswapd_try_to_sleep() argument
3885 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
3894 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { in kswapd_try_to_sleep()
3901 reset_isolation_suitable(pgdat); in kswapd_try_to_sleep()
3907 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); in kswapd_try_to_sleep()
3917 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
3918 kswapd_highest_zoneidx(pgdat, in kswapd_try_to_sleep()
3921 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
3922 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
3925 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
3926 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
3934 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { in kswapd_try_to_sleep()
3935 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
3945 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); in kswapd_try_to_sleep()
3950 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); in kswapd_try_to_sleep()
3957 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
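
kswapd_try_to_sleep() (3876-3958) sleeps in two stages: if prepare_kswapd_sleep() passes it first naps briefly, priming compaction via wakeup_kcompactd() and re-arming the wakeup hints (3917-3922), then re-checks and only then sleeps indefinitely, adjusting the vmstat thresholds around the long sleep (3945-3950). A control-flow sketch with placeholder helpers:

    #include <stdbool.h>

    static bool ready_to_sleep(void) { return true; } /* prepare_kswapd_sleep() */
    static void short_nap(void)      {}               /* brief timeout sleep    */
    static void long_sleep(void)     {}               /* sleep until woken      */

    /* Two-stage sleep sketch: the short nap absorbs premature wakeups
     * before kswapd commits to the full sleep. */
    static void kswapd_try_to_sleep_model(void)
    {
        if (ready_to_sleep()) {
            /* kick kcompactd and re-arm the wakeup hints, then nap */
            short_nap();
        }
        if (ready_to_sleep())        /* still balanced after the nap? */
            long_sleep();
        /* else: return and reclaim again */
    }
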
3977 pg_data_t *pgdat = (pg_data_t*)p; in kswapd() local
3979 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kswapd()
3999 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
4000 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
4004 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
4005 highest_zoneidx = kswapd_highest_zoneidx(pgdat, in kswapd()
4009 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, in kswapd()
4013 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
4014 highest_zoneidx = kswapd_highest_zoneidx(pgdat, in kswapd()
4016 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
4017 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
4038 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
4040 reclaim_order = balance_pgdat(pgdat, alloc_order, in kswapd()
4042 trace_android_vh_vmscan_kswapd_done(pgdat->node_id, highest_zoneidx, in kswapd()
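
Lines 3999-4042 are kswapd()'s steady state: publish the reset sentinels, read the posted order and zone index, sleep via kswapd_try_to_sleep(), re-read the request that caused the wakeup, and run balance_pgdat(). A stripped control-flow sketch in which every helper is a placeholder for its kernel namesake:

    /* Control-flow sketch of the kswapd() main loop. */
    static int  read_kswapd_order(void)        { return 0; }
    static void reset_kswapd_request(void)     {}  /* order = 0, idx = sentinel */
    static void try_to_sleep(void)             {}  /* kswapd_try_to_sleep()     */
    static int  balance(int order)             { return order; }
    static int  should_stop(void)              { return 1; } /* kthread_should_stop() */

    static void kswapd_loop_model(void)
    {
        reset_kswapd_request();
        for (;;) {
            int alloc_order = read_kswapd_order();
            try_to_sleep();                       /* sleep until woken     */
            alloc_order = read_kswapd_order();    /* re-read live request  */
            reset_kswapd_request();
            if (should_stop())
                break;
            (void)balance(alloc_order);           /* balance_pgdat()       */
        }
    }
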
4055 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_per_node_run() local
4060 pgdat->mkswapd[hid] = kthread_run(kswapd, pgdat, "kswapd%d:%d", in kswapd_per_node_run()
4062 if (IS_ERR(pgdat->mkswapd[hid])) { in kswapd_per_node_run()
4067 ret = PTR_ERR(pgdat->mkswapd[hid]); in kswapd_per_node_run()
4068 pgdat->mkswapd[hid] = NULL; in kswapd_per_node_run()
4071 if (!pgdat->kswapd) in kswapd_per_node_run()
4072 pgdat->kswapd = pgdat->mkswapd[hid]; in kswapd_per_node_run()
4103 pg_data_t *pgdat; in wakeup_kswapd() local
4112 pgdat = zone->zone_pgdat; in wakeup_kswapd()
4113 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
4116 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
4118 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
4119 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
4121 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
4125 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
4126 (pgdat_balanced(pgdat, order, highest_zoneidx) && in wakeup_kswapd()
4127 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { in wakeup_kswapd()
4136 wakeup_kcompactd(pgdat, order, highest_zoneidx); in wakeup_kswapd()
4140 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
4142 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
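
wakeup_kswapd() (4103-4142) publishes the request by taking the max of the posted zone index and order (4113-4119), then wakes kswapd only if it is actually asleep and the node is neither written off (kswapd_failures) nor already balanced without a watermark boost; in the balanced case the order may go straight to kcompactd instead (4136). A decision-path sketch, with each flag standing in for the corresponding kernel check:

    #include <stdbool.h>

    /* Sketch of wakeup_kswapd()'s gatekeeping. */
    static bool should_wake_kswapd(bool waiter_present, bool failures_exceeded,
                                   bool balanced, bool watermark_boosted)
    {
        if (!waiter_present)                 /* kswapd already running  */
            return false;
        if (failures_exceeded)               /* node written off        */
            return false;
        if (balanced && !watermark_boosted)  /* nothing to do; possibly */
            return false;                    /* wake kcompactd instead  */
        return true;
    }
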
4190 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_run() local
4193 if (pgdat->kswapd) in kswapd_run()
4199 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); in kswapd_run()
4200 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
4204 ret = PTR_ERR(pgdat->kswapd); in kswapd_run()
4205 pgdat->kswapd = NULL; in kswapd_run()
4277 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) in node_unmapped_file_pages() argument
4279 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); in node_unmapped_file_pages()
4280 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + in node_unmapped_file_pages()
4281 node_page_state(pgdat, NR_ACTIVE_FILE); in node_unmapped_file_pages()
4292 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) in node_pagecache_reclaimable() argument
4304 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); in node_pagecache_reclaimable()
4306 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); in node_pagecache_reclaimable()
4310 delta += node_page_state(pgdat, NR_FILE_DIRTY); in node_pagecache_reclaimable()
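
node_unmapped_file_pages() and node_pagecache_reclaimable() (4277-4310) are pure arithmetic over the node counters: start from unmapped file-LRU pages (or all file pages when unmapping is allowed), then discount dirty pages when writeback is not, clamping against underflow. A direct model; the inputs stand in for the node_page_state() counters and the node_reclaim_mode bits:

    #include <stdbool.h>

    static unsigned long unmapped_file_pages(unsigned long file_lru,
                                             unsigned long file_mapped)
    {
        /* tmpfs pages can make file_mapped exceed the file LRU size */
        return file_lru > file_mapped ? file_lru - file_mapped : 0;
    }

    static unsigned long pagecache_reclaimable(unsigned long file_pages,
                                               unsigned long file_lru,
                                               unsigned long file_mapped,
                                               unsigned long file_dirty,
                                               bool reclaim_unmap,
                                               bool reclaim_write)
    {
        unsigned long n = reclaim_unmap ?
                file_pages : unmapped_file_pages(file_lru, file_mapped);
        unsigned long delta = reclaim_write ? 0 : file_dirty;

        if (delta > n)          /* guard the subtraction against underflow */
            delta = n;
        return n - delta;
    }
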
4322 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in __node_reclaim() argument
4339 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
4353 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { in __node_reclaim()
4359 shrink_node(pgdat, &sc); in __node_reclaim()
4373 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in node_reclaim() argument
4387 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
4388 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= in node_reclaim()
4389 pgdat->min_slab_pages) in node_reclaim()
4404 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
4407 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
4410 ret = __node_reclaim(pgdat, gfp_mask, order); in node_reclaim()
4411 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
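
node_reclaim() (4373-4411) uses test_and_set_bit() on PGDAT_RECLAIM_LOCKED as a per-node trylock, so only one task reclaims a node at a time once the cheap counter checks at 4387-4389 have ruled out hopeless nodes. A C11-atomics sketch of that trylock-around-work pattern:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag reclaim_locked = ATOMIC_FLAG_INIT; /* PGDAT_RECLAIM_LOCKED */

    /* Sketch: at most one reclaimer per node; contenders back off
     * instead of waiting, mirroring test_and_set_bit()/clear_bit(). */
    static bool node_reclaim_model(void (*do_reclaim)(void))
    {
        if (atomic_flag_test_and_set(&reclaim_locked))
            return false;        /* someone else is already reclaiming */
        do_reclaim();            /* __node_reclaim() analogue */
        atomic_flag_clear(&reclaim_locked);
        return true;
    }
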
4432 struct pglist_data *pgdat = NULL; in check_move_unevictable_pages() local
4448 if (pagepgdat != pgdat) { in check_move_unevictable_pages()
4449 if (pgdat) in check_move_unevictable_pages()
4450 spin_unlock_irq(&pgdat->lru_lock); in check_move_unevictable_pages()
4451 pgdat = pagepgdat; in check_move_unevictable_pages()
4452 spin_lock_irq(&pgdat->lru_lock); in check_move_unevictable_pages()
4454 lruvec = mem_cgroup_page_lruvec(page, pgdat); in check_move_unevictable_pages()
4470 if (pgdat) { in check_move_unevictable_pages()
4473 spin_unlock_irq(&pgdat->lru_lock); in check_move_unevictable_pages()
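
check_move_unevictable_pages() (4432-4473) batches pgdat->lru_lock across consecutive pages: the lock is dropped and retaken only when the next page belongs to a different node (4448-4452), and released once at the end (4470-4473). A sketch of that switch-lock-on-owner-change loop; node_of() and the lock table are hypothetical:

    #include <pthread.h>

    #define NR_NODES 2
    static pthread_mutex_t node_lock[NR_NODES] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };
    static int node_of(int page) { return page & 1; } /* placeholder mapping */

    /* Sketch: hold one node's lock across a run of same-node pages,
     * switching locks only when the owning node changes. */
    static void process_batch(const int *pages, int n)
    {
        int locked = -1;        /* models the NULL pgdat pointer */

        for (int i = 0; i < n; i++) {
            int node = node_of(pages[i]);
            if (node != locked) {
                if (locked >= 0)
                    pthread_mutex_unlock(&node_lock[locked]);
                locked = node;
                pthread_mutex_lock(&node_lock[locked]);
            }
            /* per-page work under the correct node's lock */
        }
        if (locked >= 0)
            pthread_mutex_unlock(&node_lock[locked]);
    }
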