Lines matching refs:sc (references to the struct scan_control variable sc)
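
For orientation, the sketch below reconstructs the scan_control structure that every sc reference in this listing passes around. Only field names that actually appear in the matches (sc->nr_to_reclaim, sc->gfp_mask, sc->priority, sc->nr.dirty, and so on) are included; the types, field order, and comments are assumptions for illustration, not a verbatim copy of the definition in the kernel source.

#include <linux/types.h>	/* gfp_t */
#include <linux/nodemask.h>	/* nodemask_t */
#include <linux/mmzone.h>	/* enum zone_type */
#include <linux/swap.h>		/* struct reclaim_state */

struct mem_cgroup;		/* opaque here; only a pointer is stored */

/*
 * Sketch of struct scan_control, reconstructed from the sc-> usages listed
 * below. Types and layout are assumptions; only fields visible in the
 * matches are shown.
 */
struct scan_control {
	unsigned long nr_to_reclaim;		/* reclaim target (do_try_to_free_pages) */
	gfp_t gfp_mask;				/* drives the __GFP_IO / __GFP_FS checks */
	int order;				/* allocation order, used by compaction checks */
	nodemask_t *nodemask;			/* allowed nodes for the zonelist walk */
	struct mem_cgroup *target_mem_cgroup;	/* NULL means global reclaim */
	int priority;				/* DEF_PRIORITY .. 0, scan aggressiveness */
	enum zone_type reclaim_idx;		/* highest zone index to reclaim from */

	unsigned int may_writepage:1;
	unsigned int may_unmap:1;
	unsigned int may_swap:1;
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;
	unsigned int hibernation_mode:1;
	unsigned int compaction_ready:1;

	unsigned long nr_scanned;		/* pages scanned so far */
	unsigned long nr_reclaimed;		/* pages reclaimed so far */

	struct {				/* per-pass stats from shrink_inactive_list() */
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;

	struct reclaim_state reclaim_state;	/* hooked up via set_task_reclaim_state() */
};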

242 static bool global_reclaim(struct scan_control *sc) in global_reclaim() argument
244 return !sc->target_mem_cgroup; in global_reclaim()
260 static bool sane_reclaim(struct scan_control *sc) in sane_reclaim() argument
262 struct mem_cgroup *memcg = sc->target_mem_cgroup; in sane_reclaim()
305 static bool global_reclaim(struct scan_control *sc) in global_reclaim() argument
310 static bool sane_reclaim(struct scan_control *sc) in sane_reclaim() argument
612 struct shrink_control sc = { in shrink_slab_memcg() local
631 ret = do_shrink_slab(&sc, shrinker, priority); in shrink_slab_memcg()
650 ret = do_shrink_slab(&sc, shrinker, priority); in shrink_slab_memcg()
716 struct shrink_control sc = { in shrink_slab() local
722 ret = do_shrink_slab(&sc, shrinker, priority); in shrink_slab()
778 static int may_write_to_inode(struct inode *inode, struct scan_control *sc) in may_write_to_inode() argument
827 struct scan_control *sc) in pageout() argument
863 if (!may_write_to_inode(mapping->host, sc)) in pageout()
1032 struct scan_control *sc) in page_check_references() argument
1037 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, in page_check_references()
1121 struct scan_control *sc, in shrink_page_list() argument
1155 sc->nr_scanned += nr_pages; in shrink_page_list()
1160 if (!sc->may_unmap && page_mapped(page)) in shrink_page_list()
1163 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
1164 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
1242 } else if (sane_reclaim(sc) || in shrink_page_list()
1270 references = page_check_references(page, sc); in shrink_page_list()
1290 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
1339 sc->nr_scanned -= (nr_pages - 1); in shrink_page_list()
1388 if (!sc->may_writepage) in shrink_page_list()
1397 switch (pageout(page, mapping, sc)) { in shrink_page_list()
1444 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list()
1502 sc->nr_scanned -= (nr_pages - 1); in shrink_page_list()
1539 struct scan_control sc = { in reclaim_clean_pages_from_list() local
1557 ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
1685 unsigned long *nr_scanned, struct scan_control *sc, in isolate_lru_pages() argument
1695 isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED); in isolate_lru_pages()
1710 if (page_zonenum(page) > sc->reclaim_idx) { in isolate_lru_pages()
1764 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_pages()
1829 struct scan_control *sc) in too_many_isolated() argument
1836 if (!sane_reclaim(sc)) in too_many_isolated()
1852 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in too_many_isolated()
1949 struct scan_control *sc, enum lru_list lru) in shrink_inactive_list() argument
1962 while (unlikely(too_many_isolated(pgdat, file, sc))) { in shrink_inactive_list()
1980 &nr_scanned, sc, lru); in shrink_inactive_list()
1986 if (global_reclaim(sc)) in shrink_inactive_list()
1994 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0, in shrink_inactive_list()
2000 if (global_reclaim(sc)) in shrink_inactive_list()
2029 sc->nr.dirty += stat.nr_dirty; in shrink_inactive_list()
2030 sc->nr.congested += stat.nr_congested; in shrink_inactive_list()
2031 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in shrink_inactive_list()
2032 sc->nr.writeback += stat.nr_writeback; in shrink_inactive_list()
2033 sc->nr.immediate += stat.nr_immediate; in shrink_inactive_list()
2034 sc->nr.taken += nr_taken; in shrink_inactive_list()
2036 sc->nr.file_taken += nr_taken; in shrink_inactive_list()
2039 nr_scanned, nr_reclaimed, &stat, sc->priority, file); in shrink_inactive_list()
2045 struct scan_control *sc, in shrink_active_list() argument
2066 &nr_scanned, sc, lru); in shrink_active_list()
2094 if (page_referenced(page, 0, sc->target_mem_cgroup, in shrink_active_list()
2143 nr_deactivate, nr_rotated, sc->priority, file); in shrink_active_list()
2153 struct scan_control sc = { in reclaim_pages() local
2176 &sc, 0, in reclaim_pages()
2190 &sc, 0, in reclaim_pages()
2231 struct scan_control *sc, bool trace) in inactive_list_is_low() argument
2248 inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx); in inactive_list_is_low()
2249 active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx); in inactive_list_is_low()
2268 trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx, in inactive_list_is_low()
2277 struct lruvec *lruvec, struct scan_control *sc) in shrink_list() argument
2280 if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true)) in shrink_list()
2281 shrink_active_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
2285 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
2305 struct scan_control *sc, unsigned long *nr, in get_scan_count() argument
2320 if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) { in get_scan_count()
2332 if (!global_reclaim(sc) && !swappiness) { in get_scan_count()
2342 if (!sc->priority && swappiness) { in get_scan_count()
2356 if (global_reclaim(sc)) { in get_scan_count()
2380 if (!inactive_list_is_low(lruvec, false, sc, false) && in get_scan_count()
2381 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx) in get_scan_count()
2382 >> sc->priority) { in get_scan_count()
2398 if (!inactive_list_is_low(lruvec, true, sc, false) && in get_scan_count()
2399 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { in get_scan_count()
2464 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
2466 sc->memcg_low_reclaim); in get_scan_count()
2516 scan >>= sc->priority; in get_scan_count()
2561 struct scan_control *sc, unsigned long *lru_pages) in shrink_node_memcg() argument
2569 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_node_memcg()
2573 get_scan_count(lruvec, memcg, sc, nr, lru_pages); in shrink_node_memcg()
2589 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && in shrink_node_memcg()
2590 sc->priority == DEF_PRIORITY); in shrink_node_memcg()
2604 lruvec, sc); in shrink_node_memcg()
2665 sc->nr_reclaimed += nr_reclaimed; in shrink_node_memcg()
2671 if (inactive_list_is_low(lruvec, false, sc, true)) in shrink_node_memcg()
2673 sc, LRU_ACTIVE_ANON); in shrink_node_memcg()
2677 static bool in_reclaim_compaction(struct scan_control *sc) in in_reclaim_compaction() argument
2679 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in in_reclaim_compaction()
2680 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
2681 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
2696 struct scan_control *sc) in should_continue_reclaim() argument
2703 if (!in_reclaim_compaction(sc)) in should_continue_reclaim()
2720 for (z = 0; z <= sc->reclaim_idx; z++) { in should_continue_reclaim()
2725 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { in should_continue_reclaim()
2739 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
2753 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) in shrink_node() argument
2760 struct mem_cgroup *root = sc->target_mem_cgroup; in shrink_node()
2764 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
2766 nr_reclaimed = sc->nr_reclaimed; in shrink_node()
2767 nr_scanned = sc->nr_scanned; in shrink_node()
2789 if (!sc->memcg_low_reclaim) { in shrink_node()
2790 sc->memcg_low_skipped = 1; in shrink_node()
2806 reclaimed = sc->nr_reclaimed; in shrink_node()
2807 scanned = sc->nr_scanned; in shrink_node()
2808 shrink_node_memcg(pgdat, memcg, sc, &lru_pages); in shrink_node()
2811 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node()
2812 sc->priority); in shrink_node()
2815 vmpressure(sc->gfp_mask, memcg, false, in shrink_node()
2816 sc->nr_scanned - scanned, in shrink_node()
2817 sc->nr_reclaimed - reclaimed); in shrink_node()
2822 sc->nr_reclaimed += reclaim_state->reclaimed_slab; in shrink_node()
2827 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, in shrink_node()
2828 sc->nr_scanned - nr_scanned, in shrink_node()
2829 sc->nr_reclaimed - nr_reclaimed); in shrink_node()
2831 if (sc->nr_reclaimed - nr_reclaimed) in shrink_node()
2852 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) in shrink_node()
2860 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) in shrink_node()
2864 if (sc->nr.unqueued_dirty == sc->nr.file_taken) in shrink_node()
2873 if (sc->nr.immediate) in shrink_node()
2881 if (!global_reclaim(sc) && sane_reclaim(sc) && in shrink_node()
2882 sc->nr.dirty && sc->nr.dirty == sc->nr.congested) in shrink_node()
2891 if (!sc->hibernation_mode && !current_is_kswapd() && in shrink_node()
2895 } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, in shrink_node()
2896 sc)); in shrink_node()
2915 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) in compaction_ready() argument
2920 suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); in compaction_ready()
2937 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
2939 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); in compaction_ready()
2950 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) in shrink_zones() argument
2964 orig_mask = sc->gfp_mask; in shrink_zones()
2966 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
2967 sc->reclaim_idx = gfp_zone(sc->gfp_mask); in shrink_zones()
2971 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
2976 if (global_reclaim(sc)) { in shrink_zones()
2991 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
2992 compaction_ready(zone, sc)) { in shrink_zones()
2993 sc->compaction_ready = true; in shrink_zones()
3014 sc->order, sc->gfp_mask, in shrink_zones()
3016 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
3017 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
3025 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
3032 sc->gfp_mask = orig_mask; in shrink_zones()
3067 struct scan_control *sc) in do_try_to_free_pages() argument
3069 int initial_priority = sc->priority; in do_try_to_free_pages()
3076 if (global_reclaim(sc)) in do_try_to_free_pages()
3077 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); in do_try_to_free_pages()
3080 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
3081 sc->priority); in do_try_to_free_pages()
3082 sc->nr_scanned = 0; in do_try_to_free_pages()
3083 shrink_zones(zonelist, sc); in do_try_to_free_pages()
3085 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
3088 if (sc->compaction_ready) in do_try_to_free_pages()
3095 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
3096 sc->may_writepage = 1; in do_try_to_free_pages()
3097 } while (--sc->priority >= 0); in do_try_to_free_pages()
3100 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
3101 sc->nodemask) { in do_try_to_free_pages()
3105 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
3106 set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false); in do_try_to_free_pages()
3111 if (sc->nr_reclaimed) in do_try_to_free_pages()
3112 return sc->nr_reclaimed; in do_try_to_free_pages()
3115 if (sc->compaction_ready) in do_try_to_free_pages()
3119 if (sc->memcg_low_skipped) { in do_try_to_free_pages()
3120 sc->priority = initial_priority; in do_try_to_free_pages()
3121 sc->memcg_low_reclaim = 1; in do_try_to_free_pages()
3122 sc->memcg_low_skipped = 0; in do_try_to_free_pages()
3265 struct scan_control sc = { in try_to_free_pages() local
3290 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) in try_to_free_pages()
3293 set_task_reclaim_state(current, &sc.reclaim_state); in try_to_free_pages()
3294 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); in try_to_free_pages()
3296 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in try_to_free_pages()
3312 struct scan_control sc = { in mem_cgroup_shrink_node() local
3324 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | in mem_cgroup_shrink_node()
3327 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, in mem_cgroup_shrink_node()
3328 sc.gfp_mask); in mem_cgroup_shrink_node()
3337 shrink_node_memcg(pgdat, memcg, &sc, &lru_pages); in mem_cgroup_shrink_node()
3339 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); in mem_cgroup_shrink_node()
3341 *nr_scanned = sc.nr_scanned; in mem_cgroup_shrink_node()
3343 return sc.nr_reclaimed; in mem_cgroup_shrink_node()
3356 struct scan_control sc = { in try_to_free_mem_cgroup_pages() local
3368 set_task_reclaim_state(current, &sc.reclaim_state); in try_to_free_mem_cgroup_pages()
3378 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); in try_to_free_mem_cgroup_pages()
3383 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in try_to_free_mem_cgroup_pages()
3396 struct scan_control *sc) in age_active_anon() argument
3407 if (inactive_list_is_low(lruvec, false, sc, true)) in age_active_anon()
3409 sc, LRU_ACTIVE_ANON); in age_active_anon()
3528 struct scan_control *sc) in kswapd_shrink_node() argument
3534 sc->nr_to_reclaim = 0; in kswapd_shrink_node()
3535 for (z = 0; z <= sc->reclaim_idx; z++) { in kswapd_shrink_node()
3540 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
3547 shrink_node(pgdat, sc); in kswapd_shrink_node()
3556 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) in kswapd_shrink_node()
3557 sc->order = 0; in kswapd_shrink_node()
3559 return sc->nr_scanned >= sc->nr_to_reclaim; in kswapd_shrink_node()
3585 struct scan_control sc = { in balance_pgdat() local
3591 set_task_reclaim_state(current, &sc.reclaim_state); in balance_pgdat()
3614 sc.priority = DEF_PRIORITY; in balance_pgdat()
3616 unsigned long nr_reclaimed = sc.nr_reclaimed; in balance_pgdat()
3621 sc.reclaim_idx = classzone_idx; in balance_pgdat()
3639 sc.reclaim_idx = i; in balance_pgdat()
3651 balanced = pgdat_balanced(pgdat, sc.order, classzone_idx); in balance_pgdat()
3666 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) in balance_pgdat()
3675 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; in balance_pgdat()
3676 sc.may_swap = !nr_boost_reclaim; in balance_pgdat()
3684 age_active_anon(pgdat, &sc); in balance_pgdat()
3690 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
3691 sc.may_writepage = 1; in balance_pgdat()
3694 sc.nr_scanned = 0; in balance_pgdat()
3696 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, in balance_pgdat()
3697 sc.gfp_mask, &nr_soft_scanned); in balance_pgdat()
3698 sc.nr_reclaimed += nr_soft_reclaimed; in balance_pgdat()
3705 if (kswapd_shrink_node(pgdat, &sc)) in balance_pgdat()
3728 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; in balance_pgdat()
3740 sc.priority--; in balance_pgdat()
3741 } while (sc.priority >= 1); in balance_pgdat()
3743 if (!sc.nr_reclaimed) in balance_pgdat()
3780 return sc.order; in balance_pgdat()
4026 struct scan_control sc = { in shrink_all_memory() local
4036 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); in shrink_all_memory()
4040 fs_reclaim_acquire(sc.gfp_mask); in shrink_all_memory()
4042 set_task_reclaim_state(current, &sc.reclaim_state); in shrink_all_memory()
4044 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in shrink_all_memory()
4048 fs_reclaim_release(sc.gfp_mask); in shrink_all_memory()
4212 struct scan_control sc = { in __node_reclaim() local
4224 sc.gfp_mask); in __node_reclaim()
4227 fs_reclaim_acquire(sc.gfp_mask); in __node_reclaim()
4235 set_task_reclaim_state(p, &sc.reclaim_state); in __node_reclaim()
4243 shrink_node(pgdat, &sc); in __node_reclaim()
4244 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); in __node_reclaim()
4250 fs_reclaim_release(sc.gfp_mask); in __node_reclaim()
4252 trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed); in __node_reclaim()
4254 return sc.nr_reclaimed >= nr_pages; in __node_reclaim()
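
The do_try_to_free_pages() entries above trace the core direct-reclaim driver: a loop that lowers sc->priority, calls shrink_zones(), and stops early once sc->nr_to_reclaim pages have been reclaimed or compaction is ready. The sketch below reassembles that loop from the listed lines only; bookkeeping that does not show up in the matches (the refault snapshot walk, the restart label) is reduced to comments, so treat it as an illustration rather than the verbatim kernel function.

/*
 * Simplified reconstruction of do_try_to_free_pages() from the matches
 * above. Helpers and error handling not visible in this listing are
 * elided or stubbed with comments.
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					  struct scan_control *sc)
{
	int initial_priority = sc->priority;

	if (global_reclaim(sc))
		__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);

	do {
		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
				sc->priority);
		sc->nr_scanned = 0;
		shrink_zones(zonelist, sc);

		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			break;

		if (sc->compaction_ready)
			break;

		/*
		 * Once priority drops far enough, allow writing dirty pages
		 * directly instead of relying on the flusher threads.
		 */
		if (sc->priority < DEF_PRIORITY - 2)
			sc->may_writepage = 1;
	} while (--sc->priority >= 0);

	/* snapshot_refaults() / set_memcg_congestion() zonelist walk elided */

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/* Aborted in favour of compaction: report token progress. */
	if (sc->compaction_ready)
		return 1;

	/* Retry from the top if cgroup low protection was skipped. */
	if (sc->memcg_low_skipped) {
		sc->priority = initial_priority;
		sc->memcg_low_reclaim = 1;
		sc->memcg_low_skipped = 0;
		/* the real function restarts the priority loop here */
	}

	return 0;
}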