Searched refs:lruvec (Results 1 – 21 of 21) sorted by relevance

/kernel/linux/linux-5.10/mm/
workingset.c
227 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages) in workingset_age_nonresident() argument
241 atomic_long_add(nr_pages, &lruvec->nonresident_age); in workingset_age_nonresident()
242 } while ((lruvec = parent_lruvec(lruvec))); in workingset_age_nonresident()
257 struct lruvec *lruvec; in workingset_eviction() local
265 lruvec = mem_cgroup_lruvec(target_memcg, pgdat); in workingset_eviction()
268 lruvec = node_lruvec(pgdat); in workingset_eviction()
269 workingset_age_nonresident(lruvec, thp_nr_pages(page)); in workingset_eviction()
271 workingset_age_nonresident(lruvec, thp_nr_pages(page)); in workingset_eviction()
274 workingset_age_nonresident(lruvec, thp_nr_pages(page)); in workingset_eviction()
277 memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); in workingset_eviction()
[all …]
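The two loop lines above (241/242) outline the hierarchy walk; reassembled, the function looks roughly like this (a reconstruction from the visible fragments, not a verified copy of this tree):

void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/* Charge the aging to this lruvec and every ancestor lruvec, so
	 * eviction pressure is visible at each level of the memcg hierarchy. */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}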
swap.c
83 struct lruvec *lruvec; in __page_cache_release() local
87 lruvec = mem_cgroup_page_lruvec(page, pgdat); in __page_cache_release()
90 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
207 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), in pagevec_lru_move_fn() argument
212 struct lruvec *lruvec; in pagevec_lru_move_fn() local
226 lruvec = mem_cgroup_page_lruvec(page, pgdat); in pagevec_lru_move_fn()
227 (*move_fn)(page, lruvec, arg); in pagevec_lru_move_fn()
235 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, in pagevec_move_tail_fn() argument
241 del_page_from_lru_list(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
243 add_page_to_lru_list_tail(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
[all …]
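The pagevec_move_tail_fn() hits (235/241/243) show the per-page callback handed to pagevec_lru_move_fn(): delete the page from its current LRU list, then re-add it at the tail of the same lruvec list. A sketch of that callback, with the PageLRU/PageUnevictable guards assumed from the usual 5.10 pattern:

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	/* Only rotate pages that are actually on an evictable LRU list. */
	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved) += thp_nr_pages(page);
	}
}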
memcg_control.c
164 static unsigned long move_pages_to_page_list(struct lruvec *lruvec, enum lru_list lru, in move_pages_to_page_list() argument
167 struct list_head *src = &lruvec->lists[lru]; in move_pages_to_page_list()
209 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in reclaim_all_anon_memcg() local
225 move_pages_to_page_list(lruvec, LRU_INACTIVE_ANON, &page_list); in reclaim_all_anon_memcg()
288 struct lruvec *lruvec = NULL; in memcg_total_info_per_app_show() local
301 lruvec = &mz->lruvec; in memcg_total_info_per_app_show()
302 if (!lruvec) { in memcg_total_info_per_app_show()
307 anon_size = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + in memcg_total_info_per_app_show()
308 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES); in memcg_total_info_per_app_show()
365 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata); in purgeable_memcg_node() local
[all …]
memcg_reclaim.c
36 struct lruvec *lruvec = node_lruvec(pgdat); in get_scan_count_hyperhold() local
84 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON) && in get_scan_count_hyperhold()
85 (lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, in get_scan_count_hyperhold()
105 !inactive_is_low(lruvec, LRU_INACTIVE_FILE) && in get_scan_count_hyperhold()
106 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) { in get_scan_count_hyperhold()
150 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count_hyperhold()
190 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in shrink_anon_memcg() local
205 lruvec, sc); in shrink_anon_memcg()
247 struct lruvec *lruvec = NULL; in shrink_anon() local
252 lruvec = mem_cgroup_lruvec(memcg, pgdat); in shrink_anon()
[all …]
vmscan.c
226 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) in lruvec_lru_size() argument
232 if (!mem_cgroup_disabled() && is_node_lruvec(lruvec)) { in lruvec_lru_size()
234 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
246 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
252 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); in lruvec_lru_size()
1560 static __always_inline void update_lru_sizes(struct lruvec *lruvec, in update_lru_sizes() argument
1569 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
1594 struct lruvec *lruvec, struct list_head *dst, in isolate_lru_pages() argument
1598 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_pages()
1676 update_lru_sizes(lruvec, lru, nr_zone_taken); in isolate_lru_pages()
[all …]
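The lruvec_lru_size() hits suggest a per-zone summation with two paths: the node-embedded lruvec reads zone counters directly, while a memcg-owned lruvec uses mem_cgroup_get_zone_lru_size(). An illustrative sketch only; the exact branch conditions in this tree are not fully visible in the hits:

unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled() && !is_node_lruvec(lruvec))
			/* per-memcg, per-zone LRU size */
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			/* node-wide zone counter */
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}
	return size;
}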
zswapd.c
258 struct lruvec *lruvec = NULL; in get_memcg_anon_refault_status() local
271 lruvec = &mz->lruvec; in get_memcg_anon_refault_status()
272 if (!lruvec) in get_memcg_anon_refault_status()
275 anon_total = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + in get_memcg_anon_refault_status()
276 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES) + in get_memcg_anon_refault_status()
480 struct lruvec *lruvec, struct scan_control *sc, enum lru_list lru) in zswapd_shrink_active_list() argument
487 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in zswapd_shrink_active_list()
489 unsigned long *anon_cost = &lruvec->anon_cost; in zswapd_shrink_active_list()
496 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, &nr_scanned, sc, lru); in zswapd_shrink_active_list()
501 count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); in zswapd_shrink_active_list()
[all …]
mmzone.c
75 void lruvec_init(struct lruvec *lruvec) in lruvec_init() argument
79 memset(lruvec, 0, sizeof(struct lruvec)); in lruvec_init()
82 INIT_LIST_HEAD(&lruvec->lists[lru]); in lruvec_init()
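The three hits above cover essentially the whole function; filled in, lruvec_init() zeroes the structure and initialises every LRU list head (for_each_lru() is the standard kernel iterator over enum lru_list):

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	/* Zero counters, then give each LRU list an empty head. */
	memset(lruvec, 0, sizeof(struct lruvec));

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}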
zswapd_control.c
485 struct lruvec *lruvec = NULL; in memcg_active_app_info_list_show() local
499 lruvec = &mz->lruvec; in memcg_active_app_info_list_show()
500 if (!lruvec) { in memcg_active_app_info_list_show()
505 anon_size = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, in memcg_active_app_info_list_show()
506 MAX_NR_ZONES) + lruvec_lru_size(lruvec, in memcg_active_app_info_list_show()
684 struct lruvec *lruvec = NULL; in memcg_eswap_info_show() local
694 lruvec = &mz->lruvec; in memcg_eswap_info_show()
695 if (!lruvec) in memcg_eswap_info_show()
698 anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) + in memcg_eswap_info_show()
699 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES); in memcg_eswap_info_show()
[all …]
internal.h
237 extern unsigned long isolate_lru_pages(unsigned long nr_to_scan, struct lruvec *lruvec,
240 extern unsigned move_pages_to_lru(struct lruvec *lruvec, struct list_head *list);
241 extern void shrink_active_list(unsigned long nr_to_scan, struct lruvec *lruvec,
243 extern unsigned long shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
245 extern void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc);
memcontrol.c
672 struct lruvec *lruvec = &mz->lruvec; in soft_limit_excess() local
673 unsigned long nr_pages = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, in soft_limit_excess()
674 MAX_NR_ZONES) + lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, in soft_limit_excess()
818 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, in __mod_memcg_lruvec_state() argument
825 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); in __mod_memcg_lruvec_state()
839 pg_data_t *pgdat = lruvec_pgdat(lruvec); in __mod_memcg_lruvec_state()
859 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, in __mod_lruvec_state() argument
863 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); in __mod_lruvec_state()
868 if (is_node_lruvec(lruvec)) in __mod_lruvec_state()
871 __mod_memcg_lruvec_state(lruvec, idx, val); in __mod_lruvec_state()
[all …]
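The __mod_lruvec_state() hits (859-871) show the split accounting: every update hits the node counter, and only non-node (memcg-owned) lruvecs additionally update the per-memcg counters. A reconstructed shape; the early return between the two visible branches is assumed:

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val)
{
	/* Node-level accounting always happens. */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* A lruvec embedded in pg_data_t has no memcg side to update. */
	if (is_node_lruvec(lruvec))
		return;

	/* Memcg and per-memcg-per-node accounting. */
	__mod_memcg_lruvec_state(lruvec, idx, val);
}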
slab.h
308 struct lruvec *lruvec; in mod_objcg_state() local
312 lruvec = mem_cgroup_lruvec(memcg, pgdat); in mod_objcg_state()
313 mod_memcg_lruvec_state(lruvec, idx, nr); in mod_objcg_state()
mlock.c
115 struct lruvec *lruvec; in __munlock_isolate_lru_page() local
117 lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); in __munlock_isolate_lru_page()
121 del_page_from_lru_list(page, lruvec, page_lru(page)); in __munlock_isolate_lru_page()
huge_memory.c
2379 struct lruvec *lruvec, struct list_head *list) in __split_huge_page_tail() argument
2440 lru_add_page_tail(head, page_tail, lruvec, list); in __split_huge_page_tail()
2448 struct lruvec *lruvec; in __split_huge_page() local
2454 lruvec = mem_cgroup_page_lruvec(head, pgdat); in __split_huge_page()
2468 __split_huge_page_tail(head, i, lruvec, list); in __split_huge_page()
page-writeback.c
2721 struct lruvec *lruvec; in test_clear_page_writeback() local
2725 lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); in test_clear_page_writeback()
2753 dec_lruvec_state(lruvec, NR_WRITEBACK); in test_clear_page_writeback()
compaction.c
805 struct lruvec *lruvec; in isolate_migratepages_block() local
1001 lruvec = mem_cgroup_page_lruvec(page, pgdat); in isolate_migratepages_block()
1012 del_page_from_lru_list(page, lruvec, page_lru(page)); in isolate_migratepages_block()
migrate.c
495 struct lruvec *old_lruvec, *new_lruvec; in migrate_page_move_mapping()
/kernel/linux/linux-5.10/include/linux/
memcontrol.h
114 struct lruvec lruvec; member
479 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, in mem_cgroup_lruvec()
483 struct lruvec *lruvec; in mem_cgroup_lruvec() local
486 lruvec = &pgdat->__lruvec; in mem_cgroup_lruvec()
494 lruvec = &mz->lruvec; in mem_cgroup_lruvec()
501 if (unlikely(lruvec->pgdat != pgdat)) in mem_cgroup_lruvec()
502 lruvec->pgdat = pgdat; in mem_cgroup_lruvec()
503 return lruvec; in mem_cgroup_lruvec()
506 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
580 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) in lruvec_memcg() argument
[all …]
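mem_cgroup_lruvec() is the usual entry point for per-memcg reclaim code, and several of the .c hits above pair it with lruvec_lru_size(). A hypothetical caller using only names that appear in the hits (example_anon_lru_pages itself is illustrative, not from the tree):

static unsigned long example_anon_lru_pages(struct mem_cgroup *memcg,
					    struct pglist_data *pgdat)
{
	/* Falls back to the node's __lruvec when memcg is disabled. */
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

	return lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
	       lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
}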
mm_inline.h
28 static __always_inline void __update_lru_size(struct lruvec *lruvec, in __update_lru_size() argument
32 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in __update_lru_size()
34 __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); in __update_lru_size()
39 static __always_inline void update_lru_size(struct lruvec *lruvec, in update_lru_size() argument
43 __update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
45 mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
50 struct lruvec *lruvec, enum lru_list lru) in add_page_to_lru_list() argument
52 update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); in add_page_to_lru_list()
53 list_add(&page->lru, &lruvec->lists[lru]); in add_page_to_lru_list()
57 struct lruvec *lruvec, enum lru_list lru) in add_page_to_lru_list_tail() argument
[all …]
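add_page_to_lru_list() above updates the accounted LRU size before linking the page into the lruvec list; the deletion counterpart is not among the hits, but by symmetry, and matching the three-argument del_page_from_lru_list() calls seen in swap.c, mlock.c and compaction.c, it should look roughly like this sketch:

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
}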
mmzone.h
308 struct lruvec { struct
837 struct lruvec __lruvec;
860 static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) in node_lruvec()
900 extern void lruvec_init(struct lruvec *lruvec);
902 static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) in lruvec_pgdat() argument
905 return lruvec->pgdat; in lruvec_pgdat()
907 return container_of(lruvec, struct pglist_data, __lruvec); in lruvec_pgdat()
912 static inline int is_node_lruvec(struct lruvec *lruvec) in is_node_lruvec() argument
914 return &lruvec_pgdat(lruvec)->__lruvec == lruvec; in is_node_lruvec()
918 extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
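is_node_lruvec() (912-914 above) tells the two owners of a lruvec apart: the node's own __lruvec versus one embedded in a mem_cgroup_per_node. A hypothetical illustration of the test (example_describe_lruvec is not a kernel function):

static void example_describe_lruvec(struct lruvec *lruvec)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	if (is_node_lruvec(lruvec))
		pr_info("node %d lruvec (no memcg owner)\n", pgdat->node_id);
	else
		pr_info("memcg-owned lruvec on node %d\n", pgdat->node_id);
}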
swap.h
316 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
337 extern void lru_note_cost(struct lruvec *lruvec, bool file,
342 struct lruvec *lruvec, struct list_head *head);
386 struct lruvec *lruvec,
388 extern bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru);
/kernel/linux/patches/linux-5.10/unionpi_tiger_pacth/
linux-5.10.patch
25600 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata);
25602 shrink_list(LRU_ACTIVE_PURGEABLE, -1, lruvec, sc);