mm/
swap.c
     87  struct lruvec *lruvec;    in __page_cache_release() local
     90  lruvec = folio_lruvec_lock_irqsave(folio, &flags);    in __page_cache_release()
     91  lruvec_del_folio(lruvec, folio);    in __page_cache_release()
     93  unlock_page_lruvec_irqrestore(lruvec, flags);    in __page_cache_release()
    164  typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
    166  static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)    in lru_add_fn() argument
    202  lruvec_add_folio(lruvec, folio);    in lru_add_fn()
    209  struct lruvec *lruvec = NULL;    in folio_batch_move_lru() local
    219  lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);    in folio_batch_move_lru()
    220  move_fn(lruvec, folio);    in folio_batch_move_lru()
    [all …]
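The pattern visible above recurs throughout mm/: look up the folio's lruvec, take its lru_lock IRQ-safe, operate, unlock. folio_batch_move_lru() amortizes the locking with a relock primitive that only cycles the lock when consecutive folios belong to different lruvecs. A minimal sketch of that loop, assuming the folio-era APIs shown in the listing; the helper name is mine, not the kernel's:

#include <linux/pagevec.h>
#include <linux/mm_inline.h>
#include <linux/memcontrol.h>

typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);

/* Sketch: apply move_fn to every folio in the batch, reacquiring the
 * lruvec lock only when the next folio belongs to a different lruvec
 * (different memcg and/or NUMA node). */
static void batch_move_lru_sketch(struct folio_batch *fbatch, move_fn_t move_fn)
{
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
        int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                /* No-op if this folio's lruvec is the one already locked. */
                lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                move_fn(lruvec, folio);
                folio_set_lru(folio);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        folio_batch_reinit(fbatch);
}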
vmscan.c
    663  static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,    in lruvec_lru_size() argument
    670  struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];    in lruvec_lru_size()
    676  size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);    in lruvec_lru_size()
   2319  static __always_inline void update_lru_sizes(struct lruvec *lruvec,    in update_lru_sizes() argument
   2328  update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);    in update_lru_sizes()
   2374  struct lruvec *lruvec, struct list_head *dst,    in isolate_lru_folios() argument
   2378  struct list_head *src = &lruvec->lists[lru];    in isolate_lru_folios()
   2465  update_lru_sizes(lruvec, lru, nr_zone_taken);    in isolate_lru_folios()
   2499  struct lruvec *lruvec;    in folio_isolate_lru() local
   2502  lruvec = folio_lruvec_lock_irq(folio);    in folio_isolate_lru()
    [all …]
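lruvec_lru_size() answers "how many folios sit on this LRU list, counting only zones up to zone_idx". A hedged reconstruction from the lines above, assuming the usual memcg/vmstat helpers; when memcg is compiled out, the per-zone node counters are read directly:

#include <linux/memcontrol.h>
#include <linux/mmzone.h>

/* Sketch: sum one LRU list's size across the node's zones 0..zone_idx. */
static unsigned long lruvec_lru_size_sketch(struct lruvec *lruvec,
                                            enum lru_list lru, int zone_idx)
{
        unsigned long size = 0;
        int zid;

        for (zid = 0; zid <= zone_idx; zid++) {
                struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

                if (!managed_zone(zone))
                        continue;

                if (!mem_cgroup_disabled())
                        size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
                else
                        size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
        }
        return size;
}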
workingset.c
    237  struct lruvec *lruvec;    in lru_gen_eviction() local
    248  lruvec = mem_cgroup_lruvec(memcg, pgdat);    in lru_gen_eviction()
    249  lrugen = &lruvec->lrugen;    in lru_gen_eviction()
    263  static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,    in lru_gen_test_recent() argument
    274  *lruvec = mem_cgroup_lruvec(memcg, pgdat);    in lru_gen_test_recent()
    276  min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);    in lru_gen_test_recent()
    286  struct lruvec *lruvec;    in lru_gen_refault() local
    293  recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);    in lru_gen_refault()
    294  if (lruvec != folio_lruvec(folio))    in lru_gen_refault()
    297  mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);    in lru_gen_refault()
    [all …]
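On a refault, lru_gen_refault() rebuilds the lruvec the folio was evicted from (a memcg id and node are packed into the shadow entry) and asks whether the generation stamped at eviction is still within the tracked window; line 294 shows the stat is only charged when that lruvec matches the folio's current one. A simplified sketch of the recency test follows. decode_shadow() is a hypothetical stand-in for workingset.c's file-local unpacking, and the final comparison deliberately ignores the real code's bit-packing and sequence wraparound:

#include <linux/memcontrol.h>

/* Hypothetical decode of a shadow entry into its packed fields. */
static void decode_shadow(void *shadow, int *memcg_id,
                          struct pglist_data **pgdat, unsigned long *token);

/* Sketch: a refault is "recent" iff the generation recorded at eviction
 * has not yet aged out of lrugen's min_seq window. Caller holds
 * rcu_read_lock() so the memcg lookup is safe. */
static bool test_recent_sketch(void *shadow, bool file,
                               struct lruvec **lruvec, unsigned long *token)
{
        int memcg_id;
        struct pglist_data *pgdat;
        unsigned long min_seq;

        decode_shadow(shadow, &memcg_id, &pgdat, token);

        *lruvec = mem_cgroup_lruvec(mem_cgroup_from_id(memcg_id), pgdat);
        min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);

        return *token >= min_seq;
}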
mlock.c
     63  static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)    in __mlock_folio() argument
     67  return lruvec;    in __mlock_folio()
     69  lruvec = folio_lruvec_relock_irq(folio, lruvec);    in __mlock_folio()
     78  lruvec_del_folio(lruvec, folio);    in __mlock_folio()
     80  lruvec_add_folio(lruvec, folio);    in __mlock_folio()
     94  lruvec_del_folio(lruvec, folio);    in __mlock_folio()
     98  lruvec_add_folio(lruvec, folio);    in __mlock_folio()
    102  return lruvec;    in __mlock_folio()
    105  static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)    in __mlock_new_folio() argument
    109  lruvec = folio_lruvec_relock_irq(folio, lruvec);    in __mlock_new_folio()
    [all …]
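Both helpers above hand the locked lruvec back to their caller so a whole mlock folio_batch can be drained with few lock hand-offs. A condensed sketch of the move itself, assuming the relock API from the listing and omitting __mlock_folio()'s mlock-count bookkeeping:

#include <linux/mm_inline.h>
#include <linux/memcontrol.h>

/* Sketch: pull a folio off its evictable list and park it on
 * LRU_UNEVICTABLE, keeping the lruvec locked for the next folio. */
static struct lruvec *mlock_one_sketch(struct folio *folio,
                                       struct lruvec *lruvec)
{
        if (!folio_test_clear_lru(folio))       /* raced with isolation */
                return lruvec;

        lruvec = folio_lruvec_relock_irq(folio, lruvec);

        lruvec_del_folio(lruvec, folio);
        folio_set_unevictable(folio);
        lruvec_add_folio(lruvec, folio);        /* lands on LRU_UNEVICTABLE */

        folio_set_lru(folio);
        return lruvec;
}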
mmzone.c
     76  void lruvec_init(struct lruvec *lruvec)    in lruvec_init() argument
     80  memset(lruvec, 0, sizeof(struct lruvec));    in lruvec_init()
     81  spin_lock_init(&lruvec->lru_lock);    in lruvec_init()
     84  INIT_LIST_HEAD(&lruvec->lists[lru]);    in lruvec_init()
     91  list_del(&lruvec->lists[LRU_UNEVICTABLE]);    in lruvec_init()
     93  lru_gen_init_lruvec(lruvec);    in lruvec_init()
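lruvec_init() is small enough that the listing nearly reproduces it; the interesting detail is the list_del() at line 91. Mlocked folios are never walked via lruvec->lists[LRU_UNEVICTABLE], so its head is deliberately poisoned to make any accidental traversal crash loudly. A hedged reconstruction:

#include <linux/mm_inline.h>
#include <linux/mmzone.h>

void lruvec_init(struct lruvec *lruvec)
{
        enum lru_list lru;

        memset(lruvec, 0, sizeof(struct lruvec));
        spin_lock_init(&lruvec->lru_lock);

        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);

        /* list_del() leaves LIST_POISON1/2 behind, so any later list
         * operation on the unevictable head faults immediately. */
        list_del(&lruvec->lists[LRU_UNEVICTABLE]);

        lru_gen_init_lruvec(lruvec);
}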
memcontrol.c
    828  void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,    in __mod_memcg_lruvec_state() argument
    834  pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);    in __mod_memcg_lruvec_state()
    878  void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,    in __mod_lruvec_state() argument
    882  __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);    in __mod_lruvec_state()
    886  __mod_memcg_lruvec_state(lruvec, idx, val);    in __mod_lruvec_state()
    896  struct lruvec *lruvec;    in __mod_lruvec_page_state() local
    907  lruvec = mem_cgroup_lruvec(memcg, pgdat);    in __mod_lruvec_page_state()
    908  __mod_lruvec_state(lruvec, idx, val);    in __mod_lruvec_page_state()
    917  struct lruvec *lruvec;    in __mod_lruvec_kmem_state() local
    931  lruvec = mem_cgroup_lruvec(memcg, pgdat);    in __mod_lruvec_kmem_state()
    [all …]
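The split above is the heart of lruvec accounting: __mod_lruvec_state() always updates the node-level vmstat counter and additionally the memcg-level one when memcg is active, with container_of() recovering the mem_cgroup_per_node that embeds the lruvec. A sketch of a folio-based caller in the style of __mod_lruvec_page_state(); it assumes the folio's memcg binding is stable (folio locked or RCU held):

#include <linux/memcontrol.h>
#include <linux/mm.h>

/* Sketch: charge a vmstat delta for a folio at the node level and,
 * if the folio is charged to a memcg, at the memcg level too. */
static void mod_folio_lruvec_state_sketch(struct folio *folio,
                                          enum node_stat_item idx, int val)
{
        pg_data_t *pgdat = folio_pgdat(folio);
        struct mem_cgroup *memcg = folio_memcg(folio);

        if (!memcg)     /* uncharged: node counter only */
                __mod_node_page_state(pgdat, idx, val);
        else
                __mod_lruvec_state(mem_cgroup_lruvec(memcg, pgdat), idx, val);
}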
huge_memory.c
   2918  static bool lru_add_dst(struct lruvec *lruvec, struct folio *src, struct folio *dst)    in lru_add_dst() argument
   2927  if (!lru_gen_add_dst(lruvec, dst)) {    in lru_add_dst()
   2936  update_lru_size(lruvec, lru, zone, delta);    in lru_add_dst()
   2945  struct lruvec *lruvec, struct list_head *list)    in lru_add_page_tail() argument
   2950  lockdep_assert_held(&lruvec->lru_lock);    in lru_add_page_tail()
   2957  } else if (!lru_add_dst(lruvec, page_folio(head), page_folio(tail))) {    in lru_add_page_tail()
   2969  struct lruvec *lruvec, struct list_head *list)    in __split_huge_page_tail() argument
   3056  lru_add_page_tail(head, page_tail, lruvec, list);    in __split_huge_page_tail()
   3064  struct lruvec *lruvec;    in __split_huge_page() local
   3084  lruvec = folio_lruvec_lock(folio);    in __split_huge_page()
    [all …]
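When a huge page is split, every freshly minted tail must be made visible to reclaim under the lruvec lock that __split_huge_page() already holds (taken at line 3084). A sketch of the tail placement in the shape of lru_add_page_tail() above; the lru_gen_add_dst()/lru_add_dst() pair in this listing looks tree-specific rather than mainline, so it is left out here:

#include <linux/mm.h>
#include <linux/mm_inline.h>

/* Sketch: link a tail page either onto the caller's private list (reclaim
 * splitting an isolated huge page) or onto the LRU right after the head. */
static void add_tail_sketch(struct page *head, struct page *tail,
                            struct lruvec *lruvec, struct list_head *list)
{
        VM_BUG_ON_PAGE(PageLRU(tail), tail);
        lockdep_assert_held(&lruvec->lru_lock);

        if (list) {
                get_page(tail);                 /* caller drains the list */
                list_add_tail(&tail->lru, list);
        } else {
                SetPageLRU(tail);               /* same LRU list as the head */
                list_add_tail(&tail->lru, &head->lru);
        }
}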
compaction.c
    875  struct lruvec *lruvec;    in isolate_migratepages_block() local
    877  struct lruvec *locked = NULL;    in isolate_migratepages_block()
   1155  lruvec = folio_lruvec(folio);    in isolate_migratepages_block()
   1158  if (lruvec != locked) {    in isolate_migratepages_block()
   1162  compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);    in isolate_migratepages_block()
   1163  locked = lruvec;    in isolate_migratepages_block()
   1165  lruvec_memcg_debug(lruvec, folio);    in isolate_migratepages_block()
   1198  lruvec_del_folio(lruvec, folio);    in isolate_migratepages_block()
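isolate_migratepages_block() keeps a "locked" cursor so the lru_lock is only dropped and retaken when the scan crosses into a folio owned by a different lruvec. compact_lock_irqsave() is local to compaction.c (it adds abort-on-contention logic), so this sketch substitutes a plain spin_lock_irqsave():

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

/* Sketch: isolate one folio for migration, reusing the held lock when
 * the folio belongs to the lruvec that is already locked. */
static struct lruvec *isolate_one_sketch(struct folio *folio,
                                         struct lruvec *locked,
                                         unsigned long *flags)
{
        struct lruvec *lruvec = folio_lruvec(folio);

        if (lruvec != locked) {
                if (locked)
                        unlock_page_lruvec_irqrestore(locked, *flags);
                spin_lock_irqsave(&lruvec->lru_lock, *flags);
                locked = lruvec;
        }

        lruvec_del_folio(lruvec, folio);        /* folio is now isolated */
        return locked;
}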
migrate.c
    507  struct lruvec *old_lruvec, *new_lruvec;    in folio_migrate_mapping()
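folio_migrate_mapping() needs two lruvecs because a cross-node migration must move the folio's page-cache accounting along with its contents. A sketch of that stat transfer, assuming both folios are charged to the same memcg and nr matches the folio's page count:

#include <linux/memcontrol.h>
#include <linux/mm.h>

/* Sketch: move NR_FILE_PAGES accounting from the source node's lruvec
 * to the destination node's, as done under the mapping's i_pages lock. */
static void migrate_stats_sketch(struct folio *src, struct folio *dst,
                                 struct mem_cgroup *memcg, long nr)
{
        struct lruvec *old_lruvec = mem_cgroup_lruvec(memcg, folio_pgdat(src));
        struct lruvec *new_lruvec = mem_cgroup_lruvec(memcg, folio_pgdat(dst));

        __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
        __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
}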