/mm/

list_lru.c
  21  static void list_lru_register(struct list_lru *lru)  in list_lru_register()  argument
  24  list_add(&lru->list, &list_lrus);  in list_lru_register()
  28  static void list_lru_unregister(struct list_lru *lru)  in list_lru_unregister()  argument
  31  list_del(&lru->list);  in list_lru_unregister()
  35  static int lru_shrinker_id(struct list_lru *lru)  in lru_shrinker_id()  argument
  37  return lru->shrinker_id;  in lru_shrinker_id()
  40  static inline bool list_lru_memcg_aware(struct list_lru *lru)  in list_lru_memcg_aware()  argument
  42  return lru->memcg_aware;  in list_lru_memcg_aware()
  56  return memcg_lrus->lru[idx];  in list_lru_from_memcg_idx()
  57  return &nlru->lru;  in list_lru_from_memcg_idx()
  [all …]

vmscan.c
  168  if ((_page)->lru.prev != _base) { \
  171  prev = lru_to_page(&(_page->lru)); \
  342  unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)  in lruvec_lru_size()  argument
  354  size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);  in lruvec_lru_size()
  356  size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);  in lruvec_lru_size()
  1147  list_del(&page->lru);  in shrink_page_list()
  1266  list_add_tail(&page->lru, page_list);  in shrink_page_list()
  1507  list_add(&page->lru, &free_pages);  in shrink_page_list()
  1546  list_add(&page->lru, &ret_pages);  in shrink_page_list()
  1575  list_for_each_entry_safe(page, next, page_list, lru) {  in reclaim_clean_pages_from_list()
  [all …]

zbud.c
  98  struct list_head lru;  member
  117  struct list_head lru;  member
  245  INIT_LIST_HEAD(&zhdr->lru);  in init_zbud_page()
  318  INIT_LIST_HEAD(&pool->lru);  in zbud_create_pool()
  409  if (!list_empty(&zhdr->lru))  in zbud_alloc()
  410  list_del(&zhdr->lru);  in zbud_alloc()
  411  list_add(&zhdr->lru, &pool->lru);  in zbud_alloc()
  454  list_del(&zhdr->lru);  in zbud_free()
  508  if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||  in zbud_reclaim_page()
  514  zhdr = list_last_entry(&pool->lru, struct zbud_header, lru);  in zbud_reclaim_page()
  [all …]

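The zbud hits above show the pool-LRU pattern in miniature: zbud_alloc() re-queues the touched zbud page at the head of pool->lru, and zbud_reclaim_page() evicts from the tail with list_last_entry(). Below is a minimal userspace sketch of that move-to-front / evict-from-tail discipline; the list helpers are a cut-down stand-in for the kernel's <linux/list.h>, and struct zpage, pool_touch() and pool_reclaim() are made-up names for illustration, not kernel API.

    #include <stdio.h>
    #include <stddef.h>

    /* Cut-down stand-in for the kernel's intrusive struct list_head. */
    struct list_head { struct list_head *prev, *next; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

    static void list_add(struct list_head *n, struct list_head *h)   /* insert at head (MRU end) */
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }

    static void list_del(struct list_head *e)   /* unlink and re-init, like list_del_init() */
    {
        e->prev->next = e->next; e->next->prev = e->prev;
        e->prev = e->next = e;
    }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hypothetical "zpage" standing in for zbud_header, which embeds a list_head. */
    struct zpage { int id; struct list_head lru; };
    struct pool  { struct list_head lru; };            /* like zbud_pool->lru */

    static void pool_touch(struct pool *p, struct zpage *z)
    {
        /* zbud_alloc(): unlink if already queued, then requeue at the MRU end. */
        if (!list_empty(&z->lru))
            list_del(&z->lru);
        list_add(&z->lru, &p->lru);
    }

    static struct zpage *pool_reclaim(struct pool *p)
    {
        /* zbud_reclaim_page(): the tail entry is the least recently used. */
        if (list_empty(&p->lru))
            return NULL;
        struct list_head *tail = p->lru.prev;          /* list_last_entry() analogue */
        list_del(tail);
        return container_of(tail, struct zpage, lru);
    }

    int main(void)
    {
        struct pool p;
        struct zpage z[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

        INIT_LIST_HEAD(&p.lru);
        for (int i = 0; i < 3; i++) {
            INIT_LIST_HEAD(&z[i].lru);
            pool_touch(&p, &z[i]);
        }
        pool_touch(&p, &z[0]);                         /* page 0 becomes MRU again */

        struct zpage *victim = pool_reclaim(&p);
        printf("evicted page %d\n", victim ? victim->id : -1);   /* prints: evicted page 1 */
        return 0;
    }

The same head-insert / tail-evict shape shows up again in the z3fold.c entry further down.
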
swap.c
  150  list_del(&victim->lru);  in put_pages_list()
  336  int lru = page_lru_base_type(page);  in __activate_page()  local
  339  del_page_from_lru_list(page, lruvec, lru);  in __activate_page()
  341  lru += LRU_ACTIVE;  in __activate_page()
  342  add_page_to_lru_list(page, lruvec, lru);  in __activate_page()
  546  int lru;  in lru_deactivate_file_fn()  local
  561  lru = page_lru_base_type(page);  in lru_deactivate_file_fn()
  563  del_page_from_lru_list(page, lruvec, lru + active);  in lru_deactivate_file_fn()
  573  add_page_to_lru_list(page, lruvec, lru);  in lru_deactivate_file_fn()
  580  add_page_to_lru_list_tail(page, lruvec, lru);  in lru_deactivate_file_fn()
  [all …]

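__activate_page() and lru_deactivate_file_fn() above lean on the enum lru_list layout, in which each inactive list has its active counterpart at offset LRU_ACTIVE, so promotion is just "remove from lists[lru], lru += LRU_ACTIVE, add to lists[lru]". A rough sketch of that index arithmetic, with the lruvec reduced to per-list counters instead of real page lists; the helper names mirror the kernel's, but their bodies here are simplified assumptions.

    #include <stdio.h>

    /* Mirrors the enum lru_list layout: each inactive list has its active
     * counterpart at offset LRU_ACTIVE (as in include/linux/mmzone.h). */
    enum { LRU_BASE = 0, LRU_ACTIVE = 1, LRU_FILE = 2 };
    enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,
        LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS,
    };

    /* Toy lruvec: per-list page counts instead of real list_heads. */
    struct lruvec { long nr[NR_LRU_LISTS]; };
    struct page   { int is_file; int active; };

    /* Stand-in for page_lru_base_type(): the inactive list for this page's type. */
    static enum lru_list page_lru_base_type(const struct page *page)
    {
        return page->is_file ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
    }

    /* Shape of __activate_page(): move the page from the inactive to the active list. */
    static void activate_page(struct lruvec *lruvec, struct page *page)
    {
        enum lru_list lru = page_lru_base_type(page);

        lruvec->nr[lru]--;              /* del_page_from_lru_list() */
        lru += LRU_ACTIVE;
        lruvec->nr[lru]++;              /* add_page_to_lru_list() */
        page->active = 1;
    }

    int main(void)
    {
        struct lruvec v = { { [LRU_INACTIVE_FILE] = 3 } };
        struct page p = { .is_file = 1 };

        activate_page(&v, &p);
        printf("inactive_file=%ld active_file=%ld\n",
               v.nr[LRU_INACTIVE_FILE], v.nr[LRU_ACTIVE_FILE]);   /* 2 and 1 */
        return 0;
    }
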
balloon_compaction.c
  48  list_for_each_entry_safe(page, tmp, pages, lru) {  in balloon_page_list_enqueue()
  49  list_del(&page->lru);  in balloon_page_list_enqueue()
  84  list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {  in balloon_page_list_dequeue()
  104  list_add(&page->lru, pages);  in balloon_page_list_dequeue()
  200  return list_first_entry(&pages, struct page, lru);  in balloon_page_dequeue()
  213  list_del(&page->lru);  in balloon_page_isolate()
  226  list_add(&page->lru, &b_dev_info->pages);  in balloon_page_putback()

page_reporting.c
  146  list_for_each_entry_safe(page, next, list, lru) {  in page_reporting_cycle()
  181  if (!list_is_first(&page->lru, list))  in page_reporting_cycle()
  182  list_rotate_to_front(&page->lru, list);  in page_reporting_cycle()
  206  next = list_first_entry(list, struct page, lru);  in page_reporting_cycle()
  214  if (&next->lru != list && !list_is_first(&next->lru, list))  in page_reporting_cycle()
  215  list_rotate_to_front(&next->lru, list);  in page_reporting_cycle()

z3fold.c
  160  struct list_head lru;  member
  403  INIT_LIST_HEAD(&page->lru);  in init_z3fold_page()
  531  if (!list_empty(&page->lru))  in __release_z3fold_page()
  532  list_del_init(&page->lru);  in __release_z3fold_page()
  1014  INIT_LIST_HEAD(&pool->lru);  in z3fold_create_pool()
  1189  if (!list_empty(&page->lru))  in z3fold_alloc()
  1190  list_del(&page->lru);  in z3fold_alloc()
  1192  list_add(&page->lru, &pool->lru);  in z3fold_alloc()
  1231  list_del(&page->lru);  in z3fold_free()
  1346  if (list_empty(&pool->lru)) {  in z3fold_reclaim_page()
  [all …]

pgtable-generic.c
  168  INIT_LIST_HEAD(&pgtable->lru);  in pgtable_trans_huge_deposit()
  170  list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);  in pgtable_trans_huge_deposit()
  185  pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,  in pgtable_trans_huge_withdraw()
  186  struct page, lru);  in pgtable_trans_huge_withdraw()
  188  list_del(&pgtable->lru);  in pgtable_trans_huge_withdraw()

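pgtable_trans_huge_deposit()/withdraw() above reuse page->lru to keep a small LIFO stash of pre-allocated page-table pages hanging off each huge PMD: deposit pushes, withdraw pops and promotes the next entry via list_first_entry_or_null(). The sketch below models only that push/pop bookkeeping; it swaps the intrusive list for a plain next pointer, and struct mm_like, deposit() and withdraw() are illustrative names, not the kernel interface.

    #include <stdio.h>

    /*
     * Toy model of pgtable_trans_huge_deposit()/withdraw(): a per-PMD stash of
     * pre-allocated page-table pages. The kernel threads the stashed pages
     * through their struct page ->lru members; here an explicit ->next pointer
     * plays that role, and "pmd_huge_pte" is just the stash head.
     */
    struct pgtable { int id; struct pgtable *next; };
    struct mm_like { struct pgtable *pmd_huge_pte; };   /* stand-in for pmd_huge_pte(mm, pmdp) */

    static void deposit(struct mm_like *mm, struct pgtable *pgtable)
    {
        pgtable->next = mm->pmd_huge_pte;               /* push: newest entry on top */
        mm->pmd_huge_pte = pgtable;
    }

    static struct pgtable *withdraw(struct mm_like *mm)
    {
        struct pgtable *pgtable = mm->pmd_huge_pte;     /* pop, or NULL if the stash is empty */

        if (pgtable)
            mm->pmd_huge_pte = pgtable->next;           /* list_first_entry_or_null() analogue */
        return pgtable;
    }

    int main(void)
    {
        struct mm_like mm = { NULL };
        struct pgtable a = { .id = 1 }, b = { .id = 2 };
        struct pgtable *p;

        deposit(&mm, &a);            /* stashed when the huge PMD is set up */
        deposit(&mm, &b);
        while ((p = withdraw(&mm)))  /* drained again when the PMD is split or torn down */
            printf("withdrew pgtable %d\n", p->id);
        return 0;
    }
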
compaction.c
  81  list_for_each_entry_safe(page, next, freelist, lru) {  in release_freepages()
  83  list_del(&page->lru);  in release_freepages()
  98  list_for_each_entry_safe(page, next, list, lru) {  in split_map_pages()
  99  list_del(&page->lru);  in split_map_pages()
  109  list_add(&page->lru, &tmp_list);  in split_map_pages()
  641  list_add_tail(&page->lru, freelist);  in isolate_freepages_block()
  787  list_add(&page->lru, list);  in isolate_and_split_free_page()
  1048  list_add(&page->lru, &cc->migratepages);  in isolate_migratepages_block()
  1252  if (!list_is_last(freelist, &freepage->lru)) {  in move_freelist_head()
  1253  list_cut_before(&sublist, freelist, &freepage->lru);  in move_freelist_head()
  [all …]

mmzone.c
  79  enum lru_list lru;  in lruvec_init()  local
  83  for_each_lru(lru)  in lruvec_init()
  84  INIT_LIST_HEAD(&lruvec->lists[lru]);  in lruvec_init()

memory-failure.c
  856  #define lru (1UL << PG_lru)  macro
  891  { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
  892  { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
  904  #undef lru
  1744  bool lru = PageLRU(page);  in isolate_page()  local
  1749  if (lru)  in isolate_page()
  1755  list_add(&page->lru, pagelist);  in isolate_page()
  1758  if (isolated && lru)  in isolate_page()

khugepaged.c
  582  list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {  in release_pte_pages()
  583  list_del(&page->lru);  in release_pte_pages()
  652  list_for_each_entry(p, compound_pagelist, lru) {  in __collapse_huge_page_isolate()
  712  list_add_tail(&page->lru, compound_pagelist);  in __collapse_huge_page_isolate()
  790  list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {  in __collapse_huge_page_copy()
  791  list_del(&src_page->lru);  in __collapse_huge_page_copy()
  1904  list_add_tail(&page->lru, &pagelist);  in collapse_file()
  1953  list_for_each_entry_safe(page, tmp, &pagelist, lru) {  in collapse_file()
  1960  list_del(&page->lru);  in collapse_file()
  2000  struct page, lru);  in collapse_file()
  [all …]

shuffle.c
  136  list_swap(&page_i->lru, &page_j->lru);  in __shuffle_zone()

swapfile.c
  1607  page = list_next_entry(page, lru);  in swp_swapcount()
  3707  INIT_LIST_HEAD(&head->lru);  in add_swap_count_continuation()
  3712  list_for_each_entry(list_page, &head->lru, lru) {  in add_swap_count_continuation()
  3734  list_add_tail(&page->lru, &head->lru);  in add_swap_count_continuation()
  3773  page = list_next_entry(head, lru);  in swap_count_continued()
  3785  page = list_next_entry(page, lru);  in swap_count_continued()
  3791  page = list_next_entry(page, lru);  in swap_count_continued()
  3801  while ((page = list_prev_entry(page, lru)) != head) {  in swap_count_continued()
  3815  page = list_next_entry(page, lru);  in swap_count_continued()
  3824  while ((page = list_prev_entry(page, lru)) != head) {  in swap_count_continued()
  [all …]

page_alloc.c
  781  INIT_LIST_HEAD(&page->lru);  in set_page_guard()
  955  list_add(&page->lru, &area->free_list[migratetype]);  in add_to_free_list()
  965  list_add_tail(&page->lru, &area->free_list[migratetype]);  in add_to_free_list_tail()
  979  list_move_tail(&page->lru, &area->free_list[migratetype]);  in move_to_free_list()
  989  list_del(&page->lru);  in del_page_from_free_list()
  1472  page = list_last_entry(list, struct page, lru);  in free_pcppages_bulk()
  1474  list_del(&page->lru);  in free_pcppages_bulk()
  1480  list_add_tail(&page->lru, &head);  in free_pcppages_bulk()
  1503  list_for_each_entry_safe(page, tmp, &head, lru) {  in free_pcppages_bulk()
  1541  INIT_LIST_HEAD(&page->lru);  in __init_single_page()
  [all …]

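The page_alloc.c hits show page->lru serving as a generic link even for free pages: each free_area keeps one free_list per migratetype, and pages are added at the head or tail, moved to another list's tail, or unlinked. A compact sketch of that bookkeeping, with free_area cut down to just the lists and a simplified list_head stand-in; order handling, nr_free accounting and buddy merging are omitted.

    #include <stdio.h>
    #include <stddef.h>

    /* Cut-down stand-in for the kernel's intrusive struct list_head. */
    struct list_head { struct list_head *prev, *next; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

    static void __list_add(struct list_head *n, struct list_head *prev, struct list_head *next)
    {
        next->prev = n; n->next = next;
        n->prev = prev; prev->next = n;
    }

    static void list_add(struct list_head *n, struct list_head *h)      { __list_add(n, h, h->next); }
    static void list_add_tail(struct list_head *n, struct list_head *h) { __list_add(n, h->prev, h); }

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next; e->next->prev = e->prev;
    }

    static void list_move_tail(struct list_head *e, struct list_head *h)
    {
        list_del(e); list_add_tail(e, h);
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Cut-down buddy structures: one order's free_area, two migratetypes. */
    enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES };

    struct page      { unsigned long pfn; struct list_head lru; };   /* ->lru links free pages */
    struct free_area { struct list_head free_list[MIGRATE_TYPES]; };

    static void add_to_free_list(struct page *page, struct free_area *area, int mt)
    {
        list_add(&page->lru, &area->free_list[mt]);                  /* head: hand out again soon */
    }

    static void add_to_free_list_tail(struct page *page, struct free_area *area, int mt)
    {
        list_add_tail(&page->lru, &area->free_list[mt]);             /* tail: hand out last */
    }

    static void move_to_free_list(struct page *page, struct free_area *area, int mt)
    {
        list_move_tail(&page->lru, &area->free_list[mt]);            /* e.g. migratetype change */
    }

    int main(void)
    {
        struct free_area area;
        struct page p[3] = { { .pfn = 100 }, { .pfn = 101 }, { .pfn = 102 } };

        for (int mt = 0; mt < MIGRATE_TYPES; mt++)
            INIT_LIST_HEAD(&area.free_list[mt]);

        add_to_free_list(&p[0], &area, MIGRATE_MOVABLE);
        add_to_free_list_tail(&p[1], &area, MIGRATE_MOVABLE);
        add_to_free_list(&p[2], &area, MIGRATE_MOVABLE);
        move_to_free_list(&p[2], &area, MIGRATE_UNMOVABLE);

        for (struct list_head *e = area.free_list[MIGRATE_MOVABLE].next;
             e != &area.free_list[MIGRATE_MOVABLE]; e = e->next)
            printf("movable free page pfn=%lu\n",
                   container_of(e, struct page, lru)->pfn);          /* 100, then 101 */
        return 0;
    }
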
readahead.c
  73  list_del(&victim->lru);  in read_cache_pages_invalidate_pages()
  98  list_del(&page->lru);  in read_cache_pages()
  231  list_add(&page->lru, &page_pool);  in page_cache_ra_unbounded()

hugetlb.c
  1073  list_move(&page->lru, &h->hugepage_freelists[nid]);  in enqueue_huge_page()
  1084  list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {  in dequeue_huge_page_node_exact()
  1091  list_move(&page->lru, &h->hugepage_activelist);  in dequeue_huge_page_node_exact()
  1482  list_del(&page->lru);  in __free_huge_page()
  1487  list_del(&page->lru);  in __free_huge_page()
  1548  INIT_LIST_HEAD(&page->lru);  in prep_new_huge_page()
  1770  struct page, lru);  in free_pool_huge_page()
  1771  list_del(&page->lru);  in free_pool_huge_page()
  1846  list_del(&head->lru);  in dissolve_free_huge_page()
  2039  list_add(&page->lru, &surplus_list);  in gather_surplus_pages()
  [all …]

workingset.c
  517  struct list_lru_one *lru,  in shadow_lru_isolate()  argument
  546  list_lru_isolate(lru, item);  in shadow_lru_isolate()

migrate.c
  151  list_for_each_entry_safe(page, page2, l, lru) {  in putback_movable_pages()
  156  list_del(&page->lru);  in putback_movable_pages()
  1194  list_del(&page->lru);  in unmap_and_move()
  1428  list_for_each_entry_safe(page, page2, from, lru) {  in migrate_pages()
  1466  list_safe_reset_next(page, page2, lru);  in migrate_pages()
  1658  list_add_tail(&head->lru, pagelist);  in add_page_for_migration()
  2090  list_add(&page->lru, &migratepages);  in migrate_misplaced_page()
  2096  list_del(&page->lru);  in migrate_misplaced_page()

memcontrol.c
  1420  void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,  in mem_cgroup_update_lru_size()  argument
  1431  lru_size = &mz->lru_zone_size[zid][lru];  in mem_cgroup_update_lru_size()
  1439  __func__, lruvec, lru, nr_pages, size)) {  in mem_cgroup_update_lru_size()
  4005  enum lru_list lru;  in mem_cgroup_node_nr_lru_pages()  local
  4009  for_each_lru(lru) {  in mem_cgroup_node_nr_lru_pages()
  4010  if (!(BIT(lru) & lru_mask))  in mem_cgroup_node_nr_lru_pages()
  4013  nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);  in mem_cgroup_node_nr_lru_pages()
  4015  nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);  in mem_cgroup_node_nr_lru_pages()
  4025  enum lru_list lru;  in mem_cgroup_nr_lru_pages()  local
  4027  for_each_lru(lru) {  in mem_cgroup_nr_lru_pages()
  [all …]

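mem_cgroup_node_nr_lru_pages() above simply walks every LRU list with for_each_lru() and sums the counters for the lists selected by lru_mask. That selection pattern is easy to isolate; in the sketch below a flat array stands in for lruvec_page_state(), and LRU_ALL_FILE is written out locally (the kernel keeps a similar mask of its own in memcontrol.c).

    #include <stdio.h>

    /* Same enum lru_list layout as in the swap.c sketch above. */
    enum lru_list {
        LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
        LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
        LRU_UNEVICTABLE, NR_LRU_LISTS,
    };

    #define BIT(nr)           (1UL << (nr))
    #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

    /* Mask picking the file-backed lists, written out here for the example. */
    #define LRU_ALL_FILE      (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))

    /* Flat array standing in for lruvec_page_state(lruvec, NR_LRU_BASE + lru). */
    static unsigned long lru_pages[NR_LRU_LISTS] = { 10, 20, 30, 40, 5 };

    /* Shape of mem_cgroup_node_nr_lru_pages(): sum only the lists the caller asked for. */
    static unsigned long nr_lru_pages(unsigned long lru_mask)
    {
        unsigned long nr = 0;
        enum lru_list lru;

        for_each_lru(lru) {
            if (!(BIT(lru) & lru_mask))
                continue;
            nr += lru_pages[lru];
        }
        return nr;
    }

    int main(void)
    {
        printf("file-backed pages: %lu\n", nr_lru_pages(LRU_ALL_FILE));   /* 30 + 40 = 70 */
        return 0;
    }
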
memory_hotplug.c
  165  INIT_LIST_HEAD(&page->lru);  in put_page_bootmem()
  1368  list_add_tail(&page->lru, &source);  in do_migrate_range()
  1392  mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));  in do_migrate_range()
  1405  list_for_each_entry(page, &source, lru) {  in do_migrate_range()

hugetlb_cgroup.c
  208  list_for_each_entry(page, &h->hugepage_activelist, lru)  in hugetlb_cgroup_css_offline()
  797  list_move(&newhpage->lru, &h->hugepage_activelist);  in hugetlb_cgroup_migrate()

madvise.c
  391  list_add(&page->lru, &page_list);  in madvise_cold_or_pageout_pte_range()
  489  list_add(&page->lru, &page_list);  in madvise_cold_or_pageout_pte_range()

page_pinner.c
  367  list_for_each_entry(page, page_list, lru) {  in __page_pinner_mark_migration_failed_pages()

/mm/damon/

paddr.c
  241  list_add(&page->lru, &page_list);  in damon_pa_apply_scheme()