/mm/
list_lru.c
     19  static void list_lru_register(struct list_lru *lru)    in list_lru_register() argument
     22  list_add(&lru->list, &list_lrus);    in list_lru_register()
     26  static void list_lru_unregister(struct list_lru *lru)    in list_lru_unregister() argument
     29  list_del(&lru->list);    in list_lru_unregister()
     33  static void list_lru_register(struct list_lru *lru)    in list_lru_register() argument
     37  static void list_lru_unregister(struct list_lru *lru)    in list_lru_unregister() argument
     43  static inline bool list_lru_memcg_aware(struct list_lru *lru)    in list_lru_memcg_aware() argument
     49  return !!lru->node[0].memcg_lrus;    in list_lru_memcg_aware()
     61  return nlru->memcg_lrus->lru[idx];    in list_lru_from_memcg_idx()
     63  return &nlru->lru;    in list_lru_from_memcg_idx()
    [all …]

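The list_lru.c hits above show that every list_lru is itself kept on a global list (&list_lrus) via list_add()/list_del() under a mutex, so memcg-aware lrus can be found and updated when cgroups change. Below is a minimal userspace sketch of that registration pattern; the hand-rolled list helpers and pthread mutex stand in for <linux/list.h> and the kernel mutex, and the struct layout is illustrative, not the real struct list_lru.

#include <stddef.h>
#include <stdio.h>
#include <pthread.h>

/* Tiny intrusive doubly-linked list, mirroring the pieces of <linux/list.h> used here. */
struct list_head { struct list_head *prev, *next; };
static void list_init(struct list_head *h) { h->prev = h->next = h; }
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative stand-in for struct list_lru: a name plus the registration link. */
struct list_lru {
	const char *name;
	struct list_head list;		/* links this lru onto the global registry */
};

static struct list_head list_lrus;	/* global registry, as in mm/list_lru.c */
static pthread_mutex_t list_lrus_mutex = PTHREAD_MUTEX_INITIALIZER;

static void list_lru_register(struct list_lru *lru)
{
	pthread_mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	pthread_mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	pthread_mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	pthread_mutex_unlock(&list_lrus_mutex);
}

int main(void)
{
	struct list_lru dentry_lru = { .name = "dentry" };

	list_init(&list_lrus);
	list_lru_register(&dentry_lru);
	for (struct list_head *p = list_lrus.next; p != &list_lrus; p = p->next)
		printf("registered: %s\n", list_entry(p, struct list_lru, list)->name);
	list_lru_unregister(&dentry_lru);
	return 0;
}
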
vmscan.c
    116  if ((_page)->lru.prev != _base) { \
    119  prev = lru_to_page(&(_page->lru)); \
    130  if ((_page)->lru.prev != _base) { \
    133  prev = lru_to_page(&(_page->lru)); \
    243  unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)    in lruvec_lru_size() argument
    249  lru_size = mem_cgroup_get_lru_size(lruvec, lru);    in lruvec_lru_size()
    251  lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);    in lruvec_lru_size()
    261  size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);    in lruvec_lru_size()
    264  NR_ZONE_LRU_BASE + lru);    in lruvec_lru_size()
    973  list_del(&page->lru);    in shrink_page_list()
    [all …]

swap.c
    118  victim = list_entry(pages->prev, struct page, lru);    in put_pages_list()
    119  list_del(&victim->lru);    in put_pages_list()
    212  enum lru_list lru = page_lru_base_type(page);    in pagevec_move_tail_fn() local
    213  list_move_tail(&page->lru, &lruvec->lists[lru]);    in pagevec_move_tail_fn()
    266  int lru = page_lru_base_type(page);    in __activate_page() local
    268  del_page_from_lru_list(page, lruvec, lru);    in __activate_page()
    270  lru += LRU_ACTIVE;    in __activate_page()
    271  add_page_to_lru_list(page, lruvec, lru);    in __activate_page()
    518  int lru, file;    in lru_deactivate_file_fn() local
    533  lru = page_lru_base_type(page);    in lru_deactivate_file_fn()
    [all …]

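The swap.c hits show the basic activation move: __activate_page() unlinks the page from its current base list and re-adds it with LRU_ACTIVE added to the list index, so the page ends up on the matching active list in lruvec->lists[]. A rough userspace sketch of per-type LRU lists indexed by an enum follows; the list helpers stand in for <linux/list.h>, and the enum values and struct fields are simplified rather than the kernel's exact definitions.

#include <stddef.h>
#include <stdio.h>

/* Tiny intrusive list, mirroring the parts of <linux/list.h> this sketch needs. */
struct list_head { struct list_head *prev, *next; };
static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

/* Simplified list indexes: base type plus an LRU_ACTIVE offset, echoing enum lru_list. */
enum lru_list {
	LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
	NR_LRU_LISTS
};
#define LRU_ACTIVE 1

struct lruvec { struct list_head lists[NR_LRU_LISTS]; };

struct page {
	int file;			/* 1 = page cache, 0 = anonymous */
	int active;
	struct list_head lru;
};

static enum lru_list page_lru_base_type(struct page *page)
{
	return page->file ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
}

/* Mirror of the pattern in __activate_page(): unlink, bump the index, relink. */
static void activate_page(struct page *page, struct lruvec *lruvec)
{
	enum lru_list lru = page_lru_base_type(page);

	list_del(&page->lru);				/* del_page_from_lru_list() */
	lru += LRU_ACTIVE;
	list_add(&page->lru, &lruvec->lists[lru]);	/* add_page_to_lru_list() */
	page->active = 1;				/* SetPageActive() */
}

int main(void)
{
	struct lruvec lruvec;
	struct page page = { .file = 1 };

	for (int i = 0; i < NR_LRU_LISTS; i++)
		list_init(&lruvec.lists[i]);
	list_init(&page.lru);
	list_add(&page.lru, &lruvec.lists[page_lru_base_type(&page)]);

	activate_page(&page, &lruvec);
	printf("active file list empty? %s\n",
	       list_empty(&lruvec.lists[LRU_ACTIVE_FILE]) ? "yes" : "no");
	return 0;
}
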
zbud.c
     97  struct list_head lru;    member
    116  struct list_head lru;    member
    244  INIT_LIST_HEAD(&zhdr->lru);    in init_zbud_page()
    317  INIT_LIST_HEAD(&pool->lru);    in zbud_create_pool()
    409  if (!list_empty(&zhdr->lru))    in zbud_alloc()
    410  list_del(&zhdr->lru);    in zbud_alloc()
    411  list_add(&zhdr->lru, &pool->lru);    in zbud_alloc()
    454  list_del(&zhdr->lru);    in zbud_free()
    508  if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||    in zbud_reclaim_page()
    514  zhdr = list_last_entry(&pool->lru, struct zbud_header, lru);    in zbud_reclaim_page()
    [all …]

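The zbud.c excerpts (and the z3fold.c ones that follow) show the pool-level LRU discipline: a freshly allocated or updated buddy page is moved to the head of pool->lru, and reclaim evicts from the tail. Here is a minimal userspace sketch of that head-insert/tail-evict pattern; the list primitives stand in for <linux/list.h>, and zpage/pool_lru are illustrative names, not zbud's real structures.

#include <stddef.h>
#include <stdio.h>

/* Tiny intrusive list, mirroring the parts of <linux/list.h> this sketch needs. */
struct list_head { struct list_head *prev, *next; };
static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}
static void list_add(struct list_head *n, struct list_head *h)	/* insert at head (MRU end) */
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative page-like object with an embedded lru link, as in struct zbud_header. */
struct zpage {
	int id;
	struct list_head lru;
};

static struct list_head pool_lru;	/* head = most recently used, tail = eviction victim */

static void zpage_touch(struct zpage *z)
{
	if (!list_empty(&z->lru))
		list_del(&z->lru);	/* unlink from wherever it was */
	list_add(&z->lru, &pool_lru);	/* re-insert at the MRU end, like zbud_alloc() */
}

static struct zpage *zpage_reclaim(void)
{
	if (list_empty(&pool_lru))
		return NULL;
	/* the tail entry is the least recently used, like zbud_reclaim_page() */
	struct zpage *victim = list_entry(pool_lru.prev, struct zpage, lru);
	list_del(&victim->lru);
	return victim;
}

int main(void)
{
	struct zpage p[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	list_init(&pool_lru);
	for (int i = 0; i < 3; i++) {
		list_init(&p[i].lru);
		zpage_touch(&p[i]);		/* order of use: 0, 1, 2 */
	}
	zpage_touch(&p[0]);			/* page 0 becomes most recent again */

	struct zpage *v = zpage_reclaim();
	printf("evicted page %d\n", v->id);	/* prints 1: the oldest untouched entry */
	return 0;
}
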
z3fold.c
     82  struct list_head lru;    member
    142  INIT_LIST_HEAD(&page->lru);    in init_z3fold_page()
    236  INIT_LIST_HEAD(&pool->lru);    in z3fold_create_pool()
    379  if (!list_empty(&page->lru))    in z3fold_alloc()
    380  list_del(&page->lru);    in z3fold_alloc()
    382  list_add(&page->lru, &pool->lru);    in z3fold_alloc()
    451  list_del(&page->lru);    in z3fold_free()
    509  if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||    in z3fold_reclaim_page()
    515  page = list_last_entry(&pool->lru, struct page, lru);    in z3fold_reclaim_page()
    516  list_del(&page->lru);    in z3fold_reclaim_page()
    [all …]

pgtable-generic.c
    136  INIT_LIST_HEAD(&pgtable->lru);    in pgtable_trans_huge_deposit()
    138  list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);    in pgtable_trans_huge_deposit()
    153  pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,    in pgtable_trans_huge_withdraw()
    154  struct page, lru);    in pgtable_trans_huge_withdraw()
    156  list_del(&pgtable->lru);    in pgtable_trans_huge_withdraw()

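pgtable-generic.c reuses the spare page's lru field to keep a per-PMD stash of deposited page tables: deposit pushes a page onto the list, withdraw pops one back off. The sketch below shows that push/pop-through-lru pattern in userspace with a dedicated list head; note that the kernel variant is slightly trickier (it anchors the list on the most recently deposited page itself, via pmd_huge_pte()), so this is a simplification, and the names pgtable_page/deposited are illustrative.

#include <stddef.h>
#include <stdio.h>

/* Tiny intrusive list, mirroring the parts of <linux/list.h> this sketch needs. */
struct list_head { struct list_head *prev, *next; };
static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* An illustrative "page" carrying a preallocated page table, linked via lru. */
struct pgtable_page {
	int id;
	struct list_head lru;
};

static struct list_head deposited;	/* per-PMD stash of spare page tables */

static void pgtable_deposit(struct pgtable_page *pg)
{
	list_init(&pg->lru);			/* INIT_LIST_HEAD() before linking */
	list_add(&pg->lru, &deposited);		/* push: newest entry at the head */
}

static struct pgtable_page *pgtable_withdraw(void)
{
	if (list_empty(&deposited))
		return NULL;
	/* pop the most recently deposited entry, then unlink it */
	struct pgtable_page *pg = list_entry(deposited.next, struct pgtable_page, lru);
	list_del(&pg->lru);
	return pg;
}

int main(void)
{
	struct pgtable_page a = { .id = 1 }, b = { .id = 2 };

	list_init(&deposited);
	pgtable_deposit(&a);
	pgtable_deposit(&b);

	struct pgtable_page *first = pgtable_withdraw();
	struct pgtable_page *second = pgtable_withdraw();
	printf("withdrew %d then %d\n", first->id, second->id);	/* 2 then 1 */
	return 0;
}
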
swapfile.c
    913  page = list_next_entry(page, lru);    in swp_swapcount()
   2832  INIT_LIST_HEAD(&head->lru);    in add_swap_count_continuation()
   2837  list_for_each_entry(list_page, &head->lru, lru) {    in add_swap_count_continuation()
   2859  list_add_tail(&page->lru, &head->lru);    in add_swap_count_continuation()
   2891  page = list_entry(head->lru.next, struct page, lru);    in swap_count_continued()
   2903  page = list_entry(page->lru.next, struct page, lru);    in swap_count_continued()
   2909  page = list_entry(page->lru.next, struct page, lru);    in swap_count_continued()
   2917  page = list_entry(page->lru.prev, struct page, lru);    in swap_count_continued()
   2922  page = list_entry(page->lru.prev, struct page, lru);    in swap_count_continued()
   2933  page = list_entry(page->lru.next, struct page, lru);    in swap_count_continued()
    [all …]

mmzone.c
     90  enum lru_list lru;    in lruvec_init() local
     94  for_each_lru(lru)    in lruvec_init()
     95  INIT_LIST_HEAD(&lruvec->lists[lru]);    in lruvec_init()

balloon_compaction.c
     65  list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {    in balloon_page_dequeue()
    116  list_del(&page->lru);    in balloon_page_isolate()
    129  list_add(&page->lru, &b_dev_info->pages);    in balloon_page_putback()

slab.c
   1241  BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <    in kmem_cache_init()
   1391  list_for_each_entry(page, &n->slabs_partial, lru) {    in slab_out_of_memory()
   1395  list_for_each_entry(page, &n->slabs_free, lru)    in slab_out_of_memory()
   1765  list_for_each_entry_safe(page, n, list, lru) {    in slabs_destroy()
   1766  list_del(&page->lru);    in slabs_destroy()
   2319  page = list_entry(p, struct page, lru);    in drain_freelist()
   2320  list_del(&page->lru);    in drain_freelist()
   2752  INIT_LIST_HEAD(&page->lru);    in cache_grow_end()
   2757  list_add_tail(&page->lru, &(n->slabs_free));    in cache_grow_end()
   2866  list_del(&page->lru);    in fixup_slab_list()
    [all …]

compaction.c
     55  list_for_each_entry_safe(page, next, freelist, lru) {    in release_freepages()
     57  list_del(&page->lru);    in release_freepages()
     72  list_for_each_entry_safe(page, next, list, lru) {    in map_pages()
     73  list_del(&page->lru);    in map_pages()
     83  list_add(&page->lru, &tmp_list);    in map_pages()
    504  list_add_tail(&page->lru, freelist);    in isolate_freepages_block()
    857  list_add(&page->lru, &cc->migratepages);    in isolate_migratepages_block()
   1144  freepage = list_entry(cc->freepages.next, struct page, lru);    in compaction_alloc()
   1145  list_del(&freepage->lru);    in compaction_alloc()
   1160  list_add(&page->lru, &cc->freepages);    in compaction_free()

page_alloc.c
    688  INIT_LIST_HEAD(&page->lru);    in set_page_guard()
    843  list_del(&buddy->lru);    in __free_one_page()
    895  list_add_tail(&page->lru,    in __free_one_page()
    901  list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);    in __free_one_page()
   1144  page = list_last_entry(list, struct page, lru);    in free_pcppages_bulk()
   1146  list_del(&page->lru);    in free_pcppages_bulk()
   1192  INIT_LIST_HEAD(&page->lru);    in __init_single_page()
   1250  INIT_LIST_HEAD(&page->lru);    in reserve_bootmem_region()
   1674  list_add(&page[size].lru, &area->free_list[migratetype]);    in expand()
   1824  struct page, lru);    in __rmqueue_smallest()
    [all …]

readahead.c
     68  list_del(&victim->lru);    in read_cache_pages_invalidate_pages()
     91  list_del(&page->lru);    in read_cache_pages()
    129  list_del(&page->lru);    in read_pages()
    187  list_add(&page->lru, &page_pool);    in __do_page_cache_readahead()

slob.c
    114  list_add(&sp->lru, list);    in set_slob_page_free()
    120  list_del(&sp->lru);    in clear_slob_page_free()
    285  list_for_each_entry(sp, slob_list, lru) {    in slob_alloc()
    299  prev = sp->lru.prev;    in slob_alloc()
    325  INIT_LIST_HEAD(&sp->lru);    in slob_alloc()

memory-failure.c
    778  #define lru (1UL << PG_lru)    macro
    814  { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
    815  { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
    828  #undef lru
   1678  list_add(&page->lru, &pagelist);    in __soft_offline_page()
   1683  list_del(&page->lru);    in __soft_offline_page()

hugetlb.c
    863  list_move(&page->lru, &h->hugepage_freelists[nid]);    in enqueue_huge_page()
    872  list_for_each_entry(page, &h->hugepage_freelists[nid], lru)    in dequeue_huge_page_node()
    879  if (&h->hugepage_freelists[nid] == &page->lru)    in dequeue_huge_page_node()
    881  list_move(&page->lru, &h->hugepage_activelist);    in dequeue_huge_page_node()
   1266  list_del(&page->lru);    in free_huge_page()
   1279  INIT_LIST_HEAD(&page->lru);    in prep_new_huge_page()
   1421  struct page, lru);    in free_pool_huge_page()
   1422  list_del(&page->lru);    in free_pool_huge_page()
   1457  list_del(&head->lru);    in dissolve_free_huge_page()
   1626  INIT_LIST_HEAD(&page->lru);    in __alloc_buddy_huge_page()
    [all …]

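hugetlb.c keeps one freelist of huge pages per NUMA node plus an activelist, and pages move between them via their lru links: dequeue picks a page from a node's freelist and list_move()s it onto the activelist. A rough userspace sketch of that per-node freelist pattern follows; the list ops stand in for <linux/list.h>, and the hstate fields are simplified.

#include <stddef.h>
#include <stdio.h>

/* Tiny intrusive list, mirroring the parts of <linux/list.h> this sketch needs. */
struct list_head { struct list_head *prev, *next; };
static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}
static void list_move(struct list_head *n, struct list_head *h)
{
	list_del(n);		/* unlink from the old list */
	list_add(n, h);		/* relink at the head of the new one */
}
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define MAX_NUMNODES 2

struct page { int nid; struct list_head lru; };

/* Simplified hstate: a freelist per node plus one activelist. */
struct hstate {
	struct list_head hugepage_freelists[MAX_NUMNODES];
	struct list_head hugepage_activelist;
};

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	list_move(&page->lru, &h->hugepage_freelists[page->nid]);
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	struct page *page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_move(&page->lru, &h->hugepage_activelist);	/* free -> active */
	return page;
}

int main(void)
{
	struct hstate h;
	struct page p = { .nid = 1 };

	for (int i = 0; i < MAX_NUMNODES; i++)
		list_init(&h.hugepage_freelists[i]);
	list_init(&h.hugepage_activelist);
	list_init(&p.lru);

	enqueue_huge_page(&h, &p);
	printf("dequeued from node 1: %s\n",
	       dequeue_huge_page_node(&h, 1) ? "yes" : "no");
	return 0;
}
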
slub.c
    996  list_add(&page->lru, &n->full);    in add_full()
   1005  list_del(&page->lru);    in remove_full()
   1671  (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
   1680  page = container_of((struct list_head *)h, struct page, lru);    in rcu_free_slab()
   1719  list_add_tail(&page->lru, &n->partial);    in __add_partial()
   1721  list_add(&page->lru, &n->partial);    in __add_partial()
   1735  list_del(&page->lru);    in remove_partial()
   1809  list_for_each_entry_safe(page, page2, &n->partial, lru) {    in get_partial_node()
   2363  list_for_each_entry(page, &n->partial, lru)    in count_partial()
   3659  list_for_each_entry_safe(page, h, &n->partial, lru) {    in free_partial()
    [all …]

migrate.c
    166  list_for_each_entry_safe(page, page2, l, lru) {    in putback_movable_pages()
    171  list_del(&page->lru);    in putback_movable_pages()
   1124  list_del(&page->lru);    in unmap_and_move()
   1329  list_for_each_entry_safe(page, page2, from, lru) {    in migrate_pages()
   1472  list_add_tail(&page->lru, &pagelist);    in do_move_page_to_node_array()
   1887  list_add(&page->lru, &migratepages);    in migrate_misplaced_page()
   1893  list_del(&page->lru);    in migrate_misplaced_page()

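The migrate.c hits show the other common role of page->lru: once a page has been isolated from its LRU, the same link strings it onto a caller's private list, and putback_movable_pages() walks that list with the _safe iterator because it unlinks each entry as it goes. Below is a compact userspace sketch of this gather-then-drain pattern; the list helpers and the safe-iteration macro stand in for <linux/list.h> (the extra type argument to the macro replaces the kernel's typeof()).

#include <stddef.h>
#include <stdio.h>

/* Tiny intrusive list, mirroring the parts of <linux/list.h> this sketch needs. */
struct list_head { struct list_head *prev, *next; };
static void list_init(struct list_head *h) { h->prev = h->next = h; }
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->next = h;
	n->prev = h->prev;
	h->prev->next = n;
	h->prev = n;
}
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
/* Safe variant: remember the next entry before the body may unlink the current one. */
#define list_for_each_entry_safe(pos, n, head, member, type)		\
	for (pos = list_entry((head)->next, type, member),		\
	     n = list_entry(pos->member.next, type, member);		\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, type, member))

struct page { int id; struct list_head lru; };

int main(void)
{
	struct list_head pagelist;		/* private list, as built by migrate_pages() callers */
	struct page pages[3] = { { .id = 10 }, { .id = 11 }, { .id = 12 } };
	struct page *page, *next;

	list_init(&pagelist);
	for (int i = 0; i < 3; i++) {		/* "isolate": chain pages via their lru links */
		list_init(&pages[i].lru);
		list_add_tail(&pages[i].lru, &pagelist);
	}

	/* drain: unlink each page while walking, like putback_movable_pages() */
	list_for_each_entry_safe(page, next, &pagelist, lru, struct page) {
		list_del(&page->lru);
		printf("put back page %d\n", page->id);
	}
	return 0;
}
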
workingset.c
    382  struct list_lru_one *lru,    in shadow_lru_isolate() argument
    413  list_lru_isolate(lru, item);    in shadow_lru_isolate()

hugetlb_cgroup.c
    171  list_for_each_entry(page, &h->hugepage_activelist, lru)    in hugetlb_cgroup_css_offline()
    431  list_move(&newhpage->lru, &h->hugepage_activelist);    in hugetlb_cgroup_migrate()

khugepaged.c
   1431  list_add_tail(&page->lru, &pagelist);    in collapse_shmem()
   1485  list_for_each_entry_safe(page, tmp, &pagelist, lru) {    in collapse_shmem()
   1488  list_del(&page->lru);    in collapse_shmem()
   1529  struct page, lru);    in collapse_shmem()
   1544  list_del(&page->lru);    in collapse_shmem()

memcontrol.c
    633  enum lru_list lru;    in mem_cgroup_node_nr_lru_pages() local
    637  for_each_lru(lru) {    in mem_cgroup_node_nr_lru_pages()
    638  if (!(BIT(lru) & lru_mask))    in mem_cgroup_node_nr_lru_pages()
    640  nr += mem_cgroup_get_lru_size(lruvec, lru);    in mem_cgroup_node_nr_lru_pages()
   1014  void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,    in mem_cgroup_update_lru_size() argument
   1025  lru_size = &mz->lru_zone_size[zid][lru];    in mem_cgroup_update_lru_size()
   1033  __func__, lruvec, lru, nr_pages, size)) {    in mem_cgroup_update_lru_size()
   5530  page = list_entry(next, struct page, lru);    in uncharge_list()
   5531  next = page->lru.next;    in uncharge_list()
   5596  INIT_LIST_HEAD(&page->lru);    in mem_cgroup_uncharge()
    [all …]

memory_hotplug.c
    200  INIT_LIST_HEAD(&page->lru);    in put_page_bootmem()
   1629  list_add_tail(&page->lru, &source);    in do_migrate_range()

mempolicy.c
    960  list_add_tail(&page->lru, pagelist);    in migrate_page_add()