Occurrences of "lru" under /mm/:

list_lru.c
    19  static void list_lru_register(struct list_lru *lru)  in list_lru_register() argument
    22  list_add(&lru->list, &list_lrus);  in list_lru_register()
    26  static void list_lru_unregister(struct list_lru *lru)  in list_lru_unregister() argument
    29  list_del(&lru->list);  in list_lru_unregister()
    33  static void list_lru_register(struct list_lru *lru)  in list_lru_register() argument
    37  static void list_lru_unregister(struct list_lru *lru)  in list_lru_unregister() argument
    43  static inline bool list_lru_memcg_aware(struct list_lru *lru)  in list_lru_memcg_aware() argument
    45  return lru->memcg_aware;  in list_lru_memcg_aware()
    57  return nlru->memcg_lrus->lru[idx];  in list_lru_from_memcg_idx()
    59  return &nlru->lru;  in list_lru_from_memcg_idx()
    [all …]

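The list_lru.c hits show the kernel's usual intrusive-list pattern: the object embeds a struct list_head (lru->list) and is threaded onto a global list with list_add() and taken off again with list_del(). Below is a minimal standalone userspace sketch of that idiom; struct my_lru, my_lrus and the helpers are illustrative stand-ins, not the kernel implementation, and the locking the real list_lru_register()/unregister() take around the global list is omitted.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's intrusive list primitives. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        entry->next = entry->prev = entry;
}

/* Recover the containing object from a pointer to its embedded list_head. */
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy counterpart of struct list_lru: the object carries its own link. */
struct my_lru {
        const char *name;
        struct list_head list;
};

static struct list_head my_lrus = LIST_HEAD_INIT(my_lrus);      /* cf. list_lrus */

static void my_lru_register(struct my_lru *lru)
{
        list_add(&lru->list, &my_lrus);         /* cf. list_lru_register() */
}

static void my_lru_unregister(struct my_lru *lru)
{
        list_del(&lru->list);                   /* cf. list_lru_unregister() */
}

int main(void)
{
        struct my_lru a = { .name = "dentry" }, b = { .name = "inode" };
        struct list_head *pos;

        my_lru_register(&a);
        my_lru_register(&b);
        for (pos = my_lrus.next; pos != &my_lrus; pos = pos->next)
                printf("registered: %s\n",
                       list_entry(pos, struct my_lru, list)->name);
        my_lru_unregister(&a);
        my_lru_unregister(&b);
        return 0;
}

The offsetof() arithmetic behind list_entry() is the same trick that every "struct page, lru" hit in the files below relies on to get from an embedded lru link back to its containing page.
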
vmscan.c
    109  #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
    114  if ((_page)->lru.prev != _base) { \
    117  prev = lru_to_page(&(_page->lru)); \
    128  if ((_page)->lru.prev != _base) { \
    131  prev = lru_to_page(&(_page->lru)); \
    215  static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)  in get_lru_size() argument
    218  return mem_cgroup_get_lru_size(lruvec, lru);  in get_lru_size()
    220  return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);  in get_lru_size()
    924  list_del(&page->lru);  in shrink_page_list()
    1036  list_add_tail(&page->lru, page_list);  in shrink_page_list()
    [all …]

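The lru_to_page() macro at line 109 is just list_entry((_head)->prev, struct page, lru): it names the page whose lru link sits at the tail of the list, which is the end shrink_page_list() consumes. A short sketch of that tail access, reusing the stand-in list helpers from the sketch above (struct my_page and take_from_tail are illustrative names):

/* Illustrative, building on the struct list_head helpers in the sketch above. */
struct my_page {
        int pfn;
        struct list_head lru;
};

/* cf. lru_to_page(): the tail entry is reached through head->prev. */
#define my_lru_to_page(head)  list_entry((head)->prev, struct my_page, lru)

/* Caller must ensure the list is not empty. */
static struct my_page *take_from_tail(struct list_head *page_list)
{
        struct my_page *page = my_lru_to_page(page_list);

        list_del(&page->lru);   /* cf. list_del(&page->lru) in shrink_page_list() */
        return page;
}
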
swap.c
    359  victim = list_entry(pages->prev, struct page, lru);  in put_pages_list()
    360  list_del(&victim->lru);  in put_pages_list()
    453  enum lru_list lru = page_lru_base_type(page);  in pagevec_move_tail_fn() local
    454  list_move_tail(&page->lru, &lruvec->lists[lru]);  in pagevec_move_tail_fn()
    507  int lru = page_lru_base_type(page);  in __activate_page() local
    509  del_page_from_lru_list(page, lruvec, lru);  in __activate_page()
    511  lru += LRU_ACTIVE;  in __activate_page()
    512  add_page_to_lru_list(page, lruvec, lru);  in __activate_page()
    759  int lru, file;  in lru_deactivate_file_fn() local
    774  lru = page_lru_base_type(page);  in lru_deactivate_file_fn()
    [all …]

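These swap.c hits are the two basic LRU movements: __activate_page() removes a page from its base (inactive) list and re-adds it to the matching active list, while pagevec_move_tail_fn() rotates a page to the tail of its current list with list_move_tail(). A sketch of both movements on the stand-ins above; list_add_tail()/list_move_tail() are re-derived here and the two-list lruvec is a deliberately simplified stand-in for the kernel's enum lru_list:

/* Illustrative additions on top of the stand-in helpers and struct my_page above. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->next = head;
        new->prev = head->prev;
        head->prev->next = new;
        head->prev = new;
}

static void list_move_tail(struct list_head *entry, struct list_head *head)
{
        list_del(entry);
        list_add_tail(entry, head);     /* cf. pagevec_move_tail_fn() */
}

/* A deliberately tiny stand-in for enum lru_list / struct lruvec. */
enum my_lru_list { MY_INACTIVE, MY_ACTIVE, MY_NR_LISTS };

struct my_lruvec { struct list_head lists[MY_NR_LISTS]; };

/* cf. __activate_page(): off the base list, onto the active one. */
static void my_activate_page(struct my_page *page, struct my_lruvec *lruvec)
{
        list_del(&page->lru);
        list_add(&page->lru, &lruvec->lists[MY_ACTIVE]);
}

/* cf. pagevec_move_tail_fn(): rotate to the tail of the inactive list. */
static void my_rotate_to_tail(struct my_page *page, struct my_lruvec *lruvec)
{
        list_move_tail(&page->lru, &lruvec->lists[MY_INACTIVE]);
}

The per-list page accounting the kernel keeps in sync alongside these moves (mem_cgroup_update_lru_size() and friends, visible in the memcontrol.c hits below) is omitted from the sketch.
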
zbud.c
    97  struct list_head lru;  member
    116  struct list_head lru;  member
    244  INIT_LIST_HEAD(&zhdr->lru);  in init_zbud_page()
    317  INIT_LIST_HEAD(&pool->lru);  in zbud_create_pool()
    409  if (!list_empty(&zhdr->lru))  in zbud_alloc()
    410  list_del(&zhdr->lru);  in zbud_alloc()
    411  list_add(&zhdr->lru, &pool->lru);  in zbud_alloc()
    454  list_del(&zhdr->lru);  in zbud_free()
    511  if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||  in zbud_reclaim_page()
    517  zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);  in zbud_reclaim_page()
    [all …]

pgtable-generic.c
    162  INIT_LIST_HEAD(&pgtable->lru);  in pgtable_trans_huge_deposit()
    164  list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);  in pgtable_trans_huge_deposit()
    179  if (list_empty(&pgtable->lru))  in pgtable_trans_huge_withdraw()
    182  pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,  in pgtable_trans_huge_withdraw()
    183  struct page, lru);  in pgtable_trans_huge_withdraw()
    184  list_del(&pgtable->lru);  in pgtable_trans_huge_withdraw()

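pgtable_trans_huge_deposit() and pgtable_trans_huge_withdraw() reuse the page's lru links as a small LIFO stack of pre-allocated page tables: deposit pushes with list_add(), withdraw pops the first entry with list_entry(...->next) plus list_del(). The real code anchors that stack at pmd_huge_pte(mm, pmdp); the sketch below uses a plain global head and the stand-in helpers above, purely to show the push/pop shape.

/* Illustrative deposit/withdraw stack, built from the stand-ins above. */
static struct list_head deposited = LIST_HEAD_INIT(deposited);

static void my_deposit(struct my_page *pgtable)
{
        list_add(&pgtable->lru, &deposited);            /* push */
}

static struct my_page *my_withdraw(void)
{
        struct my_page *pgtable;

        if (deposited.next == &deposited)               /* cf. list_empty() */
                return NULL;
        pgtable = list_entry(deposited.next, struct my_page, lru);
        list_del(&pgtable->lru);                        /* pop */
        return pgtable;
}
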
swapfile.c
    906  page = list_entry(page->lru.next, struct page, lru);  in swp_swapcount()
    2847  INIT_LIST_HEAD(&head->lru);  in add_swap_count_continuation()
    2852  list_for_each_entry(list_page, &head->lru, lru) {  in add_swap_count_continuation()
    2874  list_add_tail(&page->lru, &head->lru);  in add_swap_count_continuation()
    2906  page = list_entry(head->lru.next, struct page, lru);  in swap_count_continued()
    2918  page = list_entry(page->lru.next, struct page, lru);  in swap_count_continued()
    2924  page = list_entry(page->lru.next, struct page, lru);  in swap_count_continued()
    2932  page = list_entry(page->lru.prev, struct page, lru);  in swap_count_continued()
    2937  page = list_entry(page->lru.prev, struct page, lru);  in swap_count_continued()
    2948  page = list_entry(page->lru.next, struct page, lru);  in swap_count_continued()
    [all …]

mmzone.c
    90  enum lru_list lru;  in lruvec_init() local
    94  for_each_lru(lru)  in lruvec_init()
    95  INIT_LIST_HEAD(&lruvec->lists[lru]);  in lruvec_init()

zsmalloc.c
    673  list_add_tail(&page->lru, &(*head)->lru);  in insert_zspage()
    694  if (list_empty(&(*head)->lru))  in remove_zspage()
    697  *head = (struct page *)list_entry((*head)->lru.next,  in remove_zspage()
    698  struct page, lru);  in remove_zspage()
    700  list_del_init(&page->lru);  in remove_zspage()
    793  next = list_entry(page->lru.next, struct page, lru);  in get_next_page()
    902  list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {  in free_zspage()
    903  list_del(&nextp->lru);  in free_zspage()
    981  INIT_LIST_HEAD(&page->lru);  in alloc_zspage()
    993  list_add(&page->lru, &prev_page->lru);  in alloc_zspage()

slab.c
    691  list_for_each_entry(page, &n->slabs_full, lru)  in recheck_pfmemalloc_active()
    695  list_for_each_entry(page, &n->slabs_partial, lru)  in recheck_pfmemalloc_active()
    699  list_for_each_entry(page, &n->slabs_free, lru)  in recheck_pfmemalloc_active()
    1388  BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <  in kmem_cache_init()
    1541  list_for_each_entry(page, &n->slabs_full, lru) {  in slab_out_of_memory()
    1545  list_for_each_entry(page, &n->slabs_partial, lru) {  in slab_out_of_memory()
    1549  list_for_each_entry(page, &n->slabs_free, lru)  in slab_out_of_memory()
    1912  list_for_each_entry_safe(page, n, list, lru) {  in slabs_destroy()
    1913  list_del(&page->lru);  in slabs_destroy()
    2387  page = list_entry(p, struct page, lru);  in drain_freelist()
    [all …]

readahead.c
    35  #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
    68  list_del(&victim->lru);  in read_cache_pages_invalidate_pages()
    91  list_del(&page->lru);  in read_cache_pages()
    129  list_del(&page->lru);  in read_pages()
    188  list_add(&page->lru, &page_pool);  in __do_page_cache_readahead()

balloon_compaction.c
    65  list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {  in balloon_page_dequeue()
    116  list_del(&page->lru);  in __isolate_balloon_page()
    128  list_add(&page->lru, &b_dev_info->pages);  in __putback_balloon_page()

compaction.c
    47  list_for_each_entry_safe(page, next, freelist, lru) {  in release_freepages()
    49  list_del(&page->lru);  in release_freepages()
    62  list_for_each_entry(page, list, lru) {  in map_pages()
    485  list_add(&page->lru, freelist);  in isolate_freepages_block()
    628  list_for_each_entry(page, &cc->migratepages, lru)  in acct_isolated()
    819  list_add(&page->lru, migratelist);  in isolate_migratepages_block()
    1064  freepage = list_entry(cc->freepages.next, struct page, lru);  in compaction_alloc()
    1065  list_del(&freepage->lru);  in compaction_alloc()
    1080  list_add(&page->lru, &cc->freepages);  in compaction_free()

page_alloc.c
    589  INIT_LIST_HEAD(&page->lru);  in set_page_guard()
    742  list_del(&buddy->lru);  in __free_one_page()
    794  list_add_tail(&page->lru,  in __free_one_page()
    800  list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);  in __free_one_page()
    887  page = list_entry(list->prev, struct page, lru);  in free_pcppages_bulk()
    889  list_del(&page->lru);  in free_pcppages_bulk()
    960  INIT_LIST_HEAD(&page->lru);  in __init_single_page()
    1018  INIT_LIST_HEAD(&page->lru);  in reserve_bootmem_region()
    1397  list_add(&page[size].lru, &area->free_list[migratetype]);  in expand()
    1496  struct page, lru);  in __rmqueue_smallest()
    [all …]

slob.c
    114  list_add(&sp->lru, list);  in set_slob_page_free()
    120  list_del(&sp->lru);  in clear_slob_page_free()
    285  list_for_each_entry(sp, slob_list, lru) {  in slob_alloc()
    299  prev = sp->lru.prev;  in slob_alloc()
    325  INIT_LIST_HEAD(&sp->lru);  in slob_alloc()

memory-failure.c
    783  #define lru (1UL << PG_lru)  macro
    819  { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
    820  { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
    833  #undef lru
    1702  list_add(&page->lru, &pagelist);  in __soft_offline_page()
    1707  list_del(&page->lru);  in __soft_offline_page()

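memory-failure.c #defines lru as the PG_lru bit (and #undefs it again at line 833) only so that its error-handling table stays readable: each row holds a mask of page-flag bits, the value those bits must have, a message and a handler, and the first matching row decides how the poisoned page is treated. A standalone sketch of that table-dispatch pattern; the bit values, row set and handlers are invented for illustration.

#include <stdio.h>

/* Illustrative page-flag bits; the kernel derives these from enum pageflags. */
#define lru     (1UL << 0)
#define dirty   (1UL << 1)

struct error_action {
        unsigned long mask;     /* which flag bits this row looks at */
        unsigned long res;      /* the value those bits must have    */
        const char *msg;
        int (*action)(unsigned long flags);
};

static int me_pagecache_dirty(unsigned long flags) { (void)flags; return 0; }
static int me_pagecache_clean(unsigned long flags) { (void)flags; return 0; }
static int me_unknown(unsigned long flags)         { (void)flags; return -1; }

/* Order matters: the more specific dirty row sits above the clean one,
 * and the all-zero row at the end catches everything else. */
static const struct error_action error_states[] = {
        { lru|dirty,    lru|dirty,      "dirty LRU page",       me_pagecache_dirty },
        { lru|dirty,    lru,            "clean LRU page",       me_pagecache_clean },
        { 0,            0,              "unknown page state",   me_unknown },
};

#undef lru      /* cf. the #undef at memory-failure.c line 833 */

static int handle_poisoned(unsigned long page_flags)
{
        const struct error_action *p;

        for (p = error_states; ; p++)
                if ((page_flags & p->mask) == p->res) {
                        printf("handling: %s\n", p->msg);
                        return p->action(page_flags);
                }
}

int main(void)
{
        handle_poisoned((1UL << 0) | (1UL << 1));       /* dirty LRU page   */
        handle_poisoned(1UL << 0);                      /* clean LRU page   */
        handle_poisoned(1UL << 1);                      /* falls to catch-all */
        return 0;
}

The real table has many more rows; only the shape of the dispatch is reproduced here.
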
hugetlb.c
    856  list_move(&page->lru, &h->hugepage_freelists[nid]);  in enqueue_huge_page()
    866  list_for_each_entry(page, &h->hugepage_freelists[nid], lru)  in dequeue_huge_page_node()
    873  if (&h->hugepage_freelists[nid] == &page->lru)  in dequeue_huge_page_node()
    875  list_move(&page->lru, &h->hugepage_activelist);  in dequeue_huge_page_node()
    1269  list_del(&page->lru);  in free_huge_page()
    1282  INIT_LIST_HEAD(&page->lru);  in prep_new_huge_page()
    1424  struct page, lru);  in free_pool_huge_page()
    1425  list_del(&page->lru);  in free_pool_huge_page()
    1473  list_del(&head->lru);  in dissolve_free_huge_page()
    1627  INIT_LIST_HEAD(&page->lru);  in __alloc_buddy_huge_page()
    [all …]

workingset.c
    305  struct list_lru_one *lru,  in shadow_lru_isolate() argument
    336  list_lru_isolate(lru, item);  in shadow_lru_isolate()

migrate.c
    89  list_for_each_entry_safe(page, page2, l, lru) {  in putback_movable_pages()
    94  list_del(&page->lru);  in putback_movable_pages()
    966  list_del(&page->lru);  in unmap_and_move()
    1164  list_for_each_entry_safe(page, page2, from, lru) {  in migrate_pages()
    1306  list_add_tail(&page->lru, &pagelist);  in do_move_page_to_node_array()
    1720  list_add(&page->lru, &migratepages);  in migrate_misplaced_page()
    1726  list_del(&page->lru);  in migrate_misplaced_page()

slub.c
    1015  list_add(&page->lru, &n->full);  in add_full()
    1024  list_del(&page->lru);  in remove_full()
    1584  (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
    1593  page = container_of((struct list_head *)h, struct page, lru);  in rcu_free_slab()
    1632  list_add_tail(&page->lru, &n->partial);  in __add_partial()
    1634  list_add(&page->lru, &n->partial);  in __add_partial()
    1647  list_del(&page->lru);  in __remove_partial()
    1728  list_for_each_entry_safe(page, page2, &n->partial, lru) {  in get_partial_node()
    2261  list_for_each_entry(page, &n->partial, lru)  in count_partial()
    3547  list_for_each_entry_safe(page, h, &n->partial, lru) {  in free_partial()
    [all …]

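The slub.c hit at line 1584 is a compile-time guard: when a cache frees its slabs via RCU, SLUB overlays the RCU callback on the page's otherwise unused lru links, so it must prove that sizeof(page->lru) is at least sizeof(struct rcu_head); rcu_free_slab() at 1593 then uses container_of() to get from that embedded list_head back to the page. A standalone C11 sketch of both tricks, with _Static_assert standing in for BUILD_BUG_ON and fake_page/fake_rcu_head as illustrative types.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct fake_rcu_head { void *next; void (*func)(struct fake_rcu_head *); };

struct fake_page {
        unsigned long flags;
        struct list_head lru;           /* storage reused for the RCU callback */
};

/* cf. BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) */
_Static_assert(sizeof(((struct fake_page *)NULL)->lru) >=
               sizeof(struct fake_rcu_head),
               "lru links too small to hold an rcu_head");

/* cf. container_of() as used by rcu_free_slab() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void fake_rcu_free(struct fake_rcu_head *h)
{
        struct fake_page *page =
                container_of((struct list_head *)h, struct fake_page, lru);

        printf("freeing page with flags %lu\n", page->flags);
}

int main(void)
{
        struct fake_page page = { .flags = 42 };

        /* Pretend the RCU grace period has elapsed and the callback runs. */
        fake_rcu_free((struct fake_rcu_head *)&page.lru);
        return 0;
}
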
hugetlb_cgroup.c
    155  list_for_each_entry(page, &h->hugepage_activelist, lru)  in hugetlb_cgroup_css_offline()
    414  list_move(&newhpage->lru, &h->hugepage_activelist);  in hugetlb_cgroup_migrate()

memory_hotplug.c
    162  page->lru.next = (struct list_head *) type;  in get_page_bootmem()
    172  type = (unsigned long) page->lru.next;  in put_page_bootmem()
    179  INIT_LIST_HEAD(&page->lru);  in put_page_bootmem()
    1503  list_add_tail(&page->lru, &source);  in do_migrate_range()

memcontrol.c
    725  enum lru_list lru;  in mem_cgroup_node_nr_lru_pages() local
    727  for_each_lru(lru) {  in mem_cgroup_node_nr_lru_pages()
    728  if (!(BIT(lru) & lru_mask))  in mem_cgroup_node_nr_lru_pages()
    731  nr += mz->lru_size[lru];  in mem_cgroup_node_nr_lru_pages()
    1130  void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,  in mem_cgroup_update_lru_size() argument
    1140  lru_size = mz->lru_size + lru;  in mem_cgroup_update_lru_size()
    5600  page = list_entry(next, struct page, lru);  in uncharge_list()
    5601  next = page->lru.next;  in uncharge_list()
    5661  INIT_LIST_HEAD(&page->lru);  in mem_cgroup_uncharge()
    5662  uncharge_list(&page->lru);  in mem_cgroup_uncharge()

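mem_cgroup_node_nr_lru_pages() walks every LRU list type with for_each_lru() and adds up only the lists selected by the caller's bitmask, via BIT(lru) & lru_mask. A standalone sketch of that mask-filtered sum; the enum values and the sizes are invented for illustration.

#include <stdio.h>

#define BIT(nr) (1UL << (nr))

/* Illustrative subset of the kernel's enum lru_list. */
enum lru_list {
        LRU_INACTIVE_ANON,
        LRU_ACTIVE_ANON,
        LRU_INACTIVE_FILE,
        LRU_ACTIVE_FILE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};

#define for_each_lru(lru)  for (lru = 0; lru < NR_LRU_LISTS; lru++)

/* cf. mem_cgroup_node_nr_lru_pages(): sum only the lists in lru_mask. */
static unsigned long nr_lru_pages(const unsigned long lru_size[NR_LRU_LISTS],
                                  unsigned int lru_mask)
{
        unsigned long nr = 0;
        enum lru_list lru;

        for_each_lru(lru) {
                if (!(BIT(lru) & lru_mask))
                        continue;
                nr += lru_size[lru];
        }
        return nr;
}

int main(void)
{
        unsigned long sizes[NR_LRU_LISTS] = { 10, 20, 30, 40, 5 };
        unsigned int file_mask = BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE);

        printf("file pages: %lu\n", nr_lru_pages(sizes, file_mask));    /* 70 */
        return 0;
}
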
sparse.c
    666  magic = (unsigned long) page->lru.next;  in free_map_bootmem()

mempolicy.c
    932  list_add_tail(&page->lru, pagelist);  in migrate_page_add()