Lines Matching refs:page — cross-reference hits in mm/swap.c (an Android common kernel tree, judging by the trace_android_vh_* hook). Each hit shows the source line number, the matching code, the enclosing function, and whether the symbol is an argument or a local.

81 static void __page_cache_release(struct page *page)  in __page_cache_release()  argument
83 if (PageLRU(page)) { in __page_cache_release()
84 pg_data_t *pgdat = page_pgdat(page); in __page_cache_release()
89 lruvec = mem_cgroup_page_lruvec(page, pgdat); in __page_cache_release()
90 VM_BUG_ON_PAGE(!PageLRU(page), page); in __page_cache_release()
91 __ClearPageLRU(page); in __page_cache_release()
92 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
95 __ClearPageWaiters(page); in __page_cache_release()
98 static void __put_single_page(struct page *page) in __put_single_page() argument
100 __page_cache_release(page); in __put_single_page()
101 mem_cgroup_uncharge(page); in __put_single_page()
102 free_unref_page(page); in __put_single_page()
105 static void __put_compound_page(struct page *page) in __put_compound_page() argument
113 if (!PageHuge(page)) in __put_compound_page()
114 __page_cache_release(page); in __put_compound_page()
115 destroy_compound_page(page); in __put_compound_page()
118 void __put_page(struct page *page) in __put_page() argument
120 if (is_zone_device_page(page)) { in __put_page()
121 put_dev_pagemap(page->pgmap); in __put_page()
130 if (unlikely(PageCompound(page))) in __put_page()
131 __put_compound_page(page); in __put_page()
133 __put_single_page(page); in __put_page()
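The three helpers above are the tail of the page refcount release path: put_page() (include/linux/mm.h) calls __put_page() once the refcount reaches zero, and __put_page() dispatches to __put_compound_page() or __put_single_page(). A minimal sketch of driving that path from kernel code; demo_put_page_path() is hypothetical, while alloc_page(), get_page() and put_page() are the real APIs:

#include <linux/gfp.h>
#include <linux/mm.h>

static void demo_put_page_path(void)
{
        struct page *page = alloc_page(GFP_KERNEL);     /* refcount == 1 */

        if (!page)
                return;

        get_page(page);         /* refcount == 2 */
        put_page(page);         /* refcount == 1, nothing freed */
        put_page(page);         /* refcount == 0: put_page() calls __put_page(),
                                 * which here takes the __put_single_page() leg
                                 * and frees the page via free_unref_page() */
}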
147 struct page *victim; in put_pages_list()
170 struct page **pages) in get_kernel_pages()
197 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page()
209 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), in pagevec_lru_move_fn() argument
218 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn() local
219 struct pglist_data *pagepgdat = page_pgdat(page); in pagevec_lru_move_fn()
228 lruvec = mem_cgroup_page_lruvec(page, pgdat); in pagevec_lru_move_fn()
229 (*move_fn)(page, lruvec, arg); in pagevec_lru_move_fn()
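pagevec_lru_move_fn() is the engine behind the per-CPU LRU caches that follow: pages are buffered in a small fixed-size vector so the pgdat lru_lock is taken once per batch rather than once per page. The same container serves ordinary page-cache walkers; a sketch of the general pattern, assuming a file's address_space (demo_pagevec_walk() is hypothetical):

#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>

static void demo_pagevec_walk(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t index = 0;
        unsigned int i;

        pagevec_init(&pvec);
        while (pagevec_lookup(&pvec, mapping, &index)) {
                for (i = 0; i < pagevec_count(&pvec); i++)
                        mark_page_accessed(pvec.pages[i]);
                pagevec_release(&pvec); /* drops all refs via release_pages() */
                cond_resched();
        }
}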
237 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, in pagevec_move_tail_fn() argument
242 if (PageLRU(page) && !PageUnevictable(page)) { in pagevec_move_tail_fn()
243 del_page_from_lru_list(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
244 ClearPageActive(page); in pagevec_move_tail_fn()
245 add_page_to_lru_list_tail(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
246 (*pgmoved) += thp_nr_pages(page); in pagevec_move_tail_fn()
263 static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page) in pagevec_add_and_need_flush() argument
267 if (!pagevec_add(pvec, page) || PageCompound(page) || in pagevec_add_and_need_flush()
279 void rotate_reclaimable_page(struct page *page) in rotate_reclaimable_page() argument
281 if (!PageLocked(page) && !PageDirty(page) && in rotate_reclaimable_page()
282 !PageUnevictable(page) && PageLRU(page)) { in rotate_reclaimable_page()
286 get_page(page); in rotate_reclaimable_page()
289 if (pagevec_add_and_need_flush(pvec, page)) in rotate_reclaimable_page()
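rotate_reclaimable_page() is driven from writeback completion: once I/O finishes on a page that reclaim tagged with PG_reclaim, the page is moved to the tail of the inactive list so it can be freed promptly. Its caller, paraphrased:

/* paraphrased from mm/filemap.c:end_page_writeback() */
if (PageReclaim(page)) {
        ClearPageReclaim(page);
        rotate_reclaimable_page(page);
}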
326 void lru_note_cost_page(struct page *page) in lru_note_cost_page() argument
328 lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)), in lru_note_cost_page()
329 page_is_file_lru(page), thp_nr_pages(page)); in lru_note_cost_page()
332 static void __activate_page(struct page *page, struct lruvec *lruvec, in __activate_page() argument
335 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in __activate_page()
336 int lru = page_lru_base_type(page); in __activate_page()
337 int nr_pages = thp_nr_pages(page); in __activate_page()
339 del_page_from_lru_list(page, lruvec, lru); in __activate_page()
340 SetPageActive(page); in __activate_page()
342 add_page_to_lru_list(page, lruvec, lru); in __activate_page()
343 trace_mm_lru_activate(page); in __activate_page()
365 static void activate_page(struct page *page) in activate_page() argument
367 page = compound_head(page); in activate_page()
368 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in activate_page()
373 get_page(page); in activate_page()
374 if (pagevec_add_and_need_flush(pvec, page)) in activate_page()
385 static void activate_page(struct page *page) in activate_page() argument
387 pg_data_t *pgdat = page_pgdat(page); in activate_page()
389 page = compound_head(page); in activate_page()
391 __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL); in activate_page()
396 static void __lru_cache_activate_page(struct page *page) in __lru_cache_activate_page() argument
415 struct page *pagevec_page = pvec->pages[i]; in __lru_cache_activate_page()
417 if (pagevec_page == page) { in __lru_cache_activate_page()
418 SetPageActive(page); in __lru_cache_activate_page()
436 void mark_page_accessed(struct page *page) in mark_page_accessed() argument
438 page = compound_head(page); in mark_page_accessed()
440 trace_android_vh_mark_page_accessed(page); in mark_page_accessed()
441 if (!PageReferenced(page)) { in mark_page_accessed()
442 SetPageReferenced(page); in mark_page_accessed()
443 } else if (PageUnevictable(page)) { in mark_page_accessed()
449 } else if (!PageActive(page)) { in mark_page_accessed()
456 if (PageLRU(page)) in mark_page_accessed()
457 activate_page(page); in mark_page_accessed()
459 __lru_cache_activate_page(page); in mark_page_accessed()
460 ClearPageReferenced(page); in mark_page_accessed()
461 workingset_activation(page); in mark_page_accessed()
463 if (page_is_idle(page)) in mark_page_accessed()
464 clear_page_idle(page); in mark_page_accessed()
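mark_page_accessed() implements the two-touch promotion policy: the first access only sets PG_referenced, the second activates the page (or, if the page is still sitting in a per-CPU pagevec, falls back to __lru_cache_activate_page()). A sketch of the effect, assuming a page already present in some mapping (demo_two_touch() is hypothetical):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

static void demo_two_touch(struct address_space *mapping, pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);

        if (!page)
                return;

        mark_page_accessed(page);       /* 1st touch: SetPageReferenced() */
        mark_page_accessed(page);       /* 2nd touch: activate_page() moves it
                                         * to the active LRU */
        put_page(page);
}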
477 void lru_cache_add(struct page *page) in lru_cache_add() argument
481 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); in lru_cache_add()
482 VM_BUG_ON_PAGE(PageLRU(page), page); in lru_cache_add()
484 get_page(page); in lru_cache_add()
487 if (pagevec_add_and_need_flush(pvec, page)) in lru_cache_add()
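lru_cache_add() is how new pages reach the LRU at all; the stock page-cache caller is add_to_page_cache_lru() in mm/filemap.c, which inserts the page into the mapping's xarray and then queues it on the per-CPU lru_add pagevec. A condensed caller sketch; demo_new_cache_page() is hypothetical and the plain GFP_KERNEL mask is a simplification (real callers usually derive the mask from the mapping):

#include <linux/gfp.h>
#include <linux/pagemap.h>

static struct page *demo_new_cache_page(struct address_space *mapping,
                                        pgoff_t index)
{
        struct page *page = page_cache_alloc(mapping);

        if (!page)
                return NULL;

        /* inserts into the xarray, then calls lru_cache_add() */
        if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
                put_page(page);
                return NULL;
        }
        return page;            /* returned locked and on the LRU */
}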
501 void __lru_cache_add_inactive_or_unevictable(struct page *page, in __lru_cache_add_inactive_or_unevictable() argument
506 VM_BUG_ON_PAGE(PageLRU(page), page); in __lru_cache_add_inactive_or_unevictable()
509 if (unlikely(unevictable) && !TestSetPageMlocked(page)) { in __lru_cache_add_inactive_or_unevictable()
510 int nr_pages = thp_nr_pages(page); in __lru_cache_add_inactive_or_unevictable()
516 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages); in __lru_cache_add_inactive_or_unevictable()
519 lru_cache_add(page); in __lru_cache_add_inactive_or_unevictable()
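Its public wrapper, lru_cache_add_inactive_or_unevictable(), is used by the fault paths: a newly faulted anonymous page goes to the inactive list, or is accounted as mlocked and left unevictable when the VMA is VM_LOCKED. Paraphrased caller:

/* paraphrased from mm/memory.c:do_anonymous_page() */
page_add_new_anon_rmap(page, vma, vmf->address, false);
lru_cache_add_inactive_or_unevictable(page, vma);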
543 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, in lru_deactivate_file_fn() argument
548 int nr_pages = thp_nr_pages(page); in lru_deactivate_file_fn()
550 if (!PageLRU(page)) in lru_deactivate_file_fn()
553 if (PageUnevictable(page)) in lru_deactivate_file_fn()
557 if (page_mapped(page)) in lru_deactivate_file_fn()
560 active = PageActive(page); in lru_deactivate_file_fn()
561 lru = page_lru_base_type(page); in lru_deactivate_file_fn()
563 del_page_from_lru_list(page, lruvec, lru + active); in lru_deactivate_file_fn()
564 ClearPageActive(page); in lru_deactivate_file_fn()
565 ClearPageReferenced(page); in lru_deactivate_file_fn()
567 if (PageWriteback(page) || PageDirty(page)) { in lru_deactivate_file_fn()
573 add_page_to_lru_list(page, lruvec, lru); in lru_deactivate_file_fn()
574 SetPageReclaim(page); in lru_deactivate_file_fn()
580 add_page_to_lru_list_tail(page, lruvec, lru); in lru_deactivate_file_fn()
591 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, in lru_deactivate_fn() argument
594 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { in lru_deactivate_fn()
595 int lru = page_lru_base_type(page); in lru_deactivate_fn()
596 int nr_pages = thp_nr_pages(page); in lru_deactivate_fn()
598 del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE); in lru_deactivate_fn()
599 ClearPageActive(page); in lru_deactivate_fn()
600 ClearPageReferenced(page); in lru_deactivate_fn()
601 add_page_to_lru_list(page, lruvec, lru); in lru_deactivate_fn()
609 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, in lru_lazyfree_fn() argument
612 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in lru_lazyfree_fn()
613 !PageSwapCache(page) && !PageUnevictable(page)) { in lru_lazyfree_fn()
614 bool active = PageActive(page); in lru_lazyfree_fn()
615 int nr_pages = thp_nr_pages(page); in lru_lazyfree_fn()
617 del_page_from_lru_list(page, lruvec, in lru_lazyfree_fn()
619 ClearPageActive(page); in lru_lazyfree_fn()
620 ClearPageReferenced(page); in lru_lazyfree_fn()
626 ClearPageSwapBacked(page); in lru_lazyfree_fn()
627 add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); in lru_lazyfree_fn()
635 static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec, in lru_lazyfree_movetail_fn() argument
640 if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) && in lru_lazyfree_movetail_fn()
641 !PageSwapCache(page)) { in lru_lazyfree_movetail_fn()
642 bool active = PageActive(page); in lru_lazyfree_movetail_fn()
644 del_page_from_lru_list(page, lruvec, in lru_lazyfree_movetail_fn()
646 ClearPageActive(page); in lru_lazyfree_movetail_fn()
647 ClearPageReferenced(page); in lru_lazyfree_movetail_fn()
649 add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE); in lru_lazyfree_movetail_fn()
651 add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); in lru_lazyfree_movetail_fn()
705 void deactivate_file_page(struct page *page) in deactivate_file_page() argument
711 if (PageUnevictable(page)) in deactivate_file_page()
714 if (likely(get_page_unless_zero(page))) { in deactivate_file_page()
720 if (pagevec_add_and_need_flush(pvec, page)) in deactivate_file_page()
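deactivate_file_page() is called from invalidate_mapping_pages() in mm/truncate.c for clean pages that could not be dropped outright (e.g. still mapped or under I/O); deactivating them at least makes them the first reclaim candidates. Paraphrased caller:

/* paraphrased from mm/truncate.c:invalidate_mapping_pages() */
ret = invalidate_inode_page(page);
if (!ret)
        deactivate_file_page(page);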
734 void deactivate_page(struct page *page) in deactivate_page() argument
736 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { in deactivate_page()
741 get_page(page); in deactivate_page()
742 if (pagevec_add_and_need_flush(pvec, page)) in deactivate_page()
755 void mark_page_lazyfree(struct page *page) in mark_page_lazyfree() argument
757 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in mark_page_lazyfree()
758 !PageSwapCache(page) && !PageUnevictable(page)) { in mark_page_lazyfree()
763 get_page(page); in mark_page_lazyfree()
764 if (pagevec_add_and_need_flush(pvec, page)) in mark_page_lazyfree()
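deactivate_page() and mark_page_lazyfree() are the back ends of madvise(MADV_COLD) and madvise(MADV_FREE), reached through madvise_cold_or_pageout_pte_range() and madvise_free_pte_range() in mm/madvise.c. The userspace trigger, as a minimal sketch:

/* userspace: the syscalls that ultimately reach deactivate_page()
 * and mark_page_lazyfree() */
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 1 << 20;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;
        buf[0] = 1;                     /* fault in and dirty a page */
        madvise(buf, len, MADV_COLD);   /* -> deactivate_page() */
        madvise(buf, len, MADV_FREE);   /* -> mark_page_lazyfree() */
        return 0;
}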
777 void mark_page_lazyfree_movetail(struct page *page, bool tail) in mark_page_lazyfree_movetail() argument
779 if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) && in mark_page_lazyfree_movetail()
780 !PageSwapCache(page)) { in mark_page_lazyfree_movetail()
785 get_page(page); in mark_page_lazyfree_movetail()
786 if (pagevec_add_and_need_flush(pvec, page)) in mark_page_lazyfree_movetail()
1000 void release_pages(struct page **pages, int nr) in release_pages()
1010 struct page *page = pages[i]; in release_pages() local
1022 page = compound_head(page); in release_pages()
1023 if (is_huge_zero_page(page)) in release_pages()
1026 if (is_zone_device_page(page)) { in release_pages()
1038 if (page_is_devmap_managed(page)) { in release_pages()
1039 put_devmap_managed_page(page); in release_pages()
1044 if (!put_page_testzero(page)) in release_pages()
1047 if (PageCompound(page)) { in release_pages()
1052 __put_compound_page(page); in release_pages()
1056 if (PageLRU(page)) { in release_pages()
1057 struct pglist_data *pgdat = page_pgdat(page); in release_pages()
1068 lruvec = mem_cgroup_page_lruvec(page, locked_pgdat); in release_pages()
1069 VM_BUG_ON_PAGE(!PageLRU(page), page); in release_pages()
1070 __ClearPageLRU(page); in release_pages()
1071 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in release_pages()
1074 __ClearPageWaiters(page); in release_pages()
1076 list_add(&page->lru, &pages_to_free); in release_pages()
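release_pages() is the batched counterpart of put_page(): the LRU lock is taken once per run of same-node pages, and pages whose refcount hit zero are collected on pages_to_free and freed in one free_unref_page_list() call. A sketch of a typical external user, assuming pages pinned with get_user_pages_fast() (demo_pin_then_release() is hypothetical):

#include <linux/mm.h>
#include <linux/slab.h>

static int demo_pin_then_release(unsigned long uaddr, int nr)
{
        struct page **pages;
        int pinned;

        pages = kmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = get_user_pages_fast(uaddr, nr, 0, pages);
        if (pinned > 0)
                release_pages(pages, pinned);   /* one batched drop instead of
                                                 * 'pinned' put_page() calls */
        kfree(pages);
        return pinned < 0 ? pinned : 0;
}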
1109 void lru_add_page_tail(struct page *page, struct page *page_tail, in lru_add_page_tail() argument
1112 VM_BUG_ON_PAGE(!PageHead(page), page); in lru_add_page_tail()
1113 VM_BUG_ON_PAGE(PageCompound(page_tail), page); in lru_add_page_tail()
1114 VM_BUG_ON_PAGE(PageLRU(page_tail), page); in lru_add_page_tail()
1120 if (likely(PageLRU(page))) in lru_add_page_tail()
1121 list_add_tail(&page_tail->lru, &page->lru); in lru_add_page_tail()
1140 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, in __pagevec_lru_add_fn() argument
1144 int was_unevictable = TestClearPageUnevictable(page); in __pagevec_lru_add_fn()
1145 int nr_pages = thp_nr_pages(page); in __pagevec_lru_add_fn()
1147 VM_BUG_ON_PAGE(PageLRU(page), page); in __pagevec_lru_add_fn()
1175 SetPageLRU(page); in __pagevec_lru_add_fn()
1178 if (page_evictable(page)) { in __pagevec_lru_add_fn()
1179 lru = page_lru(page); in __pagevec_lru_add_fn()
1184 ClearPageActive(page); in __pagevec_lru_add_fn()
1185 SetPageUnevictable(page); in __pagevec_lru_add_fn()
1190 add_page_to_lru_list(page, lruvec, lru); in __pagevec_lru_add_fn()
1191 trace_mm_lru_insertion(page, lru); in __pagevec_lru_add_fn()
1251 struct page *page = pvec->pages[i]; in pagevec_remove_exceptionals() local
1252 if (!xa_is_value(page)) in pagevec_remove_exceptionals()
1253 pvec->pages[j++] = page; in pagevec_remove_exceptionals()
1325 void put_devmap_managed_page(struct page *page) in put_devmap_managed_page() argument
1329 if (WARN_ON_ONCE(!page_is_devmap_managed(page))) in put_devmap_managed_page()
1332 count = page_ref_dec_return(page); in put_devmap_managed_page()
1340 free_devmap_managed_page(page); in put_devmap_managed_page()
1342 __put_page(page); in put_devmap_managed_page()
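Device-managed pages (DAX, device-private memory) idle at refcount 1 rather than 0, because the owning pgmap keeps the final reference; put_devmap_managed_page() therefore frees when the count drops to 1 and only falls back to the regular __put_page() when it actually reaches 0. The control flow between the matched lines, paraphrased:

/* paraphrased body of put_devmap_managed_page() */
count = page_ref_dec_return(page);
if (count == 1)
        free_devmap_managed_page(page); /* devmap pages are free at 1 */
else if (!count)
        __put_page(page);               /* normal final put */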