Lines Matching full:page
44 /* How many pages do we try to swap or page in/out together? */
59 static void __page_cache_release(struct page *page) in __page_cache_release() argument
61 if (PageLRU(page)) { in __page_cache_release()
62 struct zone *zone = page_zone(page); in __page_cache_release()
67 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); in __page_cache_release()
68 VM_BUG_ON_PAGE(!PageLRU(page), page); in __page_cache_release()
69 __ClearPageLRU(page); in __page_cache_release()
70 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
73 __ClearPageWaiters(page); in __page_cache_release()
74 mem_cgroup_uncharge(page); in __page_cache_release()
77 static void __put_single_page(struct page *page) in __put_single_page() argument
79 __page_cache_release(page); in __put_single_page()
80 free_unref_page(page); in __put_single_page()
83 static void __put_compound_page(struct page *page) in __put_compound_page() argument
89 * hugetlb. This is because a hugetlb page never has PageLRU set in __put_compound_page()
93 if (!PageHuge(page)) in __put_compound_page()
94 __page_cache_release(page); in __put_compound_page()
95 dtor = get_compound_page_dtor(page); in __put_compound_page()
96 (*dtor)(page); in __put_compound_page()
99 void __put_page(struct page *page) in __put_page() argument
101 if (is_zone_device_page(page)) { in __put_page()
102 put_dev_pagemap(page->pgmap); in __put_page()
105 * The page belongs to the device that created pgmap. Do in __put_page()
106 * not return it to the page allocator. in __put_page()
111 if (unlikely(PageCompound(page))) in __put_page()
112 __put_compound_page(page); in __put_page()
114 __put_single_page(page); in __put_page()
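For orientation, put_page() (the usual entry point) only reaches __put_page() once the reference count drops to zero. A minimal caller-side sketch, assuming kernel context; the refcount comments describe the common case:

	struct page *page = alloc_page(GFP_KERNEL);

	if (page) {			/* refcount is 1 after allocation */
		get_page(page);		/* refcount: 2 */
		put_page(page);		/* refcount: 1, nothing released yet */
		put_page(page);		/* refcount: 0 -> __put_page() runs */
	}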
120 * @pages: list of pages threaded on page->lru
122 * Release a list of pages which are strung together on page->lru. Currently
123 * used by read_cache_pages() and related error recovery code.
128 struct page *victim; in put_pages_list()
130 victim = list_entry(pages->prev, struct page, lru); in put_pages_list()
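A sketch of the caller side, assuming kernel context: the pages are threaded on page->lru exactly as the comment above describes, and put_pages_list() drops one reference on each.

	LIST_HEAD(pages);
	int i;

	for (i = 0; i < 4; i++) {
		struct page *p = alloc_page(GFP_KERNEL);

		if (p)
			list_add(&p->lru, &pages);	/* thread on page->lru */
	}
	put_pages_list(&pages);		/* releases every page on the list */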
147 * were pinned, returns -errno. Each page returned must be released
151 struct page **pages) in get_kernel_pages()
168 * get_kernel_page() - pin a kernel page in memory
171 * @pages: array that receives pointer to the page pinned.
174 * Returns 1 if the page is pinned. If the page was not pinned, returns
175 * -errno. The page returned must be released with a put_page() call
178 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page()
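A usage sketch, assuming kernel context and a directly mapped buffer (kmalloc memory has a valid struct page; the buffer itself is hypothetical):

	struct page *pages[1];
	char *buf = kmalloc(64, GFP_KERNEL);	/* hypothetical buffer */

	if (buf && get_kernel_page((unsigned long)buf, 0, pages) == 1) {
		/* pages[0] now carries an extra reference */
		put_page(pages[0]);	/* release it, per the comment above */
	}
	kfree(buf);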
190 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), in pagevec_lru_move_fn() argument
199 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn() local
200 struct pglist_data *pagepgdat = page_pgdat(page); in pagevec_lru_move_fn()
209 lruvec = mem_cgroup_page_lruvec(page, pgdat); in pagevec_lru_move_fn()
210 (*move_fn)(page, lruvec, arg); in pagevec_lru_move_fn()
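pagevec_lru_move_fn() is the drain side of the batching idiom used throughout this file: pages accumulate in a small per-CPU vector and the whole batch is processed under one lock acquisition. Below is a standalone model of that idiom in plain C, not kernel code; the names (pagevec_model, add, drain) are hypothetical, and only PAGEVEC_SIZE matches the kernel's batch size.

#include <stdio.h>

#define PAGEVEC_SIZE 15			/* the kernel's batch size */

struct pagevec_model {
	unsigned int nr;
	void *items[PAGEVEC_SIZE];
};

/* process the whole batch under a single (conceptual) lock */
static void drain(struct pagevec_model *pvec)
{
	printf("draining %u items under one lock acquisition\n", pvec->nr);
	pvec->nr = 0;
}

/* mirrors pagevec_add(): returns the space left after adding */
static int add(struct pagevec_model *pvec, void *item)
{
	pvec->items[pvec->nr++] = item;
	return PAGEVEC_SIZE - pvec->nr;
}

int main(void)
{
	struct pagevec_model pvec = { .nr = 0 };
	int i;

	for (i = 0; i < 40; i++)
		if (!add(&pvec, NULL))	/* vector full: drain the batch */
			drain(&pvec);
	if (pvec.nr)
		drain(&pvec);		/* final partial batch */
	return 0;
}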
218 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, in pagevec_move_tail_fn() argument
223 if (PageLRU(page) && !PageUnevictable(page)) { in pagevec_move_tail_fn()
224 del_page_from_lru_list(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
225 ClearPageActive(page); in pagevec_move_tail_fn()
226 add_page_to_lru_list_tail(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
244 * Writeback is about to end against a page which has been marked for immediate
245 * reclaim. If it still appears to be reclaimable, move it to the tail of the
246 * inactive list.
248 void rotate_reclaimable_page(struct page *page) in rotate_reclaimable_page() argument
250 if (!PageLocked(page) && !PageDirty(page) && in rotate_reclaimable_page()
251 !PageUnevictable(page) && PageLRU(page)) { in rotate_reclaimable_page()
255 get_page(page); in rotate_reclaimable_page()
258 if (!pagevec_add(pvec, page) || PageCompound(page)) in rotate_reclaimable_page()
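The canonical caller is writeback completion; a minimal sketch of that pattern, assuming kernel context (this mirrors what end_page_writeback() does with a PG_reclaim page):

	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);	/* to the inactive tail */
	}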
274 static void __activate_page(struct page *page, struct lruvec *lruvec, in __activate_page() argument
277 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in __activate_page()
278 int file = page_is_file_cache(page); in __activate_page()
279 int lru = page_lru_base_type(page); in __activate_page()
281 del_page_from_lru_list(page, lruvec, lru); in __activate_page()
282 SetPageActive(page); in __activate_page()
284 add_page_to_lru_list(page, lruvec, lru); in __activate_page()
285 trace_mm_lru_activate(page); in __activate_page()
306 void activate_page(struct page *page) in activate_page() argument
308 page = compound_head(page); in activate_page()
309 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in activate_page()
312 get_page(page); in activate_page()
313 if (!pagevec_add(pvec, page) || PageCompound(page)) in activate_page()
324 void activate_page(struct page *page) in activate_page() argument
326 struct zone *zone = page_zone(page); in activate_page()
328 page = compound_head(page); in activate_page()
330 __activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL); in activate_page()
335 static void __lru_cache_activate_page(struct page *page) in __lru_cache_activate_page() argument
341 * Search backwards on the optimistic assumption that the page being in __lru_cache_activate_page()
342 * activated has just been added to this pagevec. Note that only in __lru_cache_activate_page()
343 * the local pagevec is examined as a !PageLRU page could be in the in __lru_cache_activate_page()
344 * process of being released, reclaimed, migrated or on a remote in __lru_cache_activate_page()
345 * pagevec that is currently being drained. Furthermore, marking in __lru_cache_activate_page()
346 * a remote pagevec's page PageActive potentially hits a race where in __lru_cache_activate_page()
347 * a page is marked PageActive just after it is added to the inactive in __lru_cache_activate_page()
348 * list, causing accounting errors and BUG_ON checks to trigger. in __lru_cache_activate_page()
351 struct page *pagevec_page = pvec->pages[i]; in __lru_cache_activate_page()
353 if (pagevec_page == page) { in __lru_cache_activate_page()
354 SetPageActive(page); in __lru_cache_activate_page()
363 * Mark a page as having seen activity.
369 * When a newly allocated page is not yet visible (and hence safe for non-atomic ops),
370 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
372 void mark_page_accessed(struct page *page) in mark_page_accessed() argument
374 page = compound_head(page); in mark_page_accessed()
375 if (!PageActive(page) && !PageUnevictable(page) && in mark_page_accessed()
376 PageReferenced(page)) { in mark_page_accessed()
379 * If the page is on the LRU, queue it for activation via in mark_page_accessed()
380 * activate_page_pvecs. Otherwise, assume the page is on a in mark_page_accessed()
381 * pagevec, mark it active and it'll be moved to the active in mark_page_accessed()
382 * LRU on the next drain. in mark_page_accessed()
384 if (PageLRU(page)) in mark_page_accessed()
385 activate_page(page); in mark_page_accessed()
387 __lru_cache_activate_page(page); in mark_page_accessed()
388 ClearPageReferenced(page); in mark_page_accessed()
389 if (page_is_file_cache(page)) in mark_page_accessed()
390 workingset_activation(page); in mark_page_accessed()
391 } else if (!PageReferenced(page)) { in mark_page_accessed()
392 SetPageReferenced(page); in mark_page_accessed()
394 if (page_is_idle(page)) in mark_page_accessed()
395 clear_page_idle(page); in mark_page_accessed()
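A usage sketch, assuming kernel context with a file's mapping and index at hand (both hypothetical here): the first call sets PG_referenced, and a second call promotes the page toward the active list, matching the state machine above.

	struct page *page = find_get_page(mapping, index);	/* mapping, index: hypothetical */

	if (page) {
		mark_page_accessed(page);	/* sets PG_referenced */
		mark_page_accessed(page);	/* referenced -> activation path */
		put_page(page);
	}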
399 static void __lru_cache_add(struct page *page) in __lru_cache_add() argument
403 get_page(page); in __lru_cache_add()
404 if (!pagevec_add(pvec, page) || PageCompound(page)) in __lru_cache_add()
410 * lru_cache_add_anon - add a page to the LRU lists
411 * @page: the page to add
413 void lru_cache_add_anon(struct page *page) in lru_cache_add_anon() argument
415 if (PageActive(page)) in lru_cache_add_anon()
416 ClearPageActive(page); in lru_cache_add_anon()
417 __lru_cache_add(page); in lru_cache_add_anon()
420 void lru_cache_add_file(struct page *page) in lru_cache_add_file() argument
422 if (PageActive(page)) in lru_cache_add_file()
423 ClearPageActive(page); in lru_cache_add_file()
424 __lru_cache_add(page); in lru_cache_add_file()
429 * lru_cache_add - add a page to the LRU lists
430 * @page: the page to be added to the LRU.
432 * Queue the page for addition to the LRU via pagevec. The decision on whether
433 * to add the page to the [in]active [file|anon] list is deferred until the
434 * pagevec is drained. This gives the caller of lru_cache_add() a chance to
435 * have the page added to the active list using mark_page_accessed().
437 void lru_cache_add(struct page *page) in lru_cache_add() argument
439 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); in lru_cache_add()
440 VM_BUG_ON_PAGE(PageLRU(page), page); in lru_cache_add()
441 __lru_cache_add(page); in lru_cache_add()
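A minimal sketch, assuming kernel context and with error handling elided: once a new page has gained its long-lived reference (page cache, page tables, ...), it is queued for deferred LRU placement.

	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (page) {
		/* ... install the page in its mapping here ... */
		lru_cache_add(page);	/* LRU placement happens at drain time */
	}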
446 * @page: the page to be added to LRU
447 * @vma: vma in which page is mapped for determining reclaimability
449 * Place @page on the active or unevictable LRU list, depending on its
450 * evictability. Note that if the page is not evictable, it goes
451 * directly back onto its zone's unevictable list; it does NOT use a
452 * per-CPU pagevec.
454 void lru_cache_add_active_or_unevictable(struct page *page, in lru_cache_add_active_or_unevictable() argument
457 VM_BUG_ON_PAGE(PageLRU(page), page); in lru_cache_add_active_or_unevictable()
460 SetPageActive(page); in lru_cache_add_active_or_unevictable()
461 else if (!TestSetPageMlocked(page)) { in lru_cache_add_active_or_unevictable()
467 __mod_zone_page_state(page_zone(page), NR_MLOCK, in lru_cache_add_active_or_unevictable()
468 hpage_nr_pages(page)); in lru_cache_add_active_or_unevictable()
471 lru_cache_add(page); in lru_cache_add_active_or_unevictable()
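A sketch of the anonymous-fault pattern, assuming kernel context with page, vma and address coming from a fault handler (memcg charging and locking elided): the page lands on the active LRU, or on the unevictable list if the VMA is mlocked.

	page_add_new_anon_rmap(page, vma, address, false);
	lru_cache_add_active_or_unevictable(page, vma);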
475 * If the page cannot be invalidated, it is moved to the
476 * inactive list to speed up its reclaim. It is moved to the
477 * head of the list, rather than the tail, to give the flusher
478 * threads some time to write it out, as this is much more
479 * effective than the single-page writeout from reclaim.
481 * If the page isn't page_mapped and is dirty or under writeback, the page
482 * can be reclaimed asap using PG_reclaim.
484 * 1. active, mapped page -> none
485 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
486 * 3. inactive, mapped page -> none
487 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
491 * In 4, the page is moved to the head of the inactive list because the VM
492 * expects it will be written out by flusher threads, as this is much more
493 * effective than the single-page writeout from reclaim.
495 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, in lru_deactivate_file_fn() argument
501 if (!PageLRU(page)) in lru_deactivate_file_fn()
504 if (PageUnevictable(page)) in lru_deactivate_file_fn()
507 /* Some processes are using the page */ in lru_deactivate_file_fn()
508 if (page_mapped(page)) in lru_deactivate_file_fn()
511 active = PageActive(page); in lru_deactivate_file_fn()
512 file = page_is_file_cache(page); in lru_deactivate_file_fn()
513 lru = page_lru_base_type(page); in lru_deactivate_file_fn()
515 del_page_from_lru_list(page, lruvec, lru + active); in lru_deactivate_file_fn()
516 ClearPageActive(page); in lru_deactivate_file_fn()
517 ClearPageReferenced(page); in lru_deactivate_file_fn()
518 add_page_to_lru_list(page, lruvec, lru); in lru_deactivate_file_fn()
520 if (PageWriteback(page) || PageDirty(page)) { in lru_deactivate_file_fn()
526 SetPageReclaim(page); in lru_deactivate_file_fn()
529 * The page's writeback has already completed while it sat in the pagevec; in lru_deactivate_file_fn()
530 * we move the page to the tail of the inactive list. in lru_deactivate_file_fn()
532 list_move_tail(&page->lru, &lruvec->lists[lru]); in lru_deactivate_file_fn()
542 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, in lru_lazyfree_fn() argument
545 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in lru_lazyfree_fn()
546 !PageSwapCache(page) && !PageUnevictable(page)) { in lru_lazyfree_fn()
547 bool active = PageActive(page); in lru_lazyfree_fn()
549 del_page_from_lru_list(page, lruvec, in lru_lazyfree_fn()
551 ClearPageActive(page); in lru_lazyfree_fn()
552 ClearPageReferenced(page); in lru_lazyfree_fn()
558 ClearPageSwapBacked(page); in lru_lazyfree_fn()
559 add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); in lru_lazyfree_fn()
561 __count_vm_events(PGLAZYFREE, hpage_nr_pages(page)); in lru_lazyfree_fn()
562 count_memcg_page_event(page, PGLAZYFREE); in lru_lazyfree_fn()
601 * deactivate_file_page - forcefully deactivate a file page
602 * @page: page to deactivate
604 * This function hints the VM that @page is a good reclaim candidate,
605 * for example if its invalidation fails due to the page being dirty
606 * or under writeback.
608 void deactivate_file_page(struct page *page) in deactivate_file_page() argument
611 * In a workload with many unevictable pages (such as one using mprotect), in deactivate_file_page()
612 * deactivating unevictable pages to accelerate reclaim is pointless. in deactivate_file_page()
614 if (PageUnevictable(page)) in deactivate_file_page()
617 if (likely(get_page_unless_zero(page))) { in deactivate_file_page()
620 if (!pagevec_add(pvec, page) || PageCompound(page)) in deactivate_file_page()
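A sketch of the invalidation path that motivates this helper, assuming kernel context with page locking elided (this mirrors the pattern in invalidate_mapping_pages()):

	if (!invalidate_inode_page(page))	/* 0: could not be invalidated */
		deactivate_file_page(page);	/* hint: good reclaim candidate */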
627 * mark_page_lazyfree - make an anon page lazyfree
628 * @page: page to mark lazyfree
630 * mark_page_lazyfree() moves @page to the inactive file list.
631 * This is done to accelerate the reclaim of @page.
633 void mark_page_lazyfree(struct page *page) in mark_page_lazyfree() argument
635 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in mark_page_lazyfree()
636 !PageSwapCache(page) && !PageUnevictable(page)) { in mark_page_lazyfree()
639 get_page(page); in mark_page_lazyfree()
640 if (!pagevec_add(pvec, page) || PageCompound(page)) in mark_page_lazyfree()
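For context, the MADV_FREE path is the expected caller; a sketch assuming kernel context, where clean anonymous pages are marked lazyfree so reclaim can discard them without swap I/O (the guard below mirrors the checks inside mark_page_lazyfree() itself):

	if (PageAnon(page) && !PageSwapCache(page))
		mark_page_lazyfree(page);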
716 * fell to zero, remove the page from the LRU and free it.
718 void release_pages(struct page **pages, int nr) in release_pages()
728 struct page *page = pages[i]; in release_pages() local
740 if (is_huge_zero_page(page)) in release_pages()
743 if (is_zone_device_page(page)) { in release_pages()
755 if (put_devmap_managed_page(page)) in release_pages()
759 page = compound_head(page); in release_pages()
760 if (!put_page_testzero(page)) in release_pages()
763 if (PageCompound(page)) { in release_pages()
768 __put_compound_page(page); in release_pages()
772 if (PageLRU(page)) { in release_pages()
773 struct pglist_data *pgdat = page_pgdat(page); in release_pages()
784 lruvec = mem_cgroup_page_lruvec(page, locked_pgdat); in release_pages()
785 VM_BUG_ON_PAGE(!PageLRU(page), page); in release_pages()
786 __ClearPageLRU(page); in release_pages()
787 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in release_pages()
791 __ClearPageActive(page); in release_pages()
792 __ClearPageWaiters(page); in release_pages()
794 list_add(&page->lru, &pages_to_free); in release_pages()
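A usage sketch, assuming kernel context and the pre-5.2 get_user_pages_fast() signature: release_pages() replaces a put_page() loop and batches the LRU-lock work exactly as the code above shows.

	struct page *pages[16];
	int nr;

	/* 'start' is a hypothetical, page-aligned user address */
	nr = get_user_pages_fast(start, 16, 0, pages);
	if (nr > 0) {
		/* ... operate on the pinned pages ... */
		release_pages(pages, nr);	/* drop all references at once */
	}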
808 * cache-warm and we want to give them back to the page allocator ASAP.
827 void lru_add_page_tail(struct page *page, struct page *page_tail, in lru_add_page_tail() argument
832 VM_BUG_ON_PAGE(!PageHead(page), page); in lru_add_page_tail()
833 VM_BUG_ON_PAGE(PageCompound(page_tail), page); in lru_add_page_tail()
834 VM_BUG_ON_PAGE(PageLRU(page_tail), page); in lru_add_page_tail()
841 if (likely(PageLRU(page))) in lru_add_page_tail()
842 list_add_tail(&page_tail->lru, &page->lru); in lru_add_page_tail()
844 /* page reclaim is reclaiming a huge page */ in lru_add_page_tail()
850 * Head page has not yet been counted, as an hpage, in lru_add_page_tail()
851 * so we must account for each subpage individually. in lru_add_page_tail()
861 if (!PageUnevictable(page)) in lru_add_page_tail()
866 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, in __pagevec_lru_add_fn() argument
870 int was_unevictable = TestClearPageUnevictable(page); in __pagevec_lru_add_fn()
872 VM_BUG_ON_PAGE(PageLRU(page), page); in __pagevec_lru_add_fn()
874 SetPageLRU(page); in __pagevec_lru_add_fn()
876 * Page becomes evictable in two ways: in __pagevec_lru_add_fn()
878 * 2) Before acquiring LRU lock to put the page to correct LRU and then in __pagevec_lru_add_fn()
895 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU in __pagevec_lru_add_fn()
897 * the isolation of the page whose Mlocked bit is cleared (#0 is also in __pagevec_lru_add_fn()
898 * looking at the same page) and the evictable page will be stranded in __pagevec_lru_add_fn()
903 if (page_evictable(page)) { in __pagevec_lru_add_fn()
904 lru = page_lru(page); in __pagevec_lru_add_fn()
905 update_page_reclaim_stat(lruvec, page_is_file_cache(page), in __pagevec_lru_add_fn()
906 PageActive(page)); in __pagevec_lru_add_fn()
911 ClearPageActive(page); in __pagevec_lru_add_fn()
912 SetPageUnevictable(page); in __pagevec_lru_add_fn()
917 add_page_to_lru_list(page, lruvec, lru); in __pagevec_lru_add_fn()
918 trace_mm_lru_insertion(page, lru); in __pagevec_lru_add_fn()
968 * passed on to page-only pagevec operations.
975 struct page *page = pvec->pages[i]; in pagevec_remove_exceptionals() local
976 if (!radix_tree_exceptional_entry(page)) in pagevec_remove_exceptionals()
977 pvec->pages[j++] = page; in pagevec_remove_exceptionals()
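A usage sketch, assuming kernel context and a mapping at hand (hypothetical): lookups such as pagevec_lookup_entries() can mix shadow entries in with pages, so the exceptionals are pruned before any page-only helpers run.

	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned nr;

	pagevec_init(&pvec);
	nr = pagevec_lookup_entries(&pvec, mapping, 0, PAGEVEC_SIZE, indices);
	if (nr) {
		pagevec_remove_exceptionals(&pvec);	/* pages only from here on */
		pagevec_release(&pvec);			/* drop the references */
	}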
986 * @start: The starting page index
987 * @end: The final page index
996 * also update @start to index the next page for the traversal.