Lines Matching +full:inactive +full:-

1 // SPDX-License-Identifier: GPL-2.0-only
10 * Linux VM subsystem. Fine-tuning documentation can be found in
11 * Documentation/admin-guide/sysctl/vm.rst.
32 #include <linux/backing-dev.h>
76 * This path almost never happens for VM activity - pages are normally
86 spin_lock_irqsave(&pgdat->lru_lock, flags); in __page_cache_release()
91 spin_unlock_irqrestore(&pgdat->lru_lock, flags); in __page_cache_release()
119 put_dev_pagemap(page->pgmap); in __put_page()
136 * put_pages_list() - release a list of pages
137 * @pages: list of pages threaded on page->lru
148 list_del(&victim->lru); in put_pages_list()
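A minimal caller sketch (hypothetical helper, assuming kernel-internal context): pages collected on a local list via their ->lru field can be handed back in a single call, which also empties the list.

    #include <linux/list.h>
    #include <linux/mm.h>

    /* Hypothetical: release two pages we have collected. */
    static void free_two(struct page *a, struct page *b)
    {
        LIST_HEAD(pages_to_free);

        list_add(&a->lru, &pages_to_free);
        list_add(&b->lru, &pages_to_free);
        put_pages_list(&pages_to_free);    /* one put_page() per entry */
    }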
155 * get_kernel_pages() - pin kernel pages in memory
164 * were pinned, returns -errno. Each page returned must be released
185 * get_kernel_page() - pin a kernel page in memory
192 * -errno. The page returned must be released with a put_page() call
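A hedged usage sketch (the helper name is made up; the call matches this era's get_kernel_page(), which returns the number of pages pinned):

    #include <linux/mm.h>

    /* Hypothetical: pin the page backing a kernel virtual address.
     * The caller must put_page() the result when finished. */
    static struct page *pin_kernel_addr(void *addr)
    {
        struct page *page;

        if (get_kernel_page((unsigned long)addr, 0, &page) != 1)
            return NULL;    /* address not backed by a valid page */
        return page;
    }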
216 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn()
221 spin_unlock_irqrestore(&pgdat->lru_lock, flags); in pagevec_lru_move_fn()
223 spin_lock_irqsave(&pgdat->lru_lock, flags); in pagevec_lru_move_fn()
230 spin_unlock_irqrestore(&pgdat->lru_lock, flags); in pagevec_lru_move_fn()
231 release_pages(pvec->pages, pvec->nr); in pagevec_lru_move_fn()
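The lines above show pagevec_lru_move_fn() cycling the owning node's lru_lock as it walks the batch. A sketch of a callback it could drive (hypothetical function; 5.9-era callbacks take the page, its lruvec, and an opaque arg):

    #include <linux/mm.h>
    #include <linux/mm_inline.h>

    /* Hypothetical move_fn: rotate a page to the tail of whichever
     * LRU list it is on. Called under the owning node's lru_lock. */
    static void rotate_fn(struct page *page, struct lruvec *lruvec, void *arg)
    {
        if (PageLRU(page) && !PageUnevictable(page))
            list_move_tail(&page->lru, &lruvec->lists[page_lru(page)]);
    }

pagevec_lru_move_fn(pvec, rotate_fn, NULL) would then apply it to every page in the batch, taking and dropping the right node's lock exactly as the excerpt shows.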
263 * inactive list.
288 lruvec->file_cost += nr_pages; in lru_note_cost()
290 lruvec->anon_cost += nr_pages; in lru_note_cost()
305 if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) { in lru_note_cost()
306 lruvec->file_cost /= 2; in lru_note_cost()
307 lruvec->anon_cost /= 2; in lru_note_cost()
316 lru_note_cost(&(page_pgdat(page)->__lruvec), 1, thp_nr_pages(page)); in lru_note_cost_page()
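The cost accounting above is an exponentially decaying sum: costs grow with reclaim work and are halved whenever their total exceeds a quarter of the LRU size, so old history fades while the file:anon ratio is preserved. A standalone model of that arithmetic, with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long file_cost = 0, anon_cost = 0;
        unsigned long lrusize = 100000;    /* hypothetical LRU size, in pages */
        int i;

        for (i = 0; i < 10; i++) {
            file_cost += 9000;    /* pretend file reclaim dominates */
            anon_cost += 1000;
            if (file_cost + anon_cost > lrusize / 4) {
                file_cost /= 2;    /* decay both, keeping the ratio */
                anon_cost /= 2;
            }
            printf("round %d: file=%lu anon=%lu\n", i, file_cost, anon_cost);
        }
        return 0;
    }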
382 spin_lock_irq(&pgdat->lru_lock); in activate_page()
384 spin_unlock_irq(&pgdat->lru_lock); in activate_page()
403 * a page is marked PageActive just after it is added to the inactive in __lru_cache_activate_page()
406 for (i = pagevec_count(pvec) - 1; i >= 0; i--) { in __lru_cache_activate_page()
407 struct page *pagevec_page = pvec->pages[i]; in __lru_cache_activate_page()
421 * inactive,unreferenced -> inactive,referenced
422 * inactive,referenced -> active,unreferenced
423 * active,unreferenced -> active,referenced
425 * When a newly allocated page is not yet visible (and hence safe for non-atomic ops),
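Read as a state machine, the table says two accesses promote a cold page. A sketch (kernel context assumed; the helper is hypothetical):

    #include <linux/swap.h>

    /* Walk one page through the transitions listed above, starting
     * from inactive,unreferenced. */
    static void touch_twice(struct page *page)
    {
        mark_page_accessed(page);  /* inactive,unreferenced -> inactive,referenced */
        mark_page_accessed(page);  /* inactive,referenced -> active,unreferenced */
        /* A third call would yield active,referenced. */
    }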
460 * lru_cache_add - add a page to a page list
489 * Place @page on the inactive or unevictable LRU list, depending on its
499 unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED; in lru_cache_add_inactive_or_unevictable()
503 * We use the irq-unsafe __mod_zone_page_stat because this in lru_cache_add_inactive_or_unevictable()
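The test on line 499 can be read in isolation (hypothetical helper: VM_LOCKED must be set and none of the VM_SPECIAL bits):

    #include <linux/mm.h>

    /* True only for a plain mlocked mapping; special mappings are
     * excluded even when VM_LOCKED is set. */
    static bool page_would_be_unevictable(struct vm_area_struct *vma)
    {
        return (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
    }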
515 * inactive list to speed up its reclaim. It is moved to the
516 * head of the list, rather than the tail, to give the flusher
517 * threads some time to write it out, as this is much more
518 * effective than the single-page writeout from reclaim.
523 * 1. active, mapped page -> none
524 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
525 * 3. inactive, mapped page -> none
526 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
527 * 5. inactive, clean -> inactive, tail
528 * 6. Others -> none
530 * In 4, the page is moved to the head of the inactive list because the VM
531 * expects it to be written out by flusher threads soon, as this is much more
532 * effective than the single-page writeout from reclaim.
562 * is _really_ small and it's a non-critical problem. in lru_deactivate_file_fn()
569 * We move the page to the tail of the inactive list. in lru_deactivate_file_fn()
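Cases 2, 4 and 5 above can be sketched as follows (a paraphrase, not the verbatim function body; lru, lruvec and nr_pages are taken as parameters here for self-containment):

    #include <linux/mm.h>
    #include <linux/mm_inline.h>
    #include <linux/vmstat.h>

    static void deactivate_file_sketch(struct page *page, struct lruvec *lruvec,
                                       enum lru_list lru, int nr_pages)
    {
        if (PageWriteback(page) || PageDirty(page)) {
            /* Cases 2 and 4: PG_reclaim makes writeback completion
             * rotate the page to the inactive tail, where reclaim
             * can free it cheaply. */
            SetPageReclaim(page);
            add_page_to_lru_list(page, lruvec, lru);
        } else {
            /* Case 5: clean pages go straight to the inactive tail. */
            add_page_to_lru_list_tail(page, lruvec, lru);
            __count_vm_events(PGROTATED, nr_pages);
        }
    }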
629 * disabled; or "cpu" is being hot-unplugged, and is already dead.
665 * deactivate_file_page - forcefully deactivate a file page
694 * deactivate_page - deactivate a page
697 * deactivate_page() moves @page to the inactive list if @page was on the active
716 * mark_page_lazyfree - make an anon page lazyfree
719 * mark_page_lazyfree() moves @page to the inactive file list.
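A hedged sketch of a caller (the MADV_FREE path is the in-tree user; this helper and its locking context are hypothetical):

    #include <linux/swap.h>

    /* An anonymous page whose contents the owner has discarded is
     * marked clean and parked on the inactive *file* list, so reclaim
     * can drop it without any swap I/O. */
    static void lazyfree_one(struct page *page)
    {
        ClearPageDirty(page);
        mark_page_lazyfree(page);
    }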
762 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
771 * lru_drain_gen - Global pages generation number in lru_add_drain_all()
776 * This is an optimization for the highly-contended use case where a in lru_add_drain_all()
828 * below and has already reached CPU #y's per-cpu data. CPU #x comes in lru_add_drain_all()
829 * along, adds some pages to its per-cpu vectors, then calls in lru_add_drain_all()
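A paraphrase of the generation trick described above (sketch only; the real function also schedules per-CPU drain work and waits for it):

    #include <linux/atomic.h>
    #include <linux/mutex.h>

    static unsigned int drain_gen;
    static DEFINE_MUTEX(drain_lock);

    static void drain_all_sketch(void)
    {
        unsigned int this_gen = smp_load_acquire(&drain_gen);

        mutex_lock(&drain_lock);
        if (this_gen != drain_gen) {
            /* A full drain completed after we sampled the generation,
             * so our pages were already covered; nothing to do. */
            mutex_unlock(&drain_lock);
            return;
        }
        WRITE_ONCE(drain_gen, drain_gen + 1);
        /* ... queue the per-CPU drain work on each CPU and wait ... */
        mutex_unlock(&drain_lock);
    }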
869 * release_pages - batched put_page()
889 * Make sure the IRQ-safe lock-holding time does not get in release_pages()
894 spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags); in release_pages()
904 spin_unlock_irqrestore(&locked_pgdat->lru_lock, in release_pages()
925 spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags); in release_pages()
937 spin_unlock_irqrestore(&locked_pgdat->lru_lock, in release_pages()
941 spin_lock_irqsave(&locked_pgdat->lru_lock, flags); in release_pages()
952 list_add(&page->lru, &pages_to_free); in release_pages()
955 spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags); in release_pages()
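The skeleton behind those unlock/lock lines, as one piece (a paraphrase under 5.9-era pgdat locking; the actual freeing is elided into comments):

    #include <linux/mm.h>
    #include <linux/swap.h>    /* SWAP_CLUSTER_MAX */

    static void release_lock_sketch(struct page **pages, int nr)
    {
        struct pglist_data *locked_pgdat = NULL;
        unsigned int lock_batch = 0;
        unsigned long flags;
        int i;

        for (i = 0; i < nr; i++) {
            struct page *page = pages[i];

            /* Bound IRQ-off time even within a single node. */
            if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
                locked_pgdat = NULL;
            }
            if (PageLRU(page)) {
                struct pglist_data *pgdat = page_pgdat(page);

                /* Cycle the lock when crossing to another node. */
                if (pgdat != locked_pgdat) {
                    if (locked_pgdat)
                        spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                               flags);
                    lock_batch = 0;
                    locked_pgdat = pgdat;
                    spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
                }
                /* ... del_page_from_lru_list() under the lock ... */
            }
            /* ... drop the reference; freed pages are batched on a
             * local list (the pages_to_free list seen above) ... */
        }
        if (locked_pgdat)
            spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
    }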
963 * The pages which we're about to release may be in the deferred lru-addition
964 * queues; that would prevent them from really being freed right now. That's
965 * OK from a correctness point of view but is inefficient - those pages may be
966 * cache-warm and we want to give them back to the page allocator ASAP.
974 if (!pvec->percpu_pvec_drained) { in __pagevec_release()
976 pvec->percpu_pvec_drained = true; in __pagevec_release()
978 release_pages(pvec->pages, pagevec_count(pvec)); in __pagevec_release()
991 lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock); in lru_add_page_tail()
997 list_add_tail(&page_tail->lru, &page->lru); in lru_add_page_tail()
1001 list_add_tail(&page_tail->lru, list); in lru_add_page_tail()
1080 * pagevec_lookup_entries - gang pagecache lookup
1092 * The search returns a group of mapping-contiguous entries with
1093 * ascending indexes. There may be holes in the indices due to
1094 * not-present entries.
1108 pvec->nr = find_get_entries(mapping, start, nr_entries, in pagevec_lookup_entries()
1109 pvec->pages, indices); in pagevec_lookup_entries()
1114 * pagevec_remove_exceptionals - pagevec exceptionals pruning
1120 * passed on to page-only pagevec operations.
1127 struct page *page = pvec->pages[i]; in pagevec_remove_exceptionals()
1129 pvec->pages[j++] = page; in pagevec_remove_exceptionals()
1131 pvec->nr = j; in pagevec_remove_exceptionals()
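A hedged usage sketch combining the two operations above (the scan function is hypothetical):

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>

    static void scan_batch(struct address_space *mapping, pgoff_t start)
    {
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        unsigned int i;

        pagevec_init(&pvec);
        pagevec_lookup_entries(&pvec, mapping, start, PAGEVEC_SIZE, indices);
        /* Prune shadow (xa_is_value()) entries before page-only work. */
        pagevec_remove_exceptionals(&pvec);
        for (i = 0; i < pagevec_count(&pvec); i++) {
            /* ... page-only processing of pvec.pages[i] ... */
        }
        pagevec_release(&pvec);
    }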
1135 * pagevec_lookup_range - gang pagecache lookup
1146 * The search returns a group of mapping-contiguous pages with ascending
1147 * indexes. There may be holes in the indices due to not-present pages. We
1157 pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE, in pagevec_lookup_range()
1158 pvec->pages); in pagevec_lookup_range()
1167 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, in pagevec_lookup_range_tag()
1168 PAGEVEC_SIZE, pvec->pages); in pagevec_lookup_range_tag()
1177 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, in pagevec_lookup_range_nr_tag()
1178 min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages); in pagevec_lookup_range_nr_tag()
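A typical walk over the tagged variant (hedged sketch; mirrors how writeback iterates dirty-tagged pages, the helper name is made up):

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/sched.h>

    static void walk_dirty(struct address_space *mapping,
                           pgoff_t index, pgoff_t end)
    {
        struct pagevec pvec;
        unsigned int i;

        pagevec_init(&pvec);
        /* @index is advanced past the last page returned each round. */
        while (pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                        PAGECACHE_TAG_DIRTY)) {
            for (i = 0; i < pagevec_count(&pvec); i++) {
                /* ... lock and write out pvec.pages[i] ... */
            }
            pagevec_release(&pvec);
            cond_resched();
        }
    }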
1187 unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); in swap_setup()
1189 /* Use a smaller cluster for small-memory machines */ in swap_setup()
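The shift converts a page count to mebibytes: with 4 KiB pages (PAGE_SHIFT = 12), totalram_pages() >> 8 divides by the 256 pages per MiB. A standalone worked example of this era's sizing rule (the 16 MiB threshold and the 2/3 values match the 5.9-era swap_setup(); page_cluster sizes swap readahead at 2^page_cluster pages):

    #include <stdio.h>

    int main(void)
    {
        unsigned long total_pages = 16384;             /* a 64 MiB machine */
        unsigned long megs = total_pages >> (20 - 12); /* 16384/256 = 64 */
        int page_cluster = (megs < 16) ? 2 : 3;

        printf("%lu MiB -> page_cluster=%d (%d-page readahead)\n",
               megs, page_cluster, 1 << page_cluster);
        return 0;
    }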
1211 * devmap page refcounts are 1-based, rather than 0-based: if in put_devmap_managed_page()
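A paraphrase of the scheme that comment describes (sketch only; the real put_devmap_managed_page() also checks page_is_devmap_managed() and hands idle pages back to their pgmap owner):

    #include <linux/mm.h>
    #include <linux/page_ref.h>

    static void put_devmap_sketch(struct page *page)
    {
        int count = page_ref_dec_return(page);

        if (count == 1) {
            /* 1-based: a remaining count of 1 means the page is idle
             * and only the pgmap's base reference is left. */
            /* ... free the page back to its pgmap owner ... */
        } else if (!count) {
            __put_page(page);    /* ordinary final put */
        }
    }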