
Lines Matching +full:cost +full:- +full:effective

1 // SPDX-License-Identifier: GPL-2.0-only
10 * Linux VM subsystem. Fine-tuning documentation can be found in
11 * Documentation/admin-guide/sysctl/vm.rst.
32 #include <linux/backing-dev.h>
78 * This path almost never happens for VM activity - pages are normally freed
79 * in batches. But it gets used by networking - and for compound pages.
98 free_unref_page(&folio->page, 0); in __folio_put_small()
117 free_zone_device_page(&folio->page); in __folio_put()
126 * put_pages_list() - release a list of pages
127 * @pages: list of pages threaded on page->lru
137 list_del(&folio->lru); in put_pages_list()
141 list_del(&folio->lru); in put_pages_list()
180 * folio->mlock_count = !!folio_test_mlocked(folio)? in lru_add_fn()
186 folio->mlock_count = 0; in lru_add_fn()
202 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru()
216 folios_put(fbatch->folios, folio_batch_count(fbatch)); in folio_batch_move_lru()
264 unsigned long cost; in lru_note_cost() local
267 * Reflect the relative cost of incurring IO and spending CPU in lru_note_cost()
273 cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated; in lru_note_cost()
279 * Holding lruvec->lru_lock is safe here, since in lru_note_cost()
281 * 2) From a pre-LRU page during refault (which also holds the in lru_note_cost()
285 spin_lock_irq(&lruvec->lru_lock); in lru_note_cost()
286 /* Record cost event */ in lru_note_cost()
288 lruvec->file_cost += cost; in lru_note_cost()
290 lruvec->anon_cost += cost; in lru_note_cost()
305 if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) { in lru_note_cost()
306 lruvec->file_cost /= 2; in lru_note_cost()
307 lruvec->anon_cost /= 2; in lru_note_cost()
309 spin_unlock_irq(&lruvec->lru_lock); in lru_note_cost()
317 lru_note_cost(&(folio_pgdat(folio)->__lruvec), 1, folio_nr_pages(folio), 0); in lru_note_cost_refault()
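As a rough illustration of the bookkeeping excerpted above, the following standalone sketch models the cost accounting and decay: I/O events are weighted by SWAP_CLUSTER_MAX relative to rotations, and both counters are halved once their sum exceeds a quarter of the LRU size, so old history fades rather than accumulating forever. The struct and function names here are illustrative stand-ins, not the kernel's API.

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32			/* same weighting factor as the excerpt */

struct lruvec_model {				/* illustrative stand-in for struct lruvec */
	unsigned long file_cost;
	unsigned long anon_cost;
};

static void note_cost(struct lruvec_model *lruvec, int file,
		      unsigned long nr_io, unsigned long nr_rotated,
		      unsigned long lrusize)
{
	/* Reflect the relative cost of incurring IO versus CPU work (rotations). */
	unsigned long cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;

	if (file)
		lruvec->file_cost += cost;
	else
		lruvec->anon_cost += cost;

	/* Decay old events once the totals exceed a quarter of the LRU size. */
	if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
		lruvec->file_cost /= 2;
		lruvec->anon_cost /= 2;
	}
}

int main(void)
{
	struct lruvec_model v = { 0, 0 };

	note_cost(&v, 1, 4, 10, 1024);		/* 4 I/Os + 10 rotations on the file LRU */
	printf("file_cost=%lu anon_cost=%lu\n", v.file_cost, v.anon_cost);
	return 0;
}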
401 for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) { in __lru_cache_activate_folio()
402 struct folio *batch_folio = fbatch->folios[i]; in __lru_cache_activate_folio()
416 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_refs()
439 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_refs()
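The folio_inc_refs() fragment above follows the common lockless read-modify-write pattern: read the flags word once, compute the new value, and retry the compare-and-exchange until no other CPU has changed the word in between. Below is a minimal user-space sketch of the same pattern using C11 atomics in place of READ_ONCE()/try_cmpxchg(); the flag bit and function name are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

#define MODEL_REFERENCED	(1UL << 0)	/* illustrative flag bit, not a real page flag */

static void set_referenced(_Atomic unsigned long *flags)
{
	unsigned long old_flags = atomic_load(flags);
	unsigned long new_flags;

	do {
		new_flags = old_flags | MODEL_REFERENCED;
		/* On failure, old_flags is reloaded with the current value and we retry. */
	} while (!atomic_compare_exchange_weak(flags, &old_flags, new_flags));
}

int main(void)
{
	_Atomic unsigned long flags = 0;

	set_referenced(&flags);
	printf("flags=%#lx\n", (unsigned long)atomic_load(&flags));
	return 0;
}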
450 * inactive,unreferenced -> inactive,referenced
451 * inactive,referenced -> active,unreferenced
452 * active,unreferenced -> active,referenced
454 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
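Read together, the transitions above form a small two-bit promotion ladder; repeated accesses walk a folio from inactive,unreferenced up to active,referenced. A toy model of that ladder follows (the struct and function names are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct folio_model {				/* the two state bits from the comment above */
	bool active;
	bool referenced;
};

static void mark_accessed(struct folio_model *f)
{
	if (!f->referenced) {
		/* inactive,unreferenced -> inactive,referenced
		 * active,unreferenced   -> active,referenced   */
		f->referenced = true;
	} else if (!f->active) {
		/* inactive,referenced -> active,unreferenced */
		f->active = true;
		f->referenced = false;
	}
	/* active,referenced: already as hot as it gets. */
}

int main(void)
{
	struct folio_model f = { false, false };

	for (int i = 0; i < 3; i++) {
		mark_accessed(&f);
		printf("active=%d referenced=%d\n", f.active, f.referenced);
	}
	return 0;
}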
492 * folio_add_lru - Add a folio to an LRU list.
510 lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) in folio_add_lru()
522 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
533 if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED)) in folio_add_lru_vma()
544 * effective than the single-page writeout from reclaim.
549 * 1. active, mapped folio -> none
550 * 2. active, dirty/writeback folio -> inactive, head, reclaim
551 * 3. inactive, mapped folio -> none
552 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
553 * 5. inactive, clean -> inactive, tail
554 * 6. Others -> none
558 * than the single-page writeout from reclaim.
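The six cases above reduce to a small decision table: mapped folios are left alone, dirty or writeback folios go to the head of the inactive list with the reclaim flag set so writeback can complete first, and clean inactive folios go straight to the tail. A hedged sketch of that table (the enum and function names are invented, not the kernel's):

#include <stdio.h>

enum deactivate_action {
	DA_NONE,				/* cases 1, 3 and 6 */
	DA_INACTIVE_HEAD_RECLAIM,		/* cases 2 and 4 */
	DA_INACTIVE_TAIL,			/* case 5 */
};

static enum deactivate_action
deactivate_file_decision(int active, int mapped, int dirty_or_writeback)
{
	if (mapped)
		return DA_NONE;
	if (dirty_or_writeback)
		return DA_INACTIVE_HEAD_RECLAIM;
	if (!active)
		return DA_INACTIVE_TAIL;
	return DA_NONE;
}

int main(void)
{
	/* inactive, clean, unmapped folio: case 5, move to the inactive tail */
	printf("%d\n", deactivate_file_decision(0, 0, 0) == DA_INACTIVE_TAIL);
	return 0;
}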
643 * disabled; or "cpu" is being hot-unplugged, and is already dead.
648 struct folio_batch *fbatch = &fbatches->lru_add; in lru_add_drain_cpu()
664 fbatch = &fbatches->lru_deactivate_file; in lru_add_drain_cpu()
668 fbatch = &fbatches->lru_deactivate; in lru_add_drain_cpu()
672 fbatch = &fbatches->lru_lazyfree; in lru_add_drain_cpu()
680 * deactivate_file_folio() - Deactivate a file folio.
705 * folio_deactivate - deactivate a folio
727 * folio_mark_lazyfree - make an anon folio lazyfree
757 * It's called from per-cpu workqueue context in SMP case so
794 return folio_batch_count(&fbatches->lru_add) || in cpu_needs_drain()
796 folio_batch_count(&fbatches->lru_deactivate_file) || in cpu_needs_drain()
797 folio_batch_count(&fbatches->lru_deactivate) || in cpu_needs_drain()
798 folio_batch_count(&fbatches->lru_lazyfree) || in cpu_needs_drain()
799 folio_batch_count(&fbatches->activate) || in cpu_needs_drain()
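cpu_needs_drain() above simply asks whether any of a CPU's pending folio batches is non-empty. A toy model of that per-CPU bookkeeping is sketched below; the field names mirror the excerpt, but these are not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

struct batch_model { unsigned int nr; };	/* stand-in for struct folio_batch */

struct cpu_fbatches_model {
	struct batch_model lru_add;
	struct batch_model lru_deactivate_file;
	struct batch_model lru_deactivate;
	struct batch_model lru_lazyfree;
	struct batch_model activate;
};

static bool cpu_needs_drain(const struct cpu_fbatches_model *f)
{
	return f->lru_add.nr || f->lru_deactivate_file.nr ||
	       f->lru_deactivate.nr || f->lru_lazyfree.nr ||
	       f->activate.nr;
}

int main(void)
{
	struct cpu_fbatches_model f = { .lru_deactivate = { .nr = 3 } };

	printf("needs drain: %d\n", cpu_needs_drain(&f));
	return 0;
}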
805 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
814 * lru_drain_gen - Global pages generation number in __lru_add_drain_all()
819 * This is an optimization for the highly-contended use case where a in __lru_add_drain_all()
873 * below and has already reached CPU #y's per-cpu data. CPU #x comes in __lru_add_drain_all()
874 * along, adds some pages to its per-cpu vectors, then calls in __lru_add_drain_all()
948 * release_pages - batched put_page()
975 * Make sure the IRQ-safe lock-holding time does not get in release_pages()
984 if (is_huge_zero_page(&folio->page)) in release_pages()
992 if (put_devmap_managed_page(&folio->page)) in release_pages()
995 free_zone_device_page(&folio->page); in release_pages()
1023 list_add(&folio->lru, &pages_to_free); in release_pages()
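The note above about bounding the IRQ-safe lock-holding time refers to a common pattern: while walking a long list under a lock, drop and re-take the lock every so many items so that latency stays bounded. A user-space sketch of the idea, with a pthread mutex standing in for the IRQ-safe LRU lock (the batch limit and names are illustrative):

#include <pthread.h>
#include <stdio.h>

#define BATCH_LIMIT 32				/* illustrative, in the spirit of SWAP_CLUSTER_MAX */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process_items(int nr_items)
{
	int held = 0, batch = 0;

	for (int i = 0; i < nr_items; i++) {
		if (held && batch >= BATCH_LIMIT) {
			pthread_mutex_unlock(&lock);	/* bound the holding time */
			held = 0;
			batch = 0;
		}
		if (!held) {
			pthread_mutex_lock(&lock);
			held = 1;
		}
		/* ... per-item work done under the lock ... */
		batch++;
	}
	if (held)
		pthread_mutex_unlock(&lock);
}

int main(void)
{
	process_items(100);
	printf("processed\n");
	return 0;
}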
1034 * The folios which we're about to release may be in the deferred lru-addition
1036 * OK from a correctness point of view but is inefficient - those folios may be
1037 * cache-warm and we want to give them back to the page allocator ASAP.
1045 if (!fbatch->percpu_pvec_drained) { in __folio_batch_release()
1047 fbatch->percpu_pvec_drained = true; in __folio_batch_release()
1049 release_pages(fbatch->folios, folio_batch_count(fbatch)); in __folio_batch_release()
1055 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
1059 * entries. This function prunes all the non-folio entries from @fbatch
1060 * without leaving holes, so that it can be passed on to folio-only batch
1068 struct folio *folio = fbatch->folios[i]; in folio_batch_remove_exceptionals()
1070 fbatch->folios[j++] = folio; in folio_batch_remove_exceptionals()
1072 fbatch->nr = j; in folio_batch_remove_exceptionals()
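The loop above uses the usual in-place compaction idiom: walk the array with one index, copy the entries worth keeping to a second index, and shrink the count at the end so no holes are left. A standalone sketch of the same idiom on plain integers (the predicate is arbitrary here; the real code keeps only folio entries):

#include <stdio.h>

static unsigned int compact_keep_even(int *vals, unsigned int nr)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < nr; i++) {
		if (vals[i] % 2 == 0)		/* stand-in for the "is a folio" test */
			vals[j++] = vals[i];
	}
	return j;				/* new count, like fbatch->nr = j */
}

int main(void)
{
	int vals[] = { 1, 2, 3, 4, 6, 7 };
	unsigned int nr = compact_keep_even(vals, 6);

	for (unsigned int i = 0; i < nr; i++)
		printf("%d ", vals[i]);
	printf("\n");
	return 0;
}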
1080 unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); in swap_setup()
1082 /* Use a smaller cluster for small-memory machines */ in swap_setup()
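For reference, the shift in swap_setup() converts a page count into mebibytes: with 4 KiB pages (PAGE_SHIFT == 12) there are 2^(20-12) = 256 pages per MiB, so shifting right by 8 performs the division. A quick worked example under that page-size assumption:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;			/* assume 4 KiB pages */
	unsigned long totalram_pages = 2097152;		/* 8 GiB worth of 4 KiB pages */
	unsigned long megs = totalram_pages >> (20 - page_shift);

	printf("%lu MiB\n", megs);			/* prints 8192 */
	return 0;
}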