
Lines Matching +full:inactive +full:-

1 // SPDX-License-Identifier: GPL-2.0
23 * Per node, two clock lists are maintained for file pages: the
24 * inactive and the active list. Freshly faulted pages start out at
25 * the head of the inactive list and page reclaim scans pages from the
26 * tail. Pages that are accessed multiple times on the inactive list
27 * are promoted to the active list, to protect them from reclaim,
28 * whereas active pages are demoted to the inactive list when the
29 * active list grows too big.
31 *   fault ------------------------+
32 *                                 |
33 *              +--------------+   |            +-------------+
34 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
35 *              +--------------+                +-------------+    |
36 *                     |                                           |
37 *                     +-------------- promotion ------------------+
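
To make the flow in the diagram concrete, here is a toy userspace model of the two lists - a sketch with invented names and sizes, not the kernel's implementation. Faults enter at the inactive head, a second access promotes to the active head, reclaim takes the inactive tail, and an overfull active list demotes its tail back to the inactive head.

/* Toy double-CLOCK model; pages are plain ints, index 0 is the head. */
#include <stdio.h>
#include <string.h>

#define INACTIVE_MAX 4
#define ACTIVE_MAX   4

static int inactive[INACTIVE_MAX], n_inactive;
static int active[ACTIVE_MAX], n_active;

static int find(const int *list, int n, int page)
{
        for (int i = 0; i < n; i++)
                if (list[i] == page)
                        return i;
        return -1;
}

static void push_inactive(int page)
{
        if (n_inactive == INACTIVE_MAX)
                n_inactive--;                   /* reclaim from the tail */
        memmove(inactive + 1, inactive, n_inactive * sizeof(int));
        inactive[0] = page;
        n_inactive++;
}

static void push_active(int page)
{
        if (n_active == ACTIVE_MAX)
                push_inactive(active[--n_active]);      /* demotion */
        memmove(active + 1, active, n_active * sizeof(int));
        active[0] = page;
        n_active++;
}

static void access_page(int page)
{
        int i = find(inactive, n_inactive, page);

        if (i >= 0) {                           /* second access: promote */
                memmove(inactive + i, inactive + i + 1,
                        (n_inactive - i - 1) * sizeof(int));
                n_inactive--;
                push_active(page);
        } else if (find(active, n_active, page) < 0) {
                push_inactive(page);            /* fault: inactive head */
        }
}

int main(void)
{
        for (int p = 1; p <= 6; p++)
                access_page(p); /* 6 faults overflow the 4-slot inactive list */
        access_page(3);         /* second access promotes page 3 */

        printf("inactive:");    /* prints: inactive: 6 5 4 */
        for (int i = 0; i < n_inactive; i++)
                printf(" %d", inactive[i]);
        printf("\nactive:");    /* prints: active: 3 */
        for (int i = 0; i < n_active; i++)
                printf(" %d", active[i]);
        printf("\n");
        return 0;
}
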
42 * A workload is thrashing when its pages are frequently used but they
43 * are evicted from the inactive list every time before another access
44 * would have promoted them to the active list.
46 * In cases where the average stable access distance of the workload
47 * is bigger than the size of memory there is nothing that can be
48 * done - the thrashing set could never fit into memory under any
49 * circumstance.
51 * However, the average access distance could be bigger than the
52 * inactive list, yet smaller than the size of memory. In this case,
53 * the set could fit into memory if it weren't for the currently
54 * active pages - which may be used more frequently, though hopefully less:
56 *      +-memory available to cache-+
57 *      |                           |
58 *      +-inactive------+-active----+
59 *   a  b | c d e f g h i | J K L M N |
60 *      +---------------+-----------+
64 * thrashing on the inactive list, after which refaulting pages can be
65 * activated optimistically to compete with the existing active pages.
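
A worked example of this in-between scenario, with made-up numbers: a 300-page working set overflows a 100-slot inactive list, so it thrashes there, while easily fitting into the 500 pages of total cache.

#include <stdio.h>

int main(void)
{
        long nr_inactive = 100; /* slots on the inactive list */
        long nr_active = 400;   /* slots on the active list */
        long workingset = 300;  /* pages the workload cycles through */

        /* Bigger than the inactive list: the set thrashes there... */
        printf("overflows inactive: %s\n",
               workingset > nr_inactive ? "yes" : "no");
        /* ...but smaller than memory: it would fit with activation. */
        printf("fits in memory:     %s\n",
               workingset <= nr_inactive + nr_active ? "yes" : "no");
        return 0;
}
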
67 * Approximating inactive page access frequency - Observations:
69 * 1. When a page is accessed for the first time, it is added to the
70 *    head of the inactive list, slides every existing inactive page
71 *    towards the tail by one slot, and pushes the current tail page
72 *    out of memory.
74 * 2. When a page is accessed for the second time, it is promoted to
75 *    the active list, shrinking the inactive list by one slot. This
76 *    also slides all inactive pages that were faulted into the cache
77 *    more recently than it towards the tail of the
78 *    inactive list.
82 * 1. The sum of evictions and activations between any two points in
83 *    time indicates the minimum number of inactive pages accessed in
84 *    between.
86 * 2. Moving one inactive page N page slots towards the tail of the
87 * list requires at least N inactive page accesses.
91 * 1. When a page is finally evicted from memory, the number of
92 *    inactive pages accessed while the page was in cache is at least
93 *    the number of page slots on the inactive list.
101 * Because the first access of the page was the fault and the second
102 * access the refault, we combine the in-cache distance with the
103 * out-of-cache distance to get the complete minimum access distance
104 * of this page:
105 *
106 * NR_inactive + (R - E)
108 * And knowing the minimum access distance of a page, we can easily
109 * tell if the page would be able to stay in cache assuming all page
110 * slots in the cache were available:
111 *
112 * NR_inactive + (R - E) <= NR_inactive + NR_active
114 * which can be further simplified to
115 *
116 * (R - E) <= NR_active
118 * Put into words, the refault distance (out-of-cache) can be seen as
119 * a deficit in inactive list space (in-cache). If the inactive list
120 * had (R - E) more page slots, the page would not have been evicted
121 * in between accesses, but activated instead. And on a full system,
122 * the only thing eating into inactive list space is active pages.
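
The final inequality translates directly into code. The following userspace sketch models the bookkeeping with a single global counter of evictions plus activations; nonresident_age, note_eviction and refault_should_activate are names invented for this sketch, not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

static unsigned long nonresident_age;   /* evictions + activations */

static unsigned long note_eviction(void)
{
        return nonresident_age++;       /* E: snapshot stored with the page */
}

static bool refault_should_activate(unsigned long eviction,
                                    unsigned long nr_active)
{
        unsigned long refault = nonresident_age;                /* R */
        unsigned long refault_distance = refault - eviction;    /* R - E */

        /* (R - E) <= NR_active: with that many more inactive slots,
         * the page would have been promoted rather than evicted. */
        return refault_distance <= nr_active;
}

int main(void)
{
        unsigned long e = note_eviction();

        nonresident_age += 3;   /* cache churn while the page is out */

        printf("%d\n", refault_should_activate(e, 8)); /* 1: activate */
        printf("%d\n", refault_should_activate(e, 2)); /* 0: too far out */
        return 0;
}
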
125 * Refaulting inactive pages
132 * So when a refault distance of (R - E) is observed and there are at
133 * least (R - E) active pages, the refaulting page is activated
134 * optimistically in the hope that (R - E) active pages are actually
135 * used less frequently than the refaulting page - or even not used at
136 * all anymore.
138 * That means if inactive cache is refaulting with a suitable refault
139 * distance, we assume the set is transitioning and put pressure on
140 * the current active list.
160 * For each node's LRU lists, a counter for inactive evictions and
161 * activations is maintained (node->nonresident_age).
171 #define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \
172                          1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
191 eviction = (eviction << NODES_SHIFT) | pgdat->node_id; in pack_shadow()
206 nid = entry & ((1UL << NODES_SHIFT) - 1); in unpack_shadow()
208 memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1); in unpack_shadow()
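
Here is a self-contained round trip of the shift-and-mask packing visible above. DEMO_MEMCG_BITS and DEMO_NODE_BITS are illustrative stand-ins for MEM_CGROUP_ID_SHIFT and NODES_SHIFT, whose values are config-dependent; the kernel additionally reserves low bits for the workingset flag and the xarray value tag, omitted here for brevity.

#include <assert.h>
#include <stdio.h>

#define DEMO_MEMCG_BITS 16      /* stand-in for MEM_CGROUP_ID_SHIFT */
#define DEMO_NODE_BITS  6       /* stand-in for NODES_SHIFT */

static unsigned long pack(int memcgid, int nid, unsigned long eviction)
{
        eviction = (eviction << DEMO_MEMCG_BITS) | memcgid;
        eviction = (eviction << DEMO_NODE_BITS) | nid;
        return eviction;
}

static void unpack(unsigned long entry, int *memcgid, int *nid,
                   unsigned long *eviction)
{
        *nid = entry & ((1UL << DEMO_NODE_BITS) - 1);
        entry >>= DEMO_NODE_BITS;
        *memcgid = entry & ((1UL << DEMO_MEMCG_BITS) - 1);
        entry >>= DEMO_MEMCG_BITS;
        *eviction = entry;
}

int main(void)
{
        int memcgid, nid;
        unsigned long eviction;

        unpack(pack(42, 3, 123456), &memcgid, &nid, &eviction);
        assert(memcgid == 42 && nid == 3 && eviction == 123456);
        printf("round-trip ok\n");
        return 0;
}
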
218 * workingset_age_nonresident - age non-resident entries as LRU ages
222 * As in-memory pages are aged, non-resident pages need to be aged as
223 * well, in order for the refault distances later on to be comparable
224 * to the in-memory dimensions. This function allows reclaim and LRU
225 * operations to drive the non-resident aging along in parallel.
230 * Reclaiming a cgroup means reclaiming all its children in a in workingset_age_nonresident()
231 * round-robin fashion. That means that each cgroup has an LRU in workingset_age_nonresident()
232 * order that is composed of the LRU orders of its child in workingset_age_nonresident()
233 * cgroups; and every page has an LRU position not just in the in workingset_age_nonresident()
234 * cgroup that owns it, but in all of that group's ancestors. in workingset_age_nonresident()
236 * So when the physical inactive list of a leaf cgroup ages, in workingset_age_nonresident()
237 * the virtual inactive lists of all its parents, including in workingset_age_nonresident()
238 * the root cgroup's, age as well. in workingset_age_nonresident()
241 atomic_long_add(nr_pages, &lruvec->nonresident_age); in workingset_age_nonresident()
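
Per the comment above, this add runs once per cgroup level so that every ancestor's virtual list ages along with the leaf. A userspace sketch of that hierarchical aging, with demo_lruvec as an invented stand-in for the kernel's lruvec/memcg types:

#include <stdio.h>

struct demo_lruvec {
        unsigned long nonresident_age;
        struct demo_lruvec *parent;
};

static void age_nonresident(struct demo_lruvec *lruvec,
                            unsigned long nr_pages)
{
        do {
                lruvec->nonresident_age += nr_pages;
        } while ((lruvec = lruvec->parent));
}

int main(void)
{
        struct demo_lruvec root = { 0, NULL };
        struct demo_lruvec parent = { 0, &root };
        struct demo_lruvec leaf = { 0, &parent };

        age_nonresident(&leaf, 5);      /* leaf activity ages all levels */
        printf("leaf=%lu parent=%lu root=%lu\n",
               leaf.nonresident_age, parent.nonresident_age,
               root.nonresident_age);   /* 5 5 5 */
        return 0;
}
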
246 * workingset_eviction - note the eviction of a page from memory
250 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
251 * of the evicted @page so that a later refault can be detected.
260 /* Page is fully exclusive and pins page->mem_cgroup */ in workingset_eviction()
278 eviction = atomic_long_read(&lruvec->nonresident_age); in workingset_eviction()
283 * workingset_refault - evaluate the refault of a previously evicted page
326 if (memcgid != -1) { in workingset_refault()
337 refault = atomic_long_read(&eviction_lruvec->nonresident_age); in workingset_refault()
355 refault_distance = (refault - eviction) & EVICTION_MASK; in workingset_refault()
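
The mask in this line is what makes the distance immune to truncation and wraparound of the stored counter: the subtraction is effectively performed modulo the width of the eviction field. A tiny demonstration with an invented 8-bit field width standing in for EVICTION_MASK:

#include <stdio.h>

#define DEMO_MASK 0xffUL        /* illustrative stand-in for EVICTION_MASK */

int main(void)
{
        unsigned long eviction = 0xfe & DEMO_MASK;  /* snapshot near wrap */
        unsigned long refault = 0x103 & DEMO_MASK;  /* counter wrapped to 0x03 */

        /* modular subtraction still yields the true distance */
        printf("distance=%lu\n", (refault - eviction) & DEMO_MASK); /* 5 */
        return 0;
}
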
430 spin_lock_irq(&page_pgdat(page)->lru_lock); in workingset_refault()
432 spin_unlock_irq(&page_pgdat(page)->lru_lock); in workingset_refault()
447 * workingset_activation - note a page activation
457 * Filter non-memcg pages here, e.g. unmap can call in workingset_activation()
458 * mark_page_accessed() on VDSO pages. in workingset_activation()
460 * XXX: See workingset_refault() - this should return in workingset_activation()
461 * root_mem_cgroup even for !CONFIG_MEMCG. in workingset_activation()
498 * Track non-empty nodes that contain only shadow entries; in workingset_update_node()
499 * unlink those that contain pages or are being freed. in workingset_update_node()
501 * Avoid acquiring the list_lru lock when the nodes are in workingset_update_node()
502 * already where they should be. The list_empty() test is safe in workingset_update_node()
503 * as node->private_list is protected by the i_pages lock. in workingset_update_node()
507 if (node->count && node->count == node->nr_values) { in workingset_update_node()
508 if (list_empty(&node->private_list)) { in workingset_update_node()
509 list_lru_add(&shadow_nodes, &node->private_list); in workingset_update_node()
513 if (!list_empty(&node->private_list)) { in workingset_update_node()
514 list_lru_del(&shadow_nodes, &node->private_list); in workingset_update_node()
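
The invariant these branches maintain: a node sits on the shadow_nodes list exactly when it is non-empty and all of its entries are shadow (value) entries. A boolean userspace model of that rule, where demo_node and on_lru are stand-ins rather than kernel types:

#include <stdbool.h>
#include <stdio.h>

struct demo_node {
        int count;      /* total entries in the node */
        int nr_values;  /* entries that are shadow (value) entries */
        bool on_lru;    /* stands in for list_lru membership */
};

static void update_node(struct demo_node *node)
{
        if (node->count && node->count == node->nr_values)
                node->on_lru = true;    /* only shadows: reclaimable */
        else
                node->on_lru = false;   /* has pages, or is empty */
}

int main(void)
{
        struct demo_node node = { .count = 3, .nr_values = 3 };

        update_node(&node);
        printf("on_lru=%d\n", node.on_lru);     /* 1: all shadows */
        node.nr_values = 2;                     /* one real page appears */
        update_node(&node);
        printf("on_lru=%d\n", node.on_lru);     /* 0 */
        return 0;
}
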
535 * The size of the active list converges toward 100% of in count_shadow_nodes()
536 * overall page cache as memory grows, with only a tiny in count_shadow_nodes()
537 * inactive list. Assume the total cache size for that. in count_shadow_nodes()
542 * worst-case density of 1/8th. Below that, not all eligible in count_shadow_nodes()
543 * refaults can be detected anymore. in count_shadow_nodes()
545 * On 64-bit with 7 xa_nodes per page and 64 slots in count_shadow_nodes()
546 * each, this will reclaim shadow entries when they consume in count_shadow_nodes()
547 * ~1.8% of available memory: in count_shadow_nodes()
549 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE in count_shadow_nodes()
553 if (sc->memcg) { in count_shadow_nodes()
557 lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid)); in count_shadow_nodes()
568 pages = node_present_pages(sc->nid); in count_shadow_nodes()
570 max_nodes = pages >> (XA_CHUNK_SHIFT - 3); in count_shadow_nodes()
577 return nodes - max_nodes; in count_shadow_nodes()
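
Plugging numbers into the shift above: with XA_CHUNK_SHIFT of 6 (64 slots per xarray node on 64-bit), max_nodes works out to one shadow node per 8 cache pages, matching the 1/8th worst-case density from the comment. The page count below is an illustrative figure only.

#include <stdio.h>

#define XA_CHUNK_SHIFT 6        /* 64 slots per xarray node on 64-bit */

int main(void)
{
        unsigned long pages = 4UL << 20;        /* 4M cache pages, 16GB at 4K */
        unsigned long max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

        printf("max_nodes=%lu, one node per %lu cache pages\n",
               max_nodes, pages / max_nodes);   /* 524288, 8 */
        return 0;
}
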
596 * We can then safely transition to the i_pages lock to in shadow_lru_isolate()
597 * pin only the address_space of the particular node we want in shadow_lru_isolate()
598 * to reclaim, take the node off-LRU, and drop the lru_lock. in shadow_lru_isolate()
601 mapping = container_of(node->array, struct address_space, i_pages); in shadow_lru_isolate()
604 if (!xa_trylock(&mapping->i_pages)) { in shadow_lru_isolate()
620 if (WARN_ON_ONCE(!node->nr_values)) in shadow_lru_isolate()
622 if (WARN_ON_ONCE(node->count != node->nr_values)) in shadow_lru_isolate()
624 mapping->nrexceptional -= node->nr_values; in shadow_lru_isolate()
629 xa_unlock_irq(&mapping->i_pages); in shadow_lru_isolate()
640 /* list_lru lock nests inside the IRQ-safe i_pages lock */ in scan_shadow_nodes()
648 .seeks = 0, /* ->count reports only fully expendable nodes */
653 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
654 * i_pages lock.
666 * Calculate the eviction bucket size to cover the longest in workingset_init()
667 * actionable refault distance, which is currently half of in workingset_init()
668 * memory (totalram_pages/2). However, memory hotplug may add in workingset_init()
669 * some more pages at runtime, so keep working with up to in workingset_init()
670 * double the initial memory by using totalram_pages as-is. in workingset_init()
672 timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT; in workingset_init()
673 max_order = fls_long(totalram_pages() - 1); in workingset_init()
675 bucket_order = max_order - timestamp_bits; in workingset_init()
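
A worked example of this arithmetic for a hypothetical 32-bit build. The NODES_SHIFT of 0 and MEM_CGROUP_ID_SHIFT of 16 below are typical config values, not universal: 14 timestamp bits remain in the shadow entry, 4GB of 4K pages needs 20 bits of eviction counter, so timestamps get bucketed by 2^6 counter events.

#include <stdio.h>

static int fls_long(unsigned long x)    /* find last set bit, 1-based */
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        int bits_per_long = 32;
        int bits_per_xa_value = bits_per_long - 1;
        int nodes_shift = 0;            /* non-NUMA 32-bit build */
        int memcg_id_shift = 16;        /* MEM_CGROUP_ID_SHIFT */
        int eviction_shift = (bits_per_long - bits_per_xa_value) +
                             1 + nodes_shift + memcg_id_shift;
        int timestamp_bits = bits_per_long - eviction_shift;
        unsigned long totalram = 1UL << 20;     /* 4GB of 4K pages */
        int max_order = fls_long(totalram - 1);
        int bucket_order = 0;

        if (max_order > timestamp_bits)
                bucket_order = max_order - timestamp_bits;

        printf("timestamp_bits=%d max_order=%d bucket_order=%d\n",
               timestamp_bits, max_order, bucket_order);        /* 14 20 6 */
        return 0;
}
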