• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/mm/vmscan.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  Swap reorganised 29.12.95, Stephen Tweedie.
8  *  kswapd added: 7.1.96  sct
9  *  Removed kswapd_ctl limits, and swap out as many pages as needed
10  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
11  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
12  *  Multiqueue VM started 5.8.00, Rik van Riel.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/mm.h>
18 #include <linux/sched/mm.h>
19 #include <linux/module.h>
20 #include <linux/gfp.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/swap.h>
23 #include <linux/pagemap.h>
24 #include <linux/init.h>
25 #include <linux/highmem.h>
26 #include <linux/vmpressure.h>
27 #include <linux/vmstat.h>
28 #include <linux/file.h>
29 #include <linux/writeback.h>
30 #include <linux/blkdev.h>
31 #include <linux/buffer_head.h>	/* for try_to_release_page(),
32 					buffer_heads_over_limit */
33 #include <linux/mm_inline.h>
34 #include <linux/backing-dev.h>
35 #include <linux/rmap.h>
36 #include <linux/topology.h>
37 #include <linux/cpu.h>
38 #include <linux/cpuset.h>
39 #include <linux/compaction.h>
40 #include <linux/notifier.h>
41 #include <linux/rwsem.h>
42 #include <linux/delay.h>
43 #include <linux/kthread.h>
44 #include <linux/freezer.h>
45 #include <linux/memcontrol.h>
46 #include <linux/delayacct.h>
47 #include <linux/sysctl.h>
48 #include <linux/oom.h>
49 #include <linux/pagevec.h>
50 #include <linux/prefetch.h>
51 #include <linux/printk.h>
52 #include <linux/dax.h>
53 #include <linux/psi.h>
54 #include <linux/pagewalk.h>
55 #include <linux/shmem_fs.h>
56 #include <linux/ctype.h>
57 #include <linux/debugfs.h>
58 
59 #include <asm/tlbflush.h>
60 #include <asm/div64.h>
61 
62 #include <linux/swapops.h>
63 #include <linux/balloon_compaction.h>
64 
65 #include "internal.h"
66 
67 #define CREATE_TRACE_POINTS
68 #include <trace/events/vmscan.h>
69 
70 #undef CREATE_TRACE_POINTS
71 #include <trace/hooks/vmscan.h>
72 
73 #undef CREATE_TRACE_POINTS
74 #include <trace/hooks/mm.h>
75 
76 EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_begin);
77 EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_end);
78 
79 struct scan_control {
80 	/* How many pages shrink_list() should reclaim */
81 	unsigned long nr_to_reclaim;
82 
83 	/*
84 	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
85 	 * are scanned.
86 	 */
87 	nodemask_t	*nodemask;
88 
89 	/*
90 	 * The memory cgroup that hit its limit and as a result is the
91 	 * primary target of this reclaim invocation.
92 	 */
93 	struct mem_cgroup *target_mem_cgroup;
94 
95 	/*
96 	 * Scan pressure balancing between anon and file LRUs
97 	 */
98 	unsigned long	anon_cost;
99 	unsigned long	file_cost;
100 
101 	/* Can active pages be deactivated as part of reclaim? */
102 #define DEACTIVATE_ANON 1
103 #define DEACTIVATE_FILE 2
104 	unsigned int may_deactivate:2;
105 	unsigned int force_deactivate:1;
106 	unsigned int skipped_deactivate:1;
107 
108 	/* Writepage batching in laptop mode; RECLAIM_WRITE */
109 	unsigned int may_writepage:1;
110 
111 	/* Can mapped pages be reclaimed? */
112 	unsigned int may_unmap:1;
113 
114 	/* Can pages be swapped as part of reclaim? */
115 	unsigned int may_swap:1;
116 
117 	/*
118 	 * Cgroup memory below memory.low is protected as long as we
119 	 * don't threaten to OOM. If any cgroup is reclaimed at
120 	 * reduced force or passed over entirely due to its memory.low
121 	 * setting (memcg_low_skipped), and nothing is reclaimed as a
122 	 * result, then go back for one more cycle that reclaims the protected
123 	 * memory (memcg_low_reclaim) to avert OOM.
124 	 */
125 	unsigned int memcg_low_reclaim:1;
126 	unsigned int memcg_low_skipped:1;
127 
128 	unsigned int hibernation_mode:1;
129 
130 	/* One of the zones is ready for compaction */
131 	unsigned int compaction_ready:1;
132 
133 	/* There is easily reclaimable cold cache in the current node */
134 	unsigned int cache_trim_mode:1;
135 
136 	/* The file pages on the current node are dangerously low */
137 	unsigned int file_is_tiny:1;
138 
139 #ifdef CONFIG_LRU_GEN
140 	/* help kswapd make better choices among multiple memcgs */
141 	unsigned int memcgs_need_aging:1;
142 	unsigned long last_reclaimed;
143 #endif
144 
145 	/* Allocation order */
146 	s8 order;
147 
148 	/* Scan (total_size >> priority) pages at once */
149 	s8 priority;
150 
151 	/* The highest zone to isolate pages for reclaim from */
152 	s8 reclaim_idx;
153 
154 	/* This context's GFP mask */
155 	gfp_t gfp_mask;
156 
157 	/* Incremented by the number of inactive pages that were scanned */
158 	unsigned long nr_scanned;
159 
160 	/* Number of pages freed so far during a call to shrink_zones() */
161 	unsigned long nr_reclaimed;
162 
163 	struct {
164 		unsigned int dirty;
165 		unsigned int unqueued_dirty;
166 		unsigned int congested;
167 		unsigned int writeback;
168 		unsigned int immediate;
169 		unsigned int file_taken;
170 		unsigned int taken;
171 	} nr;
172 
173 	/* for recording the reclaimed slab by now */
174 	struct reclaim_state reclaim_state;
175 };
176 
177 #ifdef ARCH_HAS_PREFETCHW
178 #define prefetchw_prev_lru_page(_page, _base, _field)			\
179 	do {								\
180 		if ((_page)->lru.prev != _base) {			\
181 			struct page *prev;				\
182 									\
183 			prev = lru_to_page(&(_page->lru));		\
184 			prefetchw(&prev->_field);			\
185 		}							\
186 	} while (0)
187 #else
188 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
189 #endif
190 
191 /*
192  * From 0 .. 200.  Higher means more swappy.
193  */
194 int vm_swappiness = 60;
195 
196 #define DEF_KSWAPD_THREADS_PER_NODE 1
197 static int kswapd_threads = DEF_KSWAPD_THREADS_PER_NODE;
kswapd_per_node_setup(char * str)198 static int __init kswapd_per_node_setup(char *str)
199 {
200 	int tmp;
201 
202 	if (kstrtoint(str, 0, &tmp) < 0)
203 		return 0;
204 
205 	if (tmp > MAX_KSWAPD_THREADS || tmp <= 0)
206 		return 0;
207 
208 	kswapd_threads = tmp;
209 	return 1;
210 }
211 __setup("kswapd_per_node=", kswapd_per_node_setup);
212 
set_task_reclaim_state(struct task_struct * task,struct reclaim_state * rs)213 static void set_task_reclaim_state(struct task_struct *task,
214 				   struct reclaim_state *rs)
215 {
216 	/* Check for an overwrite */
217 	WARN_ON_ONCE(rs && task->reclaim_state);
218 
219 	/* Check for the nulling of an already-nulled member */
220 	WARN_ON_ONCE(!rs && !task->reclaim_state);
221 
222 	task->reclaim_state = rs;
223 }
224 
225 static LIST_HEAD(shrinker_list);
226 static DECLARE_RWSEM(shrinker_rwsem);
227 
228 #ifdef CONFIG_MEMCG
229 /*
230  * We allow subsystems to populate their shrinker-related
231  * LRU lists before register_shrinker_prepared() is called
232  * for the shrinker, since we don't want to impose
233  * restrictions on their internal registration order.
234  * In this case shrink_slab_memcg() may find corresponding
235  * bit is set in the shrinkers map.
236  *
237  * This value is used by the function to detect registering
238  * shrinkers and to skip do_shrink_slab() calls for them.
239  */
240 #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
241 
242 static DEFINE_IDR(shrinker_idr);
243 static int shrinker_nr_max;
244 
prealloc_memcg_shrinker(struct shrinker * shrinker)245 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
246 {
247 	int id, ret = -ENOMEM;
248 
249 	down_write(&shrinker_rwsem);
250 	/* This may call shrinker, so it must use down_read_trylock() */
251 	id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
252 	if (id < 0)
253 		goto unlock;
254 
255 	if (id >= shrinker_nr_max) {
256 		if (memcg_expand_shrinker_maps(id)) {
257 			idr_remove(&shrinker_idr, id);
258 			goto unlock;
259 		}
260 
261 		shrinker_nr_max = id + 1;
262 	}
263 	shrinker->id = id;
264 	ret = 0;
265 unlock:
266 	up_write(&shrinker_rwsem);
267 	return ret;
268 }
269 
unregister_memcg_shrinker(struct shrinker * shrinker)270 static void unregister_memcg_shrinker(struct shrinker *shrinker)
271 {
272 	int id = shrinker->id;
273 
274 	BUG_ON(id < 0);
275 
276 	down_write(&shrinker_rwsem);
277 	idr_remove(&shrinker_idr, id);
278 	up_write(&shrinker_rwsem);
279 }
280 
cgroup_reclaim(struct scan_control * sc)281 static bool cgroup_reclaim(struct scan_control *sc)
282 {
283 	return sc->target_mem_cgroup;
284 }
285 
286 /**
287  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
288  * @sc: scan_control in question
289  *
290  * The normal page dirty throttling mechanism in balance_dirty_pages() is
291  * completely broken with the legacy memcg and direct stalling in
292  * shrink_page_list() is used for throttling instead, which lacks all the
293  * niceties such as fairness, adaptive pausing, bandwidth proportional
294  * allocation and configurability.
295  *
296  * This function tests whether the vmscan currently in progress can assume
297  * that the normal dirty throttling mechanism is operational.
298  */
writeback_throttling_sane(struct scan_control * sc)299 static bool writeback_throttling_sane(struct scan_control *sc)
300 {
301 	if (!cgroup_reclaim(sc))
302 		return true;
303 #ifdef CONFIG_CGROUP_WRITEBACK
304 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
305 		return true;
306 #endif
307 	return false;
308 }
309 #else
prealloc_memcg_shrinker(struct shrinker * shrinker)310 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
311 {
312 	return 0;
313 }
314 
unregister_memcg_shrinker(struct shrinker * shrinker)315 static void unregister_memcg_shrinker(struct shrinker *shrinker)
316 {
317 }
318 
cgroup_reclaim(struct scan_control * sc)319 static bool cgroup_reclaim(struct scan_control *sc)
320 {
321 	return false;
322 }
323 
writeback_throttling_sane(struct scan_control * sc)324 static bool writeback_throttling_sane(struct scan_control *sc)
325 {
326 	return true;
327 }
328 #endif
329 
330 /*
331  * This misses isolated pages which are not accounted for to save counters.
332  * As the data only determines if reclaim or compaction continues, it is
333  * not expected that isolated pages will be a dominating factor.
334  */
zone_reclaimable_pages(struct zone * zone)335 unsigned long zone_reclaimable_pages(struct zone *zone)
336 {
337 	unsigned long nr;
338 
339 	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
340 		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
341 	if (get_nr_swap_pages() > 0)
342 		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
343 			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
344 
345 	return nr;
346 }
347 
348 /**
349  * lruvec_lru_size -  Returns the number of pages on the given LRU list.
350  * @lruvec: lru vector
351  * @lru: lru to use
352  * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
353  */
lruvec_lru_size(struct lruvec * lruvec,enum lru_list lru,int zone_idx)354 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
355 {
356 	unsigned long size = 0;
357 	int zid;
358 
359 	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
360 		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
361 
362 		if (!managed_zone(zone))
363 			continue;
364 
365 		if (!mem_cgroup_disabled())
366 			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
367 		else
368 			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
369 	}
370 	return size;
371 }
372 
373 /*
374  * Add a shrinker callback to be called from the vm.
375  */
prealloc_shrinker(struct shrinker * shrinker)376 int prealloc_shrinker(struct shrinker *shrinker)
377 {
378 	unsigned int size = sizeof(*shrinker->nr_deferred);
379 
380 	if (shrinker->flags & SHRINKER_NUMA_AWARE)
381 		size *= nr_node_ids;
382 
383 	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
384 	if (!shrinker->nr_deferred)
385 		return -ENOMEM;
386 
387 	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
388 		if (prealloc_memcg_shrinker(shrinker))
389 			goto free_deferred;
390 	}
391 
392 	return 0;
393 
394 free_deferred:
395 	kfree(shrinker->nr_deferred);
396 	shrinker->nr_deferred = NULL;
397 	return -ENOMEM;
398 }
399 
free_prealloced_shrinker(struct shrinker * shrinker)400 void free_prealloced_shrinker(struct shrinker *shrinker)
401 {
402 	if (!shrinker->nr_deferred)
403 		return;
404 
405 	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
406 		unregister_memcg_shrinker(shrinker);
407 
408 	kfree(shrinker->nr_deferred);
409 	shrinker->nr_deferred = NULL;
410 }
411 
register_shrinker_prepared(struct shrinker * shrinker)412 void register_shrinker_prepared(struct shrinker *shrinker)
413 {
414 	down_write(&shrinker_rwsem);
415 	list_add_tail(&shrinker->list, &shrinker_list);
416 #ifdef CONFIG_MEMCG
417 	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
418 		idr_replace(&shrinker_idr, shrinker, shrinker->id);
419 #endif
420 	up_write(&shrinker_rwsem);
421 }
422 
register_shrinker(struct shrinker * shrinker)423 int register_shrinker(struct shrinker *shrinker)
424 {
425 	int err = prealloc_shrinker(shrinker);
426 
427 	if (err)
428 		return err;
429 	register_shrinker_prepared(shrinker);
430 	return 0;
431 }
432 EXPORT_SYMBOL(register_shrinker);
433 
434 /*
435  * Remove one
436  */
unregister_shrinker(struct shrinker * shrinker)437 void unregister_shrinker(struct shrinker *shrinker)
438 {
439 	if (!shrinker->nr_deferred)
440 		return;
441 	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
442 		unregister_memcg_shrinker(shrinker);
443 	down_write(&shrinker_rwsem);
444 	list_del(&shrinker->list);
445 	up_write(&shrinker_rwsem);
446 	kfree(shrinker->nr_deferred);
447 	shrinker->nr_deferred = NULL;
448 }
449 EXPORT_SYMBOL(unregister_shrinker);
450 
451 #define SHRINK_BATCH 128
452 
do_shrink_slab(struct shrink_control * shrinkctl,struct shrinker * shrinker,int priority)453 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
454 				    struct shrinker *shrinker, int priority)
455 {
456 	unsigned long freed = 0;
457 	unsigned long long delta;
458 	long total_scan;
459 	long freeable;
460 	long nr;
461 	long new_nr;
462 	int nid = shrinkctl->nid;
463 	long batch_size = shrinker->batch ? shrinker->batch
464 					  : SHRINK_BATCH;
465 	long scanned = 0, next_deferred;
466 
467 	trace_android_vh_do_shrink_slab(shrinker, shrinkctl, priority);
468 
469 	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
470 		nid = 0;
471 
472 	freeable = shrinker->count_objects(shrinker, shrinkctl);
473 	if (freeable == 0 || freeable == SHRINK_EMPTY)
474 		return freeable;
475 
476 	/*
477 	 * copy the current shrinker scan count into a local variable
478 	 * and zero it so that other concurrent shrinker invocations
479 	 * don't also do this scanning work.
480 	 */
481 	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
482 
483 	total_scan = nr;
484 	if (shrinker->seeks) {
485 		delta = freeable >> priority;
486 		delta *= 4;
487 		do_div(delta, shrinker->seeks);
488 	} else {
489 		/*
490 		 * These objects don't require any IO to create. Trim
491 		 * them aggressively under memory pressure to keep
492 		 * them from causing refetches in the IO caches.
493 		 */
494 		delta = freeable / 2;
495 	}
496 
497 	total_scan += delta;
498 	if (total_scan < 0) {
499 		pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
500 		       shrinker->scan_objects, total_scan);
501 		total_scan = freeable;
502 		next_deferred = nr;
503 	} else
504 		next_deferred = total_scan;
505 
506 	/*
507 	 * We need to avoid excessive windup on filesystem shrinkers
508 	 * due to large numbers of GFP_NOFS allocations causing the
509 	 * shrinkers to return -1 all the time. This results in a large
510 	 * nr being built up so when a shrink that can do some work
511 	 * comes along it empties the entire cache due to nr >>>
512 	 * freeable. This is bad for sustaining a working set in
513 	 * memory.
514 	 *
515 	 * Hence only allow the shrinker to scan the entire cache when
516 	 * a large delta change is calculated directly.
517 	 */
518 	if (delta < freeable / 4)
519 		total_scan = min(total_scan, freeable / 2);
520 
521 	/*
522 	 * Avoid risking looping forever due to too large nr value:
523 	 * never try to free more than twice the estimate number of
524 	 * freeable entries.
525 	 */
526 	if (total_scan > freeable * 2)
527 		total_scan = freeable * 2;
528 
529 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
530 				   freeable, delta, total_scan, priority);
531 
532 	/*
533 	 * Normally, we should not scan less than batch_size objects in one
534 	 * pass to avoid too frequent shrinker calls, but if the slab has less
535 	 * than batch_size objects in total and we are really tight on memory,
536 	 * we will try to reclaim all available objects, otherwise we can end
537 	 * up failing allocations although there are plenty of reclaimable
538 	 * objects spread over several slabs with usage less than the
539 	 * batch_size.
540 	 *
541 	 * We detect the "tight on memory" situations by looking at the total
542 	 * number of objects we want to scan (total_scan). If it is greater
543 	 * than the total number of objects on slab (freeable), we must be
544 	 * scanning at high prio and therefore should try to reclaim as much as
545 	 * possible.
546 	 */
547 	while (total_scan >= batch_size ||
548 	       total_scan >= freeable) {
549 		unsigned long ret;
550 		unsigned long nr_to_scan = min(batch_size, total_scan);
551 
552 		shrinkctl->nr_to_scan = nr_to_scan;
553 		shrinkctl->nr_scanned = nr_to_scan;
554 		ret = shrinker->scan_objects(shrinker, shrinkctl);
555 		if (ret == SHRINK_STOP)
556 			break;
557 		freed += ret;
558 
559 		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
560 		total_scan -= shrinkctl->nr_scanned;
561 		scanned += shrinkctl->nr_scanned;
562 
563 		cond_resched();
564 	}
565 
566 	if (next_deferred >= scanned)
567 		next_deferred -= scanned;
568 	else
569 		next_deferred = 0;
570 	/*
571 	 * move the unused scan count back into the shrinker in a
572 	 * manner that handles concurrent updates. If we exhausted the
573 	 * scan, there is no need to do an update.
574 	 */
575 	if (next_deferred > 0)
576 		new_nr = atomic_long_add_return(next_deferred,
577 						&shrinker->nr_deferred[nid]);
578 	else
579 		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
580 
581 	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
582 	return freed;
583 }
584 
585 #ifdef CONFIG_MEMCG
shrink_slab_memcg(gfp_t gfp_mask,int nid,struct mem_cgroup * memcg,int priority)586 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
587 			struct mem_cgroup *memcg, int priority)
588 {
589 	struct memcg_shrinker_map *map;
590 	unsigned long ret, freed = 0;
591 	int i;
592 
593 	if (!mem_cgroup_online(memcg))
594 		return 0;
595 
596 	if (!down_read_trylock(&shrinker_rwsem))
597 		return 0;
598 
599 	map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
600 					true);
601 	if (unlikely(!map))
602 		goto unlock;
603 
604 	for_each_set_bit(i, map->map, shrinker_nr_max) {
605 		struct shrink_control sc = {
606 			.gfp_mask = gfp_mask,
607 			.nid = nid,
608 			.memcg = memcg,
609 		};
610 		struct shrinker *shrinker;
611 
612 		shrinker = idr_find(&shrinker_idr, i);
613 		if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
614 			if (!shrinker)
615 				clear_bit(i, map->map);
616 			continue;
617 		}
618 
619 		/* Call non-slab shrinkers even though kmem is disabled */
620 		if (!memcg_kmem_enabled() &&
621 		    !(shrinker->flags & SHRINKER_NONSLAB))
622 			continue;
623 
624 		ret = do_shrink_slab(&sc, shrinker, priority);
625 		if (ret == SHRINK_EMPTY) {
626 			clear_bit(i, map->map);
627 			/*
628 			 * After the shrinker reported that it had no objects to
629 			 * free, but before we cleared the corresponding bit in
630 			 * the memcg shrinker map, a new object might have been
631 			 * added. To make sure, we have the bit set in this
632 			 * case, we invoke the shrinker one more time and reset
633 			 * the bit if it reports that it is not empty anymore.
634 			 * The memory barrier here pairs with the barrier in
635 			 * memcg_set_shrinker_bit():
636 			 *
637 			 * list_lru_add()     shrink_slab_memcg()
638 			 *   list_add_tail()    clear_bit()
639 			 *   <MB>               <MB>
640 			 *   set_bit()          do_shrink_slab()
641 			 */
642 			smp_mb__after_atomic();
643 			ret = do_shrink_slab(&sc, shrinker, priority);
644 			if (ret == SHRINK_EMPTY)
645 				ret = 0;
646 			else
647 				memcg_set_shrinker_bit(memcg, nid, i);
648 		}
649 		freed += ret;
650 
651 		if (rwsem_is_contended(&shrinker_rwsem)) {
652 			freed = freed ? : 1;
653 			break;
654 		}
655 	}
656 unlock:
657 	up_read(&shrinker_rwsem);
658 	return freed;
659 }
660 #else /* CONFIG_MEMCG */
shrink_slab_memcg(gfp_t gfp_mask,int nid,struct mem_cgroup * memcg,int priority)661 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
662 			struct mem_cgroup *memcg, int priority)
663 {
664 	return 0;
665 }
666 #endif /* CONFIG_MEMCG */
667 
668 /**
669  * shrink_slab - shrink slab caches
670  * @gfp_mask: allocation context
671  * @nid: node whose slab caches to target
672  * @memcg: memory cgroup whose slab caches to target
673  * @priority: the reclaim priority
674  *
675  * Call the shrink functions to age shrinkable caches.
676  *
677  * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
678  * unaware shrinkers will receive a node id of 0 instead.
679  *
680  * @memcg specifies the memory cgroup to target. Unaware shrinkers
681  * are called only if it is the root cgroup.
682  *
683  * @priority is sc->priority, we take the number of objects and >> by priority
684  * in order to get the scan target.
685  *
686  * Returns the number of reclaimed slab objects.
687  */
shrink_slab(gfp_t gfp_mask,int nid,struct mem_cgroup * memcg,int priority)688 static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
689 				 struct mem_cgroup *memcg,
690 				 int priority)
691 {
692 	unsigned long ret, freed = 0;
693 	struct shrinker *shrinker;
694 	bool bypass = false;
695 
696 	trace_android_vh_shrink_slab_bypass(gfp_mask, nid, memcg, priority, &bypass);
697 	if (bypass)
698 		return 0;
699 
700 	/*
701 	 * The root memcg might be allocated even though memcg is disabled
702 	 * via "cgroup_disable=memory" boot parameter.  This could make
703 	 * mem_cgroup_is_root() return false, then just run memcg slab
704 	 * shrink, but skip global shrink.  This may result in premature
705 	 * oom.
706 	 */
707 	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
708 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
709 
710 	if (!down_read_trylock(&shrinker_rwsem))
711 		goto out;
712 
713 	list_for_each_entry(shrinker, &shrinker_list, list) {
714 		struct shrink_control sc = {
715 			.gfp_mask = gfp_mask,
716 			.nid = nid,
717 			.memcg = memcg,
718 		};
719 
720 		ret = do_shrink_slab(&sc, shrinker, priority);
721 		if (ret == SHRINK_EMPTY)
722 			ret = 0;
723 		freed += ret;
724 		/*
725 		 * Bail out if someone want to register a new shrinker to
726 		 * prevent the registration from being stalled for long periods
727 		 * by parallel ongoing shrinking.
728 		 */
729 		if (rwsem_is_contended(&shrinker_rwsem)) {
730 			freed = freed ? : 1;
731 			break;
732 		}
733 	}
734 
735 	up_read(&shrinker_rwsem);
736 out:
737 	cond_resched();
738 	return freed;
739 }
740 
drop_slab_node(int nid)741 void drop_slab_node(int nid)
742 {
743 	unsigned long freed;
744 
745 	do {
746 		struct mem_cgroup *memcg = NULL;
747 
748 		if (fatal_signal_pending(current))
749 			return;
750 
751 		freed = 0;
752 		memcg = mem_cgroup_iter(NULL, NULL, NULL);
753 		do {
754 			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
755 		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
756 	} while (freed > 10);
757 }
758 
drop_slab(void)759 void drop_slab(void)
760 {
761 	int nid;
762 
763 	for_each_online_node(nid)
764 		drop_slab_node(nid);
765 }
766 
is_page_cache_freeable(struct page * page)767 static inline int is_page_cache_freeable(struct page *page)
768 {
769 	/*
770 	 * A freeable page cache page is referenced only by the caller
771 	 * that isolated the page, the page cache and optional buffer
772 	 * heads at page->private.
773 	 */
774 	int page_cache_pins = thp_nr_pages(page);
775 	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
776 }
777 
may_write_to_inode(struct inode * inode)778 static int may_write_to_inode(struct inode *inode)
779 {
780 	if (current->flags & PF_SWAPWRITE)
781 		return 1;
782 	if (!inode_write_congested(inode))
783 		return 1;
784 	if (inode_to_bdi(inode) == current->backing_dev_info)
785 		return 1;
786 	return 0;
787 }
788 
789 /*
790  * We detected a synchronous write error writing a page out.  Probably
791  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
792  * fsync(), msync() or close().
793  *
794  * The tricky part is that after writepage we cannot touch the mapping: nothing
795  * prevents it from being freed up.  But we have a ref on the page and once
796  * that page is locked, the mapping is pinned.
797  *
798  * We're allowed to run sleeping lock_page() here because we know the caller has
799  * __GFP_FS.
800  */
handle_write_error(struct address_space * mapping,struct page * page,int error)801 static void handle_write_error(struct address_space *mapping,
802 				struct page *page, int error)
803 {
804 	lock_page(page);
805 	if (page_mapping(page) == mapping)
806 		mapping_set_error(mapping, error);
807 	unlock_page(page);
808 }
809 
810 /* possible outcome of pageout() */
811 typedef enum {
812 	/* failed to write page out, page is locked */
813 	PAGE_KEEP,
814 	/* move page to the active list, page is locked */
815 	PAGE_ACTIVATE,
816 	/* page has been sent to the disk successfully, page is unlocked */
817 	PAGE_SUCCESS,
818 	/* page is clean and locked */
819 	PAGE_CLEAN,
820 } pageout_t;
821 
822 /*
823  * pageout is called by shrink_page_list() for each dirty page.
824  * Calls ->writepage().
825  */
pageout(struct page * page,struct address_space * mapping)826 static pageout_t pageout(struct page *page, struct address_space *mapping)
827 {
828 	/*
829 	 * If the page is dirty, only perform writeback if that write
830 	 * will be non-blocking.  To prevent this allocation from being
831 	 * stalled by pagecache activity.  But note that there may be
832 	 * stalls if we need to run get_block().  We could test
833 	 * PagePrivate for that.
834 	 *
835 	 * If this process is currently in __generic_file_write_iter() against
836 	 * this page's queue, we can perform writeback even if that
837 	 * will block.
838 	 *
839 	 * If the page is swapcache, write it back even if that would
840 	 * block, for some throttling. This happens by accident, because
841 	 * swap_backing_dev_info is bust: it doesn't reflect the
842 	 * congestion state of the swapdevs.  Easy to fix, if needed.
843 	 */
844 	if (!is_page_cache_freeable(page))
845 		return PAGE_KEEP;
846 	if (!mapping) {
847 		/*
848 		 * Some data journaling orphaned pages can have
849 		 * page->mapping == NULL while being dirty with clean buffers.
850 		 */
851 		if (page_has_private(page)) {
852 			if (try_to_free_buffers(page)) {
853 				ClearPageDirty(page);
854 				pr_info("%s: orphaned page\n", __func__);
855 				return PAGE_CLEAN;
856 			}
857 		}
858 		return PAGE_KEEP;
859 	}
860 	if (mapping->a_ops->writepage == NULL)
861 		return PAGE_ACTIVATE;
862 	if (!may_write_to_inode(mapping->host))
863 		return PAGE_KEEP;
864 
865 	if (clear_page_dirty_for_io(page)) {
866 		int res;
867 		struct writeback_control wbc = {
868 			.sync_mode = WB_SYNC_NONE,
869 			.nr_to_write = SWAP_CLUSTER_MAX,
870 			.range_start = 0,
871 			.range_end = LLONG_MAX,
872 			.for_reclaim = 1,
873 		};
874 
875 		SetPageReclaim(page);
876 		res = mapping->a_ops->writepage(page, &wbc);
877 		if (res < 0)
878 			handle_write_error(mapping, page, res);
879 		if (res == AOP_WRITEPAGE_ACTIVATE) {
880 			ClearPageReclaim(page);
881 			return PAGE_ACTIVATE;
882 		}
883 
884 		if (!PageWriteback(page)) {
885 			/* synchronous write or broken a_ops? */
886 			ClearPageReclaim(page);
887 		}
888 		trace_mm_vmscan_writepage(page);
889 		inc_node_page_state(page, NR_VMSCAN_WRITE);
890 		return PAGE_SUCCESS;
891 	}
892 
893 	return PAGE_CLEAN;
894 }
895 
896 /*
897  * Same as remove_mapping, but if the page is removed from the mapping, it
898  * gets returned with a refcount of 0.
899  */
__remove_mapping(struct address_space * mapping,struct page * page,bool reclaimed,struct mem_cgroup * target_memcg)900 static int __remove_mapping(struct address_space *mapping, struct page *page,
901 			    bool reclaimed, struct mem_cgroup *target_memcg)
902 {
903 	unsigned long flags;
904 	int refcount;
905 	void *shadow = NULL;
906 
907 	BUG_ON(!PageLocked(page));
908 	BUG_ON(mapping != page_mapping(page));
909 
910 	xa_lock_irqsave(&mapping->i_pages, flags);
911 	/*
912 	 * The non racy check for a busy page.
913 	 *
914 	 * Must be careful with the order of the tests. When someone has
915 	 * a ref to the page, it may be possible that they dirty it then
916 	 * drop the reference. So if PageDirty is tested before page_count
917 	 * here, then the following race may occur:
918 	 *
919 	 * get_user_pages(&page);
920 	 * [user mapping goes away]
921 	 * write_to(page);
922 	 *				!PageDirty(page)    [good]
923 	 * SetPageDirty(page);
924 	 * put_page(page);
925 	 *				!page_count(page)   [good, discard it]
926 	 *
927 	 * [oops, our write_to data is lost]
928 	 *
929 	 * Reversing the order of the tests ensures such a situation cannot
930 	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
931 	 * load is not satisfied before that of page->_refcount.
932 	 *
933 	 * Note that if SetPageDirty is always performed via set_page_dirty,
934 	 * and thus under the i_pages lock, then this ordering is not required.
935 	 */
936 	refcount = 1 + compound_nr(page);
937 	if (!page_ref_freeze(page, refcount))
938 		goto cannot_free;
939 	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
940 	if (unlikely(PageDirty(page))) {
941 		page_ref_unfreeze(page, refcount);
942 		goto cannot_free;
943 	}
944 
945 	if (PageSwapCache(page)) {
946 		swp_entry_t swap = { .val = page_private(page) };
947 
948 		/* get a shadow entry before mem_cgroup_swapout() clears page_memcg() */
949 		if (reclaimed && !mapping_exiting(mapping))
950 			shadow = workingset_eviction(page, target_memcg);
951 		mem_cgroup_swapout(page, swap);
952 		__delete_from_swap_cache(page, swap, shadow);
953 		xa_unlock_irqrestore(&mapping->i_pages, flags);
954 		put_swap_page(page, swap);
955 	} else {
956 		void (*freepage)(struct page *);
957 
958 		freepage = mapping->a_ops->freepage;
959 		/*
960 		 * Remember a shadow entry for reclaimed file cache in
961 		 * order to detect refaults, thus thrashing, later on.
962 		 *
963 		 * But don't store shadows in an address space that is
964 		 * already exiting.  This is not just an optimization,
965 		 * inode reclaim needs to empty out the radix tree or
966 		 * the nodes are lost.  Don't plant shadows behind its
967 		 * back.
968 		 *
969 		 * We also don't store shadows for DAX mappings because the
970 		 * only page cache pages found in these are zero pages
971 		 * covering holes, and because we don't want to mix DAX
972 		 * exceptional entries and shadow exceptional entries in the
973 		 * same address_space.
974 		 */
975 		if (reclaimed && page_is_file_lru(page) &&
976 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
977 			shadow = workingset_eviction(page, target_memcg);
978 		__delete_from_page_cache(page, shadow);
979 		xa_unlock_irqrestore(&mapping->i_pages, flags);
980 
981 		if (freepage != NULL)
982 			freepage(page);
983 	}
984 
985 	return 1;
986 
987 cannot_free:
988 	xa_unlock_irqrestore(&mapping->i_pages, flags);
989 	return 0;
990 }
991 
992 /*
993  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
994  * someone else has a ref on the page, abort and return 0.  If it was
995  * successfully detached, return 1.  Assumes the caller has a single ref on
996  * this page.
997  */
remove_mapping(struct address_space * mapping,struct page * page)998 int remove_mapping(struct address_space *mapping, struct page *page)
999 {
1000 	if (__remove_mapping(mapping, page, false, NULL)) {
1001 		/*
1002 		 * Unfreezing the refcount with 1 rather than 2 effectively
1003 		 * drops the pagecache ref for us without requiring another
1004 		 * atomic operation.
1005 		 */
1006 		page_ref_unfreeze(page, 1);
1007 		return 1;
1008 	}
1009 	return 0;
1010 }
1011 
1012 /**
1013  * putback_lru_page - put previously isolated page onto appropriate LRU list
1014  * @page: page to be put back to appropriate lru list
1015  *
1016  * Add previously isolated @page to appropriate LRU list.
1017  * Page may still be unevictable for other reasons.
1018  *
1019  * lru_lock must not be held, interrupts must be enabled.
1020  */
putback_lru_page(struct page * page)1021 void putback_lru_page(struct page *page)
1022 {
1023 	lru_cache_add(page);
1024 	put_page(page);		/* drop ref from isolate */
1025 }
1026 
1027 enum page_references {
1028 	PAGEREF_RECLAIM,
1029 	PAGEREF_RECLAIM_CLEAN,
1030 	PAGEREF_KEEP,
1031 	PAGEREF_ACTIVATE,
1032 };
1033 
page_check_references(struct page * page,struct scan_control * sc)1034 static enum page_references page_check_references(struct page *page,
1035 						  struct scan_control *sc)
1036 {
1037 	int referenced_ptes, referenced_page;
1038 	unsigned long vm_flags;
1039 
1040 	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
1041 					  &vm_flags);
1042 	referenced_page = TestClearPageReferenced(page);
1043 
1044 	/*
1045 	 * Mlock lost the isolation race with us.  Let try_to_unmap()
1046 	 * move the page to the unevictable list.
1047 	 */
1048 	if (vm_flags & VM_LOCKED)
1049 		return PAGEREF_RECLAIM;
1050 
1051 	/* rmap lock contention: rotate */
1052 	if (referenced_ptes == -1)
1053 		return PAGEREF_KEEP;
1054 
1055 	if (referenced_ptes) {
1056 		/*
1057 		 * All mapped pages start out with page table
1058 		 * references from the instantiating fault, so we need
1059 		 * to look twice if a mapped file page is used more
1060 		 * than once.
1061 		 *
1062 		 * Mark it and spare it for another trip around the
1063 		 * inactive list.  Another page table reference will
1064 		 * lead to its activation.
1065 		 *
1066 		 * Note: the mark is set for activated pages as well
1067 		 * so that recently deactivated but used pages are
1068 		 * quickly recovered.
1069 		 */
1070 		SetPageReferenced(page);
1071 
1072 		if (referenced_page || referenced_ptes > 1)
1073 			return PAGEREF_ACTIVATE;
1074 
1075 		/*
1076 		 * Activate file-backed executable pages after first usage.
1077 		 */
1078 		if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
1079 			return PAGEREF_ACTIVATE;
1080 
1081 		return PAGEREF_KEEP;
1082 	}
1083 
1084 	/* Reclaim if clean, defer dirty pages to writeback */
1085 	if (referenced_page && !PageSwapBacked(page))
1086 		return PAGEREF_RECLAIM_CLEAN;
1087 
1088 	return PAGEREF_RECLAIM;
1089 }
1090 
1091 /* Check if a page is dirty or under writeback */
page_check_dirty_writeback(struct page * page,bool * dirty,bool * writeback)1092 static void page_check_dirty_writeback(struct page *page,
1093 				       bool *dirty, bool *writeback)
1094 {
1095 	struct address_space *mapping;
1096 
1097 	/*
1098 	 * Anonymous pages are not handled by flushers and must be written
1099 	 * from reclaim context. Do not stall reclaim based on them
1100 	 */
1101 	if (!page_is_file_lru(page) ||
1102 	    (PageAnon(page) && !PageSwapBacked(page))) {
1103 		*dirty = false;
1104 		*writeback = false;
1105 		return;
1106 	}
1107 
1108 	/* By default assume that the page flags are accurate */
1109 	*dirty = PageDirty(page);
1110 	*writeback = PageWriteback(page);
1111 
1112 	/* Verify dirty/writeback state if the filesystem supports it */
1113 	if (!page_has_private(page))
1114 		return;
1115 
1116 	mapping = page_mapping(page);
1117 	if (mapping && mapping->a_ops->is_dirty_writeback)
1118 		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
1119 }
1120 
1121 /*
1122  * shrink_page_list() returns the number of reclaimed pages
1123  */
shrink_page_list(struct list_head * page_list,struct pglist_data * pgdat,struct scan_control * sc,struct reclaim_stat * stat,bool ignore_references)1124 static unsigned int shrink_page_list(struct list_head *page_list,
1125 				     struct pglist_data *pgdat,
1126 				     struct scan_control *sc,
1127 				     struct reclaim_stat *stat,
1128 				     bool ignore_references)
1129 {
1130 	LIST_HEAD(ret_pages);
1131 	LIST_HEAD(free_pages);
1132 	unsigned int nr_reclaimed = 0;
1133 	unsigned int pgactivate = 0;
1134 
1135 	memset(stat, 0, sizeof(*stat));
1136 	cond_resched();
1137 
1138 	while (!list_empty(page_list)) {
1139 		struct address_space *mapping;
1140 		struct page *page;
1141 		enum page_references references = PAGEREF_RECLAIM;
1142 		bool dirty, writeback, may_enter_fs;
1143 		unsigned int nr_pages;
1144 
1145 		cond_resched();
1146 
1147 		page = lru_to_page(page_list);
1148 		list_del(&page->lru);
1149 
1150 		if (!trylock_page(page))
1151 			goto keep;
1152 
1153 		VM_BUG_ON_PAGE(PageActive(page), page);
1154 
1155 		nr_pages = compound_nr(page);
1156 
1157 		/* Account the number of base pages even though THP */
1158 		sc->nr_scanned += nr_pages;
1159 
1160 		if (unlikely(!page_evictable(page)))
1161 			goto activate_locked;
1162 
1163 		if (!sc->may_unmap && page_mapped(page))
1164 			goto keep_locked;
1165 
1166 		/* page_update_gen() tried to promote this page? */
1167 		if (lru_gen_enabled() && !ignore_references &&
1168 		    page_mapped(page) && PageReferenced(page))
1169 			goto keep_locked;
1170 
1171 		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
1172 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
1173 
1174 		/*
1175 		 * The number of dirty pages determines if a node is marked
1176 		 * reclaim_congested which affects wait_iff_congested. kswapd
1177 		 * will stall and start writing pages if the tail of the LRU
1178 		 * is all dirty unqueued pages.
1179 		 */
1180 		page_check_dirty_writeback(page, &dirty, &writeback);
1181 		if (dirty || writeback)
1182 			stat->nr_dirty++;
1183 
1184 		if (dirty && !writeback)
1185 			stat->nr_unqueued_dirty++;
1186 
1187 		/*
1188 		 * Treat this page as congested if the underlying BDI is or if
1189 		 * pages are cycling through the LRU so quickly that the
1190 		 * pages marked for immediate reclaim are making it to the
1191 		 * end of the LRU a second time.
1192 		 */
1193 		mapping = page_mapping(page);
1194 		if (((dirty || writeback) && mapping &&
1195 		     inode_write_congested(mapping->host)) ||
1196 		    (writeback && PageReclaim(page)))
1197 			stat->nr_congested++;
1198 
1199 		/*
1200 		 * If a page at the tail of the LRU is under writeback, there
1201 		 * are three cases to consider.
1202 		 *
1203 		 * 1) If reclaim is encountering an excessive number of pages
1204 		 *    under writeback and this page is both under writeback and
1205 		 *    PageReclaim then it indicates that pages are being queued
1206 		 *    for IO but are being recycled through the LRU before the
1207 		 *    IO can complete. Waiting on the page itself risks an
1208 		 *    indefinite stall if it is impossible to writeback the
1209 		 *    page due to IO error or disconnected storage so instead
1210 		 *    note that the LRU is being scanned too quickly and the
1211 		 *    caller can stall after page list has been processed.
1212 		 *
1213 		 * 2) Global or new memcg reclaim encounters a page that is
1214 		 *    not marked for immediate reclaim, or the caller does not
1215 		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
1216 		 *    not to fs). In this case mark the page for immediate
1217 		 *    reclaim and continue scanning.
1218 		 *
1219 		 *    Require may_enter_fs because we would wait on fs, which
1220 		 *    may not have submitted IO yet. And the loop driver might
1221 		 *    enter reclaim, and deadlock if it waits on a page for
1222 		 *    which it is needed to do the write (loop masks off
1223 		 *    __GFP_IO|__GFP_FS for this reason); but more thought
1224 		 *    would probably show more reasons.
1225 		 *
1226 		 * 3) Legacy memcg encounters a page that is already marked
1227 		 *    PageReclaim. memcg does not have any dirty pages
1228 		 *    throttling so we could easily OOM just because too many
1229 		 *    pages are in writeback and there is nothing else to
1230 		 *    reclaim. Wait for the writeback to complete.
1231 		 *
1232 		 * In cases 1) and 2) we activate the pages to get them out of
1233 		 * the way while we continue scanning for clean pages on the
1234 		 * inactive list and refilling from the active list. The
1235 		 * observation here is that waiting for disk writes is more
1236 		 * expensive than potentially causing reloads down the line.
1237 		 * Since they're marked for immediate reclaim, they won't put
1238 		 * memory pressure on the cache working set any longer than it
1239 		 * takes to write them to disk.
1240 		 */
1241 		if (PageWriteback(page)) {
1242 			/* Case 1 above */
1243 			if (current_is_kswapd() &&
1244 			    PageReclaim(page) &&
1245 			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1246 				stat->nr_immediate++;
1247 				goto activate_locked;
1248 
1249 			/* Case 2 above */
1250 			} else if (writeback_throttling_sane(sc) ||
1251 			    !PageReclaim(page) || !may_enter_fs) {
1252 				/*
1253 				 * This is slightly racy - end_page_writeback()
1254 				 * might have just cleared PageReclaim, then
1255 				 * setting PageReclaim here end up interpreted
1256 				 * as PageReadahead - but that does not matter
1257 				 * enough to care.  What we do want is for this
1258 				 * page to have PageReclaim set next time memcg
1259 				 * reclaim reaches the tests above, so it will
1260 				 * then wait_on_page_writeback() to avoid OOM;
1261 				 * and it's also appropriate in global reclaim.
1262 				 */
1263 				SetPageReclaim(page);
1264 				stat->nr_writeback++;
1265 				goto activate_locked;
1266 
1267 			/* Case 3 above */
1268 			} else {
1269 				unlock_page(page);
1270 				wait_on_page_writeback(page);
1271 				/* then go back and try same page again */
1272 				list_add_tail(&page->lru, page_list);
1273 				continue;
1274 			}
1275 		}
1276 
1277 		if (!ignore_references)
1278 			references = page_check_references(page, sc);
1279 
1280 		switch (references) {
1281 		case PAGEREF_ACTIVATE:
1282 			goto activate_locked;
1283 		case PAGEREF_KEEP:
1284 			stat->nr_ref_keep += nr_pages;
1285 			goto keep_locked;
1286 		case PAGEREF_RECLAIM:
1287 		case PAGEREF_RECLAIM_CLEAN:
1288 			; /* try to reclaim the page below */
1289 		}
1290 
1291 		/*
1292 		 * Anonymous process memory has backing store?
1293 		 * Try to allocate it some swap space here.
1294 		 * Lazyfree page could be freed directly
1295 		 */
1296 		if (PageAnon(page) && PageSwapBacked(page)) {
1297 			if (!PageSwapCache(page)) {
1298 				if (!(sc->gfp_mask & __GFP_IO))
1299 					goto keep_locked;
1300 				if (page_maybe_dma_pinned(page))
1301 					goto keep_locked;
1302 				if (PageTransHuge(page)) {
1303 					/* cannot split THP, skip it */
1304 					if (!can_split_huge_page(page, NULL))
1305 						goto activate_locked;
1306 					/*
1307 					 * Split pages without a PMD map right
1308 					 * away. Chances are some or all of the
1309 					 * tail pages can be freed without IO.
1310 					 */
1311 					if (!compound_mapcount(page) &&
1312 					    split_huge_page_to_list(page,
1313 								    page_list))
1314 						goto activate_locked;
1315 				}
1316 				if (!add_to_swap(page)) {
1317 					if (!PageTransHuge(page))
1318 						goto activate_locked_split;
1319 					/* Fallback to swap normal pages */
1320 					if (split_huge_page_to_list(page,
1321 								    page_list))
1322 						goto activate_locked;
1323 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1324 					count_vm_event(THP_SWPOUT_FALLBACK);
1325 #endif
1326 					if (!add_to_swap(page))
1327 						goto activate_locked_split;
1328 				}
1329 
1330 				may_enter_fs = true;
1331 
1332 				/* Adding to swap updated mapping */
1333 				mapping = page_mapping(page);
1334 			}
1335 		} else if (unlikely(PageTransHuge(page))) {
1336 			/* Split file THP */
1337 			if (split_huge_page_to_list(page, page_list))
1338 				goto keep_locked;
1339 		}
1340 
1341 		/*
1342 		 * THP may get split above, need minus tail pages and update
1343 		 * nr_pages to avoid accounting tail pages twice.
1344 		 *
1345 		 * The tail pages that are added into swap cache successfully
1346 		 * reach here.
1347 		 */
1348 		if ((nr_pages > 1) && !PageTransHuge(page)) {
1349 			sc->nr_scanned -= (nr_pages - 1);
1350 			nr_pages = 1;
1351 		}
1352 
1353 		/*
1354 		 * The page is mapped into the page tables of one or more
1355 		 * processes. Try to unmap it here.
1356 		 */
1357 		if (page_mapped(page)) {
1358 			enum ttu_flags flags = TTU_BATCH_FLUSH;
1359 			bool was_swapbacked = PageSwapBacked(page);
1360 
1361 			if (unlikely(PageTransHuge(page)))
1362 				flags |= TTU_SPLIT_HUGE_PMD;
1363 
1364 			if (!try_to_unmap(page, flags)) {
1365 				stat->nr_unmap_fail += nr_pages;
1366 				if (!was_swapbacked && PageSwapBacked(page))
1367 					stat->nr_lazyfree_fail += nr_pages;
1368 				goto activate_locked;
1369 			}
1370 		}
1371 
1372 		if (PageDirty(page)) {
1373 			/*
1374 			 * Only kswapd can writeback filesystem pages
1375 			 * to avoid risk of stack overflow. But avoid
1376 			 * injecting inefficient single-page IO into
1377 			 * flusher writeback as much as possible: only
1378 			 * write pages when we've encountered many
1379 			 * dirty pages, and when we've already scanned
1380 			 * the rest of the LRU for clean pages and see
1381 			 * the same dirty pages again (PageReclaim).
1382 			 */
1383 			if (page_is_file_lru(page) &&
1384 			    (!current_is_kswapd() || !PageReclaim(page) ||
1385 			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1386 				/*
1387 				 * Immediately reclaim when written back.
1388 				 * Similar in principal to deactivate_page()
1389 				 * except we already have the page isolated
1390 				 * and know it's dirty
1391 				 */
1392 				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1393 				SetPageReclaim(page);
1394 
1395 				goto activate_locked;
1396 			}
1397 
1398 			if (references == PAGEREF_RECLAIM_CLEAN)
1399 				goto keep_locked;
1400 			if (!may_enter_fs)
1401 				goto keep_locked;
1402 			if (!sc->may_writepage)
1403 				goto keep_locked;
1404 
1405 			/*
1406 			 * Page is dirty. Flush the TLB if a writable entry
1407 			 * potentially exists to avoid CPU writes after IO
1408 			 * starts and then write it out here.
1409 			 */
1410 			try_to_unmap_flush_dirty();
1411 			switch (pageout(page, mapping)) {
1412 			case PAGE_KEEP:
1413 				goto keep_locked;
1414 			case PAGE_ACTIVATE:
1415 				goto activate_locked;
1416 			case PAGE_SUCCESS:
1417 				stat->nr_pageout += thp_nr_pages(page);
1418 
1419 				if (PageWriteback(page))
1420 					goto keep;
1421 				if (PageDirty(page))
1422 					goto keep;
1423 
1424 				/*
1425 				 * A synchronous write - probably a ramdisk.  Go
1426 				 * ahead and try to reclaim the page.
1427 				 */
1428 				if (!trylock_page(page))
1429 					goto keep;
1430 				if (PageDirty(page) || PageWriteback(page))
1431 					goto keep_locked;
1432 				mapping = page_mapping(page);
1433 			case PAGE_CLEAN:
1434 				; /* try to free the page below */
1435 			}
1436 		}
1437 
1438 		/*
1439 		 * If the page has buffers, try to free the buffer mappings
1440 		 * associated with this page. If we succeed we try to free
1441 		 * the page as well.
1442 		 *
1443 		 * We do this even if the page is PageDirty().
1444 		 * try_to_release_page() does not perform I/O, but it is
1445 		 * possible for a page to have PageDirty set, but it is actually
1446 		 * clean (all its buffers are clean).  This happens if the
1447 		 * buffers were written out directly, with submit_bh(). ext3
1448 		 * will do this, as well as the blockdev mapping.
1449 		 * try_to_release_page() will discover that cleanness and will
1450 		 * drop the buffers and mark the page clean - it can be freed.
1451 		 *
1452 		 * Rarely, pages can have buffers and no ->mapping.  These are
1453 		 * the pages which were not successfully invalidated in
1454 		 * truncate_complete_page().  We try to drop those buffers here
1455 		 * and if that worked, and the page is no longer mapped into
1456 		 * process address space (page_count == 1) it can be freed.
1457 		 * Otherwise, leave the page on the LRU so it is swappable.
1458 		 */
1459 		if (page_has_private(page)) {
1460 			if (!try_to_release_page(page, sc->gfp_mask))
1461 				goto activate_locked;
1462 			if (!mapping && page_count(page) == 1) {
1463 				unlock_page(page);
1464 				if (put_page_testzero(page))
1465 					goto free_it;
1466 				else {
1467 					/*
1468 					 * rare race with speculative reference.
1469 					 * the speculative reference will free
1470 					 * this page shortly, so we may
1471 					 * increment nr_reclaimed here (and
1472 					 * leave it off the LRU).
1473 					 */
1474 					nr_reclaimed++;
1475 					continue;
1476 				}
1477 			}
1478 		}
1479 
1480 		if (PageAnon(page) && !PageSwapBacked(page)) {
1481 			/* follow __remove_mapping for reference */
1482 			if (!page_ref_freeze(page, 1))
1483 				goto keep_locked;
1484 			if (PageDirty(page)) {
1485 				page_ref_unfreeze(page, 1);
1486 				goto keep_locked;
1487 			}
1488 
1489 			count_vm_event(PGLAZYFREED);
1490 			count_memcg_page_event(page, PGLAZYFREED);
1491 		} else if (!mapping || !__remove_mapping(mapping, page, true,
1492 							 sc->target_mem_cgroup))
1493 			goto keep_locked;
1494 
1495 		unlock_page(page);
1496 free_it:
1497 		/*
1498 		 * THP may get swapped out in a whole, need account
1499 		 * all base pages.
1500 		 */
1501 		nr_reclaimed += nr_pages;
1502 
1503 		/*
1504 		 * Is there need to periodically free_page_list? It would
1505 		 * appear not as the counts should be low
1506 		 */
1507 		if (unlikely(PageTransHuge(page)))
1508 			destroy_compound_page(page);
1509 		else
1510 			list_add(&page->lru, &free_pages);
1511 		continue;
1512 
1513 activate_locked_split:
1514 		/*
1515 		 * The tail pages that are failed to add into swap cache
1516 		 * reach here.  Fixup nr_scanned and nr_pages.
1517 		 */
1518 		if (nr_pages > 1) {
1519 			sc->nr_scanned -= (nr_pages - 1);
1520 			nr_pages = 1;
1521 		}
1522 activate_locked:
1523 		/* Not a candidate for swapping, so reclaim swap space. */
1524 		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1525 						PageMlocked(page)))
1526 			try_to_free_swap(page);
1527 		VM_BUG_ON_PAGE(PageActive(page), page);
1528 		if (!PageMlocked(page)) {
1529 			int type = page_is_file_lru(page);
1530 			SetPageActive(page);
1531 			stat->nr_activate[type] += nr_pages;
1532 			count_memcg_page_event(page, PGACTIVATE);
1533 		}
1534 keep_locked:
1535 		unlock_page(page);
1536 keep:
1537 		list_add(&page->lru, &ret_pages);
1538 		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1539 	}
1540 
1541 	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1542 
1543 	mem_cgroup_uncharge_list(&free_pages);
1544 	try_to_unmap_flush();
1545 	free_unref_page_list(&free_pages);
1546 
1547 	list_splice(&ret_pages, page_list);
1548 	count_vm_events(PGACTIVATE, pgactivate);
1549 
1550 	return nr_reclaimed;
1551 }
1552 
reclaim_clean_pages_from_list(struct zone * zone,struct list_head * page_list)1553 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1554 					    struct list_head *page_list)
1555 {
1556 	struct scan_control sc = {
1557 		.gfp_mask = GFP_KERNEL,
1558 		.priority = DEF_PRIORITY,
1559 		.may_unmap = 1,
1560 	};
1561 	struct reclaim_stat stat;
1562 	unsigned int nr_reclaimed;
1563 	struct page *page, *next;
1564 	LIST_HEAD(clean_pages);
1565 
1566 	list_for_each_entry_safe(page, next, page_list, lru) {
1567 		if (page_is_file_lru(page) && !PageDirty(page) &&
1568 		    !__PageMovable(page) && !PageUnevictable(page)) {
1569 			ClearPageActive(page);
1570 			list_move(&page->lru, &clean_pages);
1571 		}
1572 	}
1573 
1574 	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1575 					&stat, true);
1576 	list_splice(&clean_pages, page_list);
1577 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1578 			    -(long)nr_reclaimed);
1579 	/*
1580 	 * Since lazyfree pages are isolated from file LRU from the beginning,
1581 	 * they will rotate back to anonymous LRU in the end if it failed to
1582 	 * discard so isolated count will be mismatched.
1583 	 * Compensate the isolated count for both LRU lists.
1584 	 */
1585 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
1586 			    stat.nr_lazyfree_fail);
1587 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1588 			    -(long)stat.nr_lazyfree_fail);
1589 	return nr_reclaimed;
1590 }
1591 
reclaim_pages_from_list(struct list_head * page_list)1592 int reclaim_pages_from_list(struct list_head *page_list)
1593 {
1594 	struct scan_control sc = {
1595 		.gfp_mask = GFP_KERNEL,
1596 		.priority = DEF_PRIORITY,
1597 		.may_writepage = 1,
1598 		.may_unmap = 1,
1599 		.may_swap = 1,
1600 	};
1601 	unsigned long nr_reclaimed;
1602 	struct reclaim_stat dummy_stat;
1603 	struct page *page;
1604 
1605 	list_for_each_entry(page, page_list, lru)
1606 		ClearPageActive(page);
1607 
1608 	nr_reclaimed = shrink_page_list(page_list, NULL, &sc,
1609 				&dummy_stat, false);
1610 	while (!list_empty(page_list)) {
1611 
1612 		page = lru_to_page(page_list);
1613 		list_del(&page->lru);
1614 		dec_node_page_state(page, NR_ISOLATED_ANON +
1615 				page_is_file_lru(page));
1616 		putback_lru_page(page);
1617 	}
1618 
1619 	return nr_reclaimed;
1620 }
1621 
1622 /*
1623  * Attempt to remove the specified page from its LRU.  Only take this page
1624  * if it is of the appropriate PageActive status.  Pages which are being
1625  * freed elsewhere are also ignored.
1626  *
1627  * page:	page to consider
1628  * mode:	one of the LRU isolation modes defined above
1629  *
1630  * returns 0 on success, -ve errno on failure.
1631  */
__isolate_lru_page(struct page * page,isolate_mode_t mode)1632 int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1633 {
1634 	int ret = -EINVAL;
1635 
1636 	/* Only take pages on the LRU. */
1637 	if (!PageLRU(page))
1638 		return ret;
1639 
1640 	/* Compaction should not handle unevictable pages but CMA can do so */
1641 	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1642 		return ret;
1643 
1644 	ret = -EBUSY;
1645 
1646 	/*
1647 	 * To minimise LRU disruption, the caller can indicate that it only
1648 	 * wants to isolate pages it will be able to operate on without
1649 	 * blocking - clean pages for the most part.
1650 	 *
1651 	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
1652 	 * that it is possible to migrate without blocking
1653 	 */
1654 	if (mode & ISOLATE_ASYNC_MIGRATE) {
1655 		/* All the caller can do on PageWriteback is block */
1656 		if (PageWriteback(page))
1657 			return ret;
1658 
1659 		if (PageDirty(page)) {
1660 			struct address_space *mapping;
1661 			bool migrate_dirty;
1662 
1663 			/*
1664 			 * Only pages without mappings or that have a
1665 			 * ->migratepage callback are possible to migrate
1666 			 * without blocking. However, we can be racing with
1667 			 * truncation so it's necessary to lock the page
1668 			 * to stabilise the mapping as truncation holds
1669 			 * the page lock until after the page is removed
1670 			 * from the page cache.
1671 			 */
1672 			if (!trylock_page(page))
1673 				return ret;
1674 
1675 			mapping = page_mapping(page);
1676 			migrate_dirty = !mapping || mapping->a_ops->migratepage;
1677 			unlock_page(page);
1678 			if (!migrate_dirty)
1679 				return ret;
1680 		}
1681 	}
1682 
1683 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1684 		return ret;
1685 
1686 	if (likely(get_page_unless_zero(page))) {
1687 		/*
1688 		 * Be careful not to clear PageLRU until after we're
1689 		 * sure the page is not being freed elsewhere -- the
1690 		 * page release code relies on it.
1691 		 */
1692 		ClearPageLRU(page);
1693 		ret = 0;
1694 	}
1695 
1696 	return ret;
1697 }
1698 
1699 
1700 /*
1701  * Update LRU sizes after isolating pages. The LRU size updates must
1702  * be complete before mem_cgroup_update_lru_size due to a sanity check.
1703  */
update_lru_sizes(struct lruvec * lruvec,enum lru_list lru,unsigned long * nr_zone_taken)1704 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1705 			enum lru_list lru, unsigned long *nr_zone_taken)
1706 {
1707 	int zid;
1708 
1709 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1710 		if (!nr_zone_taken[zid])
1711 			continue;
1712 
1713 		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1714 	}
1715 
1716 }
1717 
1718 #ifdef CONFIG_CMA
1719 /*
1720  * It is waste of effort to scan and reclaim CMA pages if it is not available
1721  * for current allocation context. Kswapd can not be enrolled as it can not
1722  * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
1723  */
skip_cma(struct page * page,struct scan_control * sc)1724 static bool skip_cma(struct page *page, struct scan_control *sc)
1725 {
1726 	return !current_is_kswapd() &&
1727 			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
1728 			get_pageblock_migratetype(page) == MIGRATE_CMA;
1729 }
1730 #else
skip_cma(struct page * page,struct scan_control * sc)1731 static bool skip_cma(struct page *page, struct scan_control *sc)
1732 {
1733 	return false;
1734 }
1735 #endif
1736 
1737 /**
1738  * pgdat->lru_lock is heavily contended.  Some of the functions that
1739  * shrink the lists perform better by taking out a batch of pages
1740  * and working on them outside the LRU lock.
1741  *
1742  * For pagecache intensive workloads, this function is the hottest
1743  * spot in the kernel (apart from copy_*_user functions).
1744  *
1745  * Appropriate locks must be held before calling this function.
1746  *
1747  * @nr_to_scan:	The number of eligible pages to look through on the list.
1748  * @lruvec:	The LRU vector to pull pages from.
1749  * @dst:	The temp list to put pages on to.
1750  * @nr_scanned:	The number of pages that were scanned.
1751  * @sc:		The scan_control struct for this reclaim session
1752  * @lru:	LRU list id for isolating
1753  *
1754  * returns how many pages were moved onto *@dst.
1755  */
isolate_lru_pages(unsigned long nr_to_scan,struct lruvec * lruvec,struct list_head * dst,unsigned long * nr_scanned,struct scan_control * sc,enum lru_list lru)1756 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1757 		struct lruvec *lruvec, struct list_head *dst,
1758 		unsigned long *nr_scanned, struct scan_control *sc,
1759 		enum lru_list lru)
1760 {
1761 	struct list_head *src = &lruvec->lists[lru];
1762 	unsigned long nr_taken = 0;
1763 	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1764 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1765 	unsigned long skipped = 0;
1766 	unsigned long scan, total_scan, nr_pages;
1767 	LIST_HEAD(pages_skipped);
1768 	isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
1769 
1770 	total_scan = 0;
1771 	scan = 0;
1772 	while (scan < nr_to_scan && !list_empty(src)) {
1773 		struct page *page;
1774 
1775 		page = lru_to_page(src);
1776 		prefetchw_prev_lru_page(page, src, flags);
1777 
1778 		VM_BUG_ON_PAGE(!PageLRU(page), page);
1779 
1780 		nr_pages = compound_nr(page);
1781 		total_scan += nr_pages;
1782 
1783 		if (page_zonenum(page) > sc->reclaim_idx ||
1784 				skip_cma(page, sc)) {
1785 			list_move(&page->lru, &pages_skipped);
1786 			nr_skipped[page_zonenum(page)] += nr_pages;
1787 			continue;
1788 		}
1789 
1790 		/*
1791 		 * Do not count skipped pages because that makes the function
1792 		 * return with no isolated pages if the LRU mostly contains
1793 		 * ineligible pages.  This causes the VM to not reclaim any
1794 		 * pages, triggering a premature OOM.
1795 		 *
1796 		 * Account all tail pages of THP.  This would not cause
1797 		 * premature OOM since __isolate_lru_page() returns -EBUSY
1798 		 * only when the page is being freed somewhere else.
1799 		 */
1800 		scan += nr_pages;
1801 		switch (__isolate_lru_page(page, mode)) {
1802 		case 0:
1803 			nr_taken += nr_pages;
1804 			nr_zone_taken[page_zonenum(page)] += nr_pages;
1805 			list_move(&page->lru, dst);
1806 			break;
1807 
1808 		case -EBUSY:
1809 			/* else it is being freed elsewhere */
1810 			list_move(&page->lru, src);
1811 			continue;
1812 
1813 		default:
1814 			BUG();
1815 		}
1816 	}
1817 
1818 	/*
1819 	 * Splice any skipped pages to the start of the LRU list. Note that
1820 	 * this disrupts the LRU order when reclaiming for lower zones but
1821 	 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1822 	 * scanning would soon rescan the same pages to skip and put the
1823 	 * system at risk of premature OOM.
1824 	 */
1825 	if (!list_empty(&pages_skipped)) {
1826 		int zid;
1827 
1828 		list_splice(&pages_skipped, src);
1829 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1830 			if (!nr_skipped[zid])
1831 				continue;
1832 
1833 			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1834 			skipped += nr_skipped[zid];
1835 		}
1836 	}
1837 	*nr_scanned = total_scan;
1838 	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1839 				    total_scan, skipped, nr_taken, mode, lru);
1840 	update_lru_sizes(lruvec, lru, nr_zone_taken);
1841 	return nr_taken;
1842 }
1843 
1844 /**
1845  * isolate_lru_page - tries to isolate a page from its LRU list
1846  * @page: page to isolate from its LRU list
1847  *
1848  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1849  * vmstat statistic corresponding to whatever LRU list the page was on.
1850  *
1851  * Returns 0 if the page was removed from an LRU list.
1852  * Returns -EBUSY if the page was not on an LRU list.
1853  *
1854  * The returned page will have PageLRU() cleared.  If it was found on
1855  * the active list, it will have PageActive set.  If it was found on
1856  * the unevictable list, it will have the PageUnevictable bit set. That flag
1857  * may need to be cleared by the caller before letting the page go.
1858  *
1859  * The vmstat statistic corresponding to the list on which the page was
1860  * found will be decremented.
1861  *
1862  * Restrictions:
1863  *
1864  * (1) Must be called with an elevated refcount on the page. This is a
1865  *     fundamental difference from isolate_lru_pages (which is called
1866  *     without a stable reference).
1867  * (2) the lru_lock must not be held.
1868  * (3) interrupts must be enabled.
1869  */
isolate_lru_page(struct page * page)1870 int isolate_lru_page(struct page *page)
1871 {
1872 	int ret = -EBUSY;
1873 
1874 	VM_BUG_ON_PAGE(!page_count(page), page);
1875 	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1876 
1877 	if (PageLRU(page)) {
1878 		pg_data_t *pgdat = page_pgdat(page);
1879 		struct lruvec *lruvec;
1880 
1881 		spin_lock_irq(&pgdat->lru_lock);
1882 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
1883 		if (PageLRU(page)) {
1884 			get_page(page);
1885 			ClearPageLRU(page);
1886 			del_page_from_lru_list(page, lruvec);
1887 			ret = 0;
1888 		}
1889 		spin_unlock_irq(&pgdat->lru_lock);
1890 	}
1891 	return ret;
1892 }
1893 
1894 /*
1895  * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1896  * then get rescheduled. When there are massive number of tasks doing page
1897  * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1898  * the LRU list will go small and be scanned faster than necessary, leading to
1899  * unnecessary swapping, thrashing and OOM.
1900  */
too_many_isolated(struct pglist_data * pgdat,int file,struct scan_control * sc)1901 static int too_many_isolated(struct pglist_data *pgdat, int file,
1902 		struct scan_control *sc)
1903 {
1904 	unsigned long inactive, isolated;
1905 
1906 	if (current_is_kswapd())
1907 		return 0;
1908 
1909 	if (!writeback_throttling_sane(sc))
1910 		return 0;
1911 
1912 	if (file) {
1913 		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1914 		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1915 	} else {
1916 		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1917 		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1918 	}
1919 
1920 	/*
1921 	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1922 	 * won't get blocked by normal direct-reclaimers, forming a circular
1923 	 * deadlock.
1924 	 */
1925 	if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1926 		inactive >>= 3;
1927 
1928 	return isolated > inactive;
1929 }
1930 
1931 /*
1932  * This moves pages from @list to corresponding LRU list.
1933  *
1934  * We move them the other way if the page is referenced by one or more
1935  * processes, from rmap.
1936  *
1937  * If the pages are mostly unmapped, the processing is fast and it is
1938  * appropriate to hold zone_lru_lock across the whole operation.  But if
1939  * the pages are mapped, the processing is slow (page_referenced()) so we
1940  * should drop zone_lru_lock around each page.  It's impossible to balance
1941  * this, so instead we remove the pages from the LRU while processing them.
1942  * It is safe to rely on PG_active against the non-LRU pages in here because
1943  * nobody will play with that bit on a non-LRU page.
1944  *
1945  * The downside is that we have to touch page->_refcount against each page.
1946  * But we had to alter page->flags anyway.
1947  *
1948  * Returns the number of pages moved to the given lruvec.
1949  */
1950 
move_pages_to_lru(struct lruvec * lruvec,struct list_head * list)1951 static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1952 						     struct list_head *list)
1953 {
1954 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1955 	int nr_pages, nr_moved = 0;
1956 	LIST_HEAD(pages_to_free);
1957 	struct page *page;
1958 
1959 	while (!list_empty(list)) {
1960 		page = lru_to_page(list);
1961 		VM_BUG_ON_PAGE(PageLRU(page), page);
1962 		list_del(&page->lru);
1963 		if (unlikely(!page_evictable(page))) {
1964 			spin_unlock_irq(&pgdat->lru_lock);
1965 			putback_lru_page(page);
1966 			spin_lock_irq(&pgdat->lru_lock);
1967 			continue;
1968 		}
1969 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
1970 
1971 		SetPageLRU(page);
1972 		add_page_to_lru_list(page, lruvec);
1973 
1974 		if (put_page_testzero(page)) {
1975 			del_page_from_lru_list(page, lruvec);
1976 			__clear_page_lru_flags(page);
1977 
1978 			if (unlikely(PageCompound(page))) {
1979 				spin_unlock_irq(&pgdat->lru_lock);
1980 				destroy_compound_page(page);
1981 				spin_lock_irq(&pgdat->lru_lock);
1982 			} else
1983 				list_add(&page->lru, &pages_to_free);
1984 		} else {
1985 			nr_pages = thp_nr_pages(page);
1986 			nr_moved += nr_pages;
1987 			if (PageActive(page))
1988 				workingset_age_nonresident(lruvec, nr_pages);
1989 		}
1990 	}
1991 
1992 	/*
1993 	 * To save our caller's stack, now use input list for pages to free.
1994 	 */
1995 	list_splice(&pages_to_free, list);
1996 
1997 	return nr_moved;
1998 }
1999 
2000 /*
2001  * If a kernel thread (such as nfsd for loop-back mounts) services
2002  * a backing device by writing to the page cache it sets PF_LOCAL_THROTTLE.
2003  * In that case we should only throttle if the backing device it is
2004  * writing to is congested.  In other cases it is safe to throttle.
2005  */
current_may_throttle(void)2006 static int current_may_throttle(void)
2007 {
2008 	return !(current->flags & PF_LOCAL_THROTTLE) ||
2009 		current->backing_dev_info == NULL ||
2010 		bdi_write_congested(current->backing_dev_info);
2011 }
2012 
2013 /*
2014  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
2015  * of reclaimed pages
2016  */
2017 static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan,struct lruvec * lruvec,struct scan_control * sc,enum lru_list lru)2018 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
2019 		     struct scan_control *sc, enum lru_list lru)
2020 {
2021 	LIST_HEAD(page_list);
2022 	unsigned long nr_scanned;
2023 	unsigned int nr_reclaimed = 0;
2024 	unsigned long nr_taken;
2025 	struct reclaim_stat stat;
2026 	bool file = is_file_lru(lru);
2027 	enum vm_event_item item;
2028 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2029 	bool stalled = false;
2030 	struct blk_plug plug;
2031 	bool do_plug = false;
2032 
2033 	while (unlikely(too_many_isolated(pgdat, file, sc))) {
2034 		if (stalled)
2035 			return 0;
2036 
2037 		/* wait a bit for the reclaimer. */
2038 		msleep(100);
2039 		stalled = true;
2040 
2041 		/* We are about to die and free our memory. Return now. */
2042 		if (fatal_signal_pending(current))
2043 			return SWAP_CLUSTER_MAX;
2044 	}
2045 
2046 	lru_add_drain();
2047 
2048 	spin_lock_irq(&pgdat->lru_lock);
2049 
2050 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
2051 				     &nr_scanned, sc, lru);
2052 
2053 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2054 	item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
2055 	if (!cgroup_reclaim(sc))
2056 		__count_vm_events(item, nr_scanned);
2057 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
2058 	__count_vm_events(PGSCAN_ANON + file, nr_scanned);
2059 
2060 	spin_unlock_irq(&pgdat->lru_lock);
2061 
2062 	if (nr_taken == 0)
2063 		return 0;
2064 
2065 	trace_android_vh_shrink_inactive_list_blk_plug(&do_plug);
2066 	if (do_plug)
2067 		blk_start_plug(&plug);
2068 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
2069 
2070 	spin_lock_irq(&pgdat->lru_lock);
2071 
2072 	move_pages_to_lru(lruvec, &page_list);
2073 
2074 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2075 	lru_note_cost(lruvec, file, stat.nr_pageout);
2076 	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
2077 	if (!cgroup_reclaim(sc))
2078 		__count_vm_events(item, nr_reclaimed);
2079 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
2080 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
2081 
2082 	spin_unlock_irq(&pgdat->lru_lock);
2083 
2084 	if (do_plug)
2085 		blk_finish_plug(&plug);
2086 
2087 	mem_cgroup_uncharge_list(&page_list);
2088 	free_unref_page_list(&page_list);
2089 
2090 	/*
2091 	 * If dirty pages are scanned that are not queued for IO, it
2092 	 * implies that flushers are not doing their job. This can
2093 	 * happen when memory pressure pushes dirty pages to the end of
2094 	 * the LRU before the dirty limits are breached and the dirty
2095 	 * data has expired. It can also happen when the proportion of
2096 	 * dirty pages grows not through writes but through memory
2097 	 * pressure reclaiming all the clean cache. And in some cases,
2098 	 * the flushers simply cannot keep up with the allocation
2099 	 * rate. Nudge the flusher threads in case they are asleep.
2100 	 */
2101 	if (stat.nr_unqueued_dirty == nr_taken)
2102 		wakeup_flusher_threads(WB_REASON_VMSCAN);
2103 
2104 	sc->nr.dirty += stat.nr_dirty;
2105 	sc->nr.congested += stat.nr_congested;
2106 	sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2107 	sc->nr.writeback += stat.nr_writeback;
2108 	sc->nr.immediate += stat.nr_immediate;
2109 	sc->nr.taken += nr_taken;
2110 	if (file)
2111 		sc->nr.file_taken += nr_taken;
2112 
2113 	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2114 			nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2115 	return nr_reclaimed;
2116 }
2117 
shrink_active_list(unsigned long nr_to_scan,struct lruvec * lruvec,struct scan_control * sc,enum lru_list lru)2118 static void shrink_active_list(unsigned long nr_to_scan,
2119 			       struct lruvec *lruvec,
2120 			       struct scan_control *sc,
2121 			       enum lru_list lru)
2122 {
2123 	unsigned long nr_taken;
2124 	unsigned long nr_scanned;
2125 	unsigned long vm_flags;
2126 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
2127 	LIST_HEAD(l_active);
2128 	LIST_HEAD(l_inactive);
2129 	struct page *page;
2130 	unsigned nr_deactivate, nr_activate;
2131 	unsigned nr_rotated = 0;
2132 	int file = is_file_lru(lru);
2133 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2134 
2135 	lru_add_drain();
2136 
2137 	spin_lock_irq(&pgdat->lru_lock);
2138 
2139 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2140 				     &nr_scanned, sc, lru);
2141 
2142 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2143 
2144 	if (!cgroup_reclaim(sc))
2145 		__count_vm_events(PGREFILL, nr_scanned);
2146 	__count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2147 
2148 	spin_unlock_irq(&pgdat->lru_lock);
2149 
2150 	while (!list_empty(&l_hold)) {
2151 		cond_resched();
2152 		page = lru_to_page(&l_hold);
2153 		list_del(&page->lru);
2154 
2155 		if (unlikely(!page_evictable(page))) {
2156 			putback_lru_page(page);
2157 			continue;
2158 		}
2159 
2160 		if (unlikely(buffer_heads_over_limit)) {
2161 			if (page_has_private(page) && trylock_page(page)) {
2162 				if (page_has_private(page))
2163 					try_to_release_page(page, 0);
2164 				unlock_page(page);
2165 			}
2166 		}
2167 
2168 		/* Referenced or rmap lock contention: rotate */
2169 		if (page_referenced(page, 0, sc->target_mem_cgroup,
2170 				     &vm_flags) != 0) {
2171 			/*
2172 			 * Identify referenced, file-backed active pages and
2173 			 * give them one more trip around the active list. So
2174 			 * that executable code get better chances to stay in
2175 			 * memory under moderate memory pressure.  Anon pages
2176 			 * are not likely to be evicted by use-once streaming
2177 			 * IO, plus JVM can create lots of anon VM_EXEC pages,
2178 			 * so we ignore them here.
2179 			 */
2180 			if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
2181 				nr_rotated += thp_nr_pages(page);
2182 				list_add(&page->lru, &l_active);
2183 				continue;
2184 			}
2185 		}
2186 
2187 		ClearPageActive(page);	/* we are de-activating */
2188 		SetPageWorkingset(page);
2189 		list_add(&page->lru, &l_inactive);
2190 	}
2191 
2192 	/*
2193 	 * Move pages back to the lru list.
2194 	 */
2195 	spin_lock_irq(&pgdat->lru_lock);
2196 
2197 	nr_activate = move_pages_to_lru(lruvec, &l_active);
2198 	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2199 	/* Keep all free pages in l_active list */
2200 	list_splice(&l_inactive, &l_active);
2201 
2202 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
2203 	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2204 
2205 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2206 	spin_unlock_irq(&pgdat->lru_lock);
2207 
2208 	mem_cgroup_uncharge_list(&l_active);
2209 	free_unref_page_list(&l_active);
2210 	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2211 			nr_deactivate, nr_rotated, sc->priority, file);
2212 }
2213 
reclaim_pages(struct list_head * page_list)2214 unsigned long reclaim_pages(struct list_head *page_list)
2215 {
2216 	int nid = NUMA_NO_NODE;
2217 	unsigned int nr_reclaimed = 0;
2218 	LIST_HEAD(node_page_list);
2219 	struct reclaim_stat dummy_stat;
2220 	struct page *page;
2221 	struct blk_plug plug;
2222 	bool do_plug = false;
2223 	struct scan_control sc = {
2224 		.gfp_mask = GFP_KERNEL,
2225 		.priority = DEF_PRIORITY,
2226 		.may_writepage = 1,
2227 		.may_unmap = 1,
2228 		.may_swap = 1,
2229 	};
2230 
2231 	trace_android_vh_reclaim_pages_plug(&do_plug);
2232 	if (do_plug)
2233 		blk_start_plug(&plug);
2234 
2235 	while (!list_empty(page_list)) {
2236 		page = lru_to_page(page_list);
2237 		if (nid == NUMA_NO_NODE) {
2238 			nid = page_to_nid(page);
2239 			INIT_LIST_HEAD(&node_page_list);
2240 		}
2241 
2242 		if (nid == page_to_nid(page)) {
2243 			ClearPageActive(page);
2244 			list_move(&page->lru, &node_page_list);
2245 			continue;
2246 		}
2247 
2248 		nr_reclaimed += shrink_page_list(&node_page_list,
2249 						NODE_DATA(nid),
2250 						&sc, &dummy_stat, false);
2251 		while (!list_empty(&node_page_list)) {
2252 			page = lru_to_page(&node_page_list);
2253 			list_del(&page->lru);
2254 			putback_lru_page(page);
2255 		}
2256 
2257 		nid = NUMA_NO_NODE;
2258 	}
2259 
2260 	if (!list_empty(&node_page_list)) {
2261 		nr_reclaimed += shrink_page_list(&node_page_list,
2262 						NODE_DATA(nid),
2263 						&sc, &dummy_stat, false);
2264 		while (!list_empty(&node_page_list)) {
2265 			page = lru_to_page(&node_page_list);
2266 			list_del(&page->lru);
2267 			putback_lru_page(page);
2268 		}
2269 	}
2270 	if (do_plug)
2271 		blk_finish_plug(&plug);
2272 
2273 	return nr_reclaimed;
2274 }
2275 
shrink_list(enum lru_list lru,unsigned long nr_to_scan,struct lruvec * lruvec,struct scan_control * sc)2276 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2277 				 struct lruvec *lruvec, struct scan_control *sc)
2278 {
2279 	if (is_active_lru(lru)) {
2280 		if (sc->may_deactivate & (1 << is_file_lru(lru)))
2281 			shrink_active_list(nr_to_scan, lruvec, sc, lru);
2282 		else
2283 			sc->skipped_deactivate = 1;
2284 		return 0;
2285 	}
2286 
2287 	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2288 }
2289 
2290 /*
2291  * The inactive anon list should be small enough that the VM never has
2292  * to do too much work.
2293  *
2294  * The inactive file list should be small enough to leave most memory
2295  * to the established workingset on the scan-resistant active list,
2296  * but large enough to avoid thrashing the aggregate readahead window.
2297  *
2298  * Both inactive lists should also be large enough that each inactive
2299  * page has a chance to be referenced again before it is reclaimed.
2300  *
2301  * If that fails and refaulting is observed, the inactive list grows.
2302  *
2303  * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2304  * on this LRU, maintained by the pageout code. An inactive_ratio
2305  * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2306  *
2307  * total     target    max
2308  * memory    ratio     inactive
2309  * -------------------------------------
2310  *   10MB       1         5MB
2311  *  100MB       1        50MB
2312  *    1GB       3       250MB
2313  *   10GB      10       0.9GB
2314  *  100GB      31         3GB
2315  *    1TB     101        10GB
2316  *   10TB     320        32GB
2317  */
inactive_is_low(struct lruvec * lruvec,enum lru_list inactive_lru)2318 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2319 {
2320 	enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2321 	unsigned long inactive, active;
2322 	unsigned long inactive_ratio;
2323 	unsigned long gb;
2324 
2325 	inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2326 	active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2327 
2328 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
2329 	if (gb)
2330 		inactive_ratio = int_sqrt(10 * gb);
2331 	else
2332 		inactive_ratio = 1;
2333 
2334 	trace_android_vh_tune_inactive_ratio(&inactive_ratio, is_file_lru(inactive_lru));
2335 
2336 	return inactive * inactive_ratio < active;
2337 }
2338 
2339 enum scan_balance {
2340 	SCAN_EQUAL,
2341 	SCAN_FRACT,
2342 	SCAN_ANON,
2343 	SCAN_FILE,
2344 };
2345 
prepare_scan_count(pg_data_t * pgdat,struct scan_control * sc)2346 static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
2347 {
2348 	unsigned long file;
2349 	struct lruvec *target_lruvec;
2350 
2351 	if (lru_gen_enabled())
2352 		return;
2353 
2354 	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2355 
2356 	/*
2357 	 * Determine the scan balance between anon and file LRUs.
2358 	 */
2359 	spin_lock_irq(&pgdat->lru_lock);
2360 	sc->anon_cost = target_lruvec->anon_cost;
2361 	sc->file_cost = target_lruvec->file_cost;
2362 	spin_unlock_irq(&pgdat->lru_lock);
2363 
2364 	/*
2365 	 * Target desirable inactive:active list ratios for the anon
2366 	 * and file LRU lists.
2367 	 */
2368 	if (!sc->force_deactivate) {
2369 		unsigned long refaults;
2370 
2371 		refaults = lruvec_page_state(target_lruvec,
2372 				WORKINGSET_ACTIVATE_ANON);
2373 		if (refaults != target_lruvec->refaults[0] ||
2374 			inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2375 			sc->may_deactivate |= DEACTIVATE_ANON;
2376 		else
2377 			sc->may_deactivate &= ~DEACTIVATE_ANON;
2378 
2379 		/*
2380 		 * When refaults are being observed, it means a new
2381 		 * workingset is being established. Deactivate to get
2382 		 * rid of any stale active pages quickly.
2383 		 */
2384 		refaults = lruvec_page_state(target_lruvec,
2385 				WORKINGSET_ACTIVATE_FILE);
2386 		if (refaults != target_lruvec->refaults[1] ||
2387 		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2388 			sc->may_deactivate |= DEACTIVATE_FILE;
2389 		else
2390 			sc->may_deactivate &= ~DEACTIVATE_FILE;
2391 	} else
2392 		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2393 
2394 	/*
2395 	 * If we have plenty of inactive file pages that aren't
2396 	 * thrashing, try to reclaim those first before touching
2397 	 * anonymous pages.
2398 	 */
2399 	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2400 	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
2401 		sc->cache_trim_mode = 1;
2402 	else
2403 		sc->cache_trim_mode = 0;
2404 
2405 	/*
2406 	 * Prevent the reclaimer from falling into the cache trap: as
2407 	 * cache pages start out inactive, every cache fault will tip
2408 	 * the scan balance towards the file LRU.  And as the file LRU
2409 	 * shrinks, so does the window for rotation from references.
2410 	 * This means we have a runaway feedback loop where a tiny
2411 	 * thrashing file LRU becomes infinitely more attractive than
2412 	 * anon pages.  Try to detect this based on file LRU size.
2413 	 */
2414 	if (!cgroup_reclaim(sc)) {
2415 		unsigned long total_high_wmark = 0;
2416 		unsigned long free, anon;
2417 		int z;
2418 
2419 		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2420 		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2421 			   node_page_state(pgdat, NR_INACTIVE_FILE);
2422 
2423 		for (z = 0; z < MAX_NR_ZONES; z++) {
2424 			struct zone *zone = &pgdat->node_zones[z];
2425 
2426 			if (!managed_zone(zone))
2427 				continue;
2428 
2429 			total_high_wmark += high_wmark_pages(zone);
2430 		}
2431 
2432 		/*
2433 		 * Consider anon: if that's low too, this isn't a
2434 		 * runaway file reclaim problem, but rather just
2435 		 * extreme pressure. Reclaim as per usual then.
2436 		 */
2437 		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2438 
2439 		sc->file_is_tiny =
2440 			file + free <= total_high_wmark &&
2441 			!(sc->may_deactivate & DEACTIVATE_ANON) &&
2442 			anon >> sc->priority;
2443 	}
2444 }
2445 
2446 /*
2447  * Determine how aggressively the anon and file LRU lists should be
2448  * scanned.  The relative value of each set of LRU lists is determined
2449  * by looking at the fraction of the pages scanned we did rotate back
2450  * onto the active list instead of evict.
2451  *
2452  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2453  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2454  */
get_scan_count(struct lruvec * lruvec,struct scan_control * sc,unsigned long * nr)2455 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2456 			   unsigned long *nr)
2457 {
2458 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2459 	unsigned long anon_cost, file_cost, total_cost;
2460 	int swappiness = mem_cgroup_swappiness(memcg);
2461 	u64 fraction[ANON_AND_FILE];
2462 	u64 denominator = 0;	/* gcc */
2463 	enum scan_balance scan_balance;
2464 	unsigned long ap, fp;
2465 	enum lru_list lru;
2466 	bool balance_anon_file_reclaim = false;
2467 
2468 	/* If we have no swap space, do not bother scanning anon pages. */
2469 	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2470 		scan_balance = SCAN_FILE;
2471 		goto out;
2472 	}
2473 
2474 	trace_android_vh_tune_swappiness(&swappiness);
2475 	/*
2476 	 * Global reclaim will swap to prevent OOM even with no
2477 	 * swappiness, but memcg users want to use this knob to
2478 	 * disable swapping for individual groups completely when
2479 	 * using the memory controller's swap limit feature would be
2480 	 * too expensive.
2481 	 */
2482 	if (cgroup_reclaim(sc) && !swappiness) {
2483 		scan_balance = SCAN_FILE;
2484 		goto out;
2485 	}
2486 
2487 	/*
2488 	 * Do not apply any pressure balancing cleverness when the
2489 	 * system is close to OOM, scan both anon and file equally
2490 	 * (unless the swappiness setting disagrees with swapping).
2491 	 */
2492 	if (!sc->priority && swappiness) {
2493 		scan_balance = SCAN_EQUAL;
2494 		goto out;
2495 	}
2496 
2497 	/*
2498 	 * If the system is almost out of file pages, force-scan anon.
2499 	 */
2500 	if (sc->file_is_tiny) {
2501 		scan_balance = SCAN_ANON;
2502 		goto out;
2503 	}
2504 
2505 	trace_android_rvh_set_balance_anon_file_reclaim(&balance_anon_file_reclaim);
2506 
2507 	/*
2508 	 * If there is enough inactive page cache, we do not reclaim
2509 	 * anything from the anonymous working right now. But when balancing
2510 	 * anon and page cache files for reclaim, allow swapping of anon pages
2511 	 * even if there are a number of inactive file cache pages.
2512 	 */
2513 	if (!balance_anon_file_reclaim && sc->cache_trim_mode) {
2514 		scan_balance = SCAN_FILE;
2515 		goto out;
2516 	}
2517 
2518 	scan_balance = SCAN_FRACT;
2519 	/*
2520 	 * Calculate the pressure balance between anon and file pages.
2521 	 *
2522 	 * The amount of pressure we put on each LRU is inversely
2523 	 * proportional to the cost of reclaiming each list, as
2524 	 * determined by the share of pages that are refaulting, times
2525 	 * the relative IO cost of bringing back a swapped out
2526 	 * anonymous page vs reloading a filesystem page (swappiness).
2527 	 *
2528 	 * Although we limit that influence to ensure no list gets
2529 	 * left behind completely: at least a third of the pressure is
2530 	 * applied, before swappiness.
2531 	 *
2532 	 * With swappiness at 100, anon and file have equal IO cost.
2533 	 */
2534 	total_cost = sc->anon_cost + sc->file_cost;
2535 	anon_cost = total_cost + sc->anon_cost;
2536 	file_cost = total_cost + sc->file_cost;
2537 	total_cost = anon_cost + file_cost;
2538 
2539 	ap = swappiness * (total_cost + 1);
2540 	ap /= anon_cost + 1;
2541 
2542 	fp = (200 - swappiness) * (total_cost + 1);
2543 	fp /= file_cost + 1;
2544 
2545 	fraction[0] = ap;
2546 	fraction[1] = fp;
2547 	denominator = ap + fp;
2548 out:
2549 	trace_android_vh_tune_scan_type((char *)(&scan_balance));
2550 	trace_android_vh_tune_memcg_scan_type(memcg, (char *)(&scan_balance));
2551 	for_each_evictable_lru(lru) {
2552 		int file = is_file_lru(lru);
2553 		unsigned long lruvec_size;
2554 		unsigned long low, min;
2555 		unsigned long scan;
2556 
2557 		lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2558 		mem_cgroup_protection(sc->target_mem_cgroup, memcg,
2559 				      &min, &low);
2560 
2561 		if (min || low) {
2562 			/*
2563 			 * Scale a cgroup's reclaim pressure by proportioning
2564 			 * its current usage to its memory.low or memory.min
2565 			 * setting.
2566 			 *
2567 			 * This is important, as otherwise scanning aggression
2568 			 * becomes extremely binary -- from nothing as we
2569 			 * approach the memory protection threshold, to totally
2570 			 * nominal as we exceed it.  This results in requiring
2571 			 * setting extremely liberal protection thresholds. It
2572 			 * also means we simply get no protection at all if we
2573 			 * set it too low, which is not ideal.
2574 			 *
2575 			 * If there is any protection in place, we reduce scan
2576 			 * pressure by how much of the total memory used is
2577 			 * within protection thresholds.
2578 			 *
2579 			 * There is one special case: in the first reclaim pass,
2580 			 * we skip over all groups that are within their low
2581 			 * protection. If that fails to reclaim enough pages to
2582 			 * satisfy the reclaim goal, we come back and override
2583 			 * the best-effort low protection. However, we still
2584 			 * ideally want to honor how well-behaved groups are in
2585 			 * that case instead of simply punishing them all
2586 			 * equally. As such, we reclaim them based on how much
2587 			 * memory they are using, reducing the scan pressure
2588 			 * again by how much of the total memory used is under
2589 			 * hard protection.
2590 			 */
2591 			unsigned long cgroup_size = mem_cgroup_size(memcg);
2592 			unsigned long protection;
2593 
2594 			/* memory.low scaling, make sure we retry before OOM */
2595 			if (!sc->memcg_low_reclaim && low > min) {
2596 				protection = low;
2597 				sc->memcg_low_skipped = 1;
2598 			} else {
2599 				protection = min;
2600 			}
2601 
2602 			/* Avoid TOCTOU with earlier protection check */
2603 			cgroup_size = max(cgroup_size, protection);
2604 
2605 			scan = lruvec_size - lruvec_size * protection /
2606 				(cgroup_size + 1);
2607 
2608 			/*
2609 			 * Minimally target SWAP_CLUSTER_MAX pages to keep
2610 			 * reclaim moving forwards, avoiding decrementing
2611 			 * sc->priority further than desirable.
2612 			 */
2613 			scan = max(scan, SWAP_CLUSTER_MAX);
2614 		} else {
2615 			scan = lruvec_size;
2616 		}
2617 
2618 		scan >>= sc->priority;
2619 
2620 		/*
2621 		 * If the cgroup's already been deleted, make sure to
2622 		 * scrape out the remaining cache.
2623 		 */
2624 		if (!scan && !mem_cgroup_online(memcg))
2625 			scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2626 
2627 		switch (scan_balance) {
2628 		case SCAN_EQUAL:
2629 			/* Scan lists relative to size */
2630 			break;
2631 		case SCAN_FRACT:
2632 			/*
2633 			 * Scan types proportional to swappiness and
2634 			 * their relative recent reclaim efficiency.
2635 			 * Make sure we don't miss the last page on
2636 			 * the offlined memory cgroups because of a
2637 			 * round-off error.
2638 			 */
2639 			scan = mem_cgroup_online(memcg) ?
2640 			       div64_u64(scan * fraction[file], denominator) :
2641 			       DIV64_U64_ROUND_UP(scan * fraction[file],
2642 						  denominator);
2643 			break;
2644 		case SCAN_FILE:
2645 		case SCAN_ANON:
2646 			/* Scan one type exclusively */
2647 			if ((scan_balance == SCAN_FILE) != file)
2648 				scan = 0;
2649 			break;
2650 		default:
2651 			/* Look ma, no brain */
2652 			BUG();
2653 		}
2654 
2655 		nr[lru] = scan;
2656 	}
2657 }
2658 
2659 #ifdef CONFIG_LRU_GEN
2660 
2661 #ifdef CONFIG_LRU_GEN_ENABLED
2662 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
2663 #define get_cap(cap)	static_branch_likely(&lru_gen_caps[cap])
2664 #else
2665 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
2666 #define get_cap(cap)	static_branch_unlikely(&lru_gen_caps[cap])
2667 #endif
2668 
2669 /******************************************************************************
2670  *                          shorthand helpers
2671  ******************************************************************************/
2672 
2673 #define LRU_REFS_FLAGS	(BIT(PG_referenced) | BIT(PG_workingset))
2674 
2675 #define DEFINE_MAX_SEQ(lruvec)						\
2676 	unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2677 
2678 #define DEFINE_MIN_SEQ(lruvec)						\
2679 	unsigned long min_seq[ANON_AND_FILE] = {			\
2680 		READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]),	\
2681 		READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]),	\
2682 	}
2683 
2684 #define for_each_gen_type_zone(gen, type, zone)				\
2685 	for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)			\
2686 		for ((type) = 0; (type) < ANON_AND_FILE; (type)++)	\
2687 			for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
2688 
get_lruvec(struct mem_cgroup * memcg,int nid)2689 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
2690 {
2691 	struct pglist_data *pgdat = NODE_DATA(nid);
2692 
2693 #ifdef CONFIG_MEMCG
2694 	if (memcg) {
2695 		struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
2696 
2697 		/* for hotadd_new_pgdat() */
2698 		if (!lruvec->pgdat)
2699 			lruvec->pgdat = pgdat;
2700 
2701 		return lruvec;
2702 	}
2703 #endif
2704 	VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2705 
2706 	return pgdat ? &pgdat->__lruvec : NULL;
2707 }
2708 
get_swappiness(struct lruvec * lruvec,struct scan_control * sc)2709 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
2710 {
2711 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2712 
2713 	if (mem_cgroup_get_nr_swap_pages(memcg) <= 0)
2714 		return 0;
2715 
2716 	return mem_cgroup_swappiness(memcg);
2717 }
2718 
get_nr_gens(struct lruvec * lruvec,int type)2719 static int get_nr_gens(struct lruvec *lruvec, int type)
2720 {
2721 	return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
2722 }
2723 
seq_is_valid(struct lruvec * lruvec)2724 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
2725 {
2726 	/* see the comment on lru_gen_struct */
2727 	return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
2728 	       get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
2729 	       get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
2730 }
2731 
2732 /******************************************************************************
2733  *                          mm_struct list
2734  ******************************************************************************/
2735 
get_mm_list(struct mem_cgroup * memcg)2736 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
2737 {
2738 	static struct lru_gen_mm_list mm_list = {
2739 		.fifo = LIST_HEAD_INIT(mm_list.fifo),
2740 		.lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
2741 	};
2742 
2743 #ifdef CONFIG_MEMCG
2744 	if (memcg)
2745 		return &memcg->mm_list;
2746 #endif
2747 	VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2748 
2749 	return &mm_list;
2750 }
2751 
lru_gen_add_mm(struct mm_struct * mm)2752 void lru_gen_add_mm(struct mm_struct *mm)
2753 {
2754 	int nid;
2755 	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
2756 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2757 
2758 	VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
2759 #ifdef CONFIG_MEMCG
2760 	VM_WARN_ON_ONCE(mm->lru_gen.memcg);
2761 	mm->lru_gen.memcg = memcg;
2762 #endif
2763 	spin_lock(&mm_list->lock);
2764 
2765 	for_each_node_state(nid, N_MEMORY) {
2766 		struct lruvec *lruvec = get_lruvec(memcg, nid);
2767 
2768 		if (!lruvec)
2769 			continue;
2770 
2771 		/* the first addition since the last iteration */
2772 		if (lruvec->mm_state.tail == &mm_list->fifo)
2773 			lruvec->mm_state.tail = &mm->lru_gen.list;
2774 	}
2775 
2776 	list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
2777 
2778 	spin_unlock(&mm_list->lock);
2779 }
2780 
lru_gen_del_mm(struct mm_struct * mm)2781 void lru_gen_del_mm(struct mm_struct *mm)
2782 {
2783 	int nid;
2784 	struct lru_gen_mm_list *mm_list;
2785 	struct mem_cgroup *memcg = NULL;
2786 
2787 	if (list_empty(&mm->lru_gen.list))
2788 		return;
2789 
2790 #ifdef CONFIG_MEMCG
2791 	memcg = mm->lru_gen.memcg;
2792 #endif
2793 	mm_list = get_mm_list(memcg);
2794 
2795 	spin_lock(&mm_list->lock);
2796 
2797 	for_each_node(nid) {
2798 		struct lruvec *lruvec = get_lruvec(memcg, nid);
2799 
2800 		if (!lruvec)
2801 			continue;
2802 
2803 		/* where the current iteration continues after */
2804 		if (lruvec->mm_state.head == &mm->lru_gen.list)
2805 			lruvec->mm_state.head = lruvec->mm_state.head->prev;
2806 
2807 		/* where the last iteration ended before */
2808 		if (lruvec->mm_state.tail == &mm->lru_gen.list)
2809 			lruvec->mm_state.tail = lruvec->mm_state.tail->next;
2810 	}
2811 
2812 	list_del_init(&mm->lru_gen.list);
2813 
2814 	spin_unlock(&mm_list->lock);
2815 
2816 #ifdef CONFIG_MEMCG
2817 	mem_cgroup_put(mm->lru_gen.memcg);
2818 	mm->lru_gen.memcg = NULL;
2819 #endif
2820 }
2821 
2822 #ifdef CONFIG_MEMCG
lru_gen_migrate_mm(struct mm_struct * mm)2823 void lru_gen_migrate_mm(struct mm_struct *mm)
2824 {
2825 	struct mem_cgroup *memcg;
2826 	struct task_struct *task = rcu_dereference_protected(mm->owner, true);
2827 
2828 	VM_WARN_ON_ONCE(task->mm != mm);
2829 	lockdep_assert_held(&task->alloc_lock);
2830 
2831 	/* for mm_update_next_owner() */
2832 	if (mem_cgroup_disabled())
2833 		return;
2834 
2835 	/* migration can happen before addition */
2836 	if (!mm->lru_gen.memcg)
2837 		return;
2838 
2839 	rcu_read_lock();
2840 	memcg = mem_cgroup_from_task(task);
2841 	rcu_read_unlock();
2842 	if (memcg == mm->lru_gen.memcg)
2843 		return;
2844 
2845 	VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
2846 
2847 	lru_gen_del_mm(mm);
2848 	lru_gen_add_mm(mm);
2849 }
2850 #endif
2851 
2852 /*
2853  * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when
2854  * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
2855  * bits in a bitmap, k is the number of hash functions and n is the number of
2856  * inserted items.
2857  *
2858  * Page table walkers use one of the two filters to reduce their search space.
2859  * To get rid of non-leaf entries that no longer have enough leaf entries, the
2860  * aging uses the double-buffering technique to flip to the other filter each
2861  * time it produces a new generation. For non-leaf entries that have enough
2862  * leaf entries, the aging carries them over to the next generation in
2863  * walk_pmd_range(); the eviction also report them when walking the rmap
2864  * in lru_gen_look_around().
2865  *
2866  * For future optimizations:
2867  * 1. It's not necessary to keep both filters all the time. The spare one can be
2868  *    freed after the RCU grace period and reallocated if needed again.
2869  * 2. And when reallocating, it's worth scaling its size according to the number
2870  *    of inserted entries in the other filter, to reduce the memory overhead on
2871  *    small systems and false positives on large systems.
2872  * 3. Jenkins' hash function is an alternative to Knuth's.
2873  */
2874 #define BLOOM_FILTER_SHIFT	15
2875 
filter_gen_from_seq(unsigned long seq)2876 static inline int filter_gen_from_seq(unsigned long seq)
2877 {
2878 	return seq % NR_BLOOM_FILTERS;
2879 }
2880 
get_item_key(void * item,int * key)2881 static void get_item_key(void *item, int *key)
2882 {
2883 	u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
2884 
2885 	BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
2886 
2887 	key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
2888 	key[1] = hash >> BLOOM_FILTER_SHIFT;
2889 }
2890 
reset_bloom_filter(struct lruvec * lruvec,unsigned long seq)2891 static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
2892 {
2893 	unsigned long *filter;
2894 	int gen = filter_gen_from_seq(seq);
2895 
2896 	filter = lruvec->mm_state.filters[gen];
2897 	if (filter) {
2898 		bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
2899 		return;
2900 	}
2901 
2902 	filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
2903 			       __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
2904 	WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
2905 }
2906 
update_bloom_filter(struct lruvec * lruvec,unsigned long seq,void * item)2907 static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
2908 {
2909 	int key[2];
2910 	unsigned long *filter;
2911 	int gen = filter_gen_from_seq(seq);
2912 
2913 	filter = READ_ONCE(lruvec->mm_state.filters[gen]);
2914 	if (!filter)
2915 		return;
2916 
2917 	get_item_key(item, key);
2918 
2919 	if (!test_bit(key[0], filter))
2920 		set_bit(key[0], filter);
2921 	if (!test_bit(key[1], filter))
2922 		set_bit(key[1], filter);
2923 }
2924 
test_bloom_filter(struct lruvec * lruvec,unsigned long seq,void * item)2925 static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
2926 {
2927 	int key[2];
2928 	unsigned long *filter;
2929 	int gen = filter_gen_from_seq(seq);
2930 
2931 	filter = READ_ONCE(lruvec->mm_state.filters[gen]);
2932 	if (!filter)
2933 		return true;
2934 
2935 	get_item_key(item, key);
2936 
2937 	return test_bit(key[0], filter) && test_bit(key[1], filter);
2938 }
2939 
reset_mm_stats(struct lruvec * lruvec,struct lru_gen_mm_walk * walk,bool last)2940 static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
2941 {
2942 	int i;
2943 	int hist;
2944 
2945 	lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
2946 
2947 	if (walk) {
2948 		hist = lru_hist_from_seq(walk->max_seq);
2949 
2950 		for (i = 0; i < NR_MM_STATS; i++) {
2951 			WRITE_ONCE(lruvec->mm_state.stats[hist][i],
2952 				   lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
2953 			walk->mm_stats[i] = 0;
2954 		}
2955 	}
2956 
2957 	if (NR_HIST_GENS > 1 && last) {
2958 		hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
2959 
2960 		for (i = 0; i < NR_MM_STATS; i++)
2961 			WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
2962 	}
2963 }
2964 
should_skip_mm(struct mm_struct * mm,struct lru_gen_mm_walk * walk)2965 static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
2966 {
2967 	int type;
2968 	unsigned long size = 0;
2969 	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
2970 	int key = pgdat->node_id;
2971 
2972 	if (!walk->full_scan && !node_isset(key, mm->lru_gen.nodes))
2973 		return true;
2974 
2975 	node_clear(key, mm->lru_gen.nodes);
2976 
2977 	for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
2978 		size += type ? get_mm_counter(mm, MM_FILEPAGES) :
2979 			       get_mm_counter(mm, MM_ANONPAGES) +
2980 			       get_mm_counter(mm, MM_SHMEMPAGES);
2981 	}
2982 
2983 	if (size < MIN_LRU_BATCH)
2984 		return true;
2985 
2986 	return !mmget_not_zero(mm);
2987 }
2988 
iterate_mm_list(struct lruvec * lruvec,struct lru_gen_mm_walk * walk,struct mm_struct ** iter)2989 static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
2990 			    struct mm_struct **iter)
2991 {
2992 	bool first = false;
2993 	bool last = false;
2994 	struct mm_struct *mm = NULL;
2995 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2996 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2997 	struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
2998 
2999 	/*
3000 	 * mm_state->seq is incremented after each iteration of mm_list. There
3001 	 * are three interesting cases for this page table walker:
3002 	 * 1. It tries to start a new iteration with a stale max_seq: there is
3003 	 *    nothing left to do.
3004 	 * 2. It started the next iteration: it needs to reset the Bloom filter
3005 	 *    so that a fresh set of PTE tables can be recorded.
3006 	 * 3. It ended the current iteration: it needs to reset the mm stats
3007 	 *    counters and tell its caller to increment max_seq.
3008 	 */
3009 	spin_lock(&mm_list->lock);
3010 
3011 	VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq);
3012 
3013 	if (walk->max_seq <= mm_state->seq)
3014 		goto done;
3015 
3016 	if (!mm_state->head)
3017 		mm_state->head = &mm_list->fifo;
3018 
3019 	if (mm_state->head == &mm_list->fifo)
3020 		first = true;
3021 
3022 	do {
3023 		mm_state->head = mm_state->head->next;
3024 		if (mm_state->head == &mm_list->fifo) {
3025 			WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3026 			last = true;
3027 			break;
3028 		}
3029 
3030 		/* force scan for those added after the last iteration */
3031 		if (!mm_state->tail || mm_state->tail == mm_state->head) {
3032 			mm_state->tail = mm_state->head->next;
3033 			walk->full_scan = true;
3034 		}
3035 
3036 		mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
3037 		if (should_skip_mm(mm, walk))
3038 			mm = NULL;
3039 	} while (!mm);
3040 done:
3041 	if (*iter || last)
3042 		reset_mm_stats(lruvec, walk, last);
3043 
3044 	spin_unlock(&mm_list->lock);
3045 
3046 	if (mm && first)
3047 		reset_bloom_filter(lruvec, walk->max_seq + 1);
3048 
3049 	if (*iter)
3050 		mmput_async(*iter);
3051 
3052 	*iter = mm;
3053 
3054 	return last;
3055 }
3056 
iterate_mm_list_nowalk(struct lruvec * lruvec,unsigned long max_seq)3057 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
3058 {
3059 	bool success = false;
3060 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3061 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3062 	struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
3063 
3064 	spin_lock(&mm_list->lock);
3065 
3066 	VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq);
3067 
3068 	if (max_seq > mm_state->seq) {
3069 		mm_state->head = NULL;
3070 		mm_state->tail = NULL;
3071 		WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3072 		reset_mm_stats(lruvec, NULL, true);
3073 		success = true;
3074 	}
3075 
3076 	spin_unlock(&mm_list->lock);
3077 
3078 	return success;
3079 }
3080 
3081 /******************************************************************************
3082  *                          refault feedback loop
3083  ******************************************************************************/
3084 
3085 /*
3086  * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3087  *
3088  * The P term is refaulted/(evicted+protected) from a tier in the generation
3089  * currently being evicted; the I term is the exponential moving average of the
3090  * P term over the generations previously evicted, using the smoothing factor
3091  * 1/2; the D term isn't supported.
3092  *
3093  * The setpoint (SP) is always the first tier of one type; the process variable
3094  * (PV) is either any tier of the other type or any other tier of the same
3095  * type.
3096  *
3097  * The error is the difference between the SP and the PV; the correction is to
3098  * turn off protection when SP>PV or turn on protection when SP<PV.
3099  *
3100  * For future optimizations:
3101  * 1. The D term may discount the other two terms over time so that long-lived
3102  *    generations can resist stale information.
3103  */
3104 struct ctrl_pos {
3105 	unsigned long refaulted;
3106 	unsigned long total;
3107 	int gain;
3108 };
3109 
read_ctrl_pos(struct lruvec * lruvec,int type,int tier,int gain,struct ctrl_pos * pos)3110 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
3111 			  struct ctrl_pos *pos)
3112 {
3113 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3114 	int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3115 
3116 	pos->refaulted = lrugen->avg_refaulted[type][tier] +
3117 			 atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3118 	pos->total = lrugen->avg_total[type][tier] +
3119 		     atomic_long_read(&lrugen->evicted[hist][type][tier]);
3120 	if (tier)
3121 		pos->total += lrugen->protected[hist][type][tier - 1];
3122 	pos->gain = gain;
3123 }
3124 
reset_ctrl_pos(struct lruvec * lruvec,int type,bool carryover)3125 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
3126 {
3127 	int hist, tier;
3128 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3129 	bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
3130 	unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
3131 
3132 	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
3133 
3134 	if (!carryover && !clear)
3135 		return;
3136 
3137 	hist = lru_hist_from_seq(seq);
3138 
3139 	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
3140 		if (carryover) {
3141 			unsigned long sum;
3142 
3143 			sum = lrugen->avg_refaulted[type][tier] +
3144 			      atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3145 			WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
3146 
3147 			sum = lrugen->avg_total[type][tier] +
3148 			      atomic_long_read(&lrugen->evicted[hist][type][tier]);
3149 			if (tier)
3150 				sum += lrugen->protected[hist][type][tier - 1];
3151 			WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
3152 		}
3153 
3154 		if (clear) {
3155 			atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
3156 			atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
3157 			if (tier)
3158 				WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
3159 		}
3160 	}
3161 }
3162 
positive_ctrl_err(struct ctrl_pos * sp,struct ctrl_pos * pv)3163 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
3164 {
3165 	/*
3166 	 * Return true if the PV has a limited number of refaults or a lower
3167 	 * refaulted/total than the SP.
3168 	 */
3169 	return pv->refaulted < MIN_LRU_BATCH ||
3170 	       pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
3171 	       (sp->refaulted + 1) * pv->total * pv->gain;
3172 }
3173 
3174 /******************************************************************************
3175  *                          the aging
3176  ******************************************************************************/
3177 
3178 /* promote pages accessed through page tables */
page_update_gen(struct page * page,int gen)3179 static int page_update_gen(struct page *page, int gen)
3180 {
3181 	unsigned long new_flags, old_flags;
3182 
3183 	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
3184 	VM_WARN_ON_ONCE(!rcu_read_lock_held());
3185 
3186 	do {
3187 		old_flags = READ_ONCE(page->flags);
3188 
3189 		/* lru_gen_del_page() has isolated this page? */
3190 		if (!(old_flags & LRU_GEN_MASK)) {
3191 			/* for shrink_page_list() */
3192 			new_flags = old_flags | BIT(PG_referenced);
3193 			continue;
3194 		}
3195 
3196 		new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3197 		new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
3198 	} while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
3199 
3200 	return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3201 }
3202 
3203 /* protect pages accessed multiple times through file descriptors */
page_inc_gen(struct lruvec * lruvec,struct page * page,bool reclaiming)3204 static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
3205 {
3206 	int type = page_is_file_lru(page);
3207 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3208 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3209 	unsigned long new_flags, old_flags;
3210 
3211 	do {
3212 		old_flags = READ_ONCE(page->flags);
3213 
3214 		VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page);
3215 
3216 		new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3217 		/* page_update_gen() has promoted this page? */
3218 		if (new_gen >= 0 && new_gen != old_gen)
3219 			return new_gen;
3220 
3221 		new_gen = (old_gen + 1) % MAX_NR_GENS;
3222 
3223 		new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3224 		new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
3225 		/* for end_page_writeback() */
3226 		if (reclaiming)
3227 			new_flags |= BIT(PG_reclaim);
3228 	} while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
3229 
3230 	lru_gen_update_size(lruvec, page, old_gen, new_gen);
3231 
3232 	return new_gen;
3233 }
3234 
update_batch_size(struct lru_gen_mm_walk * walk,struct page * page,int old_gen,int new_gen)3235 static void update_batch_size(struct lru_gen_mm_walk *walk, struct page *page,
3236 			      int old_gen, int new_gen)
3237 {
3238 	int type = page_is_file_lru(page);
3239 	int zone = page_zonenum(page);
3240 	int delta = thp_nr_pages(page);
3241 
3242 	VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
3243 	VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
3244 
3245 	walk->batched++;
3246 
3247 	walk->nr_pages[old_gen][type][zone] -= delta;
3248 	walk->nr_pages[new_gen][type][zone] += delta;
3249 }
3250 
reset_batch_size(struct lruvec * lruvec,struct lru_gen_mm_walk * walk)3251 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
3252 {
3253 	int gen, type, zone;
3254 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3255 
3256 	walk->batched = 0;
3257 
3258 	for_each_gen_type_zone(gen, type, zone) {
3259 		enum lru_list lru = type * LRU_INACTIVE_FILE;
3260 		int delta = walk->nr_pages[gen][type][zone];
3261 
3262 		if (!delta)
3263 			continue;
3264 
3265 		walk->nr_pages[gen][type][zone] = 0;
3266 		WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
3267 			   lrugen->nr_pages[gen][type][zone] + delta);
3268 
3269 		if (lru_gen_is_active(lruvec, gen))
3270 			lru += LRU_ACTIVE;
3271 		__update_lru_size(lruvec, lru, zone, delta);
3272 	}
3273 }
3274 
should_skip_vma(unsigned long start,unsigned long end,struct mm_walk * args)3275 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
3276 {
3277 	struct address_space *mapping;
3278 	struct vm_area_struct *vma = args->vma;
3279 	struct lru_gen_mm_walk *walk = args->private;
3280 
3281 	if (!vma_is_accessible(vma))
3282 		return true;
3283 
3284 	if (is_vm_hugetlb_page(vma))
3285 		return true;
3286 
3287 	if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL | VM_SEQ_READ | VM_RAND_READ))
3288 		return true;
3289 
3290 	if (vma == get_gate_vma(vma->vm_mm))
3291 		return true;
3292 
3293 	if (vma_is_anonymous(vma))
3294 		return !walk->can_swap;
3295 
3296 	if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
3297 		return true;
3298 
3299 	mapping = vma->vm_file->f_mapping;
3300 	if (mapping_unevictable(mapping))
3301 		return true;
3302 
3303 	if (shmem_mapping(mapping))
3304 		return !walk->can_swap;
3305 
3306 	/* to exclude special mappings like dax, etc. */
3307 	return !mapping->a_ops->readpage;
3308 }
3309 
3310 /*
3311  * Some userspace memory allocators map many single-page VMAs. Instead of
3312  * returning back to the PGD table for each of such VMAs, finish an entire PMD
3313  * table to reduce zigzags and improve cache performance.
3314  */
get_next_vma(unsigned long mask,unsigned long size,struct mm_walk * args,unsigned long * vm_start,unsigned long * vm_end)3315 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
3316 			 unsigned long *vm_start, unsigned long *vm_end)
3317 {
3318 	unsigned long start = round_up(*vm_end, size);
3319 	unsigned long end = (start | ~mask) + 1;
3320 
3321 	VM_WARN_ON_ONCE(mask & size);
3322 	VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
3323 
3324 	while (args->vma) {
3325 		if (start >= args->vma->vm_end) {
3326 			args->vma = args->vma->vm_next;
3327 			continue;
3328 		}
3329 
3330 		if (end && end <= args->vma->vm_start)
3331 			return false;
3332 
3333 		if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) {
3334 			args->vma = args->vma->vm_next;
3335 			continue;
3336 		}
3337 
3338 		*vm_start = max(start, args->vma->vm_start);
3339 		*vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
3340 
3341 		return true;
3342 	}
3343 
3344 	return false;
3345 }
3346 
get_pte_pfn(pte_t pte,struct vm_area_struct * vma,unsigned long addr)3347 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
3348 {
3349 	unsigned long pfn = pte_pfn(pte);
3350 
3351 	VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3352 
3353 	if (!pte_present(pte) || is_zero_pfn(pfn))
3354 		return -1;
3355 
3356 	if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
3357 		return -1;
3358 
3359 	if (WARN_ON_ONCE(!pfn_valid(pfn)))
3360 		return -1;
3361 
3362 	return pfn;
3363 }
3364 
3365 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
get_pmd_pfn(pmd_t pmd,struct vm_area_struct * vma,unsigned long addr)3366 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
3367 {
3368 	unsigned long pfn = pmd_pfn(pmd);
3369 
3370 	VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3371 
3372 	if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
3373 		return -1;
3374 
3375 	if (WARN_ON_ONCE(pmd_devmap(pmd)))
3376 		return -1;
3377 
3378 	if (WARN_ON_ONCE(!pfn_valid(pfn)))
3379 		return -1;
3380 
3381 	return pfn;
3382 }
3383 #endif
3384 
get_pfn_page(unsigned long pfn,struct mem_cgroup * memcg,struct pglist_data * pgdat,bool can_swap)3385 static struct page *get_pfn_page(unsigned long pfn, struct mem_cgroup *memcg,
3386 				 struct pglist_data *pgdat, bool can_swap)
3387 {
3388 	struct page *page;
3389 
3390 	/* try to avoid unnecessary memory loads */
3391 	if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3392 		return NULL;
3393 
3394 	page = compound_head(pfn_to_page(pfn));
3395 	if (page_to_nid(page) != pgdat->node_id)
3396 		return NULL;
3397 
3398 	if (page_memcg_rcu(page) != memcg)
3399 		return NULL;
3400 
3401 	/* file VMAs can contain anon pages from COW */
3402 	if (!page_is_file_lru(page) && !can_swap)
3403 		return NULL;
3404 
3405 	return page;
3406 }
3407 
suitable_to_scan(int total,int young)3408 static bool suitable_to_scan(int total, int young)
3409 {
3410 	int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
3411 
3412 	/* suitable if the average number of young PTEs per cacheline is >=1 */
3413 	return young * n >= total;
3414 }
3415 
walk_pte_range(pmd_t * pmd,unsigned long start,unsigned long end,struct mm_walk * args)3416 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
3417 			   struct mm_walk *args)
3418 {
3419 	int i;
3420 	pte_t *pte;
3421 	spinlock_t *ptl;
3422 	unsigned long addr;
3423 	int total = 0;
3424 	int young = 0;
3425 	struct lru_gen_mm_walk *walk = args->private;
3426 	struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3427 	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3428 	int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3429 
3430 	VM_WARN_ON_ONCE(pmd_leaf(*pmd));
3431 
3432 	ptl = pte_lockptr(args->mm, pmd);
3433 	if (!spin_trylock(ptl))
3434 		return false;
3435 
3436 	arch_enter_lazy_mmu_mode();
3437 
3438 	pte = pte_offset_map(pmd, start & PMD_MASK);
3439 restart:
3440 	for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
3441 		unsigned long pfn;
3442 		struct page *page;
3443 
3444 		total++;
3445 		walk->mm_stats[MM_LEAF_TOTAL]++;
3446 
3447 		pfn = get_pte_pfn(pte[i], args->vma, addr);
3448 		if (pfn == -1)
3449 			continue;
3450 
3451 		if (!pte_young(pte[i])) {
3452 			walk->mm_stats[MM_LEAF_OLD]++;
3453 			continue;
3454 		}
3455 
3456 		page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap);
3457 		if (!page)
3458 			continue;
3459 
3460 		if (!ptep_test_and_clear_young(args->vma, addr, pte + i))
3461 			VM_WARN_ON_ONCE(true);
3462 
3463 		young++;
3464 		walk->mm_stats[MM_LEAF_YOUNG]++;
3465 
3466 		if (pte_dirty(pte[i]) && !PageDirty(page) &&
3467 		    !(PageAnon(page) && PageSwapBacked(page) &&
3468 		      !PageSwapCache(page)))
3469 			set_page_dirty(page);
3470 
3471 		old_gen = page_update_gen(page, new_gen);
3472 		if (old_gen >= 0 && old_gen != new_gen)
3473 			update_batch_size(walk, page, old_gen, new_gen);
3474 	}
3475 
3476 	if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
3477 		goto restart;
3478 
3479 	pte_unmap(pte);
3480 
3481 	arch_leave_lazy_mmu_mode();
3482 	spin_unlock(ptl);
3483 
3484 	return suitable_to_scan(total, young);
3485 }
3486 
3487 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
walk_pmd_range_locked(pud_t * pud,unsigned long next,struct vm_area_struct * vma,struct mm_walk * args,unsigned long * bitmap,unsigned long * start)3488 static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
3489 				  struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
3490 {
3491 	int i;
3492 	pmd_t *pmd;
3493 	spinlock_t *ptl;
3494 	struct lru_gen_mm_walk *walk = args->private;
3495 	struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3496 	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3497 	int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3498 
3499 	VM_WARN_ON_ONCE(pud_leaf(*pud));
3500 
3501 	/* try to batch at most 1+MIN_LRU_BATCH+1 entries */
3502 	if (*start == -1) {
3503 		*start = next;
3504 		return;
3505 	}
3506 
3507 	i = next == -1 ? 0 : pmd_index(next) - pmd_index(*start);
3508 	if (i && i <= MIN_LRU_BATCH) {
3509 		__set_bit(i - 1, bitmap);
3510 		return;
3511 	}
3512 
3513 	pmd = pmd_offset(pud, *start);
3514 
3515 	ptl = pmd_lockptr(args->mm, pmd);
3516 	if (!spin_trylock(ptl))
3517 		goto done;
3518 
3519 	arch_enter_lazy_mmu_mode();
3520 
3521 	do {
3522 		unsigned long pfn;
3523 		struct page *page;
3524 		unsigned long addr = i ? (*start & PMD_MASK) + i * PMD_SIZE : *start;
3525 
3526 		pfn = get_pmd_pfn(pmd[i], vma, addr);
3527 		if (pfn == -1)
3528 			goto next;
3529 
3530 		if (!pmd_trans_huge(pmd[i])) {
3531 			if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) &&
3532 			    get_cap(LRU_GEN_NONLEAF_YOUNG))
3533 				pmdp_test_and_clear_young(vma, addr, pmd + i);
3534 			goto next;
3535 		}
3536 
3537 		page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap);
3538 		if (!page)
3539 			goto next;
3540 
3541 		if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
3542 			goto next;
3543 
3544 		walk->mm_stats[MM_LEAF_YOUNG]++;
3545 
3546 		if (pmd_dirty(pmd[i]) && !PageDirty(page) &&
3547 		    !(PageAnon(page) && PageSwapBacked(page) &&
3548 		      !PageSwapCache(page)))
3549 			set_page_dirty(page);
3550 
3551 		old_gen = page_update_gen(page, new_gen);
3552 		if (old_gen >= 0 && old_gen != new_gen)
3553 			update_batch_size(walk, page, old_gen, new_gen);
3554 next:
3555 		i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
3556 	} while (i <= MIN_LRU_BATCH);
3557 
3558 	arch_leave_lazy_mmu_mode();
3559 	spin_unlock(ptl);
3560 done:
3561 	*start = -1;
3562 	bitmap_zero(bitmap, MIN_LRU_BATCH);
3563 }
3564 #else
walk_pmd_range_locked(pud_t * pud,unsigned long next,struct vm_area_struct * vma,struct mm_walk * args,unsigned long * bitmap,unsigned long * start)3565 static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
3566 				  struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
3567 {
3568 }
3569 #endif
3570 
walk_pmd_range(pud_t * pud,unsigned long start,unsigned long end,struct mm_walk * args)3571 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
3572 			   struct mm_walk *args)
3573 {
3574 	int i;
3575 	pmd_t *pmd;
3576 	unsigned long next;
3577 	unsigned long addr;
3578 	struct vm_area_struct *vma;
3579 	unsigned long pos = -1;
3580 	struct lru_gen_mm_walk *walk = args->private;
3581 	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
3582 
3583 	VM_WARN_ON_ONCE(pud_leaf(*pud));
3584 
3585 	/*
3586 	 * Finish an entire PMD in two passes: the first only reaches to PTE
3587 	 * tables to avoid taking the PMD lock; the second, if necessary, takes
3588 	 * the PMD lock to clear the accessed bit in PMD entries.
3589 	 */
3590 	pmd = pmd_offset(pud, start & PUD_MASK);
3591 restart:
3592 	/* walk_pte_range() may call get_next_vma() */
3593 	vma = args->vma;
3594 	for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
3595 		pmd_t val = pmd_read_atomic(pmd + i);
3596 
3597 		/* for pmd_read_atomic() */
3598 		barrier();
3599 
3600 		next = pmd_addr_end(addr, end);
3601 
3602 		if (!pmd_present(val) || is_huge_zero_pmd(val)) {
3603 			walk->mm_stats[MM_LEAF_TOTAL]++;
3604 			continue;
3605 		}
3606 
3607 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3608 		if (pmd_trans_huge(val)) {
3609 			unsigned long pfn = pmd_pfn(val);
3610 			struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3611 
3612 			walk->mm_stats[MM_LEAF_TOTAL]++;
3613 
3614 			if (!pmd_young(val)) {
3615 				walk->mm_stats[MM_LEAF_OLD]++;
3616 				continue;
3617 			}
3618 
3619 			/* try to avoid unnecessary memory loads */
3620 			if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3621 				continue;
3622 
3623 			walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
3624 			continue;
3625 		}
3626 #endif
3627 		walk->mm_stats[MM_NONLEAF_TOTAL]++;
3628 
3629 #ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
3630 		if (get_cap(LRU_GEN_NONLEAF_YOUNG)) {
3631 			if (!pmd_young(val))
3632 				continue;
3633 
3634 			walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
3635 		}
3636 #endif
3637 		if (!walk->full_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
3638 			continue;
3639 
3640 		walk->mm_stats[MM_NONLEAF_FOUND]++;
3641 
3642 		if (!walk_pte_range(&val, addr, next, args))
3643 			continue;
3644 
3645 		walk->mm_stats[MM_NONLEAF_ADDED]++;
3646 
3647 		/* carry over to the next generation */
3648 		update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
3649 	}
3650 
3651 	walk_pmd_range_locked(pud, -1, vma, args, bitmap, &pos);
3652 
3653 	if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
3654 		goto restart;
3655 }
3656 
walk_pud_range(p4d_t * p4d,unsigned long start,unsigned long end,struct mm_walk * args)3657 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
3658 			  struct mm_walk *args)
3659 {
3660 	int i;
3661 	pud_t *pud;
3662 	unsigned long addr;
3663 	unsigned long next;
3664 	struct lru_gen_mm_walk *walk = args->private;
3665 
3666 	VM_WARN_ON_ONCE(p4d_leaf(*p4d));
3667 
3668 	pud = pud_offset(p4d, start & P4D_MASK);
3669 restart:
3670 	for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
3671 		pud_t val = READ_ONCE(pud[i]);
3672 
3673 		next = pud_addr_end(addr, end);
3674 
3675 		if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
3676 			continue;
3677 
3678 		walk_pmd_range(&val, addr, next, args);
3679 
3680 		if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
3681 			end = (addr | ~PUD_MASK) + 1;
3682 			goto done;
3683 		}
3684 	}
3685 
3686 	if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
3687 		goto restart;
3688 
3689 	end = round_up(end, P4D_SIZE);
3690 done:
3691 	if (!end || !args->vma)
3692 		return 1;
3693 
3694 	walk->next_addr = max(end, args->vma->vm_start);
3695 
3696 	return -EAGAIN;
3697 }
3698 
walk_mm(struct lruvec * lruvec,struct mm_struct * mm,struct lru_gen_mm_walk * walk)3699 static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
3700 {
3701 	static const struct mm_walk_ops mm_walk_ops = {
3702 		.test_walk = should_skip_vma,
3703 		.p4d_entry = walk_pud_range,
3704 	};
3705 
3706 	int err;
3707 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3708 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
3709 
3710 	walk->next_addr = FIRST_USER_ADDRESS;
3711 
3712 	do {
3713 		DEFINE_MAX_SEQ(lruvec);
3714 
3715 		err = -EBUSY;
3716 
3717 		/* another thread might have called inc_max_seq() */
3718 		if (walk->max_seq != max_seq)
3719 			break;
3720 
3721 		/* page_update_gen() requires stable page_memcg() */
3722 		if (!mem_cgroup_trylock_pages(memcg))
3723 			break;
3724 
3725 		/* the caller might be holding the lock for write */
3726 		if (mmap_read_trylock(mm)) {
3727 			err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
3728 
3729 			mmap_read_unlock(mm);
3730 		}
3731 
3732 		mem_cgroup_unlock_pages();
3733 
3734 		if (walk->batched) {
3735 			spin_lock_irq(&pgdat->lru_lock);
3736 			reset_batch_size(lruvec, walk);
3737 			spin_unlock_irq(&pgdat->lru_lock);
3738 		}
3739 
3740 		cond_resched();
3741 	} while (err == -EAGAIN);
3742 }
3743 
set_mm_walk(struct pglist_data * pgdat)3744 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat)
3745 {
3746 	struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3747 
3748 	if (pgdat && current_is_kswapd()) {
3749 		VM_WARN_ON_ONCE(walk);
3750 
3751 		walk = &pgdat->mm_walk;
3752 	} else if (!pgdat && !walk) {
3753 		VM_WARN_ON_ONCE(current_is_kswapd());
3754 
3755 		walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
3756 	}
3757 
3758 	current->reclaim_state->mm_walk = walk;
3759 
3760 	return walk;
3761 }
3762 
clear_mm_walk(void)3763 static void clear_mm_walk(void)
3764 {
3765 	struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3766 
3767 	VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
3768 	VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
3769 
3770 	current->reclaim_state->mm_walk = NULL;
3771 
3772 	if (!current_is_kswapd())
3773 		kfree(walk);
3774 }
3775 
inc_min_seq(struct lruvec * lruvec,int type,bool can_swap)3776 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
3777 {
3778 	int zone;
3779 	int remaining = MAX_LRU_BATCH;
3780 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3781 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3782 
3783 	if (type == LRU_GEN_ANON && !can_swap)
3784 		goto done;
3785 
3786 	/* prevent cold/hot inversion if full_scan is true */
3787 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3788 		struct list_head *head = &lrugen->lists[old_gen][type][zone];
3789 
3790 		while (!list_empty(head)) {
3791 			struct page *page = lru_to_page(head);
3792 
3793 			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
3794 			VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
3795 			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
3796 			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
3797 
3798 			new_gen = page_inc_gen(lruvec, page, false);
3799 			list_move_tail(&page->lru, &lrugen->lists[new_gen][type][zone]);
3800 
3801 			if (!--remaining)
3802 				return false;
3803 		}
3804 	}
3805 done:
3806 	reset_ctrl_pos(lruvec, type, true);
3807 	WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
3808 
3809 	return true;
3810 }
3811 
try_to_inc_min_seq(struct lruvec * lruvec,bool can_swap)3812 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
3813 {
3814 	int gen, type, zone;
3815 	bool success = false;
3816 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3817 	DEFINE_MIN_SEQ(lruvec);
3818 
3819 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3820 
3821 	/* find the oldest populated generation */
3822 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
3823 		while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
3824 			gen = lru_gen_from_seq(min_seq[type]);
3825 
3826 			for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3827 				if (!list_empty(&lrugen->lists[gen][type][zone]))
3828 					goto next;
3829 			}
3830 
3831 			min_seq[type]++;
3832 		}
3833 next:
3834 		;
3835 	}
3836 
3837 	/* see the comment on lru_gen_struct */
3838 	if (can_swap) {
3839 		min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
3840 		min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
3841 	}
3842 
3843 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
3844 		if (min_seq[type] == lrugen->min_seq[type])
3845 			continue;
3846 
3847 		reset_ctrl_pos(lruvec, type, true);
3848 		WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
3849 		success = true;
3850 	}
3851 
3852 	return success;
3853 }
3854 
inc_max_seq(struct lruvec * lruvec,bool can_swap,bool full_scan)3855 static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool full_scan)
3856 {
3857 	int prev, next;
3858 	int type, zone;
3859 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3860 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
3861 restart:
3862 	spin_lock_irq(&pgdat->lru_lock);
3863 
3864 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3865 
3866 	for (type = ANON_AND_FILE - 1; type >= 0; type--) {
3867 		if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
3868 			continue;
3869 
3870 		VM_WARN_ON_ONCE(!full_scan && (type == LRU_GEN_FILE || can_swap));
3871 
3872 		if (inc_min_seq(lruvec, type, can_swap))
3873 			continue;
3874 
3875 		spin_unlock_irq(&pgdat->lru_lock);
3876 		cond_resched();
3877 		goto restart;
3878 	}
3879 
3880 	/*
3881 	 * Update the active/inactive LRU sizes for compatibility. Both sides of
3882 	 * the current max_seq need to be covered, since max_seq+1 can overlap
3883 	 * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do
3884 	 * overlap, cold/hot inversion happens.
3885 	 */
3886 	prev = lru_gen_from_seq(lrugen->max_seq - 1);
3887 	next = lru_gen_from_seq(lrugen->max_seq + 1);
3888 
3889 	for (type = 0; type < ANON_AND_FILE; type++) {
3890 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3891 			enum lru_list lru = type * LRU_INACTIVE_FILE;
3892 			long delta = lrugen->nr_pages[prev][type][zone] -
3893 				     lrugen->nr_pages[next][type][zone];
3894 
3895 			if (!delta)
3896 				continue;
3897 
3898 			__update_lru_size(lruvec, lru, zone, delta);
3899 			__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
3900 		}
3901 	}
3902 
3903 	for (type = 0; type < ANON_AND_FILE; type++)
3904 		reset_ctrl_pos(lruvec, type, false);
3905 
3906 	WRITE_ONCE(lrugen->timestamps[next], jiffies);
3907 	/* make sure preceding modifications appear */
3908 	smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
3909 
3910 	spin_unlock_irq(&pgdat->lru_lock);
3911 }
3912 
try_to_inc_max_seq(struct lruvec * lruvec,unsigned long max_seq,struct scan_control * sc,bool can_swap,bool full_scan)3913 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
3914 			       struct scan_control *sc, bool can_swap, bool full_scan)
3915 {
3916 	bool success;
3917 	struct lru_gen_mm_walk *walk;
3918 	struct mm_struct *mm = NULL;
3919 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3920 
3921 	VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
3922 
3923 	/* see the comment in iterate_mm_list() */
3924 	if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
3925 		success = false;
3926 		goto done;
3927 	}
3928 
3929 	/*
3930 	 * If the hardware doesn't automatically set the accessed bit, fallback
3931 	 * to lru_gen_look_around(), which only clears the accessed bit in a
3932 	 * handful of PTEs. Spreading the work out over a period of time usually
3933 	 * is less efficient, but it avoids bursty page faults.
3934 	 */
3935 	if (!full_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
3936 		success = iterate_mm_list_nowalk(lruvec, max_seq);
3937 		goto done;
3938 	}
3939 
3940 	walk = set_mm_walk(NULL);
3941 	if (!walk) {
3942 		success = iterate_mm_list_nowalk(lruvec, max_seq);
3943 		goto done;
3944 	}
3945 
3946 	walk->lruvec = lruvec;
3947 	walk->max_seq = max_seq;
3948 	walk->can_swap = can_swap;
3949 	walk->full_scan = full_scan;
3950 
3951 	do {
3952 		success = iterate_mm_list(lruvec, walk, &mm);
3953 		if (mm)
3954 			walk_mm(lruvec, mm, walk);
3955 	} while (mm);
3956 done:
3957 	if (success)
3958 		inc_max_seq(lruvec, can_swap, full_scan);
3959 
3960 	return success;
3961 }
3962 
should_run_aging(struct lruvec * lruvec,unsigned long max_seq,unsigned long * min_seq,struct scan_control * sc,bool can_swap,unsigned long * nr_to_scan)3963 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
3964 			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
3965 {
3966 	int gen, type, zone;
3967 	unsigned long old = 0;
3968 	unsigned long young = 0;
3969 	unsigned long total = 0;
3970 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3971 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3972 
3973 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
3974 		unsigned long seq;
3975 
3976 		for (seq = min_seq[type]; seq <= max_seq; seq++) {
3977 			unsigned long size = 0;
3978 
3979 			gen = lru_gen_from_seq(seq);
3980 
3981 			for (zone = 0; zone < MAX_NR_ZONES; zone++)
3982 				size += max_t(long, READ_ONCE(lrugen->nr_pages[gen][type][zone]),
3983 						0);
3984 
3985 			total += size;
3986 			if (seq == max_seq)
3987 				young += size;
3988 			else if (seq + MIN_NR_GENS == max_seq)
3989 				old += size;
3990 		}
3991 	}
3992 
3993 	/* try to scrape all its memory if this memcg was deleted */
3994 	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
3995 
3996 	/*
3997 	 * The aging tries to be lazy to reduce the overhead, while the eviction
3998 	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
3999 	 * ideal number of generations is MIN_NR_GENS+1.
4000 	 */
4001 	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq)
4002 		return true;
4003 	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
4004 		return false;
4005 
4006 	/*
4007 	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
4008 	 * of the total number of pages for each generation. A reasonable range
4009 	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
4010 	 * aging cares about the upper bound of hot pages, while the eviction
4011 	 * cares about the lower bound of cold pages.
4012 	 */
4013 	if (young * MIN_NR_GENS > total)
4014 		return true;
4015 	if (old * (MIN_NR_GENS + 2) < total)
4016 		return true;
4017 
4018 	return false;
4019 }
4020 
age_lruvec(struct lruvec * lruvec,struct scan_control * sc,unsigned long min_ttl)4021 static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl)
4022 {
4023 	bool need_aging;
4024 	unsigned long nr_to_scan;
4025 	int swappiness = get_swappiness(lruvec, sc);
4026 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4027 	DEFINE_MAX_SEQ(lruvec);
4028 	DEFINE_MIN_SEQ(lruvec);
4029 
4030 	VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
4031 
4032 	mem_cgroup_calculate_protection(NULL, memcg);
4033 
4034 	if (mem_cgroup_below_min(memcg))
4035 		return false;
4036 
4037 	need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
4038 
4039 	if (min_ttl) {
4040 		int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
4041 		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
4042 
4043 		if (time_is_after_jiffies(birth + min_ttl))
4044 			return false;
4045 
4046 		/* the size is likely too small to be helpful */
4047 		if (!nr_to_scan && sc->priority != DEF_PRIORITY)
4048 			return false;
4049 	}
4050 
4051 	if (need_aging)
4052 		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);
4053 
4054 	return true;
4055 }
4056 
4057 /* to protect the working set of the last N jiffies */
4058 static unsigned long lru_gen_min_ttl __read_mostly;
4059 
lru_gen_age_node(struct pglist_data * pgdat,struct scan_control * sc)4060 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
4061 {
4062 	struct mem_cgroup *memcg;
4063 	bool success = false;
4064 	unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
4065 
4066 	VM_WARN_ON_ONCE(!current_is_kswapd());
4067 
4068 	sc->last_reclaimed = sc->nr_reclaimed;
4069 
4070 	/*
4071 	 * To reduce the chance of going into the aging path, which can be
4072 	 * costly, optimistically skip it if the flag below was cleared in the
4073 	 * eviction path. This improves the overall performance when multiple
4074 	 * memcgs are available.
4075 	 */
4076 	if (!sc->memcgs_need_aging) {
4077 		sc->memcgs_need_aging = true;
4078 		return;
4079 	}
4080 
4081 	set_mm_walk(pgdat);
4082 
4083 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
4084 	do {
4085 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4086 
4087 		if (age_lruvec(lruvec, sc, min_ttl))
4088 			success = true;
4089 
4090 		cond_resched();
4091 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4092 
4093 	clear_mm_walk();
4094 
4095 	/* check the order to exclude compaction-induced reclaim */
4096 	if (success || !min_ttl || sc->order)
4097 		return;
4098 
4099 	/*
4100 	 * The main goal is to OOM kill if every generation from all memcgs is
4101 	 * younger than min_ttl. However, another possibility is all memcgs are
4102 	 * either below min or empty.
4103 	 */
4104 	if (mutex_trylock(&oom_lock)) {
4105 		struct oom_control oc = {
4106 			.gfp_mask = sc->gfp_mask,
4107 		};
4108 
4109 		out_of_memory(&oc);
4110 
4111 		mutex_unlock(&oom_lock);
4112 	}
4113 }
4114 
4115 /*
4116  * This function exploits spatial locality when shrink_page_list() walks the
4117  * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
4118  * the scan was done cacheline efficiently, it adds the PMD entry pointing to
4119  * the PTE table to the Bloom filter. This forms a feedback loop between the
4120  * eviction and the aging.
4121  */
lru_gen_look_around(struct page_vma_mapped_walk * pvmw)4122 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
4123 {
4124 	int i;
4125 	pte_t *pte;
4126 	unsigned long start;
4127 	unsigned long end;
4128 	unsigned long addr;
4129 	struct lru_gen_mm_walk *walk;
4130 	int young = 0;
4131 	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
4132 	struct page *page = pvmw->page;
4133 	bool can_swap = !page_is_file_lru(page);
4134 	struct mem_cgroup *memcg = page_memcg(page);
4135 	struct pglist_data *pgdat = page_pgdat(page);
4136 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4137 	DEFINE_MAX_SEQ(lruvec);
4138 	int old_gen, new_gen = lru_gen_from_seq(max_seq);
4139 
4140 	lockdep_assert_held(pvmw->ptl);
4141 	VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
4142 
4143 	if (spin_is_contended(pvmw->ptl))
4144 		return;
4145 
4146 	/* avoid taking the LRU lock under the PTL when possible */
4147 	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
4148 
4149 	start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
4150 	end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
4151 
4152 	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
4153 		if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
4154 			end = start + MIN_LRU_BATCH * PAGE_SIZE;
4155 		else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
4156 			start = end - MIN_LRU_BATCH * PAGE_SIZE;
4157 		else {
4158 			start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
4159 			end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
4160 		}
4161 	}
4162 
4163 	pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
4164 
4165 	rcu_read_lock();
4166 	arch_enter_lazy_mmu_mode();
4167 
4168 	for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
4169 		unsigned long pfn;
4170 
4171 		pfn = get_pte_pfn(pte[i], pvmw->vma, addr);
4172 		if (pfn == -1)
4173 			continue;
4174 
4175 		if (!pte_young(pte[i]))
4176 			continue;
4177 
4178 		page = get_pfn_page(pfn, memcg, pgdat, can_swap);
4179 		if (!page)
4180 			continue;
4181 
4182 		if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
4183 			VM_WARN_ON_ONCE(true);
4184 
4185 		young++;
4186 
4187 		if (pte_dirty(pte[i]) && !PageDirty(page) &&
4188 		    !(PageAnon(page) && PageSwapBacked(page) &&
4189 		      !PageSwapCache(page)))
4190 			set_page_dirty(page);
4191 
4192 		old_gen = page_lru_gen(page);
4193 		if (old_gen < 0)
4194 			SetPageReferenced(page);
4195 		else if (old_gen != new_gen)
4196 			__set_bit(i, bitmap);
4197 	}
4198 
4199 	arch_leave_lazy_mmu_mode();
4200 	rcu_read_unlock();
4201 
4202 	/* feedback from rmap walkers to page table walkers */
4203 	if (suitable_to_scan(i, young))
4204 		update_bloom_filter(lruvec, max_seq, pvmw->pmd);
4205 
4206 	if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
4207 		for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
4208 			page = pte_page(pte[i]);
4209 			activate_page(page);
4210 		}
4211 		return;
4212 	}
4213 
4214 	/* page_update_gen() requires stable page_memcg() */
4215 	if (!mem_cgroup_trylock_pages(memcg))
4216 		return;
4217 
4218 	if (!walk) {
4219 		spin_lock_irq(&pgdat->lru_lock);
4220 		new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
4221 	}
4222 
4223 	for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
4224 		page = compound_head(pte_page(pte[i]));
4225 		if (page_memcg_rcu(page) != memcg)
4226 			continue;
4227 
4228 		old_gen = page_update_gen(page, new_gen);
4229 		if (old_gen < 0 || old_gen == new_gen)
4230 			continue;
4231 
4232 		if (walk)
4233 			update_batch_size(walk, page, old_gen, new_gen);
4234 		else
4235 			lru_gen_update_size(lruvec, page, old_gen, new_gen);
4236 	}
4237 
4238 	if (!walk)
4239 		spin_unlock_irq(&pgdat->lru_lock);
4240 
4241 	mem_cgroup_unlock_pages();
4242 }
4243 
4244 /******************************************************************************
4245  *                          the eviction
4246  ******************************************************************************/
4247 
sort_page(struct lruvec * lruvec,struct page * page,struct scan_control * sc,int tier_idx)4248 static bool sort_page(struct lruvec *lruvec, struct page *page, struct scan_control *sc,
4249 		       int tier_idx)
4250 {
4251 	bool success;
4252 	int gen = page_lru_gen(page);
4253 	int type = page_is_file_lru(page);
4254 	int zone = page_zonenum(page);
4255 	int delta = thp_nr_pages(page);
4256 	int refs = page_lru_refs(page);
4257 	int tier = lru_tier_from_refs(refs);
4258 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4259 
4260 	VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
4261 
4262 	/* unevictable */
4263 	if (!page_evictable(page)) {
4264 		success = lru_gen_del_page(lruvec, page, true);
4265 		VM_WARN_ON_ONCE_PAGE(!success, page);
4266 		SetPageUnevictable(page);
4267 		add_page_to_lru_list(page, lruvec);
4268 		__count_vm_events(UNEVICTABLE_PGCULLED, delta);
4269 		return true;
4270 	}
4271 
4272 	/* dirty lazyfree */
4273 	if (type == LRU_GEN_FILE && PageAnon(page) && PageDirty(page)) {
4274 		success = lru_gen_del_page(lruvec, page, true);
4275 		VM_WARN_ON_ONCE_PAGE(!success, page);
4276 		SetPageSwapBacked(page);
4277 		add_page_to_lru_list_tail(page, lruvec);
4278 		return true;
4279 	}
4280 
4281 	/* promoted */
4282 	if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
4283 		list_move(&page->lru, &lrugen->lists[gen][type][zone]);
4284 		return true;
4285 	}
4286 
4287 	/* protected */
4288 	if (tier > tier_idx) {
4289 		int hist = lru_hist_from_seq(lrugen->min_seq[type]);
4290 
4291 		gen = page_inc_gen(lruvec, page, false);
4292 		list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
4293 
4294 		WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
4295 			   lrugen->protected[hist][type][tier - 1] + delta);
4296 		return true;
4297 	}
4298 
4299 	/* ineligible */
4300 	if (zone > sc->reclaim_idx || skip_cma(page, sc)) {
4301 		gen = page_inc_gen(lruvec, page, false);
4302 		list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
4303 		return true;
4304 	}
4305 
4306 	/* waiting for writeback */
4307 	if (PageLocked(page) || PageWriteback(page) ||
4308 	    (type == LRU_GEN_FILE && PageDirty(page))) {
4309 		gen = page_inc_gen(lruvec, page, true);
4310 		list_move(&page->lru, &lrugen->lists[gen][type][zone]);
4311 		return true;
4312 	}
4313 
4314 	return false;
4315 }
4316 
isolate_page(struct lruvec * lruvec,struct page * page,struct scan_control * sc)4317 static bool isolate_page(struct lruvec *lruvec, struct page *page, struct scan_control *sc)
4318 {
4319 	bool success;
4320 
4321 	/* unmapping inhibited */
4322 	if (!sc->may_unmap && page_mapped(page))
4323 		return false;
4324 
4325 	/* swapping inhibited */
4326 	if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
4327 	    (PageDirty(page) ||
4328 	     (PageAnon(page) && !PageSwapCache(page))))
4329 		return false;
4330 
4331 	/* raced with release_pages() */
4332 	if (!get_page_unless_zero(page))
4333 		return false;
4334 
4335 	ClearPageLRU(page);
4336 
4337 	/* see the comment on MAX_NR_TIERS */
4338 	if (!PageReferenced(page))
4339 		set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
4340 
4341 	/* for shrink_page_list() */
4342 	ClearPageReclaim(page);
4343 	ClearPageReferenced(page);
4344 
4345 	success = lru_gen_del_page(lruvec, page, true);
4346 	VM_WARN_ON_ONCE_PAGE(!success, page);
4347 
4348 	return true;
4349 }
4350 
scan_pages(struct lruvec * lruvec,struct scan_control * sc,int type,int tier,struct list_head * list)4351 static int scan_pages(struct lruvec *lruvec, struct scan_control *sc,
4352 		      int type, int tier, struct list_head *list)
4353 {
4354 	int i;
4355 	int gen;
4356 	enum vm_event_item item;
4357 	int sorted = 0;
4358 	int scanned = 0;
4359 	int isolated = 0;
4360 	int remaining = MAX_LRU_BATCH;
4361 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4362 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4363 
4364 	VM_WARN_ON_ONCE(!list_empty(list));
4365 
4366 	if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
4367 		return 0;
4368 
4369 	gen = lru_gen_from_seq(lrugen->min_seq[type]);
4370 
4371 	for (i = MAX_NR_ZONES; i > 0; i--) {
4372 		LIST_HEAD(moved);
4373 		int skipped = 0;
4374 		int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
4375 		struct list_head *head = &lrugen->lists[gen][type][zone];
4376 
4377 		while (!list_empty(head)) {
4378 			struct page *page = lru_to_page(head);
4379 			int delta = thp_nr_pages(page);
4380 
4381 			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4382 			VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
4383 			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
4384 			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
4385 
4386 			scanned += delta;
4387 
4388 			if (sort_page(lruvec, page, sc, tier))
4389 				sorted += delta;
4390 			else if (isolate_page(lruvec, page, sc)) {
4391 				list_add(&page->lru, list);
4392 				isolated += delta;
4393 			} else {
4394 				list_move(&page->lru, &moved);
4395 				skipped += delta;
4396 			}
4397 
4398 			if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH)
4399 				break;
4400 		}
4401 
4402 		if (skipped) {
4403 			list_splice(&moved, head);
4404 			__count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
4405 		}
4406 
4407 		if (!remaining || isolated >= MIN_LRU_BATCH)
4408 			break;
4409 	}
4410 
4411 	item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
4412 	if (!cgroup_reclaim(sc)) {
4413 		__count_vm_events(item, isolated);
4414 		__count_vm_events(PGREFILL, sorted);
4415 	}
4416 	__count_memcg_events(memcg, item, isolated);
4417 	__count_memcg_events(memcg, PGREFILL, sorted);
4418 	__count_vm_events(PGSCAN_ANON + type, isolated);
4419 
4420 	/*
4421 	 * There might not be eligible pages due to reclaim_idx, may_unmap and
4422 	 * may_writepage. Check the remaining to prevent livelock if it's not
4423 	 * making progress.
4424 	 */
4425 	return isolated || !remaining ? scanned : 0;
4426 }
4427 
get_tier_idx(struct lruvec * lruvec,int type)4428 static int get_tier_idx(struct lruvec *lruvec, int type)
4429 {
4430 	int tier;
4431 	struct ctrl_pos sp, pv;
4432 
4433 	/*
4434 	 * To leave a margin for fluctuations, use a larger gain factor (1:2).
4435 	 * This value is chosen because any other tier would have at least twice
4436 	 * as many refaults as the first tier.
4437 	 */
4438 	read_ctrl_pos(lruvec, type, 0, 1, &sp);
4439 	for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4440 		read_ctrl_pos(lruvec, type, tier, 2, &pv);
4441 		if (!positive_ctrl_err(&sp, &pv))
4442 			break;
4443 	}
4444 
4445 	return tier - 1;
4446 }
4447 
get_type_to_scan(struct lruvec * lruvec,int swappiness,int * tier_idx)4448 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
4449 {
4450 	int type, tier;
4451 	struct ctrl_pos sp, pv;
4452 	int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
4453 
4454 	/*
4455 	 * Compare the first tier of anon with that of file to determine which
4456 	 * type to scan. Also need to compare other tiers of the selected type
4457 	 * with the first tier of the other type to determine the last tier (of
4458 	 * the selected type) to evict.
4459 	 */
4460 	read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
4461 	read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
4462 	type = positive_ctrl_err(&sp, &pv);
4463 
4464 	read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
4465 	for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4466 		read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
4467 		if (!positive_ctrl_err(&sp, &pv))
4468 			break;
4469 	}
4470 
4471 	*tier_idx = tier - 1;
4472 
4473 	return type;
4474 }
4475 
isolate_pages(struct lruvec * lruvec,struct scan_control * sc,int swappiness,int * type_scanned,struct list_head * list)4476 static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4477 			 int *type_scanned, struct list_head *list)
4478 {
4479 	int i;
4480 	int type;
4481 	int scanned;
4482 	int tier = -1;
4483 	DEFINE_MIN_SEQ(lruvec);
4484 
4485 	/*
4486 	 * Try to make the obvious choice first. When anon and file are both
4487 	 * available from the same generation, interpret swappiness 1 as file
4488 	 * first and 200 as anon first.
4489 	 */
4490 	if (!swappiness)
4491 		type = LRU_GEN_FILE;
4492 	else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
4493 		type = LRU_GEN_ANON;
4494 	else if (swappiness == 1)
4495 		type = LRU_GEN_FILE;
4496 	else if (swappiness == 200)
4497 		type = LRU_GEN_ANON;
4498 	else
4499 		type = get_type_to_scan(lruvec, swappiness, &tier);
4500 
4501 	for (i = !swappiness; i < ANON_AND_FILE; i++) {
4502 		if (tier < 0)
4503 			tier = get_tier_idx(lruvec, type);
4504 
4505 		scanned = scan_pages(lruvec, sc, type, tier, list);
4506 		if (scanned)
4507 			break;
4508 
4509 		type = !type;
4510 		tier = -1;
4511 	}
4512 
4513 	*type_scanned = type;
4514 
4515 	return scanned;
4516 }
4517 
evict_pages(struct lruvec * lruvec,struct scan_control * sc,int swappiness,bool * need_swapping)4518 static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4519 		       bool *need_swapping)
4520 {
4521 	int type;
4522 	int scanned;
4523 	int reclaimed;
4524 	LIST_HEAD(list);
4525 	LIST_HEAD(clean);
4526 	struct page *page;
4527 	struct page *next;
4528 	enum vm_event_item item;
4529 	struct reclaim_stat stat;
4530 	struct lru_gen_mm_walk *walk;
4531 	bool skip_retry = false;
4532 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4533 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4534 
4535 	spin_lock_irq(&pgdat->lru_lock);
4536 
4537 	scanned = isolate_pages(lruvec, sc, swappiness, &type, &list);
4538 
4539 	scanned += try_to_inc_min_seq(lruvec, swappiness);
4540 
4541 	if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
4542 		scanned = 0;
4543 
4544 	spin_unlock_irq(&pgdat->lru_lock);
4545 
4546 	if (list_empty(&list))
4547 		return scanned;
4548 retry:
4549 	reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
4550 	sc->nr_reclaimed += reclaimed;
4551 
4552 	list_for_each_entry_safe_reverse(page, next, &list, lru) {
4553 		if (!page_evictable(page)) {
4554 			list_del(&page->lru);
4555 			putback_lru_page(page);
4556 			continue;
4557 		}
4558 
4559 		if (PageReclaim(page) &&
4560 		    (PageDirty(page) || PageWriteback(page))) {
4561 			/* restore LRU_REFS_FLAGS cleared by isolate_page() */
4562 			if (PageWorkingset(page))
4563 				SetPageReferenced(page);
4564 			continue;
4565 		}
4566 
4567 		if (skip_retry || PageActive(page) || PageReferenced(page) ||
4568 		    page_mapped(page) || PageLocked(page) ||
4569 		    PageDirty(page) || PageWriteback(page)) {
4570 			/* don't add rejected pages to the oldest generation */
4571 			set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS,
4572 				      BIT(PG_active));
4573 			continue;
4574 		}
4575 
4576 		/* retry pages that may have missed rotate_reclaimable_page() */
4577 		list_move(&page->lru, &clean);
4578 		sc->nr_scanned -= thp_nr_pages(page);
4579 	}
4580 
4581 	spin_lock_irq(&pgdat->lru_lock);
4582 
4583 	move_pages_to_lru(lruvec, &list);
4584 
4585 	walk = current->reclaim_state->mm_walk;
4586 	if (walk && walk->batched)
4587 		reset_batch_size(lruvec, walk);
4588 
4589 	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
4590 	if (!cgroup_reclaim(sc))
4591 		__count_vm_events(item, reclaimed);
4592 	__count_memcg_events(memcg, item, reclaimed);
4593 	__count_vm_events(PGSTEAL_ANON + type, reclaimed);
4594 
4595 	spin_unlock_irq(&pgdat->lru_lock);
4596 
4597 	mem_cgroup_uncharge_list(&list);
4598 	free_unref_page_list(&list);
4599 
4600 	INIT_LIST_HEAD(&list);
4601 	list_splice_init(&clean, &list);
4602 
4603 	if (!list_empty(&list)) {
4604 		skip_retry = true;
4605 		goto retry;
4606 	}
4607 
4608 	if (need_swapping && type == LRU_GEN_ANON)
4609 		*need_swapping = true;
4610 
4611 	return scanned;
4612 }
4613 
4614 /*
4615  * For future optimizations:
4616  * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
4617  *    reclaim.
4618  */
get_nr_to_scan(struct lruvec * lruvec,struct scan_control * sc,bool can_swap,bool * need_aging)4619 static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
4620 				    bool can_swap, bool *need_aging)
4621 {
4622 	unsigned long nr_to_scan;
4623 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4624 	DEFINE_MAX_SEQ(lruvec);
4625 	DEFINE_MIN_SEQ(lruvec);
4626 
4627 	if (mem_cgroup_below_min(memcg) ||
4628 	    (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
4629 		return 0;
4630 
4631 	*need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan);
4632 	if (!*need_aging)
4633 		return nr_to_scan;
4634 
4635 	/* skip the aging path at the default priority */
4636 	if (sc->priority == DEF_PRIORITY)
4637 		goto done;
4638 
4639 	/* leave the work to lru_gen_age_node() */
4640 	if (current_is_kswapd())
4641 		return 0;
4642 
4643 	if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
4644 		return nr_to_scan;
4645 done:
4646 	return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
4647 }
4648 
should_abort_scan(struct lruvec * lruvec,unsigned long seq,struct scan_control * sc,bool need_swapping)4649 static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
4650 			      struct scan_control *sc, bool need_swapping)
4651 {
4652 	int i;
4653 	DEFINE_MAX_SEQ(lruvec);
4654 
4655 	if (!current_is_kswapd()) {
4656 		/* age each memcg at most once to ensure fairness */
4657 		if (max_seq - seq > 1)
4658 			return true;
4659 
4660 		/* over-swapping can increase allocation latency */
4661 		if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
4662 			return true;
4663 
4664 		/* give this thread a chance to exit and free its memory */
4665 		if (fatal_signal_pending(current)) {
4666 			sc->nr_reclaimed += MIN_LRU_BATCH;
4667 			return true;
4668 		}
4669 
4670 		if (cgroup_reclaim(sc))
4671 			return false;
4672 	} else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
4673 		return false;
4674 
4675 	/* keep scanning at low priorities to ensure fairness */
4676 	if (sc->priority > DEF_PRIORITY - 2)
4677 		return false;
4678 
4679 	/*
4680 	 * A minimum amount of work was done under global memory pressure. For
4681 	 * kswapd, it may be overshooting. For direct reclaim, the allocation
4682 	 * may succeed if all suitable zones are somewhat safe. In either case,
4683 	 * it's better to stop now, and restart later if necessary.
4684 	 */
4685 	for (i = 0; i <= sc->reclaim_idx; i++) {
4686 		unsigned long wmark;
4687 		struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
4688 
4689 		if (!managed_zone(zone))
4690 			continue;
4691 
4692 		wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
4693 		if (wmark > zone_page_state(zone, NR_FREE_PAGES))
4694 			return false;
4695 	}
4696 
4697 	sc->nr_reclaimed += MIN_LRU_BATCH;
4698 
4699 	return true;
4700 }
4701 
lru_gen_shrink_lruvec(struct lruvec * lruvec,struct scan_control * sc)4702 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
4703 {
4704 	struct blk_plug plug;
4705 	bool need_aging = false;
4706 	bool need_swapping = false;
4707 	unsigned long scanned = 0;
4708 	unsigned long reclaimed = sc->nr_reclaimed;
4709 	DEFINE_MAX_SEQ(lruvec);
4710 
4711 	lru_add_drain();
4712 
4713 	blk_start_plug(&plug);
4714 
4715 	set_mm_walk(lruvec_pgdat(lruvec));
4716 
4717 	while (true) {
4718 		int delta;
4719 		int swappiness;
4720 		unsigned long nr_to_scan;
4721 
4722 		if (sc->may_swap)
4723 			swappiness = get_swappiness(lruvec, sc);
4724 		else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc))
4725 			swappiness = 1;
4726 		else
4727 			swappiness = 0;
4728 
4729 		nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging);
4730 		if (!nr_to_scan)
4731 			goto done;
4732 
4733 		delta = evict_pages(lruvec, sc, swappiness, &need_swapping);
4734 		if (!delta)
4735 			goto done;
4736 
4737 		scanned += delta;
4738 		if (scanned >= nr_to_scan)
4739 			break;
4740 
4741 		if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
4742 			break;
4743 
4744 		cond_resched();
4745 	}
4746 
4747 	/* see the comment in lru_gen_age_node() */
4748 	if (sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH && !need_aging)
4749 		sc->memcgs_need_aging = false;
4750 done:
4751 	clear_mm_walk();
4752 
4753 	blk_finish_plug(&plug);
4754 }
4755 
4756 /******************************************************************************
4757  *                          state change
4758  ******************************************************************************/
4759 
state_is_valid(struct lruvec * lruvec)4760 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
4761 {
4762 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4763 
4764 	if (lrugen->enabled) {
4765 		enum lru_list lru;
4766 
4767 		for_each_evictable_lru(lru) {
4768 			if (!list_empty(&lruvec->lists[lru]))
4769 				return false;
4770 		}
4771 	} else {
4772 		int gen, type, zone;
4773 
4774 		for_each_gen_type_zone(gen, type, zone) {
4775 			if (!list_empty(&lrugen->lists[gen][type][zone]))
4776 				return false;
4777 		}
4778 	}
4779 
4780 	return true;
4781 }
4782 
fill_evictable(struct lruvec * lruvec)4783 static bool fill_evictable(struct lruvec *lruvec)
4784 {
4785 	enum lru_list lru;
4786 	int remaining = MAX_LRU_BATCH;
4787 
4788 	for_each_evictable_lru(lru) {
4789 		int type = is_file_lru(lru);
4790 		bool active = is_active_lru(lru);
4791 		struct list_head *head = &lruvec->lists[lru];
4792 
4793 		while (!list_empty(head)) {
4794 			bool success;
4795 			struct page *page = lru_to_page(head);
4796 
4797 			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4798 			VM_WARN_ON_ONCE_PAGE(PageActive(page) != active, page);
4799 			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
4800 			VM_WARN_ON_ONCE_PAGE(page_lru_gen(page) != -1, page);
4801 
4802 			del_page_from_lru_list(page, lruvec);
4803 			success = lru_gen_add_page(lruvec, page, false);
4804 			VM_WARN_ON_ONCE(!success);
4805 
4806 			if (!--remaining)
4807 				return false;
4808 		}
4809 	}
4810 
4811 	return true;
4812 }
4813 
drain_evictable(struct lruvec * lruvec)4814 static bool drain_evictable(struct lruvec *lruvec)
4815 {
4816 	int gen, type, zone;
4817 	int remaining = MAX_LRU_BATCH;
4818 
4819 	for_each_gen_type_zone(gen, type, zone) {
4820 		struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
4821 
4822 		while (!list_empty(head)) {
4823 			bool success;
4824 			struct page *page = lru_to_page(head);
4825 
4826 			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4827 			VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
4828 			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
4829 			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
4830 
4831 			success = lru_gen_del_page(lruvec, page, false);
4832 			VM_WARN_ON_ONCE(!success);
4833 			add_page_to_lru_list(page, lruvec);
4834 
4835 			if (!--remaining)
4836 				return false;
4837 		}
4838 	}
4839 
4840 	return true;
4841 }
4842 
lru_gen_change_state(bool enabled)4843 static void lru_gen_change_state(bool enabled)
4844 {
4845 	static DEFINE_MUTEX(state_mutex);
4846 
4847 	struct mem_cgroup *memcg;
4848 
4849 	cgroup_lock();
4850 	cpus_read_lock();
4851 	get_online_mems();
4852 	mutex_lock(&state_mutex);
4853 
4854 	if (enabled == lru_gen_enabled())
4855 		goto unlock;
4856 
4857 	if (enabled)
4858 		static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
4859 	else
4860 		static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
4861 
4862 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
4863 	do {
4864 		int nid;
4865 
4866 		for_each_node(nid) {
4867 			struct pglist_data *pgdat = NODE_DATA(nid);
4868 			struct lruvec *lruvec = get_lruvec(memcg, nid);
4869 
4870 			if (!lruvec)
4871 				continue;
4872 
4873 			spin_lock_irq(&pgdat->lru_lock);
4874 
4875 			VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
4876 			VM_WARN_ON_ONCE(!state_is_valid(lruvec));
4877 
4878 			lruvec->lrugen.enabled = enabled;
4879 
4880 			while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
4881 				spin_unlock_irq(&pgdat->lru_lock);
4882 				cond_resched();
4883 				spin_lock_irq(&pgdat->lru_lock);
4884 			}
4885 
4886 			spin_unlock_irq(&pgdat->lru_lock);
4887 		}
4888 
4889 		cond_resched();
4890 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4891 unlock:
4892 	mutex_unlock(&state_mutex);
4893 	put_online_mems();
4894 	cpus_read_unlock();
4895 	cgroup_unlock();
4896 }
4897 
4898 /******************************************************************************
4899  *                          sysfs interface
4900  ******************************************************************************/
4901 
show_min_ttl(struct kobject * kobj,struct kobj_attribute * attr,char * buf)4902 static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
4903 {
4904 	return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
4905 }
4906 
4907 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
store_min_ttl(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t len)4908 static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr,
4909 			     const char *buf, size_t len)
4910 {
4911 	unsigned int msecs;
4912 
4913 	if (kstrtouint(buf, 0, &msecs))
4914 		return -EINVAL;
4915 
4916 	WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
4917 
4918 	return len;
4919 }
4920 
4921 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR(
4922 	min_ttl_ms, 0644, show_min_ttl, store_min_ttl
4923 );
4924 
show_enabled(struct kobject * kobj,struct kobj_attribute * attr,char * buf)4925 static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
4926 {
4927 	unsigned int caps = 0;
4928 
4929 	if (get_cap(LRU_GEN_CORE))
4930 		caps |= BIT(LRU_GEN_CORE);
4931 
4932 	if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
4933 		caps |= BIT(LRU_GEN_MM_WALK);
4934 
4935 	if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && get_cap(LRU_GEN_NONLEAF_YOUNG))
4936 		caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
4937 
4938 	return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps);
4939 }
4940 
4941 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
store_enabled(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t len)4942 static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr,
4943 			     const char *buf, size_t len)
4944 {
4945 	int i;
4946 	unsigned int caps;
4947 
4948 	if (tolower(*buf) == 'n')
4949 		caps = 0;
4950 	else if (tolower(*buf) == 'y')
4951 		caps = -1;
4952 	else if (kstrtouint(buf, 0, &caps))
4953 		return -EINVAL;
4954 
4955 	for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
4956 		bool enabled = caps & BIT(i);
4957 
4958 		if (i == LRU_GEN_CORE)
4959 			lru_gen_change_state(enabled);
4960 		else if (enabled)
4961 			static_branch_enable(&lru_gen_caps[i]);
4962 		else
4963 			static_branch_disable(&lru_gen_caps[i]);
4964 	}
4965 
4966 	return len;
4967 }
4968 
4969 static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
4970 	enabled, 0644, show_enabled, store_enabled
4971 );
4972 
4973 static struct attribute *lru_gen_attrs[] = {
4974 	&lru_gen_min_ttl_attr.attr,
4975 	&lru_gen_enabled_attr.attr,
4976 	NULL
4977 };
4978 
4979 static struct attribute_group lru_gen_attr_group = {
4980 	.name = "lru_gen",
4981 	.attrs = lru_gen_attrs,
4982 };
4983 
4984 /******************************************************************************
4985  *                          debugfs interface
4986  ******************************************************************************/
4987 
lru_gen_seq_start(struct seq_file * m,loff_t * pos)4988 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
4989 {
4990 	struct mem_cgroup *memcg;
4991 	loff_t nr_to_skip = *pos;
4992 
4993 	m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
4994 	if (!m->private)
4995 		return ERR_PTR(-ENOMEM);
4996 
4997 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
4998 	do {
4999 		int nid;
5000 
5001 		for_each_node_state(nid, N_MEMORY) {
5002 			if (!nr_to_skip--)
5003 				return get_lruvec(memcg, nid);
5004 		}
5005 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5006 
5007 	return NULL;
5008 }
5009 
lru_gen_seq_stop(struct seq_file * m,void * v)5010 static void lru_gen_seq_stop(struct seq_file *m, void *v)
5011 {
5012 	if (!IS_ERR_OR_NULL(v))
5013 		mem_cgroup_iter_break(NULL, lruvec_memcg(v));
5014 
5015 	kvfree(m->private);
5016 	m->private = NULL;
5017 }
5018 
lru_gen_seq_next(struct seq_file * m,void * v,loff_t * pos)5019 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
5020 {
5021 	int nid = lruvec_pgdat(v)->node_id;
5022 	struct mem_cgroup *memcg = lruvec_memcg(v);
5023 
5024 	++*pos;
5025 
5026 	nid = next_memory_node(nid);
5027 	if (nid == MAX_NUMNODES) {
5028 		memcg = mem_cgroup_iter(NULL, memcg, NULL);
5029 		if (!memcg)
5030 			return NULL;
5031 
5032 		nid = first_memory_node;
5033 	}
5034 
5035 	return get_lruvec(memcg, nid);
5036 }
5037 
lru_gen_seq_show_full(struct seq_file * m,struct lruvec * lruvec,unsigned long max_seq,unsigned long * min_seq,unsigned long seq)5038 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
5039 				  unsigned long max_seq, unsigned long *min_seq,
5040 				  unsigned long seq)
5041 {
5042 	int i;
5043 	int type, tier;
5044 	int hist = lru_hist_from_seq(seq);
5045 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
5046 
5047 	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
5048 		seq_printf(m, "            %10d", tier);
5049 		for (type = 0; type < ANON_AND_FILE; type++) {
5050 			const char *s = "   ";
5051 			unsigned long n[3] = {};
5052 
5053 			if (seq == max_seq) {
5054 				s = "RT ";
5055 				n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
5056 				n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
5057 			} else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
5058 				s = "rep";
5059 				n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
5060 				n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
5061 				if (tier)
5062 					n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
5063 			}
5064 
5065 			for (i = 0; i < 3; i++)
5066 				seq_printf(m, " %10lu%c", n[i], s[i]);
5067 		}
5068 		seq_putc(m, '\n');
5069 	}
5070 
5071 	seq_puts(m, "                      ");
5072 	for (i = 0; i < NR_MM_STATS; i++) {
5073 		const char *s = "      ";
5074 		unsigned long n = 0;
5075 
5076 		if (seq == max_seq && NR_HIST_GENS == 1) {
5077 			s = "LOYNFA";
5078 			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
5079 		} else if (seq != max_seq && NR_HIST_GENS > 1) {
5080 			s = "loynfa";
5081 			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
5082 		}
5083 
5084 		seq_printf(m, " %10lu%c", n, s[i]);
5085 	}
5086 	seq_putc(m, '\n');
5087 }
5088 
5089 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
lru_gen_seq_show(struct seq_file * m,void * v)5090 static int lru_gen_seq_show(struct seq_file *m, void *v)
5091 {
5092 	unsigned long seq;
5093 	bool full = !debugfs_real_fops(m->file)->write;
5094 	struct lruvec *lruvec = v;
5095 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
5096 	int nid = lruvec_pgdat(lruvec)->node_id;
5097 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5098 	DEFINE_MAX_SEQ(lruvec);
5099 	DEFINE_MIN_SEQ(lruvec);
5100 
5101 	if (nid == first_memory_node) {
5102 		const char *path = memcg ? m->private : "";
5103 
5104 #ifdef CONFIG_MEMCG
5105 		if (memcg)
5106 			cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
5107 #endif
5108 		seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
5109 	}
5110 
5111 	seq_printf(m, " node %5d\n", nid);
5112 
5113 	if (!full)
5114 		seq = min_seq[LRU_GEN_ANON];
5115 	else if (max_seq >= MAX_NR_GENS)
5116 		seq = max_seq - MAX_NR_GENS + 1;
5117 	else
5118 		seq = 0;
5119 
5120 	for (; seq <= max_seq; seq++) {
5121 		int type, zone;
5122 		int gen = lru_gen_from_seq(seq);
5123 		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
5124 
5125 		seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
5126 
5127 		for (type = 0; type < ANON_AND_FILE; type++) {
5128 			unsigned long size = 0;
5129 			char mark = full && seq < min_seq[type] ? 'x' : ' ';
5130 
5131 			for (zone = 0; zone < MAX_NR_ZONES; zone++)
5132 				size += max_t(long, READ_ONCE(lrugen->nr_pages[gen][type][zone]),
5133 						0);
5134 
5135 			seq_printf(m, " %10lu%c", size, mark);
5136 		}
5137 
5138 		seq_putc(m, '\n');
5139 
5140 		if (full)
5141 			lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
5142 	}
5143 
5144 	return 0;
5145 }
5146 
5147 static const struct seq_operations lru_gen_seq_ops = {
5148 	.start = lru_gen_seq_start,
5149 	.stop = lru_gen_seq_stop,
5150 	.next = lru_gen_seq_next,
5151 	.show = lru_gen_seq_show,
5152 };
5153 
run_aging(struct lruvec * lruvec,unsigned long seq,struct scan_control * sc,bool can_swap,bool full_scan)5154 static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5155 		     bool can_swap, bool full_scan)
5156 {
5157 	DEFINE_MAX_SEQ(lruvec);
5158 	DEFINE_MIN_SEQ(lruvec);
5159 
5160 	if (seq < max_seq)
5161 		return 0;
5162 
5163 	if (seq > max_seq)
5164 		return -EINVAL;
5165 
5166 	if (!full_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
5167 		return -ERANGE;
5168 
5169 	try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, full_scan);
5170 
5171 	return 0;
5172 }
5173 
run_eviction(struct lruvec * lruvec,unsigned long seq,struct scan_control * sc,int swappiness,unsigned long nr_to_reclaim)5174 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5175 			int swappiness, unsigned long nr_to_reclaim)
5176 {
5177 	DEFINE_MAX_SEQ(lruvec);
5178 
5179 	if (seq + MIN_NR_GENS > max_seq)
5180 		return -EINVAL;
5181 
5182 	sc->nr_reclaimed = 0;
5183 
5184 	while (!signal_pending(current)) {
5185 		DEFINE_MIN_SEQ(lruvec);
5186 
5187 		if (seq < min_seq[!swappiness])
5188 			return 0;
5189 
5190 		if (sc->nr_reclaimed >= nr_to_reclaim)
5191 			return 0;
5192 
5193 		if (!evict_pages(lruvec, sc, swappiness, NULL))
5194 			return 0;
5195 
5196 		cond_resched();
5197 	}
5198 
5199 	return -EINTR;
5200 }
5201 
run_cmd(char cmd,int memcg_id,int nid,unsigned long seq,struct scan_control * sc,int swappiness,unsigned long opt)5202 static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
5203 		   struct scan_control *sc, int swappiness, unsigned long opt)
5204 {
5205 	struct lruvec *lruvec;
5206 	int err = -EINVAL;
5207 	struct mem_cgroup *memcg = NULL;
5208 
5209 	if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
5210 		return -EINVAL;
5211 
5212 	if (!mem_cgroup_disabled()) {
5213 		rcu_read_lock();
5214 		memcg = mem_cgroup_from_id(memcg_id);
5215 #ifdef CONFIG_MEMCG
5216 		if (memcg && !css_tryget(&memcg->css))
5217 			memcg = NULL;
5218 #endif
5219 		rcu_read_unlock();
5220 
5221 		if (!memcg)
5222 			return -EINVAL;
5223 	}
5224 
5225 	if (memcg_id != mem_cgroup_id(memcg))
5226 		goto done;
5227 
5228 	lruvec = get_lruvec(memcg, nid);
5229 
5230 	if (swappiness < 0)
5231 		swappiness = get_swappiness(lruvec, sc);
5232 	else if (swappiness > 200)
5233 		goto done;
5234 
5235 	switch (cmd) {
5236 	case '+':
5237 		err = run_aging(lruvec, seq, sc, swappiness, opt);
5238 		break;
5239 	case '-':
5240 		err = run_eviction(lruvec, seq, sc, swappiness, opt);
5241 		break;
5242 	}
5243 done:
5244 	mem_cgroup_put(memcg);
5245 
5246 	return err;
5247 }
5248 
5249 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
lru_gen_seq_write(struct file * file,const char __user * src,size_t len,loff_t * pos)5250 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
5251 				 size_t len, loff_t *pos)
5252 {
5253 	void *buf;
5254 	char *cur, *next;
5255 	unsigned int flags;
5256 	struct blk_plug plug;
5257 	int err = -EINVAL;
5258 	struct scan_control sc = {
5259 		.may_writepage = true,
5260 		.may_unmap = true,
5261 		.may_swap = true,
5262 		.reclaim_idx = MAX_NR_ZONES - 1,
5263 		.gfp_mask = GFP_KERNEL,
5264 	};
5265 
5266 	buf = kvmalloc(len + 1, GFP_KERNEL);
5267 	if (!buf)
5268 		return -ENOMEM;
5269 
5270 	if (copy_from_user(buf, src, len)) {
5271 		kvfree(buf);
5272 		return -EFAULT;
5273 	}
5274 
5275 	set_task_reclaim_state(current, &sc.reclaim_state);
5276 	flags = memalloc_noreclaim_save();
5277 	blk_start_plug(&plug);
5278 	if (!set_mm_walk(NULL)) {
5279 		err = -ENOMEM;
5280 		goto done;
5281 	}
5282 
5283 	next = buf;
5284 	next[len] = '\0';
5285 
5286 	while ((cur = strsep(&next, ",;\n"))) {
5287 		int n;
5288 		int end;
5289 		char cmd;
5290 		unsigned int memcg_id;
5291 		unsigned int nid;
5292 		unsigned long seq;
5293 		unsigned int swappiness = -1;
5294 		unsigned long opt = -1;
5295 
5296 		cur = skip_spaces(cur);
5297 		if (!*cur)
5298 			continue;
5299 
5300 		n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
5301 			   &seq, &end, &swappiness, &end, &opt, &end);
5302 		if (n < 4 || cur[end]) {
5303 			err = -EINVAL;
5304 			break;
5305 		}
5306 
5307 		err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
5308 		if (err)
5309 			break;
5310 	}
5311 done:
5312 	clear_mm_walk();
5313 	blk_finish_plug(&plug);
5314 	memalloc_noreclaim_restore(flags);
5315 	set_task_reclaim_state(current, NULL);
5316 
5317 	kvfree(buf);
5318 
5319 	return err ? : len;
5320 }
5321 
lru_gen_seq_open(struct inode * inode,struct file * file)5322 static int lru_gen_seq_open(struct inode *inode, struct file *file)
5323 {
5324 	return seq_open(file, &lru_gen_seq_ops);
5325 }
5326 
5327 static const struct file_operations lru_gen_rw_fops = {
5328 	.open = lru_gen_seq_open,
5329 	.read = seq_read,
5330 	.write = lru_gen_seq_write,
5331 	.llseek = seq_lseek,
5332 	.release = seq_release,
5333 };
5334 
5335 static const struct file_operations lru_gen_ro_fops = {
5336 	.open = lru_gen_seq_open,
5337 	.read = seq_read,
5338 	.llseek = seq_lseek,
5339 	.release = seq_release,
5340 };
5341 
5342 /******************************************************************************
5343  *                          initialization
5344  ******************************************************************************/
5345 
lru_gen_init_lruvec(struct lruvec * lruvec)5346 void lru_gen_init_lruvec(struct lruvec *lruvec)
5347 {
5348 	int i;
5349 	int gen, type, zone;
5350 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
5351 
5352 	lrugen->max_seq = MIN_NR_GENS + 1;
5353 	lrugen->enabled = lru_gen_enabled();
5354 
5355 	for (i = 0; i <= MIN_NR_GENS + 1; i++)
5356 		lrugen->timestamps[i] = jiffies;
5357 
5358 	for_each_gen_type_zone(gen, type, zone)
5359 		INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
5360 
5361 	lruvec->mm_state.seq = MIN_NR_GENS;
5362 }
5363 
5364 #ifdef CONFIG_MEMCG
lru_gen_init_memcg(struct mem_cgroup * memcg)5365 void lru_gen_init_memcg(struct mem_cgroup *memcg)
5366 {
5367 	INIT_LIST_HEAD(&memcg->mm_list.fifo);
5368 	spin_lock_init(&memcg->mm_list.lock);
5369 }
5370 
lru_gen_exit_memcg(struct mem_cgroup * memcg)5371 void lru_gen_exit_memcg(struct mem_cgroup *memcg)
5372 {
5373 	int i;
5374 	int nid;
5375 
5376 	for_each_node(nid) {
5377 		struct lruvec *lruvec = get_lruvec(memcg, nid);
5378 
5379 		VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
5380 					   sizeof(lruvec->lrugen.nr_pages)));
5381 
5382 		for (i = 0; i < NR_BLOOM_FILTERS; i++) {
5383 			bitmap_free(lruvec->mm_state.filters[i]);
5384 			lruvec->mm_state.filters[i] = NULL;
5385 		}
5386 	}
5387 }
5388 #endif
5389 
init_lru_gen(void)5390 static int __init init_lru_gen(void)
5391 {
5392 	BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
5393 	BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
5394 
5395 	if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
5396 		pr_err("lru_gen: failed to create sysfs group\n");
5397 
5398 	debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
5399 	debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
5400 
5401 	return 0;
5402 };
5403 late_initcall(init_lru_gen);
5404 
5405 #else /* !CONFIG_LRU_GEN */
5406 
lru_gen_age_node(struct pglist_data * pgdat,struct scan_control * sc)5407 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
5408 {
5409 }
5410 
lru_gen_shrink_lruvec(struct lruvec * lruvec,struct scan_control * sc)5411 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5412 {
5413 }
5414 
5415 #endif /* CONFIG_LRU_GEN */
5416 
shrink_lruvec(struct lruvec * lruvec,struct scan_control * sc)5417 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5418 {
5419 	unsigned long nr[NR_LRU_LISTS];
5420 	unsigned long targets[NR_LRU_LISTS];
5421 	unsigned long nr_to_scan;
5422 	enum lru_list lru;
5423 	unsigned long nr_reclaimed = 0;
5424 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
5425 	bool proportional_reclaim;
5426 	struct blk_plug plug;
5427 	bool do_plug = true;
5428 
5429 	if (lru_gen_enabled()) {
5430 		lru_gen_shrink_lruvec(lruvec, sc);
5431 		return;
5432 	}
5433 
5434 	get_scan_count(lruvec, sc, nr);
5435 
5436 	/* Record the original scan target for proportional adjustments later */
5437 	memcpy(targets, nr, sizeof(nr));
5438 
5439 	/*
5440 	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
5441 	 * event that can occur when there is little memory pressure e.g.
5442 	 * multiple streaming readers/writers. Hence, we do not abort scanning
5443 	 * when the requested number of pages are reclaimed when scanning at
5444 	 * DEF_PRIORITY on the assumption that the fact we are direct
5445 	 * reclaiming implies that kswapd is not keeping up and it is best to
5446 	 * do a batch of work at once. For memcg reclaim one check is made to
5447 	 * abort proportional reclaim if either the file or anon lru has already
5448 	 * dropped to zero at the first pass.
5449 	 */
5450 	proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
5451 				sc->priority == DEF_PRIORITY);
5452 
5453 	trace_android_vh_shrink_lruvec_blk_plug(&do_plug);
5454 	if (do_plug)
5455 		blk_start_plug(&plug);
5456 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
5457 					nr[LRU_INACTIVE_FILE]) {
5458 		unsigned long nr_anon, nr_file, percentage;
5459 		unsigned long nr_scanned;
5460 
5461 		for_each_evictable_lru(lru) {
5462 			if (nr[lru]) {
5463 				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
5464 				nr[lru] -= nr_to_scan;
5465 
5466 				nr_reclaimed += shrink_list(lru, nr_to_scan,
5467 							    lruvec, sc);
5468 			}
5469 		}
5470 
5471 		cond_resched();
5472 
5473 		if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
5474 			continue;
5475 
5476 		/*
5477 		 * For kswapd and memcg, reclaim at least the number of pages
5478 		 * requested. Ensure that the anon and file LRUs are scanned
5479 		 * proportionally what was requested by get_scan_count(). We
5480 		 * stop reclaiming one LRU and reduce the amount scanning
5481 		 * proportional to the original scan target.
5482 		 */
5483 		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
5484 		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
5485 
5486 		/*
5487 		 * It's just vindictive to attack the larger once the smaller
5488 		 * has gone to zero.  And given the way we stop scanning the
5489 		 * smaller below, this makes sure that we only make one nudge
5490 		 * towards proportionality once we've got nr_to_reclaim.
5491 		 */
5492 		if (!nr_file || !nr_anon)
5493 			break;
5494 
5495 		if (nr_file > nr_anon) {
5496 			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
5497 						targets[LRU_ACTIVE_ANON] + 1;
5498 			lru = LRU_BASE;
5499 			percentage = nr_anon * 100 / scan_target;
5500 		} else {
5501 			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
5502 						targets[LRU_ACTIVE_FILE] + 1;
5503 			lru = LRU_FILE;
5504 			percentage = nr_file * 100 / scan_target;
5505 		}
5506 
5507 		/* Stop scanning the smaller of the LRU */
5508 		nr[lru] = 0;
5509 		nr[lru + LRU_ACTIVE] = 0;
5510 
5511 		/*
5512 		 * Recalculate the other LRU scan count based on its original
5513 		 * scan target and the percentage scanning already complete
5514 		 */
5515 		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
5516 		nr_scanned = targets[lru] - nr[lru];
5517 		nr[lru] = targets[lru] * (100 - percentage) / 100;
5518 		nr[lru] -= min(nr[lru], nr_scanned);
5519 
5520 		lru += LRU_ACTIVE;
5521 		nr_scanned = targets[lru] - nr[lru];
5522 		nr[lru] = targets[lru] * (100 - percentage) / 100;
5523 		nr[lru] -= min(nr[lru], nr_scanned);
5524 	}
5525 	if (do_plug)
5526 		blk_finish_plug(&plug);
5527 	sc->nr_reclaimed += nr_reclaimed;
5528 
5529 	/*
5530 	 * Even if we did not try to evict anon pages at all, we want to
5531 	 * rebalance the anon lru active/inactive ratio.
5532 	 */
5533 	if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
5534 		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
5535 				   sc, LRU_ACTIVE_ANON);
5536 }
5537 
5538 /* Use reclaim/compaction for costly allocs or under memory pressure */
in_reclaim_compaction(struct scan_control * sc)5539 static bool in_reclaim_compaction(struct scan_control *sc)
5540 {
5541 	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
5542 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
5543 			 sc->priority < DEF_PRIORITY - 2))
5544 		return true;
5545 
5546 	return false;
5547 }
5548 
5549 /*
5550  * Reclaim/compaction is used for high-order allocation requests. It reclaims
5551  * order-0 pages before compacting the zone. should_continue_reclaim() returns
5552  * true if more pages should be reclaimed such that when the page allocator
5553  * calls try_to_compact_pages() that it will have enough free pages to succeed.
5554  * It will give up earlier than that if there is difficulty reclaiming pages.
5555  */
should_continue_reclaim(struct pglist_data * pgdat,unsigned long nr_reclaimed,struct scan_control * sc)5556 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
5557 					unsigned long nr_reclaimed,
5558 					struct scan_control *sc)
5559 {
5560 	unsigned long pages_for_compaction;
5561 	unsigned long inactive_lru_pages;
5562 	int z;
5563 
5564 	/* If not in reclaim/compaction mode, stop */
5565 	if (!in_reclaim_compaction(sc))
5566 		return false;
5567 
5568 	/*
5569 	 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
5570 	 * number of pages that were scanned. This will return to the caller
5571 	 * with the risk reclaim/compaction and the resulting allocation attempt
5572 	 * fails. In the past we have tried harder for __GFP_RETRY_MAYFAIL
5573 	 * allocations through requiring that the full LRU list has been scanned
5574 	 * first, by assuming that zero delta of sc->nr_scanned means full LRU
5575 	 * scan, but that approximation was wrong, and there were corner cases
5576 	 * where always a non-zero amount of pages were scanned.
5577 	 */
5578 	if (!nr_reclaimed)
5579 		return false;
5580 
5581 	/* If compaction would go ahead or the allocation would succeed, stop */
5582 	for (z = 0; z <= sc->reclaim_idx; z++) {
5583 		struct zone *zone = &pgdat->node_zones[z];
5584 		if (!managed_zone(zone))
5585 			continue;
5586 
5587 		switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
5588 		case COMPACT_SUCCESS:
5589 		case COMPACT_CONTINUE:
5590 			return false;
5591 		default:
5592 			/* check next zone */
5593 			;
5594 		}
5595 	}
5596 
5597 	/*
5598 	 * If we have not reclaimed enough pages for compaction and the
5599 	 * inactive lists are large enough, continue reclaiming
5600 	 */
5601 	pages_for_compaction = compact_gap(sc->order);
5602 	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
5603 	if (get_nr_swap_pages() > 0)
5604 		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
5605 
5606 	return inactive_lru_pages > pages_for_compaction;
5607 }
5608 
shrink_node_memcgs(pg_data_t * pgdat,struct scan_control * sc)5609 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
5610 {
5611 	struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
5612 	struct mem_cgroup *memcg;
5613 
5614 	memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
5615 	do {
5616 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
5617 		unsigned long reclaimed;
5618 		unsigned long scanned;
5619 
5620 		/*
5621 		 * This loop can become CPU-bound when target memcgs
5622 		 * aren't eligible for reclaim - either because they
5623 		 * don't have any reclaimable pages, or because their
5624 		 * memory is explicitly protected. Avoid soft lockups.
5625 		 */
5626 		cond_resched();
5627 
5628 		mem_cgroup_calculate_protection(target_memcg, memcg);
5629 
5630 		if (mem_cgroup_below_min(memcg)) {
5631 			/*
5632 			 * Hard protection.
5633 			 * If there is no reclaimable memory, OOM.
5634 			 */
5635 			continue;
5636 		} else if (mem_cgroup_below_low(memcg)) {
5637 			/*
5638 			 * Soft protection.
5639 			 * Respect the protection only as long as
5640 			 * there is an unprotected supply
5641 			 * of reclaimable memory from other cgroups.
5642 			 */
5643 			if (!sc->memcg_low_reclaim) {
5644 				sc->memcg_low_skipped = 1;
5645 				continue;
5646 			}
5647 			memcg_memory_event(memcg, MEMCG_LOW);
5648 		}
5649 
5650 		reclaimed = sc->nr_reclaimed;
5651 		scanned = sc->nr_scanned;
5652 
5653 		shrink_lruvec(lruvec, sc);
5654 
5655 		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
5656 			    sc->priority);
5657 
5658 		/* Record the group's reclaim efficiency */
5659 		vmpressure(sc->gfp_mask, memcg, false,
5660 			   sc->nr_scanned - scanned,
5661 			   sc->nr_reclaimed - reclaimed);
5662 
5663 	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
5664 }
5665 
shrink_node(pg_data_t * pgdat,struct scan_control * sc)5666 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
5667 {
5668 	struct reclaim_state *reclaim_state = current->reclaim_state;
5669 	unsigned long nr_reclaimed, nr_scanned;
5670 	struct lruvec *target_lruvec;
5671 	bool reclaimable = false;
5672 
5673 	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
5674 
5675 again:
5676 	memset(&sc->nr, 0, sizeof(sc->nr));
5677 
5678 	nr_reclaimed = sc->nr_reclaimed;
5679 	nr_scanned = sc->nr_scanned;
5680 
5681 	prepare_scan_count(pgdat, sc);
5682 
5683 	shrink_node_memcgs(pgdat, sc);
5684 
5685 	if (reclaim_state) {
5686 		sc->nr_reclaimed += reclaim_state->reclaimed_slab;
5687 		reclaim_state->reclaimed_slab = 0;
5688 	}
5689 
5690 	/* Record the subtree's reclaim efficiency */
5691 	vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
5692 		   sc->nr_scanned - nr_scanned,
5693 		   sc->nr_reclaimed - nr_reclaimed);
5694 
5695 	if (sc->nr_reclaimed - nr_reclaimed)
5696 		reclaimable = true;
5697 
5698 	if (current_is_kswapd()) {
5699 		/*
5700 		 * If reclaim is isolating dirty pages under writeback,
5701 		 * it implies that the long-lived page allocation rate
5702 		 * is exceeding the page laundering rate. Either the
5703 		 * global limits are not being effective at throttling
5704 		 * processes due to the page distribution throughout
5705 		 * zones or there is heavy usage of a slow backing
5706 		 * device. The only option is to throttle from reclaim
5707 		 * context which is not ideal as there is no guarantee
5708 		 * the dirtying process is throttled in the same way
5709 		 * balance_dirty_pages() manages.
5710 		 *
5711 		 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
5712 		 * count the number of pages under pages flagged for
5713 		 * immediate reclaim and stall if any are encountered
5714 		 * in the nr_immediate check below.
5715 		 */
5716 		if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
5717 			set_bit(PGDAT_WRITEBACK, &pgdat->flags);
5718 
5719 		/* Allow kswapd to start writing pages during reclaim.*/
5720 		if (sc->nr.unqueued_dirty == sc->nr.file_taken)
5721 			set_bit(PGDAT_DIRTY, &pgdat->flags);
5722 
5723 		/*
5724 		 * If kswapd scans pages marked for immediate
5725 		 * reclaim and under writeback (nr_immediate), it
5726 		 * implies that pages are cycling through the LRU
5727 		 * faster than they are written so also forcibly stall.
5728 		 */
5729 		if (sc->nr.immediate)
5730 			congestion_wait(BLK_RW_ASYNC, HZ/10);
5731 	}
5732 
5733 	/*
5734 	 * Tag a node/memcg as congested if all the dirty pages
5735 	 * scanned were backed by a congested BDI and
5736 	 * wait_iff_congested will stall.
5737 	 *
5738 	 * Legacy memcg will stall in page writeback so avoid forcibly
5739 	 * stalling in wait_iff_congested().
5740 	 */
5741 	if ((current_is_kswapd() ||
5742 	     (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
5743 	    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
5744 		set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
5745 
5746 	/*
5747 	 * Stall direct reclaim for IO completions if underlying BDIs
5748 	 * and node is congested. Allow kswapd to continue until it
5749 	 * starts encountering unqueued dirty pages or cycling through
5750 	 * the LRU too quickly.
5751 	 */
5752 	if (!current_is_kswapd() && current_may_throttle() &&
5753 	    !sc->hibernation_mode &&
5754 	    test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
5755 		wait_iff_congested(BLK_RW_ASYNC, HZ/10);
5756 
5757 	if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
5758 				    sc))
5759 		goto again;
5760 
5761 	/*
5762 	 * Kswapd gives up on balancing particular nodes after too
5763 	 * many failures to reclaim anything from them and goes to
5764 	 * sleep. On reclaim progress, reset the failure counter. A
5765 	 * successful direct reclaim run will revive a dormant kswapd.
5766 	 */
5767 	if (reclaimable)
5768 		pgdat->kswapd_failures = 0;
5769 }
5770 
5771 /*
5772  * Returns true if compaction should go ahead for a costly-order request, or
5773  * the allocation would already succeed without compaction. Return false if we
5774  * should reclaim first.
5775  */
compaction_ready(struct zone * zone,struct scan_control * sc)5776 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
5777 {
5778 	unsigned long watermark;
5779 	enum compact_result suitable;
5780 
5781 	suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
5782 	if (suitable == COMPACT_SUCCESS)
5783 		/* Allocation should succeed already. Don't reclaim. */
5784 		return true;
5785 	if (suitable == COMPACT_SKIPPED)
5786 		/* Compaction cannot yet proceed. Do reclaim. */
5787 		return false;
5788 
5789 	/*
5790 	 * Compaction is already possible, but it takes time to run and there
5791 	 * are potentially other callers using the pages just freed. So proceed
5792 	 * with reclaim to make a buffer of free pages available to give
5793 	 * compaction a reasonable chance of completing and allocating the page.
5794 	 * Note that we won't actually reclaim the whole buffer in one attempt
5795 	 * as the target watermark in should_continue_reclaim() is lower. But if
5796 	 * we are already above the high+gap watermark, don't reclaim at all.
5797 	 */
5798 	watermark = high_wmark_pages(zone) + compact_gap(sc->order);
5799 
5800 	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
5801 }
5802 
5803 /*
5804  * This is the direct reclaim path, for page-allocating processes.  We only
5805  * try to reclaim pages from zones which will satisfy the caller's allocation
5806  * request.
5807  *
5808  * If a zone is deemed to be full of pinned pages then just give it a light
5809  * scan then give up on it.
5810  */
shrink_zones(struct zonelist * zonelist,struct scan_control * sc)5811 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
5812 {
5813 	struct zoneref *z;
5814 	struct zone *zone;
5815 	unsigned long nr_soft_reclaimed;
5816 	unsigned long nr_soft_scanned;
5817 	gfp_t orig_mask;
5818 	pg_data_t *last_pgdat = NULL;
5819 
5820 	/*
5821 	 * If the number of buffer_heads in the machine exceeds the maximum
5822 	 * allowed level, force direct reclaim to scan the highmem zone as
5823 	 * highmem pages could be pinning lowmem pages storing buffer_heads
5824 	 */
5825 	orig_mask = sc->gfp_mask;
5826 	if (buffer_heads_over_limit) {
5827 		sc->gfp_mask |= __GFP_HIGHMEM;
5828 		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
5829 	}
5830 
5831 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
5832 					sc->reclaim_idx, sc->nodemask) {
5833 		/*
5834 		 * Take care memory controller reclaiming has small influence
5835 		 * to global LRU.
5836 		 */
5837 		if (!cgroup_reclaim(sc)) {
5838 			if (!cpuset_zone_allowed(zone,
5839 						 GFP_KERNEL | __GFP_HARDWALL))
5840 				continue;
5841 
5842 			/*
5843 			 * If we already have plenty of memory free for
5844 			 * compaction in this zone, don't free any more.
5845 			 * Even though compaction is invoked for any
5846 			 * non-zero order, only frequent costly order
5847 			 * reclamation is disruptive enough to become a
5848 			 * noticeable problem, like transparent huge
5849 			 * page allocations.
5850 			 */
5851 			if (IS_ENABLED(CONFIG_COMPACTION) &&
5852 			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
5853 			    compaction_ready(zone, sc)) {
5854 				sc->compaction_ready = true;
5855 				continue;
5856 			}
5857 
5858 			/*
5859 			 * Shrink each node in the zonelist once. If the
5860 			 * zonelist is ordered by zone (not the default) then a
5861 			 * node may be shrunk multiple times but in that case
5862 			 * the user prefers lower zones being preserved.
5863 			 */
5864 			if (zone->zone_pgdat == last_pgdat)
5865 				continue;
5866 
5867 			/*
5868 			 * This steals pages from memory cgroups over softlimit
5869 			 * and returns the number of reclaimed pages and
5870 			 * scanned pages. This works for global memory pressure
5871 			 * and balancing, not for a memcg's limit.
5872 			 */
5873 			nr_soft_scanned = 0;
5874 			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
5875 						sc->order, sc->gfp_mask,
5876 						&nr_soft_scanned);
5877 			sc->nr_reclaimed += nr_soft_reclaimed;
5878 			sc->nr_scanned += nr_soft_scanned;
5879 			/* need some check for avoid more shrink_zone() */
5880 		}
5881 
5882 		/* See comment about same check for global reclaim above */
5883 		if (zone->zone_pgdat == last_pgdat)
5884 			continue;
5885 		last_pgdat = zone->zone_pgdat;
5886 		shrink_node(zone->zone_pgdat, sc);
5887 	}
5888 
5889 	/*
5890 	 * Restore to original mask to avoid the impact on the caller if we
5891 	 * promoted it to __GFP_HIGHMEM.
5892 	 */
5893 	sc->gfp_mask = orig_mask;
5894 }
5895 
snapshot_refaults(struct mem_cgroup * target_memcg,pg_data_t * pgdat)5896 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
5897 {
5898 	struct lruvec *target_lruvec;
5899 	unsigned long refaults;
5900 
5901 	if (lru_gen_enabled())
5902 		return;
5903 
5904 	target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
5905 	refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
5906 	target_lruvec->refaults[0] = refaults;
5907 	refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
5908 	target_lruvec->refaults[1] = refaults;
5909 }
5910 
5911 /*
5912  * This is the main entry point to direct page reclaim.
5913  *
5914  * If a full scan of the inactive list fails to free enough memory then we
5915  * are "out of memory" and something needs to be killed.
5916  *
5917  * If the caller is !__GFP_FS then the probability of a failure is reasonably
5918  * high - the zone may be full of dirty or under-writeback pages, which this
5919  * caller can't do much about.  We kick the writeback threads and take explicit
5920  * naps in the hope that some of these pages can be written.  But if the
5921  * allocating task holds filesystem locks which prevent writeout this might not
5922  * work, and the allocation attempt will fail.
5923  *
5924  * returns:	0, if no pages reclaimed
5925  * 		else, the number of pages reclaimed
5926  */
do_try_to_free_pages(struct zonelist * zonelist,struct scan_control * sc)5927 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
5928 					  struct scan_control *sc)
5929 {
5930 	int initial_priority = sc->priority;
5931 	pg_data_t *last_pgdat;
5932 	struct zoneref *z;
5933 	struct zone *zone;
5934 retry:
5935 	delayacct_freepages_start();
5936 
5937 	if (!cgroup_reclaim(sc))
5938 		__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
5939 
5940 	do {
5941 		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
5942 				sc->priority);
5943 		sc->nr_scanned = 0;
5944 		shrink_zones(zonelist, sc);
5945 
5946 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
5947 			break;
5948 
5949 		if (sc->compaction_ready)
5950 			break;
5951 
5952 		/*
5953 		 * If we're getting trouble reclaiming, start doing
5954 		 * writepage even in laptop mode.
5955 		 */
5956 		if (sc->priority < DEF_PRIORITY - 2)
5957 			sc->may_writepage = 1;
5958 	} while (--sc->priority >= 0);
5959 
5960 	last_pgdat = NULL;
5961 	for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
5962 					sc->nodemask) {
5963 		if (zone->zone_pgdat == last_pgdat)
5964 			continue;
5965 		last_pgdat = zone->zone_pgdat;
5966 
5967 		snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
5968 
5969 		if (cgroup_reclaim(sc)) {
5970 			struct lruvec *lruvec;
5971 
5972 			lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
5973 						   zone->zone_pgdat);
5974 			clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
5975 		}
5976 	}
5977 
5978 	delayacct_freepages_end();
5979 
5980 	if (sc->nr_reclaimed)
5981 		return sc->nr_reclaimed;
5982 
5983 	/* Aborted reclaim to try compaction? don't OOM, then */
5984 	if (sc->compaction_ready)
5985 		return 1;
5986 
5987 	/*
5988 	 * We make inactive:active ratio decisions based on the node's
5989 	 * composition of memory, but a restrictive reclaim_idx or a
5990 	 * memory.low cgroup setting can exempt large amounts of
5991 	 * memory from reclaim. Neither of which are very common, so
5992 	 * instead of doing costly eligibility calculations of the
5993 	 * entire cgroup subtree up front, we assume the estimates are
5994 	 * good, and retry with forcible deactivation if that fails.
5995 	 */
5996 	if (sc->skipped_deactivate) {
5997 		sc->priority = initial_priority;
5998 		sc->force_deactivate = 1;
5999 		sc->skipped_deactivate = 0;
6000 		goto retry;
6001 	}
6002 
6003 	/* Untapped cgroup reserves?  Don't OOM, retry. */
6004 	if (sc->memcg_low_skipped) {
6005 		sc->priority = initial_priority;
6006 		sc->force_deactivate = 0;
6007 		sc->memcg_low_reclaim = 1;
6008 		sc->memcg_low_skipped = 0;
6009 		goto retry;
6010 	}
6011 
6012 	return 0;
6013 }
6014 
allow_direct_reclaim(pg_data_t * pgdat)6015 static bool allow_direct_reclaim(pg_data_t *pgdat)
6016 {
6017 	struct zone *zone;
6018 	unsigned long pfmemalloc_reserve = 0;
6019 	unsigned long free_pages = 0;
6020 	int i;
6021 	bool wmark_ok;
6022 
6023 	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6024 		return true;
6025 
6026 	for (i = 0; i <= ZONE_NORMAL; i++) {
6027 		zone = &pgdat->node_zones[i];
6028 		if (!managed_zone(zone))
6029 			continue;
6030 
6031 		if (!zone_reclaimable_pages(zone))
6032 			continue;
6033 
6034 		pfmemalloc_reserve += min_wmark_pages(zone);
6035 		free_pages += zone_page_state(zone, NR_FREE_PAGES);
6036 	}
6037 
6038 	/* If there are no reserves (unexpected config) then do not throttle */
6039 	if (!pfmemalloc_reserve)
6040 		return true;
6041 
6042 	wmark_ok = free_pages > pfmemalloc_reserve / 2;
6043 
6044 	/* kswapd must be awake if processes are being throttled */
6045 	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
6046 		if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
6047 			WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
6048 
6049 		wake_up_interruptible(&pgdat->kswapd_wait);
6050 	}
6051 
6052 	return wmark_ok;
6053 }
6054 
6055 /*
6056  * Throttle direct reclaimers if backing storage is backed by the network
6057  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
6058  * depleted. kswapd will continue to make progress and wake the processes
6059  * when the low watermark is reached.
6060  *
6061  * Returns true if a fatal signal was delivered during throttling. If this
6062  * happens, the page allocator should not consider triggering the OOM killer.
6063  */
throttle_direct_reclaim(gfp_t gfp_mask,struct zonelist * zonelist,nodemask_t * nodemask)6064 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
6065 					nodemask_t *nodemask)
6066 {
6067 	struct zoneref *z;
6068 	struct zone *zone;
6069 	pg_data_t *pgdat = NULL;
6070 
6071 	/*
6072 	 * Kernel threads should not be throttled as they may be indirectly
6073 	 * responsible for cleaning pages necessary for reclaim to make forward
6074 	 * progress. kjournald for example may enter direct reclaim while
6075 	 * committing a transaction where throttling it could forcing other
6076 	 * processes to block on log_wait_commit().
6077 	 */
6078 	if (current->flags & PF_KTHREAD)
6079 		goto out;
6080 
6081 	/*
6082 	 * If a fatal signal is pending, this process should not throttle.
6083 	 * It should return quickly so it can exit and free its memory
6084 	 */
6085 	if (fatal_signal_pending(current))
6086 		goto out;
6087 
6088 	/*
6089 	 * Check if the pfmemalloc reserves are ok by finding the first node
6090 	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
6091 	 * GFP_KERNEL will be required for allocating network buffers when
6092 	 * swapping over the network so ZONE_HIGHMEM is unusable.
6093 	 *
6094 	 * Throttling is based on the first usable node and throttled processes
6095 	 * wait on a queue until kswapd makes progress and wakes them. There
6096 	 * is an affinity then between processes waking up and where reclaim
6097 	 * progress has been made assuming the process wakes on the same node.
6098 	 * More importantly, processes running on remote nodes will not compete
6099 	 * for remote pfmemalloc reserves and processes on different nodes
6100 	 * should make reasonable progress.
6101 	 */
6102 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
6103 					gfp_zone(gfp_mask), nodemask) {
6104 		if (zone_idx(zone) > ZONE_NORMAL)
6105 			continue;
6106 
6107 		/* Throttle based on the first usable node */
6108 		pgdat = zone->zone_pgdat;
6109 		if (allow_direct_reclaim(pgdat))
6110 			goto out;
6111 		break;
6112 	}
6113 
6114 	/* If no zone was usable by the allocation flags then do not throttle */
6115 	if (!pgdat)
6116 		goto out;
6117 
6118 	/* Account for the throttling */
6119 	count_vm_event(PGSCAN_DIRECT_THROTTLE);
6120 
6121 	/*
6122 	 * If the caller cannot enter the filesystem, it's possible that it
6123 	 * is due to the caller holding an FS lock or performing a journal
6124 	 * transaction in the case of a filesystem like ext[3|4]. In this case,
6125 	 * it is not safe to block on pfmemalloc_wait as kswapd could be
6126 	 * blocked waiting on the same lock. Instead, throttle for up to a
6127 	 * second before continuing.
6128 	 */
6129 	if (!(gfp_mask & __GFP_FS)) {
6130 		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
6131 			allow_direct_reclaim(pgdat), HZ);
6132 
6133 		goto check_pending;
6134 	}
6135 
6136 	/* Throttle until kswapd wakes the process */
6137 	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
6138 		allow_direct_reclaim(pgdat));
6139 
6140 check_pending:
6141 	if (fatal_signal_pending(current))
6142 		return true;
6143 
6144 out:
6145 	return false;
6146 }
6147 
try_to_free_pages(struct zonelist * zonelist,int order,gfp_t gfp_mask,nodemask_t * nodemask)6148 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
6149 				gfp_t gfp_mask, nodemask_t *nodemask)
6150 {
6151 	unsigned long nr_reclaimed;
6152 	struct scan_control sc = {
6153 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
6154 		.gfp_mask = current_gfp_context(gfp_mask),
6155 		.reclaim_idx = gfp_zone(gfp_mask),
6156 		.order = order,
6157 		.nodemask = nodemask,
6158 		.priority = DEF_PRIORITY,
6159 		.may_writepage = !laptop_mode,
6160 		.may_unmap = 1,
6161 		.may_swap = 1,
6162 	};
6163 
6164 	/*
6165 	 * scan_control uses s8 fields for order, priority, and reclaim_idx.
6166 	 * Confirm they are large enough for max values.
6167 	 */
6168 	BUILD_BUG_ON(MAX_ORDER > S8_MAX);
6169 	BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
6170 	BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
6171 
6172 	/*
6173 	 * Do not enter reclaim if fatal signal was delivered while throttled.
6174 	 * 1 is returned so that the page allocator does not OOM kill at this
6175 	 * point.
6176 	 */
6177 	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
6178 		return 1;
6179 
6180 	set_task_reclaim_state(current, &sc.reclaim_state);
6181 	trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
6182 
6183 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6184 
6185 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
6186 	set_task_reclaim_state(current, NULL);
6187 
6188 	return nr_reclaimed;
6189 }
6190 
6191 #ifdef CONFIG_MEMCG
6192 
6193 /* Only used by soft limit reclaim. Do not reuse for anything else. */
mem_cgroup_shrink_node(struct mem_cgroup * memcg,gfp_t gfp_mask,bool noswap,pg_data_t * pgdat,unsigned long * nr_scanned)6194 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
6195 						gfp_t gfp_mask, bool noswap,
6196 						pg_data_t *pgdat,
6197 						unsigned long *nr_scanned)
6198 {
6199 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6200 	struct scan_control sc = {
6201 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
6202 		.target_mem_cgroup = memcg,
6203 		.may_writepage = !laptop_mode,
6204 		.may_unmap = 1,
6205 		.reclaim_idx = MAX_NR_ZONES - 1,
6206 		.may_swap = !noswap,
6207 	};
6208 
6209 	WARN_ON_ONCE(!current->reclaim_state);
6210 
6211 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
6212 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
6213 
6214 	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
6215 						      sc.gfp_mask);
6216 
6217 	/*
6218 	 * NOTE: Although we can get the priority field, using it
6219 	 * here is not a good idea, since it limits the pages we can scan.
6220 	 * if we don't reclaim here, the shrink_node from balance_pgdat
6221 	 * will pick up pages from other mem cgroup's as well. We hack
6222 	 * the priority and make it zero.
6223 	 */
6224 	shrink_lruvec(lruvec, &sc);
6225 
6226 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
6227 
6228 	*nr_scanned = sc.nr_scanned;
6229 
6230 	return sc.nr_reclaimed;
6231 }
6232 
try_to_free_mem_cgroup_pages(struct mem_cgroup * memcg,unsigned long nr_pages,gfp_t gfp_mask,bool may_swap)6233 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
6234 					   unsigned long nr_pages,
6235 					   gfp_t gfp_mask,
6236 					   bool may_swap)
6237 {
6238 	unsigned long nr_reclaimed;
6239 	unsigned int noreclaim_flag;
6240 	struct scan_control sc = {
6241 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
6242 		.gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
6243 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
6244 		.reclaim_idx = MAX_NR_ZONES - 1,
6245 		.target_mem_cgroup = memcg,
6246 		.priority = DEF_PRIORITY,
6247 		.may_writepage = !laptop_mode,
6248 		.may_unmap = 1,
6249 		.may_swap = may_swap,
6250 	};
6251 	/*
6252 	 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
6253 	 * equal pressure on all the nodes. This is based on the assumption that
6254 	 * the reclaim does not bail out early.
6255 	 */
6256 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
6257 
6258 	set_task_reclaim_state(current, &sc.reclaim_state);
6259 	trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
6260 	noreclaim_flag = memalloc_noreclaim_save();
6261 
6262 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6263 
6264 	memalloc_noreclaim_restore(noreclaim_flag);
6265 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
6266 	set_task_reclaim_state(current, NULL);
6267 
6268 	return nr_reclaimed;
6269 }
6270 EXPORT_SYMBOL_GPL(try_to_free_mem_cgroup_pages);
6271 #endif
6272 
kswapd_age_node(struct pglist_data * pgdat,struct scan_control * sc)6273 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
6274 {
6275 	struct mem_cgroup *memcg;
6276 	struct lruvec *lruvec;
6277 
6278 	if (lru_gen_enabled()) {
6279 		lru_gen_age_node(pgdat, sc);
6280 		return;
6281 	}
6282 
6283 	if (!total_swap_pages)
6284 		return;
6285 
6286 	lruvec = mem_cgroup_lruvec(NULL, pgdat);
6287 	if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
6288 		return;
6289 
6290 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
6291 	do {
6292 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
6293 		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
6294 				   sc, LRU_ACTIVE_ANON);
6295 		memcg = mem_cgroup_iter(NULL, memcg, NULL);
6296 	} while (memcg);
6297 }
6298 
pgdat_watermark_boosted(pg_data_t * pgdat,int highest_zoneidx)6299 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
6300 {
6301 	int i;
6302 	struct zone *zone;
6303 
6304 	/*
6305 	 * Check for watermark boosts top-down as the higher zones
6306 	 * are more likely to be boosted. Both watermarks and boosts
6307 	 * should not be checked at the same time as reclaim would
6308 	 * start prematurely when there is no boosting and a lower
6309 	 * zone is balanced.
6310 	 */
6311 	for (i = highest_zoneidx; i >= 0; i--) {
6312 		zone = pgdat->node_zones + i;
6313 		if (!managed_zone(zone))
6314 			continue;
6315 
6316 		if (zone->watermark_boost)
6317 			return true;
6318 	}
6319 
6320 	return false;
6321 }
6322 
6323 /*
6324  * Returns true if there is an eligible zone balanced for the request order
6325  * and highest_zoneidx
6326  */
pgdat_balanced(pg_data_t * pgdat,int order,int highest_zoneidx)6327 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
6328 {
6329 	int i;
6330 	unsigned long mark = -1;
6331 	struct zone *zone;
6332 
6333 	/*
6334 	 * Check watermarks bottom-up as lower zones are more likely to
6335 	 * meet watermarks.
6336 	 */
6337 	for (i = 0; i <= highest_zoneidx; i++) {
6338 		zone = pgdat->node_zones + i;
6339 
6340 		if (!managed_zone(zone))
6341 			continue;
6342 
6343 		mark = high_wmark_pages(zone);
6344 		if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
6345 			return true;
6346 	}
6347 
6348 	/*
6349 	 * If a node has no populated zone within highest_zoneidx, it does not
6350 	 * need balancing by definition. This can happen if a zone-restricted
6351 	 * allocation tries to wake a remote kswapd.
6352 	 */
6353 	if (mark == -1)
6354 		return true;
6355 
6356 	return false;
6357 }
6358 
6359 /* Clear pgdat state for congested, dirty or under writeback. */
clear_pgdat_congested(pg_data_t * pgdat)6360 static void clear_pgdat_congested(pg_data_t *pgdat)
6361 {
6362 	struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
6363 
6364 	clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
6365 	clear_bit(PGDAT_DIRTY, &pgdat->flags);
6366 	clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
6367 }
6368 
6369 /*
6370  * Prepare kswapd for sleeping. This verifies that there are no processes
6371  * waiting in throttle_direct_reclaim() and that watermarks have been met.
6372  *
6373  * Returns true if kswapd is ready to sleep
6374  */
prepare_kswapd_sleep(pg_data_t * pgdat,int order,int highest_zoneidx)6375 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
6376 				int highest_zoneidx)
6377 {
6378 	/*
6379 	 * The throttled processes are normally woken up in balance_pgdat() as
6380 	 * soon as allow_direct_reclaim() is true. But there is a potential
6381 	 * race between when kswapd checks the watermarks and a process gets
6382 	 * throttled. There is also a potential race if processes get
6383 	 * throttled, kswapd wakes, a large process exits thereby balancing the
6384 	 * zones, which causes kswapd to exit balance_pgdat() before reaching
6385 	 * the wake up checks. If kswapd is going to sleep, no process should
6386 	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
6387 	 * the wake up is premature, processes will wake kswapd and get
6388 	 * throttled again. The difference from wake ups in balance_pgdat() is
6389 	 * that here we are under prepare_to_wait().
6390 	 */
6391 	if (waitqueue_active(&pgdat->pfmemalloc_wait))
6392 		wake_up_all(&pgdat->pfmemalloc_wait);
6393 
6394 	/* Hopeless node, leave it to direct reclaim */
6395 	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6396 		return true;
6397 
6398 	if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
6399 		clear_pgdat_congested(pgdat);
6400 		return true;
6401 	}
6402 
6403 	return false;
6404 }
6405 
6406 /*
6407  * kswapd shrinks a node of pages that are at or below the highest usable
6408  * zone that is currently unbalanced.
6409  *
6410  * Returns true if kswapd scanned at least the requested number of pages to
6411  * reclaim or if the lack of progress was due to pages under writeback.
6412  * This is used to determine if the scanning priority needs to be raised.
6413  */
kswapd_shrink_node(pg_data_t * pgdat,struct scan_control * sc)6414 static bool kswapd_shrink_node(pg_data_t *pgdat,
6415 			       struct scan_control *sc)
6416 {
6417 	struct zone *zone;
6418 	int z;
6419 
6420 	/* Reclaim a number of pages proportional to the number of zones */
6421 	sc->nr_to_reclaim = 0;
6422 	for (z = 0; z <= sc->reclaim_idx; z++) {
6423 		zone = pgdat->node_zones + z;
6424 		if (!managed_zone(zone))
6425 			continue;
6426 
6427 		sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
6428 	}
6429 
6430 	/*
6431 	 * Historically care was taken to put equal pressure on all zones but
6432 	 * now pressure is applied based on node LRU order.
6433 	 */
6434 	shrink_node(pgdat, sc);
6435 
6436 	/*
6437 	 * Fragmentation may mean that the system cannot be rebalanced for
6438 	 * high-order allocations. If twice the allocation size has been
6439 	 * reclaimed then recheck watermarks only at order-0 to prevent
6440 	 * excessive reclaim. Assume that a process requested a high-order
6441 	 * can direct reclaim/compact.
6442 	 */
6443 	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
6444 		sc->order = 0;
6445 
6446 	return sc->nr_scanned >= sc->nr_to_reclaim;
6447 }
6448 
6449 /*
6450  * For kswapd, balance_pgdat() will reclaim pages across a node from zones
6451  * that are eligible for use by the caller until at least one zone is
6452  * balanced.
6453  *
6454  * Returns the order kswapd finished reclaiming at.
6455  *
6456  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
6457  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
6458  * found to have free_pages <= high_wmark_pages(zone), any page in that zone
6459  * or lower is eligible for reclaim until at least one usable zone is
6460  * balanced.
6461  */
balance_pgdat(pg_data_t * pgdat,int order,int highest_zoneidx)6462 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
6463 {
6464 	int i;
6465 	unsigned long nr_soft_reclaimed;
6466 	unsigned long nr_soft_scanned;
6467 	unsigned long pflags;
6468 	unsigned long nr_boost_reclaim;
6469 	unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
6470 	bool boosted;
6471 	struct zone *zone;
6472 	struct scan_control sc = {
6473 		.gfp_mask = GFP_KERNEL,
6474 		.order = order,
6475 		.may_unmap = 1,
6476 	};
6477 
6478 	set_task_reclaim_state(current, &sc.reclaim_state);
6479 	psi_memstall_enter(&pflags);
6480 	__fs_reclaim_acquire();
6481 
6482 	count_vm_event(PAGEOUTRUN);
6483 
6484 	/*
6485 	 * Account for the reclaim boost. Note that the zone boost is left in
6486 	 * place so that parallel allocations that are near the watermark will
6487 	 * stall or direct reclaim until kswapd is finished.
6488 	 */
6489 	nr_boost_reclaim = 0;
6490 	for (i = 0; i <= highest_zoneidx; i++) {
6491 		zone = pgdat->node_zones + i;
6492 		if (!managed_zone(zone))
6493 			continue;
6494 
6495 		nr_boost_reclaim += zone->watermark_boost;
6496 		zone_boosts[i] = zone->watermark_boost;
6497 	}
6498 	boosted = nr_boost_reclaim;
6499 
6500 restart:
6501 	sc.priority = DEF_PRIORITY;
6502 	do {
6503 		unsigned long nr_reclaimed = sc.nr_reclaimed;
6504 		bool raise_priority = true;
6505 		bool balanced;
6506 		bool ret;
6507 
6508 		sc.reclaim_idx = highest_zoneidx;
6509 
6510 		/*
6511 		 * If the number of buffer_heads exceeds the maximum allowed
6512 		 * then consider reclaiming from all zones. This has a dual
6513 		 * purpose -- on 64-bit systems it is expected that
6514 		 * buffer_heads are stripped during active rotation. On 32-bit
6515 		 * systems, highmem pages can pin lowmem memory and shrinking
6516 		 * buffers can relieve lowmem pressure. Reclaim may still not
6517 		 * go ahead if all eligible zones for the original allocation
6518 		 * request are balanced to avoid excessive reclaim from kswapd.
6519 		 */
6520 		if (buffer_heads_over_limit) {
6521 			for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
6522 				zone = pgdat->node_zones + i;
6523 				if (!managed_zone(zone))
6524 					continue;
6525 
6526 				sc.reclaim_idx = i;
6527 				break;
6528 			}
6529 		}
6530 
6531 		/*
6532 		 * If the pgdat is imbalanced then ignore boosting and preserve
6533 		 * the watermarks for a later time and restart. Note that the
6534 		 * zone watermarks will be still reset at the end of balancing
6535 		 * on the grounds that the normal reclaim should be enough to
6536 		 * re-evaluate if boosting is required when kswapd next wakes.
6537 		 */
6538 		balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
6539 		if (!balanced && nr_boost_reclaim) {
6540 			nr_boost_reclaim = 0;
6541 			goto restart;
6542 		}
6543 
6544 		/*
6545 		 * If boosting is not active then only reclaim if there are no
6546 		 * eligible zones. Note that sc.reclaim_idx is not used as
6547 		 * buffer_heads_over_limit may have adjusted it.
6548 		 */
6549 		if (!nr_boost_reclaim && balanced)
6550 			goto out;
6551 
6552 		/* Limit the priority of boosting to avoid reclaim writeback */
6553 		if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
6554 			raise_priority = false;
6555 
6556 		/*
6557 		 * Do not writeback or swap pages for boosted reclaim. The
6558 		 * intent is to relieve pressure not issue sub-optimal IO
6559 		 * from reclaim context. If no pages are reclaimed, the
6560 		 * reclaim will be aborted.
6561 		 */
6562 		sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
6563 		sc.may_swap = !nr_boost_reclaim;
6564 
6565 		/*
6566 		 * Do some background aging, to give pages a chance to be
6567 		 * referenced before reclaiming. All pages are rotated
6568 		 * regardless of classzone as this is about consistent aging.
6569 		 */
6570 		kswapd_age_node(pgdat, &sc);
6571 
6572 		/*
6573 		 * If we're getting trouble reclaiming, start doing writepage
6574 		 * even in laptop mode.
6575 		 */
6576 		if (sc.priority < DEF_PRIORITY - 2)
6577 			sc.may_writepage = 1;
6578 
6579 		/* Call soft limit reclaim before calling shrink_node. */
6580 		sc.nr_scanned = 0;
6581 		nr_soft_scanned = 0;
6582 		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
6583 						sc.gfp_mask, &nr_soft_scanned);
6584 		sc.nr_reclaimed += nr_soft_reclaimed;
6585 
6586 		/*
6587 		 * There should be no need to raise the scanning priority if
6588 		 * enough pages are already being scanned that that high
6589 		 * watermark would be met at 100% efficiency.
6590 		 */
6591 		if (kswapd_shrink_node(pgdat, &sc))
6592 			raise_priority = false;
6593 
6594 		/*
6595 		 * If the low watermark is met there is no need for processes
6596 		 * to be throttled on pfmemalloc_wait as they should not be
6597 		 * able to safely make forward progress. Wake them
6598 		 */
6599 		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
6600 				allow_direct_reclaim(pgdat))
6601 			wake_up_all(&pgdat->pfmemalloc_wait);
6602 
6603 		/* Check if kswapd should be suspending */
6604 		__fs_reclaim_release();
6605 		ret = try_to_freeze();
6606 		__fs_reclaim_acquire();
6607 		if (ret || kthread_should_stop())
6608 			break;
6609 
6610 		/*
6611 		 * Raise priority if scanning rate is too low or there was no
6612 		 * progress in reclaiming pages
6613 		 */
6614 		nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
6615 		nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
6616 
6617 		/*
6618 		 * If reclaim made no progress for a boost, stop reclaim as
6619 		 * IO cannot be queued and it could be an infinite loop in
6620 		 * extreme circumstances.
6621 		 */
6622 		if (nr_boost_reclaim && !nr_reclaimed)
6623 			break;
6624 
6625 		if (raise_priority || !nr_reclaimed)
6626 			sc.priority--;
6627 	} while (sc.priority >= 1);
6628 
6629 	if (!sc.nr_reclaimed)
6630 		pgdat->kswapd_failures++;
6631 
6632 out:
6633 	/* If reclaim was boosted, account for the reclaim done in this pass */
6634 	if (boosted) {
6635 		unsigned long flags;
6636 
6637 		for (i = 0; i <= highest_zoneidx; i++) {
6638 			if (!zone_boosts[i])
6639 				continue;
6640 
6641 			/* Increments are under the zone lock */
6642 			zone = pgdat->node_zones + i;
6643 			spin_lock_irqsave(&zone->lock, flags);
6644 			zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
6645 			spin_unlock_irqrestore(&zone->lock, flags);
6646 		}
6647 
6648 		/*
6649 		 * As there is now likely space, wakeup kcompact to defragment
6650 		 * pageblocks.
6651 		 */
6652 		wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
6653 	}
6654 
6655 	snapshot_refaults(NULL, pgdat);
6656 	__fs_reclaim_release();
6657 	psi_memstall_leave(&pflags);
6658 	set_task_reclaim_state(current, NULL);
6659 
6660 	/*
6661 	 * Return the order kswapd stopped reclaiming at as
6662 	 * prepare_kswapd_sleep() takes it into account. If another caller
6663 	 * entered the allocator slow path while kswapd was awake, order will
6664 	 * remain at the higher level.
6665 	 */
6666 	return sc.order;
6667 }
6668 
6669 /*
6670  * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
6671  * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is
6672  * not a valid index then either kswapd runs for first time or kswapd couldn't
6673  * sleep after previous reclaim attempt (node is still unbalanced). In that
6674  * case return the zone index of the previous kswapd reclaim cycle.
6675  */
kswapd_highest_zoneidx(pg_data_t * pgdat,enum zone_type prev_highest_zoneidx)6676 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
6677 					   enum zone_type prev_highest_zoneidx)
6678 {
6679 	enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
6680 
6681 	return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
6682 }
6683 
kswapd_try_to_sleep(pg_data_t * pgdat,int alloc_order,int reclaim_order,unsigned int highest_zoneidx)6684 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
6685 				unsigned int highest_zoneidx)
6686 {
6687 	long remaining = 0;
6688 	DEFINE_WAIT(wait);
6689 
6690 	if (freezing(current) || kthread_should_stop())
6691 		return;
6692 
6693 	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
6694 
6695 	/*
6696 	 * Try to sleep for a short interval. Note that kcompactd will only be
6697 	 * woken if it is possible to sleep for a short interval. This is
6698 	 * deliberate on the assumption that if reclaim cannot keep an
6699 	 * eligible zone balanced that it's also unlikely that compaction will
6700 	 * succeed.
6701 	 */
6702 	if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
6703 		/*
6704 		 * Compaction records what page blocks it recently failed to
6705 		 * isolate pages from and skips them in the future scanning.
6706 		 * When kswapd is going to sleep, it is reasonable to assume
6707 		 * that pages and compaction may succeed so reset the cache.
6708 		 */
6709 		reset_isolation_suitable(pgdat);
6710 
6711 		/*
6712 		 * We have freed the memory, now we should compact it to make
6713 		 * allocation of the requested order possible.
6714 		 */
6715 		wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
6716 
6717 		remaining = schedule_timeout(HZ/10);
6718 
6719 		/*
6720 		 * If woken prematurely then reset kswapd_highest_zoneidx and
6721 		 * order. The values will either be from a wakeup request or
6722 		 * the previous request that slept prematurely.
6723 		 */
6724 		if (remaining) {
6725 			WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
6726 					kswapd_highest_zoneidx(pgdat,
6727 							highest_zoneidx));
6728 
6729 			if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
6730 				WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
6731 		}
6732 
6733 		finish_wait(&pgdat->kswapd_wait, &wait);
6734 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
6735 	}
6736 
6737 	/*
6738 	 * After a short sleep, check if it was a premature sleep. If not, then
6739 	 * go fully to sleep until explicitly woken up.
6740 	 */
6741 	if (!remaining &&
6742 	    prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
6743 		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
6744 
6745 		/*
6746 		 * vmstat counters are not perfectly accurate and the estimated
6747 		 * value for counters such as NR_FREE_PAGES can deviate from the
6748 		 * true value by nr_online_cpus * threshold. To avoid the zone
6749 		 * watermarks being breached while under pressure, we reduce the
6750 		 * per-cpu vmstat threshold while kswapd is awake and restore
6751 		 * them before going back to sleep.
6752 		 */
6753 		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
6754 
6755 		if (!kthread_should_stop())
6756 			schedule();
6757 
6758 		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
6759 	} else {
6760 		if (remaining)
6761 			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
6762 		else
6763 			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
6764 	}
6765 	finish_wait(&pgdat->kswapd_wait, &wait);
6766 }
6767 
6768 /*
6769  * The background pageout daemon, started as a kernel thread
6770  * from the init process.
6771  *
6772  * This basically trickles out pages so that we have _some_
6773  * free memory available even if there is no other activity
6774  * that frees anything up. This is needed for things like routing
6775  * etc, where we otherwise might have all activity going on in
6776  * asynchronous contexts that cannot page things out.
6777  *
6778  * If there are applications that are active memory-allocators
6779  * (most normal use), this basically shouldn't matter.
6780  */
kswapd(void * p)6781 static int kswapd(void *p)
6782 {
6783 	unsigned int alloc_order, reclaim_order;
6784 	unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
6785 	pg_data_t *pgdat = (pg_data_t*)p;
6786 	struct task_struct *tsk = current;
6787 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
6788 
6789 	if (!cpumask_empty(cpumask))
6790 		set_cpus_allowed_ptr(tsk, cpumask);
6791 
6792 	/*
6793 	 * Tell the memory management that we're a "memory allocator",
6794 	 * and that if we need more memory we should get access to it
6795 	 * regardless (see "__alloc_pages()"). "kswapd" should
6796 	 * never get caught in the normal page freeing logic.
6797 	 *
6798 	 * (Kswapd normally doesn't need memory anyway, but sometimes
6799 	 * you need a small amount of memory in order to be able to
6800 	 * page out something else, and this flag essentially protects
6801 	 * us from recursively trying to free more memory as we're
6802 	 * trying to free the first piece of memory in the first place).
6803 	 */
6804 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
6805 	set_freezable();
6806 
6807 	WRITE_ONCE(pgdat->kswapd_order, 0);
6808 	WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
6809 	for ( ; ; ) {
6810 		bool ret;
6811 
6812 		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
6813 		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
6814 							highest_zoneidx);
6815 
6816 kswapd_try_sleep:
6817 		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
6818 					highest_zoneidx);
6819 
6820 		/* Read the new order and highest_zoneidx */
6821 		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
6822 		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
6823 							highest_zoneidx);
6824 		WRITE_ONCE(pgdat->kswapd_order, 0);
6825 		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
6826 
6827 		ret = try_to_freeze();
6828 		if (kthread_should_stop())
6829 			break;
6830 
6831 		/*
6832 		 * We can speed up thawing tasks if we don't call balance_pgdat
6833 		 * after returning from the refrigerator
6834 		 */
6835 		if (ret)
6836 			continue;
6837 
6838 		/*
6839 		 * Reclaim begins at the requested order but if a high-order
6840 		 * reclaim fails then kswapd falls back to reclaiming for
6841 		 * order-0. If that happens, kswapd will consider sleeping
6842 		 * for the order it finished reclaiming at (reclaim_order)
6843 		 * but kcompactd is woken to compact for the original
6844 		 * request (alloc_order).
6845 		 */
6846 		trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
6847 						alloc_order);
6848 		reclaim_order = balance_pgdat(pgdat, alloc_order,
6849 						highest_zoneidx);
6850 		if (reclaim_order < alloc_order)
6851 			goto kswapd_try_sleep;
6852 	}
6853 
6854 	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
6855 
6856 	return 0;
6857 }
6858 
kswapd_per_node_run(int nid)6859 static int kswapd_per_node_run(int nid)
6860 {
6861 	pg_data_t *pgdat = NODE_DATA(nid);
6862 	int hid;
6863 	int ret = 0;
6864 
6865 	for (hid = 0; hid < kswapd_threads; ++hid) {
6866 		pgdat->mkswapd[hid] = kthread_run(kswapd, pgdat, "kswapd%d:%d",
6867 								nid, hid);
6868 		if (IS_ERR(pgdat->mkswapd[hid])) {
6869 			/* failure at boot is fatal */
6870 			WARN_ON(system_state < SYSTEM_RUNNING);
6871 			pr_err("Failed to start kswapd%d on node %d\n",
6872 				hid, nid);
6873 			ret = PTR_ERR(pgdat->mkswapd[hid]);
6874 			pgdat->mkswapd[hid] = NULL;
6875 			continue;
6876 		}
6877 		if (!pgdat->kswapd)
6878 			pgdat->kswapd = pgdat->mkswapd[hid];
6879 	}
6880 
6881 	return ret;
6882 }
6883 
kswapd_per_node_stop(int nid)6884 static void kswapd_per_node_stop(int nid)
6885 {
6886 	int hid = 0;
6887 	struct task_struct *kswapd;
6888 
6889 	for (hid = 0; hid < kswapd_threads; hid++) {
6890 		kswapd = NODE_DATA(nid)->mkswapd[hid];
6891 		if (kswapd) {
6892 			kthread_stop(kswapd);
6893 			NODE_DATA(nid)->mkswapd[hid] = NULL;
6894 		}
6895 	}
6896 	NODE_DATA(nid)->kswapd = NULL;
6897 }
6898 
6899 /*
6900  * A zone is low on free memory or too fragmented for high-order memory.  If
6901  * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
6902  * pgdat.  It will wake up kcompactd after reclaiming memory.  If kswapd reclaim
6903  * has failed or is not needed, still wake up kcompactd if only compaction is
6904  * needed.
6905  */
wakeup_kswapd(struct zone * zone,gfp_t gfp_flags,int order,enum zone_type highest_zoneidx)6906 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
6907 		   enum zone_type highest_zoneidx)
6908 {
6909 	pg_data_t *pgdat;
6910 	enum zone_type curr_idx;
6911 
6912 	if (!managed_zone(zone))
6913 		return;
6914 
6915 	if (!cpuset_zone_allowed(zone, gfp_flags))
6916 		return;
6917 
6918 	pgdat = zone->zone_pgdat;
6919 	curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
6920 
6921 	if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
6922 		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
6923 
6924 	if (READ_ONCE(pgdat->kswapd_order) < order)
6925 		WRITE_ONCE(pgdat->kswapd_order, order);
6926 
6927 	if (!waitqueue_active(&pgdat->kswapd_wait))
6928 		return;
6929 
6930 	/* Hopeless node, leave it to direct reclaim if possible */
6931 	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
6932 	    (pgdat_balanced(pgdat, order, highest_zoneidx) &&
6933 	     !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
6934 		/*
6935 		 * There may be plenty of free memory available, but it's too
6936 		 * fragmented for high-order allocations.  Wake up kcompactd
6937 		 * and rely on compaction_suitable() to determine if it's
6938 		 * needed.  If it fails, it will defer subsequent attempts to
6939 		 * ratelimit its work.
6940 		 */
6941 		if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
6942 			wakeup_kcompactd(pgdat, order, highest_zoneidx);
6943 		return;
6944 	}
6945 
6946 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
6947 				      gfp_flags);
6948 	wake_up_interruptible(&pgdat->kswapd_wait);
6949 }
6950 
6951 #ifdef CONFIG_HIBERNATION
6952 /*
6953  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
6954  * freed pages.
6955  *
6956  * Rather than trying to age LRUs the aim is to preserve the overall
6957  * LRU order by reclaiming preferentially
6958  * inactive > active > active referenced > active mapped
6959  */
shrink_all_memory(unsigned long nr_to_reclaim)6960 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
6961 {
6962 	struct scan_control sc = {
6963 		.nr_to_reclaim = nr_to_reclaim,
6964 		.gfp_mask = GFP_HIGHUSER_MOVABLE,
6965 		.reclaim_idx = MAX_NR_ZONES - 1,
6966 		.priority = DEF_PRIORITY,
6967 		.may_writepage = 1,
6968 		.may_unmap = 1,
6969 		.may_swap = 1,
6970 		.hibernation_mode = 1,
6971 	};
6972 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
6973 	unsigned long nr_reclaimed;
6974 	unsigned int noreclaim_flag;
6975 
6976 	fs_reclaim_acquire(sc.gfp_mask);
6977 	noreclaim_flag = memalloc_noreclaim_save();
6978 	set_task_reclaim_state(current, &sc.reclaim_state);
6979 
6980 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6981 
6982 	set_task_reclaim_state(current, NULL);
6983 	memalloc_noreclaim_restore(noreclaim_flag);
6984 	fs_reclaim_release(sc.gfp_mask);
6985 
6986 	return nr_reclaimed;
6987 }
6988 #endif /* CONFIG_HIBERNATION */
6989 
6990 /*
6991  * This kswapd start function will be called by init and node-hot-add.
6992  * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
6993  */
kswapd_run(int nid)6994 int kswapd_run(int nid)
6995 {
6996 	pg_data_t *pgdat = NODE_DATA(nid);
6997 	int ret = 0;
6998 
6999 	if (pgdat->kswapd)
7000 		return 0;
7001 
7002 	if (kswapd_threads > 1)
7003 		return kswapd_per_node_run(nid);
7004 
7005 	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
7006 	if (IS_ERR(pgdat->kswapd)) {
7007 		/* failure at boot is fatal */
7008 		BUG_ON(system_state < SYSTEM_RUNNING);
7009 		pr_err("Failed to start kswapd on node %d\n", nid);
7010 		ret = PTR_ERR(pgdat->kswapd);
7011 		pgdat->kswapd = NULL;
7012 	}
7013 	return ret;
7014 }
7015 
7016 /*
7017  * Called by memory hotplug when all memory in a node is offlined.  Caller must
7018  * hold mem_hotplug_begin/end().
7019  */
kswapd_stop(int nid)7020 void kswapd_stop(int nid)
7021 {
7022 	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
7023 
7024 	if (kswapd_threads > 1) {
7025 		kswapd_per_node_stop(nid);
7026 		return;
7027 	}
7028 
7029 	if (kswapd) {
7030 		kthread_stop(kswapd);
7031 		NODE_DATA(nid)->kswapd = NULL;
7032 	}
7033 }
7034 
kswapd_init(void)7035 static int __init kswapd_init(void)
7036 {
7037 	int nid;
7038 
7039 	swap_setup();
7040 	for_each_node_state(nid, N_MEMORY)
7041  		kswapd_run(nid);
7042 	return 0;
7043 }
7044 
7045 module_init(kswapd_init)
7046 
7047 #ifdef CONFIG_NUMA
7048 /*
7049  * Node reclaim mode
7050  *
7051  * If non-zero call node_reclaim when the number of free pages falls below
7052  * the watermarks.
7053  */
7054 int node_reclaim_mode __read_mostly;
7055 
7056 /*
7057  * These bit locations are exposed in the vm.zone_reclaim_mode sysctl
7058  * ABI.  New bits are OK, but existing bits can never change.
7059  */
7060 #define RECLAIM_ZONE  (1<<0)   /* Run shrink_inactive_list on the zone */
7061 #define RECLAIM_WRITE (1<<1)   /* Writeout pages during reclaim */
7062 #define RECLAIM_UNMAP (1<<2)   /* Unmap pages during reclaim */
7063 
7064 /*
7065  * Priority for NODE_RECLAIM. This determines the fraction of pages
7066  * of a node considered for each zone_reclaim. 4 scans 1/16th of
7067  * a zone.
7068  */
7069 #define NODE_RECLAIM_PRIORITY 4
7070 
7071 /*
7072  * Percentage of pages in a zone that must be unmapped for node_reclaim to
7073  * occur.
7074  */
7075 int sysctl_min_unmapped_ratio = 1;
7076 
7077 /*
7078  * If the number of slab pages in a zone grows beyond this percentage then
7079  * slab reclaim needs to occur.
7080  */
7081 int sysctl_min_slab_ratio = 5;
7082 
node_unmapped_file_pages(struct pglist_data * pgdat)7083 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
7084 {
7085 	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
7086 	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
7087 		node_page_state(pgdat, NR_ACTIVE_FILE);
7088 
7089 	/*
7090 	 * It's possible for there to be more file mapped pages than
7091 	 * accounted for by the pages on the file LRU lists because
7092 	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
7093 	 */
7094 	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
7095 }
7096 
7097 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
node_pagecache_reclaimable(struct pglist_data * pgdat)7098 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
7099 {
7100 	unsigned long nr_pagecache_reclaimable;
7101 	unsigned long delta = 0;
7102 
7103 	/*
7104 	 * If RECLAIM_UNMAP is set, then all file pages are considered
7105 	 * potentially reclaimable. Otherwise, we have to worry about
7106 	 * pages like swapcache and node_unmapped_file_pages() provides
7107 	 * a better estimate
7108 	 */
7109 	if (node_reclaim_mode & RECLAIM_UNMAP)
7110 		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
7111 	else
7112 		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
7113 
7114 	/* If we can't clean pages, remove dirty pages from consideration */
7115 	if (!(node_reclaim_mode & RECLAIM_WRITE))
7116 		delta += node_page_state(pgdat, NR_FILE_DIRTY);
7117 
7118 	/* Watch for any possible underflows due to delta */
7119 	if (unlikely(delta > nr_pagecache_reclaimable))
7120 		delta = nr_pagecache_reclaimable;
7121 
7122 	return nr_pagecache_reclaimable - delta;
7123 }
7124 
7125 /*
7126  * Try to free up some pages from this node through reclaim.
7127  */
__node_reclaim(struct pglist_data * pgdat,gfp_t gfp_mask,unsigned int order)7128 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7129 {
7130 	/* Minimum pages needed in order to stay on node */
7131 	const unsigned long nr_pages = 1 << order;
7132 	struct task_struct *p = current;
7133 	unsigned int noreclaim_flag;
7134 	struct scan_control sc = {
7135 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
7136 		.gfp_mask = current_gfp_context(gfp_mask),
7137 		.order = order,
7138 		.priority = NODE_RECLAIM_PRIORITY,
7139 		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
7140 		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
7141 		.may_swap = 1,
7142 		.reclaim_idx = gfp_zone(gfp_mask),
7143 	};
7144 
7145 	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
7146 					   sc.gfp_mask);
7147 
7148 	cond_resched();
7149 	fs_reclaim_acquire(sc.gfp_mask);
7150 	/*
7151 	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
7152 	 * and we also need to be able to write out pages for RECLAIM_WRITE
7153 	 * and RECLAIM_UNMAP.
7154 	 */
7155 	noreclaim_flag = memalloc_noreclaim_save();
7156 	p->flags |= PF_SWAPWRITE;
7157 	set_task_reclaim_state(p, &sc.reclaim_state);
7158 
7159 	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
7160 		/*
7161 		 * Free memory by calling shrink node with increasing
7162 		 * priorities until we have enough memory freed.
7163 		 */
7164 		do {
7165 			shrink_node(pgdat, &sc);
7166 		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
7167 	}
7168 
7169 	set_task_reclaim_state(p, NULL);
7170 	current->flags &= ~PF_SWAPWRITE;
7171 	memalloc_noreclaim_restore(noreclaim_flag);
7172 	fs_reclaim_release(sc.gfp_mask);
7173 
7174 	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
7175 
7176 	return sc.nr_reclaimed >= nr_pages;
7177 }
7178 
node_reclaim(struct pglist_data * pgdat,gfp_t gfp_mask,unsigned int order)7179 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7180 {
7181 	int ret;
7182 
7183 	/*
7184 	 * Node reclaim reclaims unmapped file backed pages and
7185 	 * slab pages if we are over the defined limits.
7186 	 *
7187 	 * A small portion of unmapped file backed pages is needed for
7188 	 * file I/O otherwise pages read by file I/O will be immediately
7189 	 * thrown out if the node is overallocated. So we do not reclaim
7190 	 * if less than a specified percentage of the node is used by
7191 	 * unmapped file backed pages.
7192 	 */
7193 	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
7194 	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
7195 	    pgdat->min_slab_pages)
7196 		return NODE_RECLAIM_FULL;
7197 
7198 	/*
7199 	 * Do not scan if the allocation should not be delayed.
7200 	 */
7201 	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
7202 		return NODE_RECLAIM_NOSCAN;
7203 
7204 	/*
7205 	 * Only run node reclaim on the local node or on nodes that do not
7206 	 * have associated processors. This will favor the local processor
7207 	 * over remote processors and spread off node memory allocations
7208 	 * as wide as possible.
7209 	 */
7210 	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
7211 		return NODE_RECLAIM_NOSCAN;
7212 
7213 	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
7214 		return NODE_RECLAIM_NOSCAN;
7215 
7216 	ret = __node_reclaim(pgdat, gfp_mask, order);
7217 	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
7218 
7219 	if (!ret)
7220 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
7221 
7222 	return ret;
7223 }
7224 #endif
7225 
7226 /**
7227  * check_move_unevictable_pages - check pages for evictability and move to
7228  * appropriate zone lru list
7229  * @pvec: pagevec with lru pages to check
7230  *
7231  * Checks pages for evictability, if an evictable page is in the unevictable
7232  * lru list, moves it to the appropriate evictable lru list. This function
7233  * should be only used for lru pages.
7234  */
check_move_unevictable_pages(struct pagevec * pvec)7235 void check_move_unevictable_pages(struct pagevec *pvec)
7236 {
7237 	struct lruvec *lruvec;
7238 	struct pglist_data *pgdat = NULL;
7239 	int pgscanned = 0;
7240 	int pgrescued = 0;
7241 	int i;
7242 
7243 	for (i = 0; i < pvec->nr; i++) {
7244 		struct page *page = pvec->pages[i];
7245 		struct pglist_data *pagepgdat = page_pgdat(page);
7246 		int nr_pages;
7247 
7248 		if (PageTransTail(page))
7249 			continue;
7250 
7251 		nr_pages = thp_nr_pages(page);
7252 		pgscanned += nr_pages;
7253 
7254 		if (pagepgdat != pgdat) {
7255 			if (pgdat)
7256 				spin_unlock_irq(&pgdat->lru_lock);
7257 			pgdat = pagepgdat;
7258 			spin_lock_irq(&pgdat->lru_lock);
7259 		}
7260 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
7261 
7262 		if (!PageLRU(page) || !PageUnevictable(page))
7263 			continue;
7264 
7265 		if (page_evictable(page)) {
7266 			del_page_from_lru_list(page, lruvec);
7267 			ClearPageUnevictable(page);
7268 			add_page_to_lru_list(page, lruvec);
7269 			pgrescued += nr_pages;
7270 		}
7271 	}
7272 
7273 	if (pgdat) {
7274 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
7275 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7276 		spin_unlock_irq(&pgdat->lru_lock);
7277 	}
7278 }
7279 EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
7280