1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
4  *
5  *  Swap reorganised 29.12.95, Stephen Tweedie.
6  *  kswapd added: 7.1.96  sct
7  *  Removed kswapd_ctl limits, and swap out as many pages as needed
8  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
9  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
10  *  Multiqueue VM started 5.8.00, Rik van Riel.
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/mm.h>
16 #include <linux/sched/mm.h>
17 #include <linux/module.h>
18 #include <linux/gfp.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/swap.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/highmem.h>
24 #include <linux/vmpressure.h>
25 #include <linux/vmstat.h>
26 #include <linux/file.h>
27 #include <linux/writeback.h>
28 #include <linux/blkdev.h>
29 #include <linux/buffer_head.h>	/* for try_to_release_page(),
30 					buffer_heads_over_limit */
31 #include <linux/mm_inline.h>
32 #include <linux/backing-dev.h>
33 #include <linux/rmap.h>
34 #include <linux/topology.h>
35 #include <linux/cpu.h>
36 #include <linux/cpuset.h>
37 #include <linux/compaction.h>
38 #include <linux/notifier.h>
39 #include <linux/rwsem.h>
40 #include <linux/delay.h>
41 #include <linux/kthread.h>
42 #include <linux/freezer.h>
43 #include <linux/memcontrol.h>
44 #include <linux/migrate.h>
45 #include <linux/delayacct.h>
46 #include <linux/sysctl.h>
47 #include <linux/oom.h>
48 #include <linux/pagevec.h>
49 #include <linux/prefetch.h>
50 #include <linux/printk.h>
51 #include <linux/dax.h>
52 #include <linux/psi.h>
53 #include <linux/pagewalk.h>
54 #include <linux/shmem_fs.h>
55 #include <linux/ctype.h>
56 #include <linux/debugfs.h>
57 
58 #include <asm/tlbflush.h>
59 #include <asm/div64.h>
60 
61 #include <linux/swapops.h>
62 #include <linux/balloon_compaction.h>
63 
64 #include "internal.h"
65 
66 #define CREATE_TRACE_POINTS
67 #include <trace/events/vmscan.h>
68 
69 EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_begin);
70 EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_end);
71 EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_kswapd_wake);
72 
73 #undef CREATE_TRACE_POINTS
74 #include <trace/hooks/vmscan.h>
75 
76 struct scan_control {
77 	/* How many pages shrink_list() should reclaim */
78 	unsigned long nr_to_reclaim;
79 
80 	/*
81 	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
82 	 * are scanned.
83 	 */
84 	nodemask_t	*nodemask;
85 
86 	/*
87 	 * The memory cgroup that hit its limit and as a result is the
88 	 * primary target of this reclaim invocation.
89 	 */
90 	struct mem_cgroup *target_mem_cgroup;
91 
92 	/*
93 	 * Scan pressure balancing between anon and file LRUs
94 	 */
95 	unsigned long	anon_cost;
96 	unsigned long	file_cost;
97 
98 	/* Can active pages be deactivated as part of reclaim? */
99 #define DEACTIVATE_ANON 1
100 #define DEACTIVATE_FILE 2
101 	unsigned int may_deactivate:2;
102 	unsigned int force_deactivate:1;
103 	unsigned int skipped_deactivate:1;
104 
105 	/* Writepage batching in laptop mode; RECLAIM_WRITE */
106 	unsigned int may_writepage:1;
107 
108 	/* Can mapped pages be reclaimed? */
109 	unsigned int may_unmap:1;
110 
111 	/* Can pages be swapped as part of reclaim? */
112 	unsigned int may_swap:1;
113 
114 	/*
115 	 * Cgroup memory below memory.low is protected as long as we
116 	 * don't threaten to OOM. If any cgroup is reclaimed at
117 	 * reduced force or passed over entirely due to its memory.low
118 	 * setting (memcg_low_skipped), and nothing is reclaimed as a
119 	 * result, then go back for one more cycle that reclaims the protected
120 	 * memory (memcg_low_reclaim) to avert OOM.
121 	 */
122 	unsigned int memcg_low_reclaim:1;
123 	unsigned int memcg_low_skipped:1;
124 
125 	unsigned int hibernation_mode:1;
126 
127 	/* One of the zones is ready for compaction */
128 	unsigned int compaction_ready:1;
129 
130 	/* There is easily reclaimable cold cache in the current node */
131 	unsigned int cache_trim_mode:1;
132 
133 	/* The file pages on the current node are dangerously low */
134 	unsigned int file_is_tiny:1;
135 
136 	/* Always discard instead of demoting to lower tier memory */
137 	unsigned int no_demotion:1;
138 
139 #ifdef CONFIG_LRU_GEN
140 	/* help kswapd make better choices among multiple memcgs */
141 	unsigned int memcgs_need_aging:1;
142 	unsigned long last_reclaimed;
143 #endif
144 
145 	/* Allocation order */
146 	s8 order;
147 
148 	/* Scan (total_size >> priority) pages at once */
149 	s8 priority;
150 
151 	/* The highest zone to isolate pages for reclaim from */
152 	s8 reclaim_idx;
153 
154 	/* This context's GFP mask */
155 	gfp_t gfp_mask;
156 
157 	/* Incremented by the number of inactive pages that were scanned */
158 	unsigned long nr_scanned;
159 
160 	/* Number of pages freed so far during a call to shrink_zones() */
161 	unsigned long nr_reclaimed;
162 
163 	struct {
164 		unsigned int dirty;
165 		unsigned int unqueued_dirty;
166 		unsigned int congested;
167 		unsigned int writeback;
168 		unsigned int immediate;
169 		unsigned int file_taken;
170 		unsigned int taken;
171 	} nr;
172 
173 	/* for recording the reclaimed slab by now */
174 	struct reclaim_state reclaim_state;
175 };
176 
177 #ifdef ARCH_HAS_PREFETCHW
178 #define prefetchw_prev_lru_page(_page, _base, _field)			\
179 	do {								\
180 		if ((_page)->lru.prev != _base) {			\
181 			struct page *prev;				\
182 									\
183 			prev = lru_to_page(&(_page->lru));		\
184 			prefetchw(&prev->_field);			\
185 		}							\
186 	} while (0)
187 #else
188 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
189 #endif
190 
191 /*
192  * From 0 .. 200.  Higher means more swappy.
193  */
194 int vm_swappiness = 60;
195 
196 static void set_task_reclaim_state(struct task_struct *task,
197 				   struct reclaim_state *rs)
198 {
199 	/* Check for an overwrite */
200 	WARN_ON_ONCE(rs && task->reclaim_state);
201 
202 	/* Check for the nulling of an already-nulled member */
203 	WARN_ON_ONCE(!rs && !task->reclaim_state);
204 
205 	task->reclaim_state = rs;
206 }
207 
208 static LIST_HEAD(shrinker_list);
209 static DECLARE_RWSEM(shrinker_rwsem);
210 
211 #ifdef CONFIG_MEMCG
212 static int shrinker_nr_max;
213 
214 /* The shrinker_info is expanded in a batch of BITS_PER_LONG */
215 static inline int shrinker_map_size(int nr_items)
216 {
217 	return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
218 }
219 
220 static inline int shrinker_defer_size(int nr_items)
221 {
222 	return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
223 }
224 
225 static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
226 						     int nid)
227 {
228 	return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
229 					 lockdep_is_held(&shrinker_rwsem));
230 }
231 
232 static int expand_one_shrinker_info(struct mem_cgroup *memcg,
233 				    int map_size, int defer_size,
234 				    int old_map_size, int old_defer_size)
235 {
236 	struct shrinker_info *new, *old;
237 	struct mem_cgroup_per_node *pn;
238 	int nid;
239 	int size = map_size + defer_size;
240 
241 	for_each_node(nid) {
242 		pn = memcg->nodeinfo[nid];
243 		old = shrinker_info_protected(memcg, nid);
244 		/* Not yet online memcg */
245 		if (!old)
246 			return 0;
247 
248 		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
249 		if (!new)
250 			return -ENOMEM;
251 
252 		new->nr_deferred = (atomic_long_t *)(new + 1);
253 		new->map = (void *)new->nr_deferred + defer_size;
254 
255 		/* map: set all old bits, clear all new bits */
256 		memset(new->map, (int)0xff, old_map_size);
257 		memset((void *)new->map + old_map_size, 0, map_size - old_map_size);
258 		/* nr_deferred: copy old values, clear all new values */
259 		memcpy(new->nr_deferred, old->nr_deferred, old_defer_size);
260 		memset((void *)new->nr_deferred + old_defer_size, 0,
261 		       defer_size - old_defer_size);
262 
263 		rcu_assign_pointer(pn->shrinker_info, new);
264 		kvfree_rcu(old, rcu);
265 	}
266 
267 	return 0;
268 }
269 
270 void free_shrinker_info(struct mem_cgroup *memcg)
271 {
272 	struct mem_cgroup_per_node *pn;
273 	struct shrinker_info *info;
274 	int nid;
275 
276 	for_each_node(nid) {
277 		pn = memcg->nodeinfo[nid];
278 		info = rcu_dereference_protected(pn->shrinker_info, true);
279 		kvfree(info);
280 		rcu_assign_pointer(pn->shrinker_info, NULL);
281 	}
282 }
283 
284 int alloc_shrinker_info(struct mem_cgroup *memcg)
285 {
286 	struct shrinker_info *info;
287 	int nid, size, ret = 0;
288 	int map_size, defer_size = 0;
289 
290 	down_write(&shrinker_rwsem);
291 	map_size = shrinker_map_size(shrinker_nr_max);
292 	defer_size = shrinker_defer_size(shrinker_nr_max);
293 	size = map_size + defer_size;
294 	for_each_node(nid) {
295 		info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
296 		if (!info) {
297 			free_shrinker_info(memcg);
298 			ret = -ENOMEM;
299 			break;
300 		}
301 		info->nr_deferred = (atomic_long_t *)(info + 1);
302 		info->map = (void *)info->nr_deferred + defer_size;
303 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
304 	}
305 	up_write(&shrinker_rwsem);
306 
307 	return ret;
308 }
309 
310 static inline bool need_expand(int nr_max)
311 {
312 	return round_up(nr_max, BITS_PER_LONG) >
313 	       round_up(shrinker_nr_max, BITS_PER_LONG);
314 }
315 
316 static int expand_shrinker_info(int new_id)
317 {
318 	int ret = 0;
319 	int new_nr_max = new_id + 1;
320 	int map_size, defer_size = 0;
321 	int old_map_size, old_defer_size = 0;
322 	struct mem_cgroup *memcg;
323 
324 	if (!need_expand(new_nr_max))
325 		goto out;
326 
327 	if (!root_mem_cgroup)
328 		goto out;
329 
330 	lockdep_assert_held(&shrinker_rwsem);
331 
332 	map_size = shrinker_map_size(new_nr_max);
333 	defer_size = shrinker_defer_size(new_nr_max);
334 	old_map_size = shrinker_map_size(shrinker_nr_max);
335 	old_defer_size = shrinker_defer_size(shrinker_nr_max);
336 
337 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
338 	do {
339 		ret = expand_one_shrinker_info(memcg, map_size, defer_size,
340 					       old_map_size, old_defer_size);
341 		if (ret) {
342 			mem_cgroup_iter_break(NULL, memcg);
343 			goto out;
344 		}
345 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
346 out:
347 	if (!ret)
348 		shrinker_nr_max = new_nr_max;
349 
350 	return ret;
351 }
352 
353 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
354 {
355 	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
356 		struct shrinker_info *info;
357 
358 		rcu_read_lock();
359 		info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
360 		/* Pairs with smp mb in shrink_slab() */
361 		smp_mb__before_atomic();
362 		set_bit(shrinker_id, info->map);
363 		rcu_read_unlock();
364 	}
365 }
366 
367 static DEFINE_IDR(shrinker_idr);
368 
369 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
370 {
371 	int id, ret = -ENOMEM;
372 
373 	if (mem_cgroup_disabled())
374 		return -ENOSYS;
375 
376 	down_write(&shrinker_rwsem);
377 	/* This may call shrinker, so it must use down_read_trylock() */
378 	id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
379 	if (id < 0)
380 		goto unlock;
381 
382 	if (id >= shrinker_nr_max) {
383 		if (expand_shrinker_info(id)) {
384 			idr_remove(&shrinker_idr, id);
385 			goto unlock;
386 		}
387 	}
388 	shrinker->id = id;
389 	ret = 0;
390 unlock:
391 	up_write(&shrinker_rwsem);
392 	return ret;
393 }
394 
395 static void unregister_memcg_shrinker(struct shrinker *shrinker)
396 {
397 	int id = shrinker->id;
398 
399 	BUG_ON(id < 0);
400 
401 	lockdep_assert_held(&shrinker_rwsem);
402 
403 	idr_remove(&shrinker_idr, id);
404 }
405 
406 static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
407 				   struct mem_cgroup *memcg)
408 {
409 	struct shrinker_info *info;
410 
411 	info = shrinker_info_protected(memcg, nid);
412 	return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
413 }
414 
415 static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
416 				  struct mem_cgroup *memcg)
417 {
418 	struct shrinker_info *info;
419 
420 	info = shrinker_info_protected(memcg, nid);
421 	return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
422 }
423 
424 void reparent_shrinker_deferred(struct mem_cgroup *memcg)
425 {
426 	int i, nid;
427 	long nr;
428 	struct mem_cgroup *parent;
429 	struct shrinker_info *child_info, *parent_info;
430 
431 	parent = parent_mem_cgroup(memcg);
432 	if (!parent)
433 		parent = root_mem_cgroup;
434 
435 	/* Prevent concurrent shrinker_info expansion */
436 	down_read(&shrinker_rwsem);
437 	for_each_node(nid) {
438 		child_info = shrinker_info_protected(memcg, nid);
439 		parent_info = shrinker_info_protected(parent, nid);
440 		for (i = 0; i < shrinker_nr_max; i++) {
441 			nr = atomic_long_read(&child_info->nr_deferred[i]);
442 			atomic_long_add(nr, &parent_info->nr_deferred[i]);
443 		}
444 	}
445 	up_read(&shrinker_rwsem);
446 }
447 
448 static bool cgroup_reclaim(struct scan_control *sc)
449 {
450 	return sc->target_mem_cgroup;
451 }
452 
453 /**
454  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
455  * @sc: scan_control in question
456  *
457  * The normal page dirty throttling mechanism in balance_dirty_pages() is
458  * completely broken with the legacy memcg and direct stalling in
459  * shrink_page_list() is used for throttling instead, which lacks all the
460  * niceties such as fairness, adaptive pausing, bandwidth proportional
461  * allocation and configurability.
462  *
463  * This function tests whether the vmscan currently in progress can assume
464  * that the normal dirty throttling mechanism is operational.
465  */
466 static bool writeback_throttling_sane(struct scan_control *sc)
467 {
468 	if (!cgroup_reclaim(sc))
469 		return true;
470 #ifdef CONFIG_CGROUP_WRITEBACK
471 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
472 		return true;
473 #endif
474 	return false;
475 }
476 #else
477 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
478 {
479 	return -ENOSYS;
480 }
481 
482 static void unregister_memcg_shrinker(struct shrinker *shrinker)
483 {
484 }
485 
486 static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
487 				   struct mem_cgroup *memcg)
488 {
489 	return 0;
490 }
491 
492 static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
493 				  struct mem_cgroup *memcg)
494 {
495 	return 0;
496 }
497 
498 static bool cgroup_reclaim(struct scan_control *sc)
499 {
500 	return false;
501 }
502 
503 static bool writeback_throttling_sane(struct scan_control *sc)
504 {
505 	return true;
506 }
507 #endif
508 
509 static long xchg_nr_deferred(struct shrinker *shrinker,
510 			     struct shrink_control *sc)
511 {
512 	int nid = sc->nid;
513 
514 	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
515 		nid = 0;
516 
517 	if (sc->memcg &&
518 	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
519 		return xchg_nr_deferred_memcg(nid, shrinker,
520 					      sc->memcg);
521 
522 	return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
523 }
524 
525 
526 static long add_nr_deferred(long nr, struct shrinker *shrinker,
527 			    struct shrink_control *sc)
528 {
529 	int nid = sc->nid;
530 
531 	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
532 		nid = 0;
533 
534 	if (sc->memcg &&
535 	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
536 		return add_nr_deferred_memcg(nr, nid, shrinker,
537 					     sc->memcg);
538 
539 	return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
540 }
541 
542 static bool can_demote(int nid, struct scan_control *sc)
543 {
544 	if (!numa_demotion_enabled)
545 		return false;
546 	if (sc) {
547 		if (sc->no_demotion)
548 			return false;
549 		/* It is pointless to do demotion in memcg reclaim */
550 		if (cgroup_reclaim(sc))
551 			return false;
552 	}
553 	if (next_demotion_node(nid) == NUMA_NO_NODE)
554 		return false;
555 
556 	return true;
557 }
558 
559 static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
560 					  int nid,
561 					  struct scan_control *sc)
562 {
563 	if (memcg == NULL) {
564 		/*
565 		 * For non-memcg reclaim, is there
566 		 * space in any swap device?
567 		 */
568 		if (get_nr_swap_pages() > 0)
569 			return true;
570 	} else {
571 		/* Is the memcg below its swap limit? */
572 		if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
573 			return true;
574 	}
575 
576 	/*
577 	 * The page cannot be swapped.
578 	 *
579 	 * Can it be reclaimed from this node via demotion?
580 	 */
581 	return can_demote(nid, sc);
582 }
583 
584 /*
585  * This misses isolated pages which are not accounted for to save counters.
586  * As the data only determines if reclaim or compaction continues, it is
587  * not expected that isolated pages will be a dominating factor.
588  */
589 unsigned long zone_reclaimable_pages(struct zone *zone)
590 {
591 	unsigned long nr;
592 
593 	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
594 		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
595 	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
596 		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
597 			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
598 
599 	return nr;
600 }
601 
602 /**
603  * lruvec_lru_size -  Returns the number of pages on the given LRU list.
604  * @lruvec: lru vector
605  * @lru: lru to use
606  * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
607  */
608 static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
609 				     int zone_idx)
610 {
611 	unsigned long size = 0;
612 	int zid;
613 
614 	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
615 		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
616 
617 		if (!managed_zone(zone))
618 			continue;
619 
620 		if (!mem_cgroup_disabled())
621 			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
622 		else
623 			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
624 	}
625 	return size;
626 }
627 
628 /*
629  * Add a shrinker callback to be called from the vm.
630  */
631 int prealloc_shrinker(struct shrinker *shrinker)
632 {
633 	unsigned int size;
634 	int err;
635 
636 	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
637 		err = prealloc_memcg_shrinker(shrinker);
638 		if (err != -ENOSYS)
639 			return err;
640 
641 		shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
642 	}
643 
644 	size = sizeof(*shrinker->nr_deferred);
645 	if (shrinker->flags & SHRINKER_NUMA_AWARE)
646 		size *= nr_node_ids;
647 
648 	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
649 	if (!shrinker->nr_deferred)
650 		return -ENOMEM;
651 
652 	return 0;
653 }
654 
655 void free_prealloced_shrinker(struct shrinker *shrinker)
656 {
657 	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
658 		down_write(&shrinker_rwsem);
659 		unregister_memcg_shrinker(shrinker);
660 		up_write(&shrinker_rwsem);
661 		return;
662 	}
663 
664 	kfree(shrinker->nr_deferred);
665 	shrinker->nr_deferred = NULL;
666 }
667 
668 void register_shrinker_prepared(struct shrinker *shrinker)
669 {
670 	down_write(&shrinker_rwsem);
671 	list_add_tail(&shrinker->list, &shrinker_list);
672 	shrinker->flags |= SHRINKER_REGISTERED;
673 	up_write(&shrinker_rwsem);
674 }
675 
676 int register_shrinker(struct shrinker *shrinker)
677 {
678 	int err = prealloc_shrinker(shrinker);
679 
680 	if (err)
681 		return err;
682 	register_shrinker_prepared(shrinker);
683 	return 0;
684 }
685 EXPORT_SYMBOL(register_shrinker);
686 
687 /*
688  * Remove one
689  */
690 void unregister_shrinker(struct shrinker *shrinker)
691 {
692 	if (!(shrinker->flags & SHRINKER_REGISTERED))
693 		return;
694 
695 	down_write(&shrinker_rwsem);
696 	list_del(&shrinker->list);
697 	shrinker->flags &= ~SHRINKER_REGISTERED;
698 	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
699 		unregister_memcg_shrinker(shrinker);
700 	up_write(&shrinker_rwsem);
701 
702 	kfree(shrinker->nr_deferred);
703 	shrinker->nr_deferred = NULL;
704 }
705 EXPORT_SYMBOL(unregister_shrinker);
706 
707 #define SHRINK_BATCH 128
708 
709 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
710 				    struct shrinker *shrinker, int priority)
711 {
712 	unsigned long freed = 0;
713 	unsigned long long delta;
714 	long total_scan;
715 	long freeable;
716 	long nr;
717 	long new_nr;
718 	long batch_size = shrinker->batch ? shrinker->batch
719 					  : SHRINK_BATCH;
720 	long scanned = 0, next_deferred;
721 
722 	trace_android_vh_do_shrink_slab(shrinker, shrinkctl, priority);
723 
724 	freeable = shrinker->count_objects(shrinker, shrinkctl);
725 	if (freeable == 0 || freeable == SHRINK_EMPTY)
726 		return freeable;
727 
728 	/*
729 	 * copy the current shrinker scan count into a local variable
730 	 * and zero it so that other concurrent shrinker invocations
731 	 * don't also do this scanning work.
732 	 */
733 	nr = xchg_nr_deferred(shrinker, shrinkctl);
734 
735 	if (shrinker->seeks) {
736 		delta = freeable >> priority;
737 		delta *= 4;
738 		do_div(delta, shrinker->seeks);
739 	} else {
740 		/*
741 		 * These objects don't require any IO to create. Trim
742 		 * them aggressively under memory pressure to keep
743 		 * them from causing refetches in the IO caches.
744 		 */
745 		delta = freeable / 2;
746 	}
747 
748 	total_scan = nr >> priority;
749 	total_scan += delta;
750 	total_scan = min(total_scan, (2 * freeable));
751 
752 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
753 				   freeable, delta, total_scan, priority);
754 
755 	/*
756 	 * Normally, we should not scan less than batch_size objects in one
757 	 * pass to avoid too frequent shrinker calls, but if the slab has less
758 	 * than batch_size objects in total and we are really tight on memory,
759 	 * we will try to reclaim all available objects, otherwise we can end
760 	 * up failing allocations although there are plenty of reclaimable
761 	 * objects spread over several slabs with usage less than the
762 	 * batch_size.
763 	 *
764 	 * We detect the "tight on memory" situations by looking at the total
765 	 * number of objects we want to scan (total_scan). If it is greater
766 	 * than the total number of objects on slab (freeable), we must be
767 	 * scanning at high prio and therefore should try to reclaim as much as
768 	 * possible.
769 	 */
770 	while (total_scan >= batch_size ||
771 	       total_scan >= freeable) {
772 		unsigned long ret;
773 		unsigned long nr_to_scan = min(batch_size, total_scan);
774 
775 		shrinkctl->nr_to_scan = nr_to_scan;
776 		shrinkctl->nr_scanned = nr_to_scan;
777 		ret = shrinker->scan_objects(shrinker, shrinkctl);
778 		if (ret == SHRINK_STOP)
779 			break;
780 		freed += ret;
781 
782 		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
783 		total_scan -= shrinkctl->nr_scanned;
784 		scanned += shrinkctl->nr_scanned;
785 
786 		cond_resched();
787 	}
788 
789 	/*
790 	 * The deferred work is increased by any new work (delta) that wasn't
791 	 * done, decreased by old deferred work that was done now.
792 	 *
793 	 * And it is capped to two times of the freeable items.
794 	 */
795 	next_deferred = max_t(long, (nr + delta - scanned), 0);
796 	next_deferred = min(next_deferred, (2 * freeable));
797 
798 	/*
799 	 * move the unused scan count back into the shrinker in a
800 	 * manner that handles concurrent updates.
801 	 */
802 	new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);
803 
804 	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
805 	return freed;
806 }
807 
808 #ifdef CONFIG_MEMCG
809 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
810 			struct mem_cgroup *memcg, int priority)
811 {
812 	struct shrinker_info *info;
813 	unsigned long ret, freed = 0;
814 	int i;
815 
816 	if (!mem_cgroup_online(memcg))
817 		return 0;
818 
819 	if (!down_read_trylock(&shrinker_rwsem))
820 		return 0;
821 
822 	info = shrinker_info_protected(memcg, nid);
823 	if (unlikely(!info))
824 		goto unlock;
825 
826 	for_each_set_bit(i, info->map, shrinker_nr_max) {
827 		struct shrink_control sc = {
828 			.gfp_mask = gfp_mask,
829 			.nid = nid,
830 			.memcg = memcg,
831 		};
832 		struct shrinker *shrinker;
833 
834 		shrinker = idr_find(&shrinker_idr, i);
835 		if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) {
836 			if (!shrinker)
837 				clear_bit(i, info->map);
838 			continue;
839 		}
840 
841 		/* Call non-slab shrinkers even though kmem is disabled */
842 		if (!memcg_kmem_enabled() &&
843 		    !(shrinker->flags & SHRINKER_NONSLAB))
844 			continue;
845 
846 		ret = do_shrink_slab(&sc, shrinker, priority);
847 		if (ret == SHRINK_EMPTY) {
848 			clear_bit(i, info->map);
849 			/*
850 			 * After the shrinker reported that it had no objects to
851 			 * free, but before we cleared the corresponding bit in
852 			 * the memcg shrinker map, a new object might have been
853 			 * added. To make sure, we have the bit set in this
854 			 * added. To make sure we have the bit set in this
855 			 * the bit if it reports that it is not empty anymore.
856 			 * The memory barrier here pairs with the barrier in
857 			 * set_shrinker_bit():
858 			 *
859 			 * list_lru_add()     shrink_slab_memcg()
860 			 *   list_add_tail()    clear_bit()
861 			 *   <MB>               <MB>
862 			 *   set_bit()          do_shrink_slab()
863 			 */
864 			smp_mb__after_atomic();
865 			ret = do_shrink_slab(&sc, shrinker, priority);
866 			if (ret == SHRINK_EMPTY)
867 				ret = 0;
868 			else
869 				set_shrinker_bit(memcg, nid, i);
870 		}
871 		freed += ret;
872 
873 		if (rwsem_is_contended(&shrinker_rwsem)) {
874 			freed = freed ? : 1;
875 			break;
876 		}
877 	}
878 unlock:
879 	up_read(&shrinker_rwsem);
880 	return freed;
881 }
882 #else /* CONFIG_MEMCG */
883 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
884 			struct mem_cgroup *memcg, int priority)
885 {
886 	return 0;
887 }
888 #endif /* CONFIG_MEMCG */
889 
890 /**
891  * shrink_slab - shrink slab caches
892  * @gfp_mask: allocation context
893  * @nid: node whose slab caches to target
894  * @memcg: memory cgroup whose slab caches to target
895  * @priority: the reclaim priority
896  *
897  * Call the shrink functions to age shrinkable caches.
898  *
899  * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set;
900  * unaware shrinkers will receive a node id of 0 instead.
901  *
902  * @memcg specifies the memory cgroup to target. Unaware shrinkers
903  * are called only if it is the root cgroup.
904  *
905  * @priority is sc->priority; we take the number of objects and >> by priority
906  * in order to get the scan target.
907  *
908  * Returns the number of reclaimed slab objects.
909  */
910 unsigned long shrink_slab(gfp_t gfp_mask, int nid,
911 				 struct mem_cgroup *memcg,
912 				 int priority)
913 {
914 	unsigned long ret, freed = 0;
915 	struct shrinker *shrinker;
916 	bool bypass = false;
917 
918 	trace_android_vh_shrink_slab_bypass(gfp_mask, nid, memcg, priority, &bypass);
919 	if (bypass)
920 		return 0;
921 
922 	/*
923 	 * The root memcg might be allocated even though memcg is disabled
924 	 * via "cgroup_disable=memory" boot parameter.  This could make
925 	 * mem_cgroup_is_root() return false, then just run memcg slab
926 	 * shrink, but skip global shrink.  This may result in premature
927 	 * oom.
928 	 */
929 	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
930 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
931 
932 	if (!down_read_trylock(&shrinker_rwsem))
933 		goto out;
934 
935 	list_for_each_entry(shrinker, &shrinker_list, list) {
936 		struct shrink_control sc = {
937 			.gfp_mask = gfp_mask,
938 			.nid = nid,
939 			.memcg = memcg,
940 		};
941 
942 		ret = do_shrink_slab(&sc, shrinker, priority);
943 		if (ret == SHRINK_EMPTY)
944 			ret = 0;
945 		freed += ret;
946 		/*
947 		 * Bail out if someone wants to register a new shrinker to
948 		 * prevent the registration from being stalled for long periods
949 		 * by parallel ongoing shrinking.
950 		 */
951 		if (rwsem_is_contended(&shrinker_rwsem)) {
952 			freed = freed ? : 1;
953 			break;
954 		}
955 	}
956 
957 	up_read(&shrinker_rwsem);
958 out:
959 	cond_resched();
960 	return freed;
961 }
962 EXPORT_SYMBOL_GPL(shrink_slab);
963 
964 void drop_slab_node(int nid)
965 {
966 	unsigned long freed;
967 	int shift = 0;
968 
969 	do {
970 		struct mem_cgroup *memcg = NULL;
971 
972 		if (fatal_signal_pending(current))
973 			return;
974 
975 		freed = 0;
976 		memcg = mem_cgroup_iter(NULL, NULL, NULL);
977 		do {
978 			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
979 		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
980 	} while ((freed >> shift++) > 1);
981 }
982 
983 void drop_slab(void)
984 {
985 	int nid;
986 
987 	for_each_online_node(nid)
988 		drop_slab_node(nid);
989 }
990 
991 static inline int is_page_cache_freeable(struct page *page)
992 {
993 	/*
994 	 * A freeable page cache page is referenced only by the caller
995 	 * that isolated the page, the page cache and optional buffer
996 	 * heads at page->private.
997 	 */
998 	int page_cache_pins = thp_nr_pages(page);
999 	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
1000 }
1001 
1002 static int may_write_to_inode(struct inode *inode)
1003 {
1004 	if (current->flags & PF_SWAPWRITE)
1005 		return 1;
1006 	if (!inode_write_congested(inode))
1007 		return 1;
1008 	if (inode_to_bdi(inode) == current->backing_dev_info)
1009 		return 1;
1010 	return 0;
1011 }
1012 
1013 /*
1014  * We detected a synchronous write error writing a page out.  Probably
1015  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
1016  * fsync(), msync() or close().
1017  *
1018  * The tricky part is that after writepage we cannot touch the mapping: nothing
1019  * prevents it from being freed up.  But we have a ref on the page and once
1020  * that page is locked, the mapping is pinned.
1021  *
1022  * We're allowed to run sleeping lock_page() here because we know the caller has
1023  * __GFP_FS.
1024  */
1025 static void handle_write_error(struct address_space *mapping,
1026 				struct page *page, int error)
1027 {
1028 	lock_page(page);
1029 	if (page_mapping(page) == mapping)
1030 		mapping_set_error(mapping, error);
1031 	unlock_page(page);
1032 }
1033 
1034 /* possible outcome of pageout() */
1035 typedef enum {
1036 	/* failed to write page out, page is locked */
1037 	PAGE_KEEP,
1038 	/* move page to the active list, page is locked */
1039 	PAGE_ACTIVATE,
1040 	/* page has been sent to the disk successfully, page is unlocked */
1041 	PAGE_SUCCESS,
1042 	/* page is clean and locked */
1043 	PAGE_CLEAN,
1044 } pageout_t;
1045 
1046 /*
1047  * pageout is called by shrink_page_list() for each dirty page.
1048  * Calls ->writepage().
1049  */
1050 static pageout_t pageout(struct page *page, struct address_space *mapping)
1051 {
1052 	/*
1053 	 * If the page is dirty, only perform writeback if that write
1054 	 * will be non-blocking, to prevent this allocation from being
1055 	 * stalled by pagecache activity.  But note that there may be
1056 	 * stalls if we need to run get_block().  We could test
1057 	 * PagePrivate for that.
1058 	 *
1059 	 * If this process is currently in __generic_file_write_iter() against
1060 	 * this page's queue, we can perform writeback even if that
1061 	 * will block.
1062 	 *
1063 	 * If the page is swapcache, write it back even if that would
1064 	 * block, for some throttling. This happens by accident, because
1065 	 * swap_backing_dev_info is bust: it doesn't reflect the
1066 	 * congestion state of the swapdevs.  Easy to fix, if needed.
1067 	 */
1068 	if (!is_page_cache_freeable(page))
1069 		return PAGE_KEEP;
1070 	if (!mapping) {
1071 		/*
1072 		 * Some data journaling orphaned pages can have
1073 		 * page->mapping == NULL while being dirty with clean buffers.
1074 		 */
1075 		if (page_has_private(page)) {
1076 			if (try_to_free_buffers(page)) {
1077 				ClearPageDirty(page);
1078 				pr_info("%s: orphaned page\n", __func__);
1079 				return PAGE_CLEAN;
1080 			}
1081 		}
1082 		return PAGE_KEEP;
1083 	}
1084 	if (mapping->a_ops->writepage == NULL)
1085 		return PAGE_ACTIVATE;
1086 	if (!may_write_to_inode(mapping->host))
1087 		return PAGE_KEEP;
1088 
1089 	if (clear_page_dirty_for_io(page)) {
1090 		int res;
1091 		struct writeback_control wbc = {
1092 			.sync_mode = WB_SYNC_NONE,
1093 			.nr_to_write = SWAP_CLUSTER_MAX,
1094 			.range_start = 0,
1095 			.range_end = LLONG_MAX,
1096 			.for_reclaim = 1,
1097 		};
1098 
1099 		SetPageReclaim(page);
1100 		res = mapping->a_ops->writepage(page, &wbc);
1101 		if (res < 0)
1102 			handle_write_error(mapping, page, res);
1103 		if (res == AOP_WRITEPAGE_ACTIVATE) {
1104 			ClearPageReclaim(page);
1105 			return PAGE_ACTIVATE;
1106 		}
1107 
1108 		if (!PageWriteback(page)) {
1109 			/* synchronous write or broken a_ops? */
1110 			ClearPageReclaim(page);
1111 		}
1112 		trace_mm_vmscan_writepage(page);
1113 		inc_node_page_state(page, NR_VMSCAN_WRITE);
1114 		return PAGE_SUCCESS;
1115 	}
1116 
1117 	return PAGE_CLEAN;
1118 }
1119 
1120 /*
1121  * Same as remove_mapping, but if the page is removed from the mapping, it
1122  * gets returned with a refcount of 0.
1123  */
1124 static int __remove_mapping(struct address_space *mapping, struct page *page,
1125 			    bool reclaimed, struct mem_cgroup *target_memcg)
1126 {
1127 	int refcount;
1128 	void *shadow = NULL;
1129 
1130 	BUG_ON(!PageLocked(page));
1131 	BUG_ON(mapping != page_mapping(page));
1132 
1133 	xa_lock_irq(&mapping->i_pages);
1134 	/*
1135 	 * The non-racy check for a busy page.
1136 	 *
1137 	 * Must be careful with the order of the tests. When someone has
1138 	 * a ref to the page, it may be possible that they dirty it then
1139 	 * drop the reference. So if PageDirty is tested before page_count
1140 	 * here, then the following race may occur:
1141 	 *
1142 	 * get_user_pages(&page);
1143 	 * [user mapping goes away]
1144 	 * write_to(page);
1145 	 *				!PageDirty(page)    [good]
1146 	 * SetPageDirty(page);
1147 	 * put_page(page);
1148 	 *				!page_count(page)   [good, discard it]
1149 	 *
1150 	 * [oops, our write_to data is lost]
1151 	 *
1152 	 * Reversing the order of the tests ensures such a situation cannot
1153 	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
1154 	 * load is not satisfied before that of page->_refcount.
1155 	 *
1156 	 * Note that if SetPageDirty is always performed via set_page_dirty,
1157 	 * and thus under the i_pages lock, then this ordering is not required.
1158 	 */
1159 	refcount = 1 + compound_nr(page);
1160 	if (!page_ref_freeze(page, refcount))
1161 		goto cannot_free;
1162 	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
1163 	if (unlikely(PageDirty(page))) {
1164 		page_ref_unfreeze(page, refcount);
1165 		goto cannot_free;
1166 	}
1167 
1168 	if (PageSwapCache(page)) {
1169 		swp_entry_t swap = { .val = page_private(page) };
1170 
1171 		/* get a shadow entry before mem_cgroup_swapout() clears page_memcg() */
1172 		if (reclaimed && !mapping_exiting(mapping))
1173 			shadow = workingset_eviction(page, target_memcg);
1174 		mem_cgroup_swapout(page, swap);
1175 		__delete_from_swap_cache(page, swap, shadow);
1176 		xa_unlock_irq(&mapping->i_pages);
1177 		put_swap_page(page, swap);
1178 	} else {
1179 		void (*freepage)(struct page *);
1180 
1181 		freepage = mapping->a_ops->freepage;
1182 		/*
1183 		 * Remember a shadow entry for reclaimed file cache in
1184 		 * order to detect refaults, thus thrashing, later on.
1185 		 *
1186 		 * But don't store shadows in an address space that is
1187 		 * already exiting.  This is not just an optimization,
1188 		 * inode reclaim needs to empty out the radix tree or
1189 		 * the nodes are lost.  Don't plant shadows behind its
1190 		 * back.
1191 		 *
1192 		 * We also don't store shadows for DAX mappings because the
1193 		 * only page cache pages found in these are zero pages
1194 		 * covering holes, and because we don't want to mix DAX
1195 		 * exceptional entries and shadow exceptional entries in the
1196 		 * same address_space.
1197 		 */
1198 		if (reclaimed && page_is_file_lru(page) &&
1199 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
1200 			shadow = workingset_eviction(page, target_memcg);
1201 		__delete_from_page_cache(page, shadow);
1202 		xa_unlock_irq(&mapping->i_pages);
1203 
1204 		if (freepage != NULL)
1205 			freepage(page);
1206 	}
1207 
1208 	return 1;
1209 
1210 cannot_free:
1211 	xa_unlock_irq(&mapping->i_pages);
1212 	return 0;
1213 }
1214 
1215 /*
1216  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
1217  * someone else has a ref on the page, abort and return 0.  If it was
1218  * successfully detached, return 1.  Assumes the caller has a single ref on
1219  * this page.
1220  */
1221 int remove_mapping(struct address_space *mapping, struct page *page)
1222 {
1223 	if (__remove_mapping(mapping, page, false, NULL)) {
1224 		/*
1225 		 * Unfreezing the refcount with 1 rather than 2 effectively
1226 		 * drops the pagecache ref for us without requiring another
1227 		 * atomic operation.
1228 		 */
1229 		page_ref_unfreeze(page, 1);
1230 		return 1;
1231 	}
1232 	return 0;
1233 }
1234 
1235 /**
1236  * putback_lru_page - put previously isolated page onto appropriate LRU list
1237  * @page: page to be put back to appropriate lru list
1238  *
1239  * Add previously isolated @page to appropriate LRU list.
1240  * Page may still be unevictable for other reasons.
1241  *
1242  * lru_lock must not be held, interrupts must be enabled.
1243  */
1244 void putback_lru_page(struct page *page)
1245 {
1246 	lru_cache_add(page);
1247 	put_page(page);		/* drop ref from isolate */
1248 }
1249 
1250 enum page_references {
1251 	PAGEREF_RECLAIM,
1252 	PAGEREF_RECLAIM_CLEAN,
1253 	PAGEREF_KEEP,
1254 	PAGEREF_ACTIVATE,
1255 };
1256 
1257 static enum page_references page_check_references(struct page *page,
1258 						  struct scan_control *sc)
1259 {
1260 	int referenced_ptes, referenced_page;
1261 	unsigned long vm_flags;
1262 	bool should_protect = false;
1263 	bool trylock_fail = false;
1264 	int ret = 0;
1265 
1266 	trace_android_vh_page_should_be_protected(page, &should_protect);
1267 	if (unlikely(should_protect))
1268 		return PAGEREF_ACTIVATE;
1269 
1270 	trace_android_vh_page_trylock_set(page);
1271 	trace_android_vh_check_page_look_around_ref(page, &ret);
1272 	if (ret)
1273 		return ret;
1274 	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
1275 					  &vm_flags);
1276 	referenced_page = TestClearPageReferenced(page);
1277 	trace_android_vh_page_trylock_get_result(page, &trylock_fail);
1278 	if (trylock_fail)
1279 		return PAGEREF_KEEP;
1280 	/*
1281 	 * Mlock lost the isolation race with us.  Let try_to_unmap()
1282 	 * move the page to the unevictable list.
1283 	 */
1284 	if (vm_flags & VM_LOCKED)
1285 		return PAGEREF_RECLAIM;
1286 
1287 	/* rmap lock contention: rotate */
1288 	if (referenced_ptes == -1)
1289 		return PAGEREF_KEEP;
1290 
1291 	if (referenced_ptes) {
1292 		/*
1293 		 * All mapped pages start out with page table
1294 		 * references from the instantiating fault, so we need
1295 		 * to look twice if a mapped file page is used more
1296 		 * than once.
1297 		 *
1298 		 * Mark it and spare it for another trip around the
1299 		 * inactive list.  Another page table reference will
1300 		 * lead to its activation.
1301 		 *
1302 		 * Note: the mark is set for activated pages as well
1303 		 * so that recently deactivated but used pages are
1304 		 * quickly recovered.
1305 		 */
1306 		SetPageReferenced(page);
1307 
1308 		if (referenced_page || referenced_ptes > 1)
1309 			return PAGEREF_ACTIVATE;
1310 
1311 		/*
1312 		 * Activate file-backed executable pages after first usage.
1313 		 */
1314 		if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
1315 			return PAGEREF_ACTIVATE;
1316 
1317 		return PAGEREF_KEEP;
1318 	}
1319 
1320 	/* Reclaim if clean, defer dirty pages to writeback */
1321 	if (referenced_page && !PageSwapBacked(page))
1322 		return PAGEREF_RECLAIM_CLEAN;
1323 
1324 	return PAGEREF_RECLAIM;
1325 }
1326 
1327 /* Check if a page is dirty or under writeback */
1328 static void page_check_dirty_writeback(struct page *page,
1329 				       bool *dirty, bool *writeback)
1330 {
1331 	struct address_space *mapping;
1332 
1333 	/*
1334 	 * Anonymous pages are not handled by flushers and must be written
1335 	 * from reclaim context. Do not stall reclaim based on them
1336 	 */
1337 	if (!page_is_file_lru(page) ||
1338 	    (PageAnon(page) && !PageSwapBacked(page))) {
1339 		*dirty = false;
1340 		*writeback = false;
1341 		return;
1342 	}
1343 
1344 	/* By default assume that the page flags are accurate */
1345 	*dirty = PageDirty(page);
1346 	*writeback = PageWriteback(page);
1347 
1348 	/* Verify dirty/writeback state if the filesystem supports it */
1349 	if (!page_has_private(page))
1350 		return;
1351 
1352 	mapping = page_mapping(page);
1353 	if (mapping && mapping->a_ops->is_dirty_writeback)
1354 		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
1355 }
1356 
1357 static struct page *alloc_demote_page(struct page *page, unsigned long node)
1358 {
1359 	struct migration_target_control mtc = {
1360 		/*
1361 		 * Allocate from 'node', or fail quickly and quietly.
1362 		 * When this happens, 'page' will likely just be discarded
1363 		 * instead of migrated.
1364 		 */
1365 		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
1366 			    __GFP_THISNODE  | __GFP_NOWARN |
1367 			    __GFP_NOMEMALLOC | GFP_NOWAIT,
1368 		.nid = node
1369 	};
1370 
1371 	return alloc_migration_target(page, (unsigned long)&mtc);
1372 }
1373 
1374 /*
1375  * Take pages on @demote_list and attempt to demote them to
1376  * another node.  Pages which are not demoted are left on
1377  * @demote_pages.
1378  */
1379 static unsigned int demote_page_list(struct list_head *demote_pages,
1380 				     struct pglist_data *pgdat)
1381 {
1382 	int target_nid = next_demotion_node(pgdat->node_id);
1383 	unsigned int nr_succeeded;
1384 	int err;
1385 
1386 	if (list_empty(demote_pages))
1387 		return 0;
1388 
1389 	if (target_nid == NUMA_NO_NODE)
1390 		return 0;
1391 
1392 	/* Demotion ignores all cpuset and mempolicy settings */
1393 	err = migrate_pages(demote_pages, alloc_demote_page, NULL,
1394 			    target_nid, MIGRATE_ASYNC, MR_DEMOTION,
1395 			    &nr_succeeded);
1396 
1397 	if (current_is_kswapd())
1398 		__count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded);
1399 	else
1400 		__count_vm_events(PGDEMOTE_DIRECT, nr_succeeded);
1401 
1402 	return nr_succeeded;
1403 }
1404 
1405 /*
1406  * shrink_page_list() returns the number of reclaimed pages
1407  */
1408 static unsigned int shrink_page_list(struct list_head *page_list,
1409 				     struct pglist_data *pgdat,
1410 				     struct scan_control *sc,
1411 				     struct reclaim_stat *stat,
1412 				     bool ignore_references)
1413 {
1414 	LIST_HEAD(ret_pages);
1415 	LIST_HEAD(free_pages);
1416 	LIST_HEAD(demote_pages);
1417 	unsigned int nr_reclaimed = 0;
1418 	unsigned int pgactivate = 0;
1419 	bool do_demote_pass;
1420 	bool page_trylock_result;
1421 
1422 	memset(stat, 0, sizeof(*stat));
1423 	cond_resched();
1424 	do_demote_pass = can_demote(pgdat->node_id, sc);
1425 
1426 retry:
1427 	while (!list_empty(page_list)) {
1428 		struct address_space *mapping;
1429 		struct page *page;
1430 		enum page_references references = PAGEREF_RECLAIM;
1431 		bool dirty, writeback, may_enter_fs;
1432 		unsigned int nr_pages;
1433 
1434 		cond_resched();
1435 
1436 		page = lru_to_page(page_list);
1437 		list_del(&page->lru);
1438 
1439 		if (!trylock_page(page))
1440 			goto keep;
1441 
1442 		VM_BUG_ON_PAGE(PageActive(page), page);
1443 
1444 		nr_pages = compound_nr(page);
1445 
1446 		/* Account the number of base pages even though THP */
1447 		sc->nr_scanned += nr_pages;
1448 
1449 		if (unlikely(!page_evictable(page)))
1450 			goto activate_locked;
1451 
1452 		if (!sc->may_unmap && page_mapped(page))
1453 			goto keep_locked;
1454 
1455 		/* page_update_gen() tried to promote this page? */
1456 		if (lru_gen_enabled() && !ignore_references &&
1457 		    page_mapped(page) && PageReferenced(page))
1458 			goto keep_locked;
1459 
1460 		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
1461 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
1462 
1463 		/*
1464 		 * The number of dirty pages determines if a node is marked
1465 		 * reclaim_congested which affects wait_iff_congested. kswapd
1466 		 * will stall and start writing pages if the tail of the LRU
1467 		 * is all dirty unqueued pages.
1468 		 */
1469 		page_check_dirty_writeback(page, &dirty, &writeback);
1470 		if (dirty || writeback)
1471 			stat->nr_dirty++;
1472 
1473 		if (dirty && !writeback)
1474 			stat->nr_unqueued_dirty++;
1475 
1476 		/*
1477 		 * Treat this page as congested if the underlying BDI is or if
1478 		 * pages are cycling through the LRU so quickly that the
1479 		 * pages marked for immediate reclaim are making it to the
1480 		 * end of the LRU a second time.
1481 		 */
1482 		mapping = page_mapping(page);
1483 		if (((dirty || writeback) && mapping &&
1484 		     inode_write_congested(mapping->host)) ||
1485 		    (writeback && PageReclaim(page)))
1486 			stat->nr_congested++;
1487 
1488 		/*
1489 		 * If a page at the tail of the LRU is under writeback, there
1490 		 * are three cases to consider.
1491 		 *
1492 		 * 1) If reclaim is encountering an excessive number of pages
1493 		 *    under writeback and this page is both under writeback and
1494 		 *    PageReclaim then it indicates that pages are being queued
1495 		 *    for IO but are being recycled through the LRU before the
1496 		 *    IO can complete. Waiting on the page itself risks an
1497 		 *    indefinite stall if it is impossible to writeback the
1498 		 *    page due to IO error or disconnected storage so instead
1499 		 *    note that the LRU is being scanned too quickly and the
1500 		 *    caller can stall after page list has been processed.
1501 		 *
1502 		 * 2) Global or new memcg reclaim encounters a page that is
1503 		 *    not marked for immediate reclaim, or the caller does not
1504 		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
1505 		 *    not to fs). In this case mark the page for immediate
1506 		 *    reclaim and continue scanning.
1507 		 *
1508 		 *    Require may_enter_fs because we would wait on fs, which
1509 		 *    may not have submitted IO yet. And the loop driver might
1510 		 *    enter reclaim, and deadlock if it waits on a page for
1511 		 *    which it is needed to do the write (loop masks off
1512 		 *    __GFP_IO|__GFP_FS for this reason); but more thought
1513 		 *    would probably show more reasons.
1514 		 *
1515 		 * 3) Legacy memcg encounters a page that is already marked
1516 		 *    PageReclaim. memcg does not have any dirty pages
1517 		 *    throttling so we could easily OOM just because too many
1518 		 *    pages are in writeback and there is nothing else to
1519 		 *    reclaim. Wait for the writeback to complete.
1520 		 *
1521 		 * In cases 1) and 2) we activate the pages to get them out of
1522 		 * the way while we continue scanning for clean pages on the
1523 		 * inactive list and refilling from the active list. The
1524 		 * observation here is that waiting for disk writes is more
1525 		 * expensive than potentially causing reloads down the line.
1526 		 * Since they're marked for immediate reclaim, they won't put
1527 		 * memory pressure on the cache working set any longer than it
1528 		 * takes to write them to disk.
1529 		 */
1530 		if (PageWriteback(page)) {
1531 			/* Case 1 above */
1532 			if (current_is_kswapd() &&
1533 			    PageReclaim(page) &&
1534 			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1535 				stat->nr_immediate++;
1536 				goto activate_locked;
1537 
1538 			/* Case 2 above */
1539 			} else if (writeback_throttling_sane(sc) ||
1540 			    !PageReclaim(page) || !may_enter_fs) {
1541 				/*
1542 				 * This is slightly racy - end_page_writeback()
1543 				 * might have just cleared PageReclaim, then
1544 				 * setting PageReclaim here end up interpreted
1545 				 * as PageReadahead - but that does not matter
1546 				 * enough to care.  What we do want is for this
1547 				 * page to have PageReclaim set next time memcg
1548 				 * reclaim reaches the tests above, so it will
1549 				 * then wait_on_page_writeback() to avoid OOM;
1550 				 * and it's also appropriate in global reclaim.
1551 				 */
1552 				SetPageReclaim(page);
1553 				stat->nr_writeback++;
1554 				goto activate_locked;
1555 
1556 			/* Case 3 above */
1557 			} else {
1558 				unlock_page(page);
1559 				wait_on_page_writeback(page);
1560 				/* then go back and try same page again */
1561 				list_add_tail(&page->lru, page_list);
1562 				continue;
1563 			}
1564 		}
1565 
1566 		if (!ignore_references)
1567 			references = page_check_references(page, sc);
1568 
1569 		switch (references) {
1570 		case PAGEREF_ACTIVATE:
1571 			goto activate_locked;
1572 		case PAGEREF_KEEP:
1573 			stat->nr_ref_keep += nr_pages;
1574 			goto keep_locked;
1575 		case PAGEREF_RECLAIM:
1576 		case PAGEREF_RECLAIM_CLEAN:
1577 			; /* try to reclaim the page below */
1578 		}
1579 
1580 		/*
1581 		 * Before reclaiming the page, try to relocate
1582 		 * its contents to another node.
1583 		 */
1584 		if (do_demote_pass &&
1585 		    (thp_migration_supported() || !PageTransHuge(page))) {
1586 			list_add(&page->lru, &demote_pages);
1587 			unlock_page(page);
1588 			continue;
1589 		}
1590 
1591 		/*
1592 		 * Anonymous process memory has backing store?
1593 		 * Try to allocate it some swap space here.
1594 		 * Lazyfree page could be freed directly
1595 		 */
1596 		if (PageAnon(page) && PageSwapBacked(page)) {
1597 			if (!PageSwapCache(page)) {
1598 				if (!(sc->gfp_mask & __GFP_IO))
1599 					goto keep_locked;
1600 				if (page_maybe_dma_pinned(page))
1601 					goto keep_locked;
1602 				if (PageTransHuge(page)) {
1603 					/* cannot split THP, skip it */
1604 					if (!can_split_huge_page(page, NULL))
1605 						goto activate_locked;
1606 					/*
1607 					 * Split pages without a PMD map right
1608 					 * away. Chances are some or all of the
1609 					 * tail pages can be freed without IO.
1610 					 */
1611 					if (!compound_mapcount(page) &&
1612 					    split_huge_page_to_list(page,
1613 								    page_list))
1614 						goto activate_locked;
1615 				}
1616 				if (!add_to_swap(page)) {
1617 					if (!PageTransHuge(page))
1618 						goto activate_locked_split;
1619 					/* Fallback to swap normal pages */
1620 					if (split_huge_page_to_list(page,
1621 								    page_list))
1622 						goto activate_locked;
1623 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1624 					count_vm_event(THP_SWPOUT_FALLBACK);
1625 #endif
1626 					if (!add_to_swap(page))
1627 						goto activate_locked_split;
1628 				}
1629 
1630 				may_enter_fs = true;
1631 
1632 				/* Adding to swap updated mapping */
1633 				mapping = page_mapping(page);
1634 			}
1635 		} else if (unlikely(PageTransHuge(page))) {
1636 			/* Split file THP */
1637 			if (split_huge_page_to_list(page, page_list))
1638 				goto keep_locked;
1639 		}
1640 
1641 		/*
1642 		 * THP may get split above, need minus tail pages and update
1643 		 * nr_pages to avoid accounting tail pages twice.
1644 		 *
1645 		 * The tail pages that are added into swap cache successfully
1646 		 * reach here.
1647 		 */
1648 		if ((nr_pages > 1) && !PageTransHuge(page)) {
1649 			sc->nr_scanned -= (nr_pages - 1);
1650 			nr_pages = 1;
1651 		}
1652 
1653 		/*
1654 		 * The page is mapped into the page tables of one or more
1655 		 * processes. Try to unmap it here.
1656 		 */
1657 		if (page_mapped(page)) {
1658 			enum ttu_flags flags = TTU_BATCH_FLUSH;
1659 			bool was_swapbacked = PageSwapBacked(page);
1660 
1661 			if (unlikely(PageTransHuge(page)))
1662 				flags |= TTU_SPLIT_HUGE_PMD;
1663 			if (!ignore_references)
1664 				trace_android_vh_page_trylock_set(page);
1665 			try_to_unmap(page, flags);
1666 			if (page_mapped(page)) {
1667 				stat->nr_unmap_fail += nr_pages;
1668 				if (!was_swapbacked && PageSwapBacked(page))
1669 					stat->nr_lazyfree_fail += nr_pages;
1670 				goto activate_locked;
1671 			}
1672 		}
1673 
1674 		if (PageDirty(page)) {
1675 			/*
1676 			 * Only kswapd can writeback filesystem pages
1677 			 * to avoid risk of stack overflow. But avoid
1678 			 * injecting inefficient single-page IO into
1679 			 * flusher writeback as much as possible: only
1680 			 * write pages when we've encountered many
1681 			 * dirty pages, and when we've already scanned
1682 			 * the rest of the LRU for clean pages and see
1683 			 * the same dirty pages again (PageReclaim).
1684 			 */
1685 			if (page_is_file_lru(page) &&
1686 			    (!current_is_kswapd() || !PageReclaim(page) ||
1687 			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1688 				/*
1689 				 * Immediately reclaim when written back.
1690 			 * Similar in principle to deactivate_page()
1691 				 * except we already have the page isolated
1692 				 * and know it's dirty
1693 				 */
1694 				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1695 				SetPageReclaim(page);
1696 
1697 				goto activate_locked;
1698 			}
1699 
1700 			if (references == PAGEREF_RECLAIM_CLEAN)
1701 				goto keep_locked;
1702 			if (!may_enter_fs)
1703 				goto keep_locked;
1704 			if (!sc->may_writepage)
1705 				goto keep_locked;
1706 
1707 			/*
1708 			 * Page is dirty. Flush the TLB if a writable entry
1709 			 * potentially exists to avoid CPU writes after IO
1710 			 * starts and then write it out here.
1711 			 */
1712 			try_to_unmap_flush_dirty();
1713 			switch (pageout(page, mapping)) {
1714 			case PAGE_KEEP:
1715 				goto keep_locked;
1716 			case PAGE_ACTIVATE:
1717 				goto activate_locked;
1718 			case PAGE_SUCCESS:
1719 				stat->nr_pageout += thp_nr_pages(page);
1720 
1721 				if (PageWriteback(page))
1722 					goto keep;
1723 				if (PageDirty(page))
1724 					goto keep;
1725 
1726 				/*
1727 				 * A synchronous write - probably a ramdisk.  Go
1728 				 * ahead and try to reclaim the page.
1729 				 */
1730 				if (!trylock_page(page))
1731 					goto keep;
1732 				if (PageDirty(page) || PageWriteback(page))
1733 					goto keep_locked;
1734 				mapping = page_mapping(page);
1735 				fallthrough;
1736 			case PAGE_CLEAN:
1737 				; /* try to free the page below */
1738 			}
1739 		}
1740 
1741 		/*
1742 		 * If the page has buffers, try to free the buffer mappings
1743 		 * associated with this page. If we succeed we try to free
1744 		 * the page as well.
1745 		 *
1746 		 * We do this even if the page is PageDirty().
1747 		 * try_to_release_page() does not perform I/O, but it is
1748 		 * possible for a page to have PageDirty set, but it is actually
1749 		 * clean (all its buffers are clean).  This happens if the
1750 		 * buffers were written out directly, with submit_bh(). ext3
1751 		 * will do this, as well as the blockdev mapping.
1752 		 * try_to_release_page() will discover that cleanness and will
1753 		 * drop the buffers and mark the page clean - it can be freed.
1754 		 *
1755 		 * Rarely, pages can have buffers and no ->mapping.  These are
1756 		 * the pages which were not successfully invalidated in
1757 		 * truncate_cleanup_page().  We try to drop those buffers here
1758 		 * and if that worked, and the page is no longer mapped into
1759 		 * process address space (page_count == 1) it can be freed.
1760 		 * Otherwise, leave the page on the LRU so it is swappable.
1761 		 */
1762 		if (page_has_private(page)) {
1763 			if (!try_to_release_page(page, sc->gfp_mask))
1764 				goto activate_locked;
1765 			if (!mapping && page_count(page) == 1) {
1766 				unlock_page(page);
1767 				if (put_page_testzero(page))
1768 					goto free_it;
1769 				else {
1770 					/*
1771 					 * rare race with speculative reference.
1772 					 * the speculative reference will free
1773 					 * this page shortly, so we may
1774 					 * increment nr_reclaimed here (and
1775 					 * leave it off the LRU).
1776 					 */
1777 					trace_android_vh_page_trylock_clear(page);
1778 					nr_reclaimed++;
1779 					continue;
1780 				}
1781 			}
1782 		}
1783 
1784 		if (PageAnon(page) && !PageSwapBacked(page)) {
1785 			/* follow __remove_mapping for reference */
1786 			if (!page_ref_freeze(page, 1))
1787 				goto keep_locked;
1788 			/*
1789 			 * The page has only one reference left, which is
1790 			 * from the isolation. After the caller puts the
1791 			 * page back on lru and drops the reference, the
1792 			 * page will be freed anyway. It doesn't matter
1793 			 * which lru it goes. So we don't bother checking
1794 			 * PageDirty here.
1795 			 */
1796 			count_vm_event(PGLAZYFREED);
1797 			count_memcg_page_event(page, PGLAZYFREED);
1798 		} else if (!mapping || !__remove_mapping(mapping, page, true,
1799 							 sc->target_mem_cgroup))
1800 			goto keep_locked;
1801 
1802 		unlock_page(page);
1803 free_it:
1804 		/*
1805 		 * A THP may get swapped out as a whole, so account
1806 		 * for all base pages.
1807 		 */
1808 		nr_reclaimed += nr_pages;
1809 
1810 		/*
1811 		 * Is there need to periodically free_page_list? It would
1812 		 * appear not as the counts should be low
1813 		 */
1814 		trace_android_vh_page_trylock_clear(page);
1815 		if (unlikely(PageTransHuge(page)))
1816 			destroy_compound_page(page);
1817 		else
1818 			list_add(&page->lru, &free_pages);
1819 		continue;
1820 
1821 activate_locked_split:
1822 		/*
1823 		 * Tail pages that failed to be added to the swap cache
1824 		 * reach here.  Fix up nr_scanned and nr_pages.
1825 		 */
1826 		if (nr_pages > 1) {
1827 			sc->nr_scanned -= (nr_pages - 1);
1828 			nr_pages = 1;
1829 		}
1830 activate_locked:
1831 		/* Not a candidate for swapping, so reclaim swap space. */
1832 		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1833 						PageMlocked(page)))
1834 			try_to_free_swap(page);
1835 		VM_BUG_ON_PAGE(PageActive(page), page);
1836 		if (!PageMlocked(page)) {
1837 			int type = page_is_file_lru(page);
1838 			SetPageActive(page);
1839 			stat->nr_activate[type] += nr_pages;
1840 			count_memcg_page_event(page, PGACTIVATE);
1841 		}
1842 keep_locked:
1843 		/*
1844 		 * Pages with the trylock bit set are added to ret_pages and
1845 		 * handled in trace_android_vh_handle_failed_page_trylock.
1846 		 * A page that still carries the trylock bit after being
1847 		 * unlocked by shrink_page_list can cause problems in other
1848 		 * places, so clear the trylock bit here.
1849 		 * trace_android_vh_page_trylock_get_result clears the trylock
1850 		 * bit and reports whether the page trylock failed during
1851 		 * reclaim.  Here we only want to clear the trylock bit, so
1852 		 * page_trylock_result is ignored.
1853 		 * TODO: trace_android_vh_page_trylock_get_result should be
1854 		 * changed to a different hook which correctly reflects the
1855 		 * usage here, which is to clear the trylock bit.
1856 		 */
1857 		trace_android_vh_page_trylock_get_result(page, &page_trylock_result);
1858 		unlock_page(page);
1859 keep:
1860 		list_add(&page->lru, &ret_pages);
1861 		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1862 	}
1863 	/* 'page_list' is always empty here */
1864 
1865 	/* Migrate pages selected for demotion */
1866 	nr_reclaimed += demote_page_list(&demote_pages, pgdat);
1867 	/* Pages that could not be demoted are still in @demote_pages */
1868 	if (!list_empty(&demote_pages)) {
1869 		/* Pages which failed to be demoted go back on @page_list for retry: */
1870 		list_splice_init(&demote_pages, page_list);
1871 		do_demote_pass = false;
1872 		goto retry;
1873 	}
1874 
1875 	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1876 
1877 	mem_cgroup_uncharge_list(&free_pages);
1878 	try_to_unmap_flush();
1879 	free_unref_page_list(&free_pages);
1880 
1881 	list_splice(&ret_pages, page_list);
1882 	count_vm_events(PGACTIVATE, pgactivate);
1883 
1884 	return nr_reclaimed;
1885 }
1886 
1887 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1888 					    struct list_head *page_list)
1889 {
1890 	struct scan_control sc = {
1891 		.gfp_mask = GFP_KERNEL,
1892 		.may_unmap = 1,
1893 	};
1894 	struct reclaim_stat stat;
1895 	unsigned int nr_reclaimed;
1896 	struct page *page, *next;
1897 	LIST_HEAD(clean_pages);
1898 	unsigned int noreclaim_flag;
1899 
1900 	list_for_each_entry_safe(page, next, page_list, lru) {
1901 		if (!PageHuge(page) && page_is_file_lru(page) &&
1902 		    !PageDirty(page) && !__PageMovable(page) &&
1903 		    !PageUnevictable(page)) {
1904 			ClearPageActive(page);
1905 			list_move(&page->lru, &clean_pages);
1906 		}
1907 	}
1908 
1909 	/*
1910 	 * We should be safe here since we are only dealing with file pages and
1911 	 * we are not kswapd and therefore cannot write dirty file pages. But
1912 	 * call memalloc_noreclaim_save() anyway, just in case these conditions
1913 	 * change in the future.
1914 	 */
1915 	noreclaim_flag = memalloc_noreclaim_save();
1916 	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1917 					&stat, true);
1918 	memalloc_noreclaim_restore(noreclaim_flag);
1919 
1920 	list_splice(&clean_pages, page_list);
1921 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1922 			    -(long)nr_reclaimed);
1923 	/*
1924 	 * Since lazyfree pages are isolated from the file LRU from the
1925 	 * beginning, they will rotate back to the anonymous LRU in the end
1926 	 * if the discard fails, so the isolated counts would be mismatched.
1927 	 * Compensate the isolated count for both LRU lists.
1928 	 */
1929 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
1930 			    stat.nr_lazyfree_fail);
1931 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1932 			    -(long)stat.nr_lazyfree_fail);
1933 	return nr_reclaimed;
1934 }
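/*
 * Worked example with illustrative numbers for the accounting above:
 * suppose 100 clean file pages were isolated and 10 of them were lazyfree
 * pages whose discard failed, with the other 90 reclaimed by
 * shrink_page_list().  NR_ISOLATED_FILE is first decreased by 90; the 10
 * failures are then shifted from the file to the anon isolated counter
 * (NR_ISOLATED_ANON += 10, NR_ISOLATED_FILE -= 10), so each counter
 * matches the LRU list the remaining pages will actually be put back on.
 */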
1935 
1936 /*
1937  * Update LRU sizes after isolating pages. The LRU size updates must
1938  * be complete before mem_cgroup_update_lru_size due to a sanity check.
1939  */
1940 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1941 			enum lru_list lru, unsigned long *nr_zone_taken)
1942 {
1943 	int zid;
1944 
1945 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1946 		if (!nr_zone_taken[zid])
1947 			continue;
1948 
1949 		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1950 	}
1951 
1952 }
1953 
1954 #ifdef CONFIG_CMA
1955 /*
1956  * It is a waste of effort to scan and reclaim CMA pages if they are not
1957  * available to the current allocation context. Kswapd cannot be enrolled
1958  * here because, with sc->gfp_mask = GFP_KERNEL, it cannot distinguish this scenario.
1959  */
1960 static bool skip_cma(struct page *page, struct scan_control *sc)
1961 {
1962 	return !current_is_kswapd() &&
1963 			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
1964 			get_pageblock_migratetype(page) == MIGRATE_CMA;
1965 }
1966 #else
1967 static bool skip_cma(struct page *page, struct scan_control *sc)
1968 {
1969 	return false;
1970 }
1971 #endif
1972 
1973 /*
1974  * Isolate pages from the lruvec to fill the @dst list, scanning up to nr_to_scan pages.
1975  *
1976  * lruvec->lru_lock is heavily contended.  Some of the functions that
1977  * shrink the lists perform better by taking out a batch of pages
1978  * and working on them outside the LRU lock.
1979  *
1980  * For pagecache intensive workloads, this function is the hottest
1981  * spot in the kernel (apart from copy_*_user functions).
1982  *
1983  * Lru_lock must be held before calling this function.
1984  *
1985  * @nr_to_scan:	The number of eligible pages to look through on the list.
1986  * @lruvec:	The LRU vector to pull pages from.
1987  * @dst:	The temp list to put pages on to.
1988  * @nr_scanned:	The number of pages that were scanned.
1989  * @sc:		The scan_control struct for this reclaim session
1990  * @lru:	LRU list id for isolating
1991  *
1992  * returns how many pages were moved onto *@dst.
1993  */
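/*
 * A minimal usage sketch, mirroring shrink_inactive_list() below: the
 * caller takes lru_lock, isolates a batch onto a private list, accounts
 * the batch as isolated, and only then drops the lock to work on it:
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
 *				     &nr_scanned, sc, lru);
 *	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 *	spin_unlock_irq(&lruvec->lru_lock);
 */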
1994 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1995 		struct lruvec *lruvec, struct list_head *dst,
1996 		unsigned long *nr_scanned, struct scan_control *sc,
1997 		enum lru_list lru)
1998 {
1999 	struct list_head *src = &lruvec->lists[lru];
2000 	unsigned long nr_taken = 0;
2001 	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
2002 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
2003 	unsigned long skipped = 0;
2004 	unsigned long scan, total_scan, nr_pages;
2005 	LIST_HEAD(pages_skipped);
2006 
2007 	total_scan = 0;
2008 	scan = 0;
2009 	while (scan < nr_to_scan && !list_empty(src)) {
2010 		struct list_head *move_to = src;
2011 		struct page *page;
2012 
2013 		page = lru_to_page(src);
2014 		prefetchw_prev_lru_page(page, src, flags);
2015 
2016 		nr_pages = compound_nr(page);
2017 		total_scan += nr_pages;
2018 
2019 		if (page_zonenum(page) > sc->reclaim_idx ||
2020 				skip_cma(page, sc)) {
2021 			nr_skipped[page_zonenum(page)] += nr_pages;
2022 			move_to = &pages_skipped;
2023 			goto move;
2024 		}
2025 
2026 		/*
2027 		 * Do not count skipped pages because that makes the function
2028 		 * return with no isolated pages if the LRU mostly contains
2029 		 * ineligible pages.  This causes the VM to not reclaim any
2030 		 * pages, triggering a premature OOM.
2031 		 * Account all tail pages of THP.
2032 		 */
2033 		scan += nr_pages;
2034 
2035 		if (!PageLRU(page))
2036 			goto move;
2037 		if (!sc->may_unmap && page_mapped(page))
2038 			goto move;
2039 
2040 		/*
2041 		 * Be careful not to clear PageLRU until after we're
2042 		 * sure the page is not being freed elsewhere -- the
2043 		 * page release code relies on it.
2044 		 */
2045 		if (unlikely(!get_page_unless_zero(page)))
2046 			goto move;
2047 
2048 		if (!TestClearPageLRU(page)) {
2049 			/* Another thread is already isolating this page */
2050 			put_page(page);
2051 			goto move;
2052 		}
2053 
2054 		nr_taken += nr_pages;
2055 		nr_zone_taken[page_zonenum(page)] += nr_pages;
2056 		move_to = dst;
2057 move:
2058 		list_move(&page->lru, move_to);
2059 		trace_android_vh_del_page_from_lrulist(page, false, lru);
2060 	}
2061 
2062 	/*
2063 	 * Splice any skipped pages to the start of the LRU list. Note that
2064 	 * this disrupts the LRU order when reclaiming for lower zones but
2065 	 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
2066 	 * scanning would soon rescan the same pages to skip and put the
2067 	 * system at risk of premature OOM.
2068 	 */
2069 	if (!list_empty(&pages_skipped)) {
2070 		int zid;
2071 
2072 		list_splice(&pages_skipped, src);
2073 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2074 			if (!nr_skipped[zid])
2075 				continue;
2076 
2077 			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
2078 			skipped += nr_skipped[zid];
2079 		}
2080 	}
2081 	*nr_scanned = total_scan;
2082 	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
2083 				    total_scan, skipped, nr_taken,
2084 				    sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru);
2085 	update_lru_sizes(lruvec, lru, nr_zone_taken);
2086 	return nr_taken;
2087 }
2088 
2089 /**
2090  * isolate_lru_page - tries to isolate a page from its LRU list
2091  * @page: page to isolate from its LRU list
2092  *
2093  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
2094  * vmstat statistic corresponding to whatever LRU list the page was on.
2095  *
2096  * Returns 0 if the page was removed from an LRU list.
2097  * Returns -EBUSY if the page was not on an LRU list.
2098  *
2099  * The returned page will have PageLRU() cleared.  If it was found on
2100  * the active list, it will have PageActive set.  If it was found on
2101  * the unevictable list, it will have the PageUnevictable bit set. That flag
2102  * may need to be cleared by the caller before letting the page go.
2103  *
2104  * The vmstat statistic corresponding to the list on which the page was
2105  * found will be decremented.
2106  *
2107  * Restrictions:
2108  *
2109  * (1) Must be called with an elevated refcount on the page. This is a
2110  *     fundamental difference from isolate_lru_pages (which is called
2111  *     without a stable reference).
2112  * (2) the lru_lock must not be held.
2113  * (3) interrupts must be enabled.
2114  */
2115 int isolate_lru_page(struct page *page)
2116 {
2117 	int ret = -EBUSY;
2118 
2119 	VM_BUG_ON_PAGE(!page_count(page), page);
2120 	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
2121 
2122 	if (TestClearPageLRU(page)) {
2123 		struct lruvec *lruvec;
2124 
2125 		get_page(page);
2126 		lruvec = lock_page_lruvec_irq(page);
2127 		del_page_from_lru_list(page, lruvec);
2128 		unlock_page_lruvec_irq(lruvec);
2129 		ret = 0;
2130 	}
2131 
2132 	return ret;
2133 }
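/*
 * A minimal usage sketch (hypothetical caller): per restriction (1) the
 * caller already holds a reference on the page, and hands the page back
 * to the LRU when it is done with it:
 *
 *	if (!isolate_lru_page(page)) {
 *		... the page is off the LRU and PageLRU is clear ...
 *		putback_lru_page(page);
 *	}
 */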
2134 
2135 /*
2136  * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
2137  * then get rescheduled. When there is a massive number of tasks doing page
2138  * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
2139  * the LRU list will shrink and be scanned faster than necessary, leading to
2140  * unnecessary swapping, thrashing and OOM.
2141  */
2142 static int too_many_isolated(struct pglist_data *pgdat, int file,
2143 		struct scan_control *sc)
2144 {
2145 	unsigned long inactive, isolated;
2146 
2147 	if (current_is_kswapd())
2148 		return 0;
2149 
2150 	if (!writeback_throttling_sane(sc))
2151 		return 0;
2152 
2153 	if (file) {
2154 		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
2155 		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
2156 	} else {
2157 		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
2158 		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
2159 	}
2160 
2161 	/*
2162 	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
2163 	 * won't get blocked by normal direct-reclaimers, forming a circular
2164 	 * deadlock.
2165 	 */
2166 	if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
2167 		inactive >>= 3;
2168 
2169 	return isolated > inactive;
2170 }
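/*
 * Worked example with illustrative numbers: a GFP_KERNEL direct reclaimer
 * (both __GFP_IO and __GFP_FS set) that sees NR_INACTIVE_FILE = 8000 and
 * NR_ISOLATED_FILE = 1200 has the inactive count shifted down to
 * 8000 >> 3 = 1000, so 1200 > 1000 and the caller is asked to stall.
 * A GFP_NOFS caller keeps the full 8000 as the limit and would not be
 * throttled here.
 */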
2171 
2172 /*
2173  * move_pages_to_lru() moves pages from private @list to appropriate LRU list.
2174  * On return, @list is reused as a list of pages to be freed by the caller.
2175  *
2176  * Returns the number of pages moved to the given lruvec.
2177  */
2178 static unsigned int move_pages_to_lru(struct lruvec *lruvec,
2179 				      struct list_head *list)
2180 {
2181 	int nr_pages, nr_moved = 0;
2182 	LIST_HEAD(pages_to_free);
2183 	struct page *page;
2184 
2185 	while (!list_empty(list)) {
2186 		page = lru_to_page(list);
2187 		VM_BUG_ON_PAGE(PageLRU(page), page);
2188 		list_del(&page->lru);
2189 		if (unlikely(!page_evictable(page))) {
2190 			spin_unlock_irq(&lruvec->lru_lock);
2191 			putback_lru_page(page);
2192 			spin_lock_irq(&lruvec->lru_lock);
2193 			continue;
2194 		}
2195 
2196 		/*
2197 		 * The SetPageLRU needs to be kept here for list integrity.
2198 		 * Otherwise:
2199 		 *   #0 move_pages_to_lru             #1 release_pages
2200 		 *   if !put_page_testzero
2201 		 *				      if (put_page_testzero())
2202 		 *				        !PageLRU //skip lru_lock
2203 		 *     SetPageLRU()
2204 		 *     list_add(&page->lru,)
2205 		 *                                        list_add(&page->lru,)
2206 		 */
2207 		SetPageLRU(page);
2208 
2209 		if (unlikely(put_page_testzero(page))) {
2210 			__clear_page_lru_flags(page);
2211 
2212 			if (unlikely(PageCompound(page))) {
2213 				spin_unlock_irq(&lruvec->lru_lock);
2214 				destroy_compound_page(page);
2215 				spin_lock_irq(&lruvec->lru_lock);
2216 			} else
2217 				list_add(&page->lru, &pages_to_free);
2218 
2219 			continue;
2220 		}
2221 
2222 		/*
2223 		 * All pages were isolated from the same lruvec (and isolation
2224 		 * inhibits memcg migration).
2225 		 */
2226 		VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
2227 		add_page_to_lru_list(page, lruvec);
2228 		nr_pages = thp_nr_pages(page);
2229 		nr_moved += nr_pages;
2230 		if (PageActive(page))
2231 			workingset_age_nonresident(lruvec, nr_pages);
2232 	}
2233 
2234 	/*
2235 	 * To save our caller's stack, now use input list for pages to free.
2236 	 */
2237 	list_splice(&pages_to_free, list);
2238 
2239 	return nr_moved;
2240 }
2241 
2242 /*
2243  * If a kernel thread (such as nfsd for loop-back mounts) services
2244  * a backing device by writing to the page cache it sets PF_LOCAL_THROTTLE.
2245  * In that case we should only throttle if the backing device it is
2246  * writing to is congested.  In other cases it is safe to throttle.
2247  */
2248 static int current_may_throttle(void)
2249 {
2250 	return !(current->flags & PF_LOCAL_THROTTLE) ||
2251 		current->backing_dev_info == NULL ||
2252 		bdi_write_congested(current->backing_dev_info);
2253 }
2254 
2255 /*
2256  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
2257  * of reclaimed pages.
2258  */
2259 static unsigned long
2260 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
2261 		     struct scan_control *sc, enum lru_list lru)
2262 {
2263 	LIST_HEAD(page_list);
2264 	unsigned long nr_scanned;
2265 	unsigned int nr_reclaimed = 0;
2266 	unsigned long nr_taken;
2267 	struct reclaim_stat stat;
2268 	bool file = is_file_lru(lru);
2269 	enum vm_event_item item;
2270 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2271 	bool stalled = false;
2272 
2273 	while (unlikely(too_many_isolated(pgdat, file, sc))) {
2274 		if (stalled)
2275 			return 0;
2276 
2277 		/* wait a bit for the reclaimer. */
2278 		msleep(100);
2279 		stalled = true;
2280 
2281 		/* We are about to die and free our memory. Return now. */
2282 		if (fatal_signal_pending(current))
2283 			return SWAP_CLUSTER_MAX;
2284 	}
2285 
2286 	lru_add_drain();
2287 
2288 	spin_lock_irq(&lruvec->lru_lock);
2289 
2290 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
2291 				     &nr_scanned, sc, lru);
2292 
2293 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2294 	item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
2295 	if (!cgroup_reclaim(sc))
2296 		__count_vm_events(item, nr_scanned);
2297 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
2298 	__count_vm_events(PGSCAN_ANON + file, nr_scanned);
2299 
2300 	spin_unlock_irq(&lruvec->lru_lock);
2301 
2302 	if (nr_taken == 0)
2303 		return 0;
2304 
2305 	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
2306 	trace_android_vh_handle_failed_page_trylock(&page_list);
2307 
2308 	spin_lock_irq(&lruvec->lru_lock);
2309 	move_pages_to_lru(lruvec, &page_list);
2310 
2311 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2312 	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
2313 	if (!cgroup_reclaim(sc))
2314 		__count_vm_events(item, nr_reclaimed);
2315 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
2316 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
2317 	spin_unlock_irq(&lruvec->lru_lock);
2318 
2319 	lru_note_cost(lruvec, file, stat.nr_pageout);
2320 	mem_cgroup_uncharge_list(&page_list);
2321 	free_unref_page_list(&page_list);
2322 
2323 	/*
2324 	 * If dirty pages are scanned that are not queued for IO, it
2325 	 * implies that flushers are not doing their job. This can
2326 	 * happen when memory pressure pushes dirty pages to the end of
2327 	 * the LRU before the dirty limits are breached and the dirty
2328 	 * data has expired. It can also happen when the proportion of
2329 	 * dirty pages grows not through writes but through memory
2330 	 * pressure reclaiming all the clean cache. And in some cases,
2331 	 * the flushers simply cannot keep up with the allocation
2332 	 * rate. Nudge the flusher threads in case they are asleep.
2333 	 */
2334 	if (stat.nr_unqueued_dirty == nr_taken)
2335 		wakeup_flusher_threads(WB_REASON_VMSCAN);
2336 
2337 	sc->nr.dirty += stat.nr_dirty;
2338 	sc->nr.congested += stat.nr_congested;
2339 	sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2340 	sc->nr.writeback += stat.nr_writeback;
2341 	sc->nr.immediate += stat.nr_immediate;
2342 	sc->nr.taken += nr_taken;
2343 	if (file)
2344 		sc->nr.file_taken += nr_taken;
2345 
2346 	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2347 			nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2348 	return nr_reclaimed;
2349 }
2350 
2351 /*
2352  * shrink_active_list() moves pages from the active LRU to the inactive LRU.
2353  *
2354  * We move them the other way if the page is referenced by one or more
2355  * processes.
2356  *
2357  * If the pages are mostly unmapped, the processing is fast and it is
2358  * appropriate to hold lru_lock across the whole operation.  But if
2359  * the pages are mapped, the processing is slow (page_referenced()), so
2360  * we should drop lru_lock around each page.  It's impossible to balance
2361  * this, so instead we remove the pages from the LRU while processing them.
2362  * It is safe to rely on PG_active against the non-LRU pages in here because
2363  * nobody will play with that bit on a non-LRU page.
2364  *
2365  * The downside is that we have to touch page->_refcount against each page.
2366  * But we had to alter page->flags anyway.
2367  */
2368 static void shrink_active_list(unsigned long nr_to_scan,
2369 			       struct lruvec *lruvec,
2370 			       struct scan_control *sc,
2371 			       enum lru_list lru)
2372 {
2373 	unsigned long nr_taken;
2374 	unsigned long nr_scanned;
2375 	unsigned long vm_flags;
2376 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
2377 	LIST_HEAD(l_active);
2378 	LIST_HEAD(l_inactive);
2379 	struct page *page;
2380 	unsigned nr_deactivate, nr_activate;
2381 	unsigned nr_rotated = 0;
2382 	int file = is_file_lru(lru);
2383 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2384 	bool bypass = false;
2385 	bool should_protect = false;
2386 
2387 	lru_add_drain();
2388 
2389 	spin_lock_irq(&lruvec->lru_lock);
2390 
2391 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2392 				     &nr_scanned, sc, lru);
2393 
2394 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2395 
2396 	if (!cgroup_reclaim(sc))
2397 		__count_vm_events(PGREFILL, nr_scanned);
2398 	__count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2399 
2400 	spin_unlock_irq(&lruvec->lru_lock);
2401 
2402 	while (!list_empty(&l_hold)) {
2403 		cond_resched();
2404 		page = lru_to_page(&l_hold);
2405 		list_del(&page->lru);
2406 
2407 		if (unlikely(!page_evictable(page))) {
2408 			putback_lru_page(page);
2409 			continue;
2410 		}
2411 
2412 		if (unlikely(buffer_heads_over_limit)) {
2413 			if (page_has_private(page) && trylock_page(page)) {
2414 				if (page_has_private(page))
2415 					try_to_release_page(page, 0);
2416 				unlock_page(page);
2417 			}
2418 		}
2419 
2420 		trace_android_vh_page_should_be_protected(page, &should_protect);
2421 		if (unlikely(should_protect)) {
2422 			nr_rotated += thp_nr_pages(page);
2423 			list_add(&page->lru, &l_active);
2424 			continue;
2425 		}
2426 
2427 		trace_android_vh_page_referenced_check_bypass(page, nr_to_scan, lru, &bypass);
2428 		if (bypass)
2429 			goto skip_page_referenced;
2430 		trace_android_vh_page_trylock_set(page);
2431 		/* Referenced or rmap lock contention: rotate */
2432 		if (page_referenced(page, 0, sc->target_mem_cgroup,
2433 				     &vm_flags) != 0) {
2434 			/*
2435 			 * Identify referenced, file-backed active pages and
2436 			 * give them one more trip around the active list. So
2437 			 * that executable code gets better chances to stay in
2438 			 * memory under moderate memory pressure.  Anon pages
2439 			 * are not likely to be evicted by use-once streaming
2440 			 * IO, plus JVM can create lots of anon VM_EXEC pages,
2441 			 * so we ignore them here.
2442 			 */
2443 			if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
2444 				trace_android_vh_page_trylock_clear(page);
2445 				nr_rotated += thp_nr_pages(page);
2446 				list_add(&page->lru, &l_active);
2447 				continue;
2448 			}
2449 		}
2450 		trace_android_vh_page_trylock_clear(page);
2451 skip_page_referenced:
2452 		ClearPageActive(page);	/* we are de-activating */
2453 		SetPageWorkingset(page);
2454 		list_add(&page->lru, &l_inactive);
2455 	}
2456 
2457 	/*
2458 	 * Move pages back to the lru list.
2459 	 */
2460 	spin_lock_irq(&lruvec->lru_lock);
2461 
2462 	nr_activate = move_pages_to_lru(lruvec, &l_active);
2463 	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2464 	/* Keep all free pages in l_active list */
2465 	list_splice(&l_inactive, &l_active);
2466 
2467 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
2468 	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2469 
2470 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2471 	spin_unlock_irq(&lruvec->lru_lock);
2472 
2473 	mem_cgroup_uncharge_list(&l_active);
2474 	free_unref_page_list(&l_active);
2475 	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2476 			nr_deactivate, nr_rotated, sc->priority, file);
2477 }
2478 
2479 unsigned long reclaim_pages(struct list_head *page_list)
2480 {
2481 	int nid = NUMA_NO_NODE;
2482 	unsigned int nr_reclaimed = 0;
2483 	LIST_HEAD(node_page_list);
2484 	struct reclaim_stat dummy_stat;
2485 	struct page *page;
2486 	unsigned int noreclaim_flag;
2487 	struct scan_control sc = {
2488 		.gfp_mask = GFP_KERNEL,
2489 		.may_writepage = 1,
2490 		.may_unmap = 1,
2491 		.may_swap = 1,
2492 		.no_demotion = 1,
2493 	};
2494 
2495 	noreclaim_flag = memalloc_noreclaim_save();
2496 
2497 	while (!list_empty(page_list)) {
2498 		page = lru_to_page(page_list);
2499 		if (nid == NUMA_NO_NODE) {
2500 			nid = page_to_nid(page);
2501 			INIT_LIST_HEAD(&node_page_list);
2502 		}
2503 
2504 		if (nid == page_to_nid(page)) {
2505 			ClearPageActive(page);
2506 			list_move(&page->lru, &node_page_list);
2507 			continue;
2508 		}
2509 
2510 		nr_reclaimed += shrink_page_list(&node_page_list,
2511 						NODE_DATA(nid),
2512 						&sc, &dummy_stat, false);
2513 		while (!list_empty(&node_page_list)) {
2514 			page = lru_to_page(&node_page_list);
2515 			list_del(&page->lru);
2516 			putback_lru_page(page);
2517 		}
2518 
2519 		nid = NUMA_NO_NODE;
2520 	}
2521 
2522 	if (!list_empty(&node_page_list)) {
2523 		nr_reclaimed += shrink_page_list(&node_page_list,
2524 						NODE_DATA(nid),
2525 						&sc, &dummy_stat, false);
2526 		while (!list_empty(&node_page_list)) {
2527 			page = lru_to_page(&node_page_list);
2528 			list_del(&page->lru);
2529 			putback_lru_page(page);
2530 		}
2531 	}
2532 
2533 	memalloc_noreclaim_restore(noreclaim_flag);
2534 
2535 	return nr_reclaimed;
2536 }
2537 EXPORT_SYMBOL_GPL(reclaim_pages);
2538 
2539 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2540 				 struct lruvec *lruvec, struct scan_control *sc)
2541 {
2542 	if (is_active_lru(lru)) {
2543 		if (sc->may_deactivate & (1 << is_file_lru(lru)))
2544 			shrink_active_list(nr_to_scan, lruvec, sc, lru);
2545 		else
2546 			sc->skipped_deactivate = 1;
2547 		return 0;
2548 	}
2549 
2550 	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2551 }
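/*
 * A small illustration of the gate above: with DEACTIVATE_ANON == 1 and
 * DEACTIVATE_FILE == 2, an active file LRU has is_file_lru(lru) == 1, so
 * the test is sc->may_deactivate & (1 << 1), i.e. DEACTIVATE_FILE; the
 * active anon LRU checks bit 0, i.e. DEACTIVATE_ANON.
 */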
2552 
2553 /*
2554  * The inactive anon list should be small enough that the VM never has
2555  * to do too much work.
2556  *
2557  * The inactive file list should be small enough to leave most memory
2558  * to the established workingset on the scan-resistant active list,
2559  * but large enough to avoid thrashing the aggregate readahead window.
2560  *
2561  * Both inactive lists should also be large enough that each inactive
2562  * page has a chance to be referenced again before it is reclaimed.
2563  *
2564  * If that fails and refaulting is observed, the inactive list grows.
2565  *
2566  * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2567  * on this LRU, maintained by the pageout code. An inactive_ratio
2568  * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2569  *
2570  * total     target    max
2571  * memory    ratio     inactive
2572  * -------------------------------------
2573  *   10MB       1         5MB
2574  *  100MB       1        50MB
2575  *    1GB       3       250MB
2576  *   10GB      10       0.9GB
2577  *  100GB      31         3GB
2578  *    1TB     101        10GB
2579  *   10TB     320        32GB
2580  */
2581 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2582 {
2583 	enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2584 	unsigned long inactive, active;
2585 	unsigned long inactive_ratio;
2586 	unsigned long gb;
2587 
2588 	inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2589 	active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2590 
2591 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
2592 	if (gb)
2593 		inactive_ratio = int_sqrt(10 * gb);
2594 	else
2595 		inactive_ratio = 1;
2596 
2597 	trace_android_vh_tune_inactive_ratio(&inactive_ratio, is_file_lru(inactive_lru));
2598 
2599 	return inactive * inactive_ratio < active;
2600 }
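/*
 * Worked example with illustrative sizes: with 4GB of pages on an
 * inactive/active pair, gb = 4 and inactive_ratio = int_sqrt(40) = 6, so
 * the inactive list is considered low once inactive * 6 < active, i.e.
 * when less than roughly 1/7 of those pages are inactive.  Below 1GB the
 * ratio falls back to 1 and the target split is simply 1:1.
 */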
2601 
2602 enum scan_balance {
2603 	SCAN_EQUAL,
2604 	SCAN_FRACT,
2605 	SCAN_ANON,
2606 	SCAN_FILE,
2607 };
2608 
2609 static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
2610 {
2611 	unsigned long file;
2612 	struct lruvec *target_lruvec;
2613 
2614 	if (lru_gen_enabled())
2615 		return;
2616 
2617 	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2618 
2619 	/*
2620 	 * Flush the memory cgroup stats, so that we read accurate per-memcg
2621 	 * lruvec stats for heuristics.
2622 	 */
2623 	mem_cgroup_flush_stats();
2624 
2625 	/*
2626 	 * Determine the scan balance between anon and file LRUs.
2627 	 */
2628 	spin_lock_irq(&target_lruvec->lru_lock);
2629 	sc->anon_cost = target_lruvec->anon_cost;
2630 	sc->file_cost = target_lruvec->file_cost;
2631 	spin_unlock_irq(&target_lruvec->lru_lock);
2632 
2633 	/*
2634 	 * Target desirable inactive:active list ratios for the anon
2635 	 * and file LRU lists.
2636 	 */
2637 	if (!sc->force_deactivate) {
2638 		unsigned long refaults;
2639 
2640 		refaults = lruvec_page_state(target_lruvec,
2641 				WORKINGSET_ACTIVATE_ANON);
2642 		if (refaults != target_lruvec->refaults[0] ||
2643 			inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2644 			sc->may_deactivate |= DEACTIVATE_ANON;
2645 		else
2646 			sc->may_deactivate &= ~DEACTIVATE_ANON;
2647 
2648 		/*
2649 		 * When refaults are being observed, it means a new
2650 		 * workingset is being established. Deactivate to get
2651 		 * rid of any stale active pages quickly.
2652 		 */
2653 		refaults = lruvec_page_state(target_lruvec,
2654 				WORKINGSET_ACTIVATE_FILE);
2655 		if (refaults != target_lruvec->refaults[1] ||
2656 		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2657 			sc->may_deactivate |= DEACTIVATE_FILE;
2658 		else
2659 			sc->may_deactivate &= ~DEACTIVATE_FILE;
2660 	} else
2661 		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2662 
2663 	/*
2664 	 * If we have plenty of inactive file pages that aren't
2665 	 * thrashing, try to reclaim those first before touching
2666 	 * anonymous pages.
2667 	 */
2668 	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2669 	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
2670 		sc->cache_trim_mode = 1;
2671 	else
2672 		sc->cache_trim_mode = 0;
2673 
2674 	/*
2675 	 * Prevent the reclaimer from falling into the cache trap: as
2676 	 * cache pages start out inactive, every cache fault will tip
2677 	 * the scan balance towards the file LRU.  And as the file LRU
2678 	 * shrinks, so does the window for rotation from references.
2679 	 * This means we have a runaway feedback loop where a tiny
2680 	 * thrashing file LRU becomes infinitely more attractive than
2681 	 * anon pages.  Try to detect this based on file LRU size.
2682 	 */
2683 	if (!cgroup_reclaim(sc)) {
2684 		unsigned long total_high_wmark = 0;
2685 		unsigned long free, anon;
2686 		int z;
2687 
2688 		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2689 		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2690 			   node_page_state(pgdat, NR_INACTIVE_FILE);
2691 
2692 		for (z = 0; z < MAX_NR_ZONES; z++) {
2693 			struct zone *zone = &pgdat->node_zones[z];
2694 
2695 			if (!managed_zone(zone))
2696 				continue;
2697 
2698 			total_high_wmark += high_wmark_pages(zone);
2699 		}
2700 
2701 		/*
2702 		 * Consider anon: if that's low too, this isn't a
2703 		 * runaway file reclaim problem, but rather just
2704 		 * extreme pressure. Reclaim as per usual then.
2705 		 */
2706 		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2707 
2708 		sc->file_is_tiny =
2709 			file + free <= total_high_wmark &&
2710 			!(sc->may_deactivate & DEACTIVATE_ANON) &&
2711 			anon >> sc->priority;
2712 	}
2713 }
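/*
 * Worked example with illustrative numbers: on a node where the zones'
 * high watermarks sum to 16384 pages, with 5000 free pages and only 8000
 * file pages, file + free (13000) is below total_high_wmark.  If anon
 * deactivation is not in force and anon >> sc->priority is still
 * non-zero, file_is_tiny is set and get_scan_count() below will
 * force-scan anon (SCAN_ANON).
 */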
2714 
2715 /*
2716  * Determine how aggressively the anon and file LRU lists should be
2717  * scanned.  The relative value of each set of LRU lists is determined
2718  * by looking at the fraction of the pages scanned we did rotate back
2719  * onto the active list instead of evict.
2720  *
2721  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2722  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2723  */
2724 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2725 			   unsigned long *nr)
2726 {
2727 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2728 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2729 	unsigned long anon_cost, file_cost, total_cost;
2730 	int swappiness = mem_cgroup_swappiness(memcg);
2731 	u64 fraction[ANON_AND_FILE];
2732 	u64 denominator = 0;	/* gcc */
2733 	enum scan_balance scan_balance;
2734 	unsigned long ap, fp;
2735 	enum lru_list lru;
2736 	bool balance_anon_file_reclaim = false;
2737 
2738 	/* If we have no swap space, do not bother scanning anon pages. */
2739 	if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
2740 		scan_balance = SCAN_FILE;
2741 		goto out;
2742 	}
2743 
2744 	trace_android_vh_tune_swappiness(&swappiness);
2745 	/*
2746 	 * Global reclaim will swap to prevent OOM even with no
2747 	 * swappiness, but memcg users want to use this knob to
2748 	 * disable swapping for individual groups completely when
2749 	 * using the memory controller's swap limit feature would be
2750 	 * too expensive.
2751 	 */
2752 	if (cgroup_reclaim(sc) && !swappiness) {
2753 		scan_balance = SCAN_FILE;
2754 		goto out;
2755 	}
2756 
2757 	/*
2758 	 * Do not apply any pressure balancing cleverness when the
2759 	 * system is close to OOM, scan both anon and file equally
2760 	 * (unless the swappiness setting disagrees with swapping).
2761 	 */
2762 	if (!sc->priority && swappiness) {
2763 		scan_balance = SCAN_EQUAL;
2764 		goto out;
2765 	}
2766 
2767 	/*
2768 	 * If the system is almost out of file pages, force-scan anon.
2769 	 */
2770 	if (sc->file_is_tiny) {
2771 		scan_balance = SCAN_ANON;
2772 		goto out;
2773 	}
2774 
2775 	trace_android_rvh_set_balance_anon_file_reclaim(&balance_anon_file_reclaim);
2776 
2777 	/*
2778 	 * If there is enough inactive page cache, we do not reclaim
2779 	 * anything from the anonymous working set right now. But when balancing
2780 	 * anon and page cache files for reclaim, allow swapping of anon pages
2781 	 * even if there are a number of inactive file cache pages.
2782 	 */
2783 	if (!balance_anon_file_reclaim && sc->cache_trim_mode) {
2784 		scan_balance = SCAN_FILE;
2785 		goto out;
2786 	}
2787 
2788 	scan_balance = SCAN_FRACT;
2789 	/*
2790 	 * Calculate the pressure balance between anon and file pages.
2791 	 *
2792 	 * The amount of pressure we put on each LRU is inversely
2793 	 * proportional to the cost of reclaiming each list, as
2794 	 * determined by the share of pages that are refaulting, times
2795 	 * the relative IO cost of bringing back a swapped out
2796 	 * anonymous page vs reloading a filesystem page (swappiness).
2797 	 *
2798 	 * Although we limit that influence to ensure no list gets
2799 	 * left behind completely: at least a third of the pressure is
2800 	 * applied, before swappiness.
2801 	 *
2802 	 * With swappiness at 100, anon and file have equal IO cost.
2803 	 */
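	/*
	 * Worked example with illustrative numbers: with swappiness = 60,
	 * sc->anon_cost = 100 and sc->file_cost = 300, the code below
	 * computes total_cost = 400, anon_cost = 500, file_cost = 700 and
	 * then total_cost = 1200 (the "+ total_cost" terms are the floor
	 * that keeps at least a third of the pressure on each list).
	 * That yields ap = 60 * 1201 / 501 = 143 and
	 * fp = 140 * 1201 / 701 = 239, i.e. roughly 37% of the fractional
	 * scan pressure goes to anon and 63% to file.
	 */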
2804 	total_cost = sc->anon_cost + sc->file_cost;
2805 	anon_cost = total_cost + sc->anon_cost;
2806 	file_cost = total_cost + sc->file_cost;
2807 	total_cost = anon_cost + file_cost;
2808 
2809 	ap = swappiness * (total_cost + 1);
2810 	ap /= anon_cost + 1;
2811 
2812 	fp = (200 - swappiness) * (total_cost + 1);
2813 	fp /= file_cost + 1;
2814 
2815 	fraction[0] = ap;
2816 	fraction[1] = fp;
2817 	denominator = ap + fp;
2818 out:
2819 	trace_android_vh_tune_scan_type((char *)(&scan_balance));
2820 	trace_android_vh_tune_memcg_scan_type(memcg, (char *)(&scan_balance));
2821 	for_each_evictable_lru(lru) {
2822 		int file = is_file_lru(lru);
2823 		unsigned long lruvec_size;
2824 		unsigned long low, min;
2825 		unsigned long scan;
2826 
2827 		lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2828 		mem_cgroup_protection(sc->target_mem_cgroup, memcg,
2829 				      &min, &low);
2830 
2831 		if (min || low) {
2832 			/*
2833 			 * Scale a cgroup's reclaim pressure by proportioning
2834 			 * its current usage to its memory.low or memory.min
2835 			 * setting.
2836 			 *
2837 			 * This is important, as otherwise scanning aggression
2838 			 * becomes extremely binary -- from nothing as we
2839 			 * approach the memory protection threshold, to totally
2840 			 * nominal as we exceed it.  This results in requiring
2841 			 * setting extremely liberal protection thresholds. It
2842 			 * also means we simply get no protection at all if we
2843 			 * set it too low, which is not ideal.
2844 			 *
2845 			 * If there is any protection in place, we reduce scan
2846 			 * pressure by how much of the total memory used is
2847 			 * within protection thresholds.
2848 			 *
2849 			 * There is one special case: in the first reclaim pass,
2850 			 * we skip over all groups that are within their low
2851 			 * protection. If that fails to reclaim enough pages to
2852 			 * satisfy the reclaim goal, we come back and override
2853 			 * the best-effort low protection. However, we still
2854 			 * ideally want to honor how well-behaved groups are in
2855 			 * that case instead of simply punishing them all
2856 			 * equally. As such, we reclaim them based on how much
2857 			 * memory they are using, reducing the scan pressure
2858 			 * again by how much of the total memory used is under
2859 			 * hard protection.
2860 			 */
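			/*
			 * Worked example with illustrative numbers: a cgroup
			 * using 200MB with memory.low = 150MB (low > min and
			 * memcg_low_reclaim not set) gets protection = 150MB,
			 * so the formula below scans roughly
			 * lruvec_size * (1 - 150/200) = 25% of the list,
			 * clamped to at least SWAP_CLUSTER_MAX pages.
			 */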
2861 			unsigned long cgroup_size = mem_cgroup_size(memcg);
2862 			unsigned long protection;
2863 
2864 			/* memory.low scaling, make sure we retry before OOM */
2865 			if (!sc->memcg_low_reclaim && low > min) {
2866 				protection = low;
2867 				sc->memcg_low_skipped = 1;
2868 			} else {
2869 				protection = min;
2870 			}
2871 
2872 			/* Avoid TOCTOU with earlier protection check */
2873 			cgroup_size = max(cgroup_size, protection);
2874 
2875 			scan = lruvec_size - lruvec_size * protection /
2876 				(cgroup_size + 1);
2877 
2878 			/*
2879 			 * Minimally target SWAP_CLUSTER_MAX pages to keep
2880 			 * reclaim moving forwards, avoiding decrementing
2881 			 * sc->priority further than desirable.
2882 			 */
2883 			scan = max(scan, SWAP_CLUSTER_MAX);
2884 		} else {
2885 			scan = lruvec_size;
2886 		}
2887 
2888 		scan >>= sc->priority;
2889 
2890 		/*
2891 		 * If the cgroup's already been deleted, make sure to
2892 		 * scrape out the remaining cache.
2893 		 */
2894 		if (!scan && !mem_cgroup_online(memcg))
2895 			scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2896 
2897 		switch (scan_balance) {
2898 		case SCAN_EQUAL:
2899 			/* Scan lists relative to size */
2900 			break;
2901 		case SCAN_FRACT:
2902 			/*
2903 			 * Scan types proportional to swappiness and
2904 			 * their relative recent reclaim efficiency.
2905 			 * Make sure we don't miss the last page on
2906 			 * the offlined memory cgroups because of a
2907 			 * round-off error.
2908 			 */
2909 			scan = mem_cgroup_online(memcg) ?
2910 			       div64_u64(scan * fraction[file], denominator) :
2911 			       DIV64_U64_ROUND_UP(scan * fraction[file],
2912 						  denominator);
2913 			break;
2914 		case SCAN_FILE:
2915 		case SCAN_ANON:
2916 			/* Scan one type exclusively */
2917 			if ((scan_balance == SCAN_FILE) != file)
2918 				scan = 0;
2919 			break;
2920 		default:
2921 			/* Look ma, no brain */
2922 			BUG();
2923 		}
2924 
2925 		nr[lru] = scan;
2926 	}
2927 }
2928 
2929 /*
2930  * Anonymous LRU management is a waste if there is
2931  * ultimately no way to reclaim the memory.
2932  */
2933 static bool can_age_anon_pages(struct pglist_data *pgdat,
2934 			       struct scan_control *sc)
2935 {
2936 	/* Aging the anon LRU is valuable if swap is present: */
2937 	if (total_swap_pages > 0)
2938 		return true;
2939 
2940 	/* Also valuable if anon pages can be demoted: */
2941 	return can_demote(pgdat->node_id, sc);
2942 }
2943 
2944 #ifdef CONFIG_LRU_GEN
2945 
2946 #ifdef CONFIG_LRU_GEN_ENABLED
2947 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
2948 #define get_cap(cap)	static_branch_likely(&lru_gen_caps[cap])
2949 #else
2950 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
2951 #define get_cap(cap)	static_branch_unlikely(&lru_gen_caps[cap])
2952 #endif
2953 
2954 /******************************************************************************
2955  *                          shorthand helpers
2956  ******************************************************************************/
2957 
2958 #define LRU_REFS_FLAGS	(BIT(PG_referenced) | BIT(PG_workingset))
2959 
2960 #define DEFINE_MAX_SEQ(lruvec)						\
2961 	unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2962 
2963 #define DEFINE_MIN_SEQ(lruvec)						\
2964 	unsigned long min_seq[ANON_AND_FILE] = {			\
2965 		READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]),	\
2966 		READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]),	\
2967 	}
2968 
2969 #define for_each_gen_type_zone(gen, type, zone)				\
2970 	for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)			\
2971 		for ((type) = 0; (type) < ANON_AND_FILE; (type)++)	\
2972 			for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
2973 
2974 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
2975 {
2976 	struct pglist_data *pgdat = NODE_DATA(nid);
2977 
2978 #ifdef CONFIG_MEMCG
2979 	if (memcg) {
2980 		struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
2981 
2982 		/* for hotadd_new_pgdat() */
2983 		if (!lruvec->pgdat)
2984 			lruvec->pgdat = pgdat;
2985 
2986 		return lruvec;
2987 	}
2988 #endif
2989 	VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2990 
2991 	return pgdat ? &pgdat->__lruvec : NULL;
2992 }
2993 
2994 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
2995 {
2996 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2997 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2998 
2999 	if (!can_demote(pgdat->node_id, sc) &&
3000 		mem_cgroup_get_nr_swap_pages(memcg) <= 0)
3001 		return 0;
3002 
3003 	return mem_cgroup_swappiness(memcg);
3004 }
3005 
3006 static int get_nr_gens(struct lruvec *lruvec, int type)
3007 {
3008 	return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
3009 }
3010 
3011 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
3012 {
3013 	/* see the comment on lru_gen_struct */
3014 	return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
3015 	       get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
3016 	       get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
3017 }
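/*
 * A small illustration of the bookkeeping above: if max_seq is 10 and
 * min_seq[LRU_GEN_FILE] is 8, get_nr_gens() reports 10 - 8 + 1 = 3 file
 * generations in flight.  seq_is_valid() then checks that the file count
 * is at least MIN_NR_GENS, never exceeds the anon count, and that the
 * anon count stays at or below MAX_NR_GENS.
 */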
3018 
3019 /******************************************************************************
3020  *                          mm_struct list
3021  ******************************************************************************/
3022 
3023 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
3024 {
3025 	static struct lru_gen_mm_list mm_list = {
3026 		.fifo = LIST_HEAD_INIT(mm_list.fifo),
3027 		.lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
3028 	};
3029 
3030 #ifdef CONFIG_MEMCG
3031 	if (memcg)
3032 		return &memcg->mm_list;
3033 #endif
3034 	VM_WARN_ON_ONCE(!mem_cgroup_disabled());
3035 
3036 	return &mm_list;
3037 }
3038 
3039 void lru_gen_add_mm(struct mm_struct *mm)
3040 {
3041 	int nid;
3042 	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
3043 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3044 
3045 	VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
3046 #ifdef CONFIG_MEMCG
3047 	VM_WARN_ON_ONCE(mm->lru_gen.memcg);
3048 	mm->lru_gen.memcg = memcg;
3049 #endif
3050 	spin_lock(&mm_list->lock);
3051 
3052 	for_each_node_state(nid, N_MEMORY) {
3053 		struct lruvec *lruvec = get_lruvec(memcg, nid);
3054 
3055 		if (!lruvec)
3056 			continue;
3057 
3058 		/* the first addition since the last iteration */
3059 		if (lruvec->mm_state.tail == &mm_list->fifo)
3060 			lruvec->mm_state.tail = &mm->lru_gen.list;
3061 	}
3062 
3063 	list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
3064 
3065 	spin_unlock(&mm_list->lock);
3066 }
3067 
3068 void lru_gen_del_mm(struct mm_struct *mm)
3069 {
3070 	int nid;
3071 	struct lru_gen_mm_list *mm_list;
3072 	struct mem_cgroup *memcg = NULL;
3073 
3074 	if (list_empty(&mm->lru_gen.list))
3075 		return;
3076 
3077 #ifdef CONFIG_MEMCG
3078 	memcg = mm->lru_gen.memcg;
3079 #endif
3080 	mm_list = get_mm_list(memcg);
3081 
3082 	spin_lock(&mm_list->lock);
3083 
3084 	for_each_node(nid) {
3085 		struct lruvec *lruvec = get_lruvec(memcg, nid);
3086 
3087 		if (!lruvec)
3088 			continue;
3089 
3090 		/* where the current iteration continues after */
3091 		if (lruvec->mm_state.head == &mm->lru_gen.list)
3092 			lruvec->mm_state.head = lruvec->mm_state.head->prev;
3093 
3094 		/* where the last iteration ended before */
3095 		if (lruvec->mm_state.tail == &mm->lru_gen.list)
3096 			lruvec->mm_state.tail = lruvec->mm_state.tail->next;
3097 	}
3098 
3099 	list_del_init(&mm->lru_gen.list);
3100 
3101 	spin_unlock(&mm_list->lock);
3102 
3103 #ifdef CONFIG_MEMCG
3104 	mem_cgroup_put(mm->lru_gen.memcg);
3105 	mm->lru_gen.memcg = NULL;
3106 #endif
3107 }
3108 
3109 #ifdef CONFIG_MEMCG
3110 void lru_gen_migrate_mm(struct mm_struct *mm)
3111 {
3112 	struct mem_cgroup *memcg;
3113 	struct task_struct *task = rcu_dereference_protected(mm->owner, true);
3114 
3115 	VM_WARN_ON_ONCE(task->mm != mm);
3116 	lockdep_assert_held(&task->alloc_lock);
3117 
3118 	/* for mm_update_next_owner() */
3119 	if (mem_cgroup_disabled())
3120 		return;
3121 
3122 	/* migration can happen before addition */
3123 	if (!mm->lru_gen.memcg)
3124 		return;
3125 
3126 	rcu_read_lock();
3127 	memcg = mem_cgroup_from_task(task);
3128 	rcu_read_unlock();
3129 	if (memcg == mm->lru_gen.memcg)
3130 		return;
3131 
3132 	VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
3133 
3134 	lru_gen_del_mm(mm);
3135 	lru_gen_add_mm(mm);
3136 }
3137 #endif
3138 
3139 /*
3140  * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when
3141  * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
3142  * bits in a bitmap, k is the number of hash functions and n is the number of
3143  * inserted items.
3144  *
3145  * Page table walkers use one of the two filters to reduce their search space.
3146  * To get rid of non-leaf entries that no longer have enough leaf entries, the
3147  * aging uses the double-buffering technique to flip to the other filter each
3148  * time it produces a new generation. For non-leaf entries that have enough
3149  * leaf entries, the aging carries them over to the next generation in
3150  * walk_pmd_range(); the eviction also reports them when walking the rmap
3151  * in lru_gen_look_around().
3152  *
3153  * For future optimizations:
3154  * 1. It's not necessary to keep both filters all the time. The spare one can be
3155  *    freed after the RCU grace period and reallocated if needed again.
3156  * 2. And when reallocating, it's worth scaling its size according to the number
3157  *    of inserted entries in the other filter, to reduce the memory overhead on
3158  *    small systems and false positives on large systems.
3159  * 3. Jenkins' hash function is an alternative to Knuth's.
3160  */
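/*
 * A small illustration of the helpers below: with BLOOM_FILTER_SHIFT = 15
 * the filter is a 32K-bit bitmap.  get_item_key() hashes the item into a
 * 30-bit value and splits it into two 15-bit indexes (k = 2):
 *
 *	key[0] = hash & 0x7fff;		(low 15 bits)
 *	key[1] = hash >> 15;		(high 15 bits)
 *
 * update_bloom_filter() sets both bits; test_bloom_filter() reports a
 * (possibly false positive) hit only when both bits are set.
 */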
3161 #define BLOOM_FILTER_SHIFT	15
3162 
3163 static inline int filter_gen_from_seq(unsigned long seq)
3164 {
3165 	return seq % NR_BLOOM_FILTERS;
3166 }
3167 
3168 static void get_item_key(void *item, int *key)
3169 {
3170 	u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
3171 
3172 	BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
3173 
3174 	key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
3175 	key[1] = hash >> BLOOM_FILTER_SHIFT;
3176 }
3177 
3178 static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
3179 {
3180 	unsigned long *filter;
3181 	int gen = filter_gen_from_seq(seq);
3182 
3183 	filter = lruvec->mm_state.filters[gen];
3184 	if (filter) {
3185 		bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
3186 		return;
3187 	}
3188 
3189 	filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
3190 			       __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
3191 	WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
3192 }
3193 
3194 static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
3195 {
3196 	int key[2];
3197 	unsigned long *filter;
3198 	int gen = filter_gen_from_seq(seq);
3199 
3200 	filter = READ_ONCE(lruvec->mm_state.filters[gen]);
3201 	if (!filter)
3202 		return;
3203 
3204 	get_item_key(item, key);
3205 
3206 	if (!test_bit(key[0], filter))
3207 		set_bit(key[0], filter);
3208 	if (!test_bit(key[1], filter))
3209 		set_bit(key[1], filter);
3210 }
3211 
3212 static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
3213 {
3214 	int key[2];
3215 	unsigned long *filter;
3216 	int gen = filter_gen_from_seq(seq);
3217 
3218 	filter = READ_ONCE(lruvec->mm_state.filters[gen]);
3219 	if (!filter)
3220 		return true;
3221 
3222 	get_item_key(item, key);
3223 
3224 	return test_bit(key[0], filter) && test_bit(key[1], filter);
3225 }
3226 
3227 static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
3228 {
3229 	int i;
3230 	int hist;
3231 
3232 	lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
3233 
3234 	if (walk) {
3235 		hist = lru_hist_from_seq(walk->max_seq);
3236 
3237 		for (i = 0; i < NR_MM_STATS; i++) {
3238 			WRITE_ONCE(lruvec->mm_state.stats[hist][i],
3239 				   lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
3240 			walk->mm_stats[i] = 0;
3241 		}
3242 	}
3243 
3244 	if (NR_HIST_GENS > 1 && last) {
3245 		hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
3246 
3247 		for (i = 0; i < NR_MM_STATS; i++)
3248 			WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
3249 	}
3250 }
3251 
3252 static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
3253 {
3254 	int type;
3255 	unsigned long size = 0;
3256 	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3257 	int key = pgdat->node_id;
3258 
3259 	if (!walk->full_scan && !node_isset(key, mm->lru_gen.nodes))
3260 		return true;
3261 
3262 	node_clear(key, mm->lru_gen.nodes);
3263 
3264 	for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
3265 		size += type ? get_mm_counter(mm, MM_FILEPAGES) :
3266 			       get_mm_counter(mm, MM_ANONPAGES) +
3267 			       get_mm_counter(mm, MM_SHMEMPAGES);
3268 	}
3269 
3270 	if (size < MIN_LRU_BATCH)
3271 		return true;
3272 
3273 	return !mmget_not_zero(mm);
3274 }
3275 
3276 static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
3277 			    struct mm_struct **iter)
3278 {
3279 	bool first = false;
3280 	bool last = false;
3281 	struct mm_struct *mm = NULL;
3282 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3283 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3284 	struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
3285 
3286 	/*
3287 	 * mm_state->seq is incremented after each iteration of mm_list. There
3288 	 * are three interesting cases for this page table walker:
3289 	 * 1. It tries to start a new iteration with a stale max_seq: there is
3290 	 *    nothing left to do.
3291 	 * 2. It started the next iteration: it needs to reset the Bloom filter
3292 	 *    so that a fresh set of PTE tables can be recorded.
3293 	 * 3. It ended the current iteration: it needs to reset the mm stats
3294 	 *    counters and tell its caller to increment max_seq.
3295 	 */
3296 	spin_lock(&mm_list->lock);
3297 
3298 	VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq);
3299 
3300 	if (walk->max_seq <= mm_state->seq)
3301 		goto done;
3302 
3303 	if (!mm_state->head)
3304 		mm_state->head = &mm_list->fifo;
3305 
3306 	if (mm_state->head == &mm_list->fifo)
3307 		first = true;
3308 
3309 	do {
3310 		mm_state->head = mm_state->head->next;
3311 		if (mm_state->head == &mm_list->fifo) {
3312 			WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3313 			last = true;
3314 			break;
3315 		}
3316 
3317 		/* force scan for those added after the last iteration */
3318 		if (!mm_state->tail || mm_state->tail == mm_state->head) {
3319 			mm_state->tail = mm_state->head->next;
3320 			walk->full_scan = true;
3321 		}
3322 
3323 		mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
3324 		if (should_skip_mm(mm, walk))
3325 			mm = NULL;
3326 	} while (!mm);
3327 done:
3328 	if (*iter || last)
3329 		reset_mm_stats(lruvec, walk, last);
3330 
3331 	spin_unlock(&mm_list->lock);
3332 
3333 	if (mm && first)
3334 		reset_bloom_filter(lruvec, walk->max_seq + 1);
3335 
3336 	if (*iter)
3337 		mmput_async(*iter);
3338 
3339 	*iter = mm;
3340 
3341 	return last;
3342 }
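
/*
 * Illustrative sketch (not part of the original source): a caller drives this
 * iterator with a NULL-initialized cursor until the list wraps, which is how
 * try_to_inc_max_seq() below uses it. All names match the surrounding code.
 *
 *	struct mm_struct *mm = NULL;
 *
 *	do {
 *		last = iterate_mm_list(lruvec, walk, &mm);
 *		if (mm)
 *			walk_mm(lruvec, mm, walk);	// walk one mm_struct
 *	} while (mm);		// mm becomes NULL once mm_list has wrapped
 */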
3343 
3344 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
3345 {
3346 	bool success = false;
3347 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3348 	struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3349 	struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
3350 
3351 	spin_lock(&mm_list->lock);
3352 
3353 	VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq);
3354 
3355 	if (max_seq > mm_state->seq) {
3356 		mm_state->head = NULL;
3357 		mm_state->tail = NULL;
3358 		WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3359 		reset_mm_stats(lruvec, NULL, true);
3360 		success = true;
3361 	}
3362 
3363 	spin_unlock(&mm_list->lock);
3364 
3365 	return success;
3366 }
3367 
3368 /******************************************************************************
3369  *                          refault feedback loop
3370  ******************************************************************************/
3371 
3372 /*
3373  * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3374  *
3375  * The P term is refaulted/(evicted+protected) from a tier in the generation
3376  * currently being evicted; the I term is the exponential moving average of the
3377  * P term over the generations previously evicted, using the smoothing factor
3378  * 1/2; the D term isn't supported.
3379  *
3380  * The setpoint (SP) is always the first tier of one type; the process variable
3381  * (PV) is either any tier of the other type or any other tier of the same
3382  * type.
3383  *
3384  * The error is the difference between the SP and the PV; the correction is to
3385  * turn off protection when SP>PV or turn on protection when SP<PV.
3386  *
3387  * For future optimizations:
3388  * 1. The D term may discount the other two terms over time so that long-lived
3389  *    generations can resist stale information.
3390  */
3391 struct ctrl_pos {
3392 	unsigned long refaulted;
3393 	unsigned long total;
3394 	int gain;
3395 };
3396 
3397 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
3398 			  struct ctrl_pos *pos)
3399 {
3400 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3401 	int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3402 
3403 	pos->refaulted = lrugen->avg_refaulted[type][tier] +
3404 			 atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3405 	pos->total = lrugen->avg_total[type][tier] +
3406 		     atomic_long_read(&lrugen->evicted[hist][type][tier]);
3407 	if (tier)
3408 		pos->total += lrugen->protected[hist][type][tier - 1];
3409 	pos->gain = gain;
3410 }
3411 
3412 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
3413 {
3414 	int hist, tier;
3415 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3416 	bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
3417 	unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
3418 
3419 	lockdep_assert_held(&lruvec->lru_lock);
3420 
3421 	if (!carryover && !clear)
3422 		return;
3423 
3424 	hist = lru_hist_from_seq(seq);
3425 
3426 	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
3427 		if (carryover) {
3428 			unsigned long sum;
3429 
3430 			sum = lrugen->avg_refaulted[type][tier] +
3431 			      atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3432 			WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
3433 
3434 			sum = lrugen->avg_total[type][tier] +
3435 			      atomic_long_read(&lrugen->evicted[hist][type][tier]);
3436 			if (tier)
3437 				sum += lrugen->protected[hist][type][tier - 1];
3438 			WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
3439 		}
3440 
3441 		if (clear) {
3442 			atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
3443 			atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
3444 			if (tier)
3445 				WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
3446 		}
3447 	}
3448 }
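
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * carryover, each tier's moving average folds in the just-finished period
 * using the smoothing factor 1/2 described above:
 *
 *	avg_refaulted = 100, refaulted[hist] = 60  ->  new avg = (100 + 60) / 2 = 80
 *	avg_total     = 400, evicted[hist]   = 200 ->  new avg = (400 + 200) / 2 = 300
 *
 * The numbers only demonstrate the (old + new) / 2 update performed by
 * reset_ctrl_pos(); they are not taken from real workloads.
 */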
3449 
3450 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
3451 {
3452 	/*
3453 	 * Return true if the PV has a limited number of refaults or a lower
3454 	 * refaulted/total than the SP.
3455 	 */
3456 	return pv->refaulted < MIN_LRU_BATCH ||
3457 	       pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
3458 	       (sp->refaulted + 1) * pv->total * pv->gain;
3459 }
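
/*
 * Worked example (hypothetical numbers, assuming MIN_LRU_BATCH == 64): let the
 * SP be {refaulted=10, total=1000, gain=1} and the PV {refaulted=100,
 * total=2000, gain=2}. Then
 *
 *	100 * (1000 + 64) * 1 = 106400  >  (10 + 1) * 2000 * 2 = 44000
 *
 * so positive_ctrl_err() returns false: the PV refaults proportionally more
 * than the SP and should keep its protection.
 */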
3460 
3461 /******************************************************************************
3462  *                          the aging
3463  ******************************************************************************/
3464 
3465 /* promote pages accessed through page tables */
3466 static int page_update_gen(struct page *page, int gen)
3467 {
3468 	unsigned long new_flags, old_flags = READ_ONCE(page->flags);
3469 
3470 	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
3471 	VM_WARN_ON_ONCE(!rcu_read_lock_held());
3472 
3473 	do {
3474 		/* lru_gen_del_page() has isolated this page? */
3475 		if (!(old_flags & LRU_GEN_MASK)) {
3476 			/* for shrink_page_list() */
3477 			new_flags = old_flags | BIT(PG_referenced);
3478 			continue;
3479 		}
3480 
3481 		new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3482 		new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
3483 	} while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
3484 
3485 	return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3486 }
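
/*
 * Note on the encoding above: the generation number is stored in page->flags
 * biased by one, so a field value of 0 means "not on a multi-gen LRU list".
 * For example, gen 2 is stored as 3 in the LRU_GEN_MASK bits and decoded back:
 *
 *	stored = (gen + 1UL) << LRU_GEN_PGOFF;			// 2 -> 3
 *	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;	// 3 -> 2
 */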
3487 
3488 /* protect pages accessed multiple times through file descriptors */
3489 static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
3490 {
3491 	int type = page_is_file_lru(page);
3492 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3493 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3494 	unsigned long new_flags, old_flags = READ_ONCE(page->flags);
3495 
3496 	VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page);
3497 
3498 	do {
3499 		new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3500 		/* page_update_gen() has promoted this page? */
3501 		if (new_gen >= 0 && new_gen != old_gen)
3502 			return new_gen;
3503 
3504 		new_gen = (old_gen + 1) % MAX_NR_GENS;
3505 
3506 		new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3507 		new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
3508 		/* for end_page_writeback() */
3509 		if (reclaiming)
3510 			new_flags |= BIT(PG_reclaim);
3511 	} while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
3512 
3513 	lru_gen_update_size(lruvec, page, old_gen, new_gen);
3514 
3515 	return new_gen;
3516 }
3517 
3518 static void update_batch_size(struct lru_gen_mm_walk *walk, struct page *page,
3519 			      int old_gen, int new_gen)
3520 {
3521 	int type = page_is_file_lru(page);
3522 	int zone = page_zonenum(page);
3523 	int delta = thp_nr_pages(page);
3524 
3525 	VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
3526 	VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
3527 
3528 	walk->batched++;
3529 
3530 	walk->nr_pages[old_gen][type][zone] -= delta;
3531 	walk->nr_pages[new_gen][type][zone] += delta;
3532 }
3533 
3534 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
3535 {
3536 	int gen, type, zone;
3537 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
3538 
3539 	walk->batched = 0;
3540 
3541 	for_each_gen_type_zone(gen, type, zone) {
3542 		enum lru_list lru = type * LRU_INACTIVE_FILE;
3543 		int delta = walk->nr_pages[gen][type][zone];
3544 
3545 		if (!delta)
3546 			continue;
3547 
3548 		walk->nr_pages[gen][type][zone] = 0;
3549 		WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
3550 			   lrugen->nr_pages[gen][type][zone] + delta);
3551 
3552 		if (lru_gen_is_active(lruvec, gen))
3553 			lru += LRU_ACTIVE;
3554 		__update_lru_size(lruvec, lru, zone, delta);
3555 	}
3556 }
3557 
3558 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
3559 {
3560 	struct address_space *mapping;
3561 	struct vm_area_struct *vma = args->vma;
3562 	struct lru_gen_mm_walk *walk = args->private;
3563 
3564 	if (!vma_is_accessible(vma))
3565 		return true;
3566 
3567 	if (is_vm_hugetlb_page(vma))
3568 		return true;
3569 
3570 	if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL | VM_SEQ_READ | VM_RAND_READ))
3571 		return true;
3572 
3573 	if (vma == get_gate_vma(vma->vm_mm))
3574 		return true;
3575 
3576 	if (vma_is_anonymous(vma))
3577 		return !walk->can_swap;
3578 
3579 	if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
3580 		return true;
3581 
3582 	mapping = vma->vm_file->f_mapping;
3583 	if (mapping_unevictable(mapping))
3584 		return true;
3585 
3586 	if (shmem_mapping(mapping))
3587 		return !walk->can_swap;
3588 
3589 	/* to exclude special mappings like dax, etc. */
3590 	return !mapping->a_ops->readpage;
3591 }
3592 
3593 /*
3594  * Some userspace memory allocators map many single-page VMAs. Instead of
3595  * returning to the PGD table for each such VMA, finish an entire PMD
3596  * table to reduce zigzags and improve cache performance.
3597  */
3598 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
3599 			 unsigned long *vm_start, unsigned long *vm_end)
3600 {
3601 	unsigned long start = round_up(*vm_end, size);
3602 	unsigned long end = (start | ~mask) + 1;
3603 
3604 	VM_WARN_ON_ONCE(mask & size);
3605 	VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
3606 
3607 	while (args->vma) {
3608 		if (start >= args->vma->vm_end) {
3609 			args->vma = args->vma->vm_next;
3610 			continue;
3611 		}
3612 
3613 		if (end && end <= args->vma->vm_start)
3614 			return false;
3615 
3616 		if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) {
3617 			args->vma = args->vma->vm_next;
3618 			continue;
3619 		}
3620 
3621 		*vm_start = max(start, args->vma->vm_start);
3622 		*vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
3623 
3624 		return true;
3625 	}
3626 
3627 	return false;
3628 }
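
/*
 * Worked example (illustrative, assuming 4KiB pages and 2MiB PMDs): when
 * called from walk_pte_range() with mask == PMD_MASK and size == PAGE_SIZE,
 * a previous *vm_end of 0x201000 gives
 *
 *	start = round_up(0x201000, PAGE_SIZE) = 0x201000
 *	end   = (0x201000 | ~PMD_MASK) + 1    = 0x400000
 *
 * i.e. the window runs to the end of the current PMD table, so the walker can
 * finish that table across VMA boundaries before moving on.
 */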
3629 
3630 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
3631 {
3632 	unsigned long pfn = pte_pfn(pte);
3633 
3634 	VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3635 
3636 	if (!pte_present(pte) || is_zero_pfn(pfn))
3637 		return -1;
3638 
3639 	if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
3640 		return -1;
3641 
3642 	if (WARN_ON_ONCE(!pfn_valid(pfn)))
3643 		return -1;
3644 
3645 	return pfn;
3646 }
3647 
3648 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
3649 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
3650 {
3651 	unsigned long pfn = pmd_pfn(pmd);
3652 
3653 	VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3654 
3655 	if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
3656 		return -1;
3657 
3658 	if (WARN_ON_ONCE(pmd_devmap(pmd)))
3659 		return -1;
3660 
3661 	if (WARN_ON_ONCE(!pfn_valid(pfn)))
3662 		return -1;
3663 
3664 	return pfn;
3665 }
3666 #endif
3667 
3668 static struct page *get_pfn_page(unsigned long pfn, struct mem_cgroup *memcg,
3669 				 struct pglist_data *pgdat, bool can_swap)
3670 {
3671 	struct page *page;
3672 
3673 	/* try to avoid unnecessary memory loads */
3674 	if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3675 		return NULL;
3676 
3677 	page = compound_head(pfn_to_page(pfn));
3678 	if (page_to_nid(page) != pgdat->node_id)
3679 		return NULL;
3680 
3681 	if (page_memcg_rcu(page) != memcg)
3682 		return NULL;
3683 
3684 	/* file VMAs can contain anon pages from COW */
3685 	if (!page_is_file_lru(page) && !can_swap)
3686 		return NULL;
3687 
3688 	return page;
3689 }
3690 
3691 static bool suitable_to_scan(int total, int young)
3692 {
3693 	int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
3694 
3695 	/* suitable if the average number of young PTEs per cacheline is >=1 */
3696 	return young * n >= total;
3697 }
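
/*
 * Worked example (illustrative, assuming 64-byte cachelines and 8-byte PTEs):
 * n = clamp(64 / 8, 2, 8) = 8, so a scan of total = 64 PTEs is considered
 * suitable when young >= 8, i.e. at least one young PTE per cacheline of PTEs
 * on average.
 */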
3698 
3699 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
3700 			   struct mm_walk *args)
3701 {
3702 	int i;
3703 	pte_t *pte;
3704 	spinlock_t *ptl;
3705 	unsigned long addr;
3706 	int total = 0;
3707 	int young = 0;
3708 	struct lru_gen_mm_walk *walk = args->private;
3709 	struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3710 	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3711 	int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3712 
3713 	VM_WARN_ON_ONCE(pmd_leaf(*pmd));
3714 
3715 	ptl = pte_lockptr(args->mm, pmd);
3716 	if (!spin_trylock(ptl))
3717 		return false;
3718 
3719 	arch_enter_lazy_mmu_mode();
3720 
3721 	pte = pte_offset_map(pmd, start & PMD_MASK);
3722 restart:
3723 	for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
3724 		unsigned long pfn;
3725 		struct page *page;
3726 
3727 		total++;
3728 		walk->mm_stats[MM_LEAF_TOTAL]++;
3729 
3730 		pfn = get_pte_pfn(pte[i], args->vma, addr);
3731 		if (pfn == -1)
3732 			continue;
3733 
3734 		if (!pte_young(pte[i])) {
3735 			walk->mm_stats[MM_LEAF_OLD]++;
3736 			continue;
3737 		}
3738 
3739 		page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap);
3740 		if (!page)
3741 			continue;
3742 
3743 		if (!ptep_test_and_clear_young(args->vma, addr, pte + i))
3744 			VM_WARN_ON_ONCE(true);
3745 
3746 		young++;
3747 		walk->mm_stats[MM_LEAF_YOUNG]++;
3748 
3749 		if (pte_dirty(pte[i]) && !PageDirty(page) &&
3750 		    !(PageAnon(page) && PageSwapBacked(page) &&
3751 		      !PageSwapCache(page)))
3752 			set_page_dirty(page);
3753 
3754 		old_gen = page_update_gen(page, new_gen);
3755 		if (old_gen >= 0 && old_gen != new_gen)
3756 			update_batch_size(walk, page, old_gen, new_gen);
3757 	}
3758 
3759 	if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
3760 		goto restart;
3761 
3762 	pte_unmap(pte);
3763 
3764 	arch_leave_lazy_mmu_mode();
3765 	spin_unlock(ptl);
3766 
3767 	return suitable_to_scan(total, young);
3768 }
3769 
3770 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
3771 static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
3772 				  struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
3773 {
3774 	int i;
3775 	pmd_t *pmd;
3776 	spinlock_t *ptl;
3777 	struct lru_gen_mm_walk *walk = args->private;
3778 	struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3779 	struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3780 	int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3781 
3782 	VM_WARN_ON_ONCE(pud_leaf(*pud));
3783 
3784 	/* try to batch at most 1+MIN_LRU_BATCH+1 entries */
3785 	if (*start == -1) {
3786 		*start = next;
3787 		return;
3788 	}
3789 
3790 	i = next == -1 ? 0 : pmd_index(next) - pmd_index(*start);
3791 	if (i && i <= MIN_LRU_BATCH) {
3792 		__set_bit(i - 1, bitmap);
3793 		return;
3794 	}
3795 
3796 	pmd = pmd_offset(pud, *start);
3797 
3798 	ptl = pmd_lockptr(args->mm, pmd);
3799 	if (!spin_trylock(ptl))
3800 		goto done;
3801 
3802 	arch_enter_lazy_mmu_mode();
3803 
3804 	do {
3805 		unsigned long pfn;
3806 		struct page *page;
3807 		unsigned long addr = i ? (*start & PMD_MASK) + i * PMD_SIZE : *start;
3808 
3809 		pfn = get_pmd_pfn(pmd[i], vma, addr);
3810 		if (pfn == -1)
3811 			goto next;
3812 
3813 		if (!pmd_trans_huge(pmd[i])) {
3814 			if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) &&
3815 			    get_cap(LRU_GEN_NONLEAF_YOUNG))
3816 				pmdp_test_and_clear_young(vma, addr, pmd + i);
3817 			goto next;
3818 		}
3819 
3820 		page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap);
3821 		if (!page)
3822 			goto next;
3823 
3824 		if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
3825 			goto next;
3826 
3827 		walk->mm_stats[MM_LEAF_YOUNG]++;
3828 
3829 		if (pmd_dirty(pmd[i]) && !PageDirty(page) &&
3830 		    !(PageAnon(page) && PageSwapBacked(page) &&
3831 		      !PageSwapCache(page)))
3832 			set_page_dirty(page);
3833 
3834 		old_gen = page_update_gen(page, new_gen);
3835 		if (old_gen >= 0 && old_gen != new_gen)
3836 			update_batch_size(walk, page, old_gen, new_gen);
3837 next:
3838 		i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
3839 	} while (i <= MIN_LRU_BATCH);
3840 
3841 	arch_leave_lazy_mmu_mode();
3842 	spin_unlock(ptl);
3843 done:
3844 	*start = -1;
3845 	bitmap_zero(bitmap, MIN_LRU_BATCH);
3846 }
3847 #else
3848 static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
3849 				  struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
3850 {
3851 }
3852 #endif
3853 
3854 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
3855 			   struct mm_walk *args)
3856 {
3857 	int i;
3858 	pmd_t *pmd;
3859 	unsigned long next;
3860 	unsigned long addr;
3861 	struct vm_area_struct *vma;
3862 	unsigned long pos = -1;
3863 	struct lru_gen_mm_walk *walk = args->private;
3864 	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
3865 
3866 	VM_WARN_ON_ONCE(pud_leaf(*pud));
3867 
3868 	/*
3869 	 * Finish an entire PMD in two passes: the first only reaches to PTE
3870 	 * tables to avoid taking the PMD lock; the second, if necessary, takes
3871 	 * the PMD lock to clear the accessed bit in PMD entries.
3872 	 */
3873 	pmd = pmd_offset(pud, start & PUD_MASK);
3874 restart:
3875 	/* walk_pte_range() may call get_next_vma() */
3876 	vma = args->vma;
3877 	for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
3878 		pmd_t val = pmd_read_atomic(pmd + i);
3879 
3880 		/* for pmd_read_atomic() */
3881 		barrier();
3882 
3883 		next = pmd_addr_end(addr, end);
3884 
3885 		if (!pmd_present(val) || is_huge_zero_pmd(val)) {
3886 			walk->mm_stats[MM_LEAF_TOTAL]++;
3887 			continue;
3888 		}
3889 
3890 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3891 		if (pmd_trans_huge(val)) {
3892 			unsigned long pfn = pmd_pfn(val);
3893 			struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3894 
3895 			walk->mm_stats[MM_LEAF_TOTAL]++;
3896 
3897 			if (!pmd_young(val)) {
3898 				walk->mm_stats[MM_LEAF_OLD]++;
3899 				continue;
3900 			}
3901 
3902 			/* try to avoid unnecessary memory loads */
3903 			if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3904 				continue;
3905 
3906 			walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
3907 			continue;
3908 		}
3909 #endif
3910 		walk->mm_stats[MM_NONLEAF_TOTAL]++;
3911 
3912 #ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
3913 		if (get_cap(LRU_GEN_NONLEAF_YOUNG)) {
3914 			if (!pmd_young(val))
3915 				continue;
3916 
3917 			walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
3918 		}
3919 #endif
3920 		if (!walk->full_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
3921 			continue;
3922 
3923 		walk->mm_stats[MM_NONLEAF_FOUND]++;
3924 
3925 		if (!walk_pte_range(&val, addr, next, args))
3926 			continue;
3927 
3928 		walk->mm_stats[MM_NONLEAF_ADDED]++;
3929 
3930 		/* carry over to the next generation */
3931 		update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
3932 	}
3933 
3934 	walk_pmd_range_locked(pud, -1, vma, args, bitmap, &pos);
3935 
3936 	if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
3937 		goto restart;
3938 }
3939 
3940 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
3941 			  struct mm_walk *args)
3942 {
3943 	int i;
3944 	pud_t *pud;
3945 	unsigned long addr;
3946 	unsigned long next;
3947 	struct lru_gen_mm_walk *walk = args->private;
3948 
3949 	VM_WARN_ON_ONCE(p4d_leaf(*p4d));
3950 
3951 	pud = pud_offset(p4d, start & P4D_MASK);
3952 restart:
3953 	for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
3954 		pud_t val = READ_ONCE(pud[i]);
3955 
3956 		next = pud_addr_end(addr, end);
3957 
3958 		if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
3959 			continue;
3960 
3961 		walk_pmd_range(&val, addr, next, args);
3962 
3963 		if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
3964 			end = (addr | ~PUD_MASK) + 1;
3965 			goto done;
3966 		}
3967 	}
3968 
3969 	if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
3970 		goto restart;
3971 
3972 	end = round_up(end, P4D_SIZE);
3973 done:
3974 	if (!end || !args->vma)
3975 		return 1;
3976 
3977 	walk->next_addr = max(end, args->vma->vm_start);
3978 
3979 	return -EAGAIN;
3980 }
3981 
3982 static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
3983 {
3984 	static const struct mm_walk_ops mm_walk_ops = {
3985 		.test_walk = should_skip_vma,
3986 		.p4d_entry = walk_pud_range,
3987 	};
3988 
3989 	int err;
3990 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3991 
3992 	walk->next_addr = FIRST_USER_ADDRESS;
3993 
3994 	do {
3995 		DEFINE_MAX_SEQ(lruvec);
3996 
3997 		err = -EBUSY;
3998 
3999 		/* another thread might have called inc_max_seq() */
4000 		if (walk->max_seq != max_seq)
4001 			break;
4002 
4003 		/* page_update_gen() requires stable page_memcg() */
4004 		if (!mem_cgroup_trylock_pages(memcg))
4005 			break;
4006 
4007 		/* the caller might be holding the lock for write */
4008 		if (mmap_read_trylock(mm)) {
4009 			err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
4010 
4011 			mmap_read_unlock(mm);
4012 		}
4013 
4014 		mem_cgroup_unlock_pages();
4015 
4016 		if (walk->batched) {
4017 			spin_lock_irq(&lruvec->lru_lock);
4018 			reset_batch_size(lruvec, walk);
4019 			spin_unlock_irq(&lruvec->lru_lock);
4020 		}
4021 
4022 		cond_resched();
4023 	} while (err == -EAGAIN);
4024 }
4025 
4026 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat)
4027 {
4028 	struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
4029 
4030 	if (pgdat && current_is_kswapd()) {
4031 		VM_WARN_ON_ONCE(walk);
4032 
4033 		walk = &pgdat->mm_walk;
4034 	} else if (!pgdat && !walk) {
4035 		VM_WARN_ON_ONCE(current_is_kswapd());
4036 
4037 		walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
4038 	}
4039 
4040 	current->reclaim_state->mm_walk = walk;
4041 
4042 	return walk;
4043 }
4044 
4045 static void clear_mm_walk(void)
4046 {
4047 	struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
4048 
4049 	VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
4050 	VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
4051 
4052 	current->reclaim_state->mm_walk = NULL;
4053 
4054 	if (!current_is_kswapd())
4055 		kfree(walk);
4056 }
4057 
4058 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
4059 {
4060 	int zone;
4061 	int remaining = MAX_LRU_BATCH;
4062 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4063 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
4064 
4065 	if (type == LRU_GEN_ANON && !can_swap)
4066 		goto done;
4067 
4068 	/* prevent cold/hot inversion if full_scan is true */
4069 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4070 		struct list_head *head = &lrugen->lists[old_gen][type][zone];
4071 
4072 		while (!list_empty(head)) {
4073 			struct page *page = lru_to_page(head);
4074 
4075 			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4076 			VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
4077 			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
4078 			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
4079 
4080 			new_gen = page_inc_gen(lruvec, page, false);
4081 			list_move_tail(&page->lru, &lrugen->lists[new_gen][type][zone]);
4082 
4083 			if (!--remaining)
4084 				return false;
4085 		}
4086 	}
4087 done:
4088 	reset_ctrl_pos(lruvec, type, true);
4089 	WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
4090 
4091 	return true;
4092 }
4093 
4094 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
4095 {
4096 	int gen, type, zone;
4097 	bool success = false;
4098 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4099 	DEFINE_MIN_SEQ(lruvec);
4100 
4101 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
4102 
4103 	/* find the oldest populated generation */
4104 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
4105 		while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
4106 			gen = lru_gen_from_seq(min_seq[type]);
4107 
4108 			for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4109 				if (!list_empty(&lrugen->lists[gen][type][zone]))
4110 					goto next;
4111 			}
4112 
4113 			min_seq[type]++;
4114 		}
4115 next:
4116 		;
4117 	}
4118 
4119 	/* see the comment on lru_gen_struct */
4120 	if (can_swap) {
4121 		min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
4122 		min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
4123 	}
4124 
4125 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
4126 		if (min_seq[type] == lrugen->min_seq[type])
4127 			continue;
4128 
4129 		reset_ctrl_pos(lruvec, type, true);
4130 		WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
4131 		success = true;
4132 	}
4133 
4134 	return success;
4135 }
4136 
4137 static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool full_scan)
4138 {
4139 	int prev, next;
4140 	int type, zone;
4141 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4142 restart:
4143 	spin_lock_irq(&lruvec->lru_lock);
4144 
4145 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
4146 
4147 	for (type = ANON_AND_FILE - 1; type >= 0; type--) {
4148 		if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
4149 			continue;
4150 
4151 		VM_WARN_ON_ONCE(!full_scan && (type == LRU_GEN_FILE || can_swap));
4152 
4158 		if (inc_min_seq(lruvec, type, can_swap))
4159 			continue;
4160 
4161 		spin_unlock_irq(&lruvec->lru_lock);
4162 		cond_resched();
4163 		goto restart;
4165 	}
4166 
4167 	/*
4168 	 * Update the active/inactive LRU sizes for compatibility. Both sides of
4169 	 * the current max_seq need to be covered, since max_seq+1 can overlap
4170 	 * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do
4171 	 * overlap, cold/hot inversion happens.
4172 	 */
4173 	prev = lru_gen_from_seq(lrugen->max_seq - 1);
4174 	next = lru_gen_from_seq(lrugen->max_seq + 1);
4175 
4176 	for (type = 0; type < ANON_AND_FILE; type++) {
4177 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4178 			enum lru_list lru = type * LRU_INACTIVE_FILE;
4179 			long delta = lrugen->nr_pages[prev][type][zone] -
4180 				     lrugen->nr_pages[next][type][zone];
4181 
4182 			if (!delta)
4183 				continue;
4184 
4185 			__update_lru_size(lruvec, lru, zone, delta);
4186 			__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
4187 		}
4188 	}
4189 
4190 	for (type = 0; type < ANON_AND_FILE; type++)
4191 		reset_ctrl_pos(lruvec, type, false);
4192 
4193 	WRITE_ONCE(lrugen->timestamps[next], jiffies);
4194 	/* make sure preceding modifications appear */
4195 	smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
4196 
4197 	spin_unlock_irq(&lruvec->lru_lock);
4198 }
4199 
4200 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
4201 			       struct scan_control *sc, bool can_swap, bool full_scan)
4202 {
4203 	bool success;
4204 	struct lru_gen_mm_walk *walk;
4205 	struct mm_struct *mm = NULL;
4206 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4207 
4208 	VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
4209 
4210 	/* see the comment in iterate_mm_list() */
4211 	if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
4212 		success = false;
4213 		goto done;
4214 	}
4215 
4216 	/*
4217 	 * If the hardware doesn't automatically set the accessed bit, fall back
4218 	 * to lru_gen_look_around(), which only clears the accessed bit in a
4219 	 * handful of PTEs. Spreading the work out over a period of time is
4220 	 * usually less efficient, but it avoids bursty page faults.
4221 	 */
4222 	if (!full_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
4223 		success = iterate_mm_list_nowalk(lruvec, max_seq);
4224 		goto done;
4225 	}
4226 
4227 	walk = set_mm_walk(NULL);
4228 	if (!walk) {
4229 		success = iterate_mm_list_nowalk(lruvec, max_seq);
4230 		goto done;
4231 	}
4232 
4233 	walk->lruvec = lruvec;
4234 	walk->max_seq = max_seq;
4235 	walk->can_swap = can_swap;
4236 	walk->full_scan = full_scan;
4237 
4238 	do {
4239 		success = iterate_mm_list(lruvec, walk, &mm);
4240 		if (mm)
4241 			walk_mm(lruvec, mm, walk);
4242 	} while (mm);
4243 done:
4244 	if (success)
4245 		inc_max_seq(lruvec, can_swap, full_scan);
4246 
4247 	return success;
4248 }
4249 
4250 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
4251 			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
4252 {
4253 	int gen, type, zone;
4254 	unsigned long old = 0;
4255 	unsigned long young = 0;
4256 	unsigned long total = 0;
4257 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4258 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4259 
4260 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
4261 		unsigned long seq;
4262 
4263 		for (seq = min_seq[type]; seq <= max_seq; seq++) {
4264 			unsigned long size = 0;
4265 
4266 			gen = lru_gen_from_seq(seq);
4267 
4268 			for (zone = 0; zone < MAX_NR_ZONES; zone++)
4269 				size += max_t(long, READ_ONCE(lrugen->nr_pages[gen][type][zone]),
4270 						0);
4271 
4272 			total += size;
4273 			if (seq == max_seq)
4274 				young += size;
4275 			else if (seq + MIN_NR_GENS == max_seq)
4276 				old += size;
4277 		}
4278 	}
4279 
4280 	/* try to scrape all its memory if this memcg was deleted */
4281 	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
4282 
4283 	/*
4284 	 * The aging tries to be lazy to reduce the overhead, while the eviction
4285 	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
4286 	 * ideal number of generations is MIN_NR_GENS+1.
4287 	 */
4288 	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq)
4289 		return true;
4290 	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
4291 		return false;
4292 
4293 	/*
4294 	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
4295 	 * of the total number of pages for each generation. A reasonable range
4296 	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
4297 	 * aging cares about the upper bound of hot pages, while the eviction
4298 	 * cares about the lower bound of cold pages.
4299 	 */
4300 	if (young * MIN_NR_GENS > total)
4301 		return true;
4302 	if (old * (MIN_NR_GENS + 2) < total)
4303 		return true;
4304 
4305 	return false;
4306 }
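
/*
 * Worked example (illustrative, assuming MIN_NR_GENS == 2): with
 * max_seq - min_seq == 2, aging is triggered when the youngest generation
 * holds more than half of the pages (young * 2 > total) or the generation
 * about to become evictable holds less than a quarter (old * 4 < total).
 * For total = 1000 pages, young = 600 or old = 200 would both return true,
 * while young = 300 and old = 300 would return false.
 */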
4307 
4308 static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl)
4309 {
4310 	bool need_aging;
4311 	unsigned long nr_to_scan;
4312 	int swappiness = get_swappiness(lruvec, sc);
4313 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4314 	DEFINE_MAX_SEQ(lruvec);
4315 	DEFINE_MIN_SEQ(lruvec);
4316 
4317 	VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
4318 
4319 	mem_cgroup_calculate_protection(NULL, memcg);
4320 
4321 	if (mem_cgroup_below_min(memcg))
4322 		return false;
4323 
4324 	need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
4325 
4326 	if (min_ttl) {
4327 		int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
4328 		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
4329 
4330 		if (time_is_after_jiffies(birth + min_ttl))
4331 			return false;
4332 
4333 		/* the size is likely too small to be helpful */
4334 		if (!nr_to_scan && sc->priority != DEF_PRIORITY)
4335 			return false;
4336 	}
4337 
4338 	if (need_aging)
4339 		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);
4340 
4341 	return true;
4342 }
4343 
4344 /* to protect the working set of the last N jiffies */
4345 static unsigned long lru_gen_min_ttl __read_mostly;
4346 
4347 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
4348 {
4349 	struct mem_cgroup *memcg;
4350 	bool success = false;
4351 	unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
4352 
4353 	VM_WARN_ON_ONCE(!current_is_kswapd());
4354 
4355 	sc->last_reclaimed = sc->nr_reclaimed;
4356 
4357 	/*
4358 	 * To reduce the chance of going into the aging path, which can be
4359 	 * costly, optimistically skip it if the flag below was cleared in the
4360 	 * eviction path. This improves the overall performance when multiple
4361 	 * memcgs are available.
4362 	 */
4363 	if (!sc->memcgs_need_aging) {
4364 		sc->memcgs_need_aging = true;
4365 		return;
4366 	}
4367 
4368 	set_mm_walk(pgdat);
4369 
4370 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
4371 	do {
4372 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4373 
4374 		if (age_lruvec(lruvec, sc, min_ttl))
4375 			success = true;
4376 
4377 		cond_resched();
4378 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4379 
4380 	clear_mm_walk();
4381 
4382 	/* check the order to exclude compaction-induced reclaim */
4383 	if (success || !min_ttl || sc->order)
4384 		return;
4385 
4386 	/*
4387 	 * The main goal is to OOM kill if every generation from all memcgs is
4388 	 * younger than min_ttl. However, another possibility is all memcgs are
4389 	 * either below min or empty.
4390 	 */
4391 	if (mutex_trylock(&oom_lock)) {
4392 		struct oom_control oc = {
4393 			.gfp_mask = sc->gfp_mask,
4394 		};
4395 
4396 		out_of_memory(&oc);
4397 
4398 		mutex_unlock(&oom_lock);
4399 	}
4400 }
4401 
4402 /*
4403  * This function exploits spatial locality when shrink_page_list() walks the
4404  * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
4405  * the scan was done cacheline efficiently, it adds the PMD entry pointing to
4406  * the PTE table to the Bloom filter. This forms a feedback loop between the
4407  * eviction and the aging.
4408  */
4409 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
4410 {
4411 	int i;
4412 	pte_t *pte;
4413 	unsigned long start;
4414 	unsigned long end;
4415 	unsigned long addr;
4416 	struct lru_gen_mm_walk *walk;
4417 	int young = 0;
4418 	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
4419 	struct page *page = pvmw->page;
4420 	bool can_swap = !page_is_file_lru(page);
4421 	struct mem_cgroup *memcg = page_memcg(page);
4422 	struct pglist_data *pgdat = page_pgdat(page);
4423 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4424 	DEFINE_MAX_SEQ(lruvec);
4425 	int old_gen, new_gen = lru_gen_from_seq(max_seq);
4426 
4427 	lockdep_assert_held(pvmw->ptl);
4428 	VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
4429 
4430 	if (spin_is_contended(pvmw->ptl))
4431 		return;
4432 
4433 	/* avoid taking the LRU lock under the PTL when possible */
4434 	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
4435 
4436 	start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
4437 	end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
4438 
4439 	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
4440 		if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
4441 			end = start + MIN_LRU_BATCH * PAGE_SIZE;
4442 		else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
4443 			start = end - MIN_LRU_BATCH * PAGE_SIZE;
4444 		else {
4445 			start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
4446 			end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
4447 		}
4448 	}
4449 
4450 	pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
4451 
4452 	rcu_read_lock();
4453 	arch_enter_lazy_mmu_mode();
4454 
4455 	for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
4456 		unsigned long pfn;
4457 
4458 		pfn = get_pte_pfn(pte[i], pvmw->vma, addr);
4459 		if (pfn == -1)
4460 			continue;
4461 
4462 		if (!pte_young(pte[i]))
4463 			continue;
4464 
4465 		page = get_pfn_page(pfn, memcg, pgdat, can_swap);
4466 		if (!page)
4467 			continue;
4468 
4469 		if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
4470 			VM_WARN_ON_ONCE(true);
4471 
4472 		young++;
4473 
4474 		if (pte_dirty(pte[i]) && !PageDirty(page) &&
4475 		    !(PageAnon(page) && PageSwapBacked(page) &&
4476 		      !PageSwapCache(page)))
4477 			set_page_dirty(page);
4478 
4479 		old_gen = page_lru_gen(page);
4480 		if (old_gen < 0)
4481 			SetPageReferenced(page);
4482 		else if (old_gen != new_gen)
4483 			__set_bit(i, bitmap);
4484 	}
4485 
4486 	arch_leave_lazy_mmu_mode();
4487 	rcu_read_unlock();
4488 
4489 	/* feedback from rmap walkers to page table walkers */
4490 	if (suitable_to_scan(i, young))
4491 		update_bloom_filter(lruvec, max_seq, pvmw->pmd);
4492 
4493 	if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
4494 		for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
4495 			page = pte_page(pte[i]);
4496 			activate_page(page);
4497 		}
4498 		return;
4499 	}
4500 
4501 	/* page_update_gen() requires stable page_memcg() */
4502 	if (!mem_cgroup_trylock_pages(memcg))
4503 		return;
4504 
4505 	if (!walk) {
4506 		spin_lock_irq(&lruvec->lru_lock);
4507 		new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
4508 	}
4509 
4510 	for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
4511 		page = compound_head(pte_page(pte[i]));
4512 		if (page_memcg_rcu(page) != memcg)
4513 			continue;
4514 
4515 		old_gen = page_update_gen(page, new_gen);
4516 		if (old_gen < 0 || old_gen == new_gen)
4517 			continue;
4518 
4519 		if (walk)
4520 			update_batch_size(walk, page, old_gen, new_gen);
4521 		else
4522 			lru_gen_update_size(lruvec, page, old_gen, new_gen);
4523 	}
4524 
4525 	if (!walk)
4526 		spin_unlock_irq(&lruvec->lru_lock);
4527 
4528 	mem_cgroup_unlock_pages();
4529 }
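
/*
 * Worked example for the look-around window above (illustrative, assuming
 * 4KiB pages and MIN_LRU_BATCH == 64, i.e. a 256KiB budget): if the young PTE
 * maps an address in the middle of its 2MiB PMD range, the scan is clamped to
 * [address - 128KiB, address + 128KiB); if the address lies within 128KiB of
 * the start of the range, the first 256KiB of the range is scanned instead.
 */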
4530 
4531 /******************************************************************************
4532  *                          the eviction
4533  ******************************************************************************/
4534 
4535 static bool sort_page(struct lruvec *lruvec, struct page *page, struct scan_control *sc,
4536 		       int tier_idx)
4537 {
4538 	bool success;
4539 	int gen = page_lru_gen(page);
4540 	int type = page_is_file_lru(page);
4541 	int zone = page_zonenum(page);
4542 	int delta = thp_nr_pages(page);
4543 	int refs = page_lru_refs(page);
4544 	int tier = lru_tier_from_refs(refs);
4545 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4546 
4547 	VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
4548 
4549 	/* unevictable */
4550 	if (!page_evictable(page)) {
4551 		success = lru_gen_del_page(lruvec, page, true);
4552 		VM_WARN_ON_ONCE_PAGE(!success, page);
4553 		SetPageUnevictable(page);
4554 		add_page_to_lru_list(page, lruvec);
4555 		__count_vm_events(UNEVICTABLE_PGCULLED, delta);
4556 		return true;
4557 	}
4558 
4559 	/* dirty lazyfree */
4560 	if (type == LRU_GEN_FILE && PageAnon(page) && PageDirty(page)) {
4561 		success = lru_gen_del_page(lruvec, page, true);
4562 		VM_WARN_ON_ONCE_PAGE(!success, page);
4563 		SetPageSwapBacked(page);
4564 		add_page_to_lru_list_tail(page, lruvec);
4565 		return true;
4566 	}
4567 
4568 	/* promoted */
4569 	if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
4570 		list_move(&page->lru, &lrugen->lists[gen][type][zone]);
4571 		return true;
4572 	}
4573 
4574 	/* protected */
4575 	if (tier > tier_idx) {
4576 		int hist = lru_hist_from_seq(lrugen->min_seq[type]);
4577 
4578 		gen = page_inc_gen(lruvec, page, false);
4579 		list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
4580 
4581 		WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
4582 			   lrugen->protected[hist][type][tier - 1] + delta);
4583 		return true;
4584 	}
4585 
4586 	/* ineligible */
4587 	if (zone > sc->reclaim_idx || skip_cma(page, sc)) {
4588 		gen = page_inc_gen(lruvec, page, false);
4589 		list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
4590 		return true;
4591 	}
4592 
4593 	/* waiting for writeback */
4594 	if (PageLocked(page) || PageWriteback(page) ||
4595 	    (type == LRU_GEN_FILE && PageDirty(page))) {
4596 		gen = page_inc_gen(lruvec, page, true);
4597 		list_move(&page->lru, &lrugen->lists[gen][type][zone]);
4598 		return true;
4599 	}
4600 
4601 	return false;
4602 }
4603 
4604 static bool isolate_page(struct lruvec *lruvec, struct page *page, struct scan_control *sc)
4605 {
4606 	bool success;
4607 
4608 	/* unmapping inhibited */
4609 	if (!sc->may_unmap && page_mapped(page))
4610 		return false;
4611 
4612 	/* swapping inhibited */
4613 	if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
4614 	    (PageDirty(page) ||
4615 	     (PageAnon(page) && !PageSwapCache(page))))
4616 		return false;
4617 
4618 	/* raced with release_pages() */
4619 	if (!get_page_unless_zero(page))
4620 		return false;
4621 
4622 	/* raced with another isolation */
4623 	if (!TestClearPageLRU(page)) {
4624 		put_page(page);
4625 		return false;
4626 	}
4627 
4628 	/* see the comment on MAX_NR_TIERS */
4629 	if (!PageReferenced(page))
4630 		set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
4631 
4632 	/* for shrink_page_list() */
4633 	ClearPageReclaim(page);
4634 	ClearPageReferenced(page);
4635 
4636 	success = lru_gen_del_page(lruvec, page, true);
4637 	VM_WARN_ON_ONCE_PAGE(!success, page);
4638 
4639 	return true;
4640 }
4641 
4642 static int scan_pages(struct lruvec *lruvec, struct scan_control *sc,
4643 		      int type, int tier, struct list_head *list)
4644 {
4645 	int i;
4646 	int gen;
4647 	enum vm_event_item item;
4648 	int sorted = 0;
4649 	int scanned = 0;
4650 	int isolated = 0;
4651 	int remaining = MAX_LRU_BATCH;
4652 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
4653 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4654 
4655 	VM_WARN_ON_ONCE(!list_empty(list));
4656 
4657 	if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
4658 		return 0;
4659 
4660 	gen = lru_gen_from_seq(lrugen->min_seq[type]);
4661 
4662 	for (i = MAX_NR_ZONES; i > 0; i--) {
4663 		LIST_HEAD(moved);
4664 		int skipped = 0;
4665 		int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
4666 		struct list_head *head = &lrugen->lists[gen][type][zone];
4667 
4668 		while (!list_empty(head)) {
4669 			struct page *page = lru_to_page(head);
4670 			int delta = thp_nr_pages(page);
4671 
4672 			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4673 			VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
4674 			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
4675 			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
4676 
4677 			scanned += delta;
4678 
4679 			if (sort_page(lruvec, page, sc, tier))
4680 				sorted += delta;
4681 			else if (isolate_page(lruvec, page, sc)) {
4682 				list_add(&page->lru, list);
4683 				isolated += delta;
4684 			} else {
4685 				list_move(&page->lru, &moved);
4686 				skipped += delta;
4687 			}
4688 
4689 			if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH)
4690 				break;
4691 		}
4692 
4693 		if (skipped) {
4694 			list_splice(&moved, head);
4695 			__count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
4696 		}
4697 
4698 		if (!remaining || isolated >= MIN_LRU_BATCH)
4699 			break;
4700 	}
4701 
4702 	item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
4703 	if (!cgroup_reclaim(sc)) {
4704 		__count_vm_events(item, isolated);
4705 		__count_vm_events(PGREFILL, sorted);
4706 	}
4707 	__count_memcg_events(memcg, item, isolated);
4708 	__count_memcg_events(memcg, PGREFILL, sorted);
4709 	__count_vm_events(PGSCAN_ANON + type, isolated);
4710 
4711 	/*
4712 	 * There might not be eligible pages due to reclaim_idx, may_unmap and
4713 	 * may_writepage. Check the remaining to prevent livelock if it's not
4714 	 * making progress.
4715 	 */
4716 	return isolated || !remaining ? scanned : 0;
4717 }
4718 
4719 static int get_tier_idx(struct lruvec *lruvec, int type)
4720 {
4721 	int tier;
4722 	struct ctrl_pos sp, pv;
4723 
4724 	/*
4725 	 * To leave a margin for fluctuations, use a larger gain factor (1:2).
4726 	 * This value is chosen because any other tier would have at least twice
4727 	 * as many refaults as the first tier.
4728 	 */
4729 	read_ctrl_pos(lruvec, type, 0, 1, &sp);
4730 	for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4731 		read_ctrl_pos(lruvec, type, tier, 2, &pv);
4732 		if (!positive_ctrl_err(&sp, &pv))
4733 			break;
4734 	}
4735 
4736 	return tier - 1;
4737 }
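
/*
 * Worked example (hypothetical numbers, assuming MIN_LRU_BATCH == 64): with
 * tier 0 at {refaulted=100, total=10000} and tier 1 at {refaulted=150,
 * total=1000}, the comparison made by positive_ctrl_err() is
 *
 *	150 * (10000 + 64) * 1 <= (100 + 1) * 1000 * 2
 *
 * which is false, so the loop breaks at tier = 1 and get_tier_idx() returns 0:
 * only tier 0 is eligible for eviction and the hotter tiers stay protected.
 */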
4738 
4739 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
4740 {
4741 	int type, tier;
4742 	struct ctrl_pos sp, pv;
4743 	int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
4744 
4745 	/*
4746 	 * Compare the first tier of anon with that of file to determine which
4747 	 * type to scan. Also need to compare other tiers of the selected type
4748 	 * with the first tier of the other type to determine the last tier (of
4749 	 * the selected type) to evict.
4750 	 */
4751 	read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
4752 	read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
4753 	type = positive_ctrl_err(&sp, &pv);
4754 
4755 	read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
4756 	for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4757 		read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
4758 		if (!positive_ctrl_err(&sp, &pv))
4759 			break;
4760 	}
4761 
4762 	*tier_idx = tier - 1;
4763 
4764 	return type;
4765 }
4766 
4767 static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4768 			 int *type_scanned, struct list_head *list)
4769 {
4770 	int i;
4771 	int type;
4772 	int scanned;
4773 	int tier = -1;
4774 	DEFINE_MIN_SEQ(lruvec);
4775 
4776 	/*
4777 	 * Try to make the obvious choice first. When anon and file are both
4778 	 * available from the same generation, interpret swappiness 1 as file
4779 	 * first and 200 as anon first.
4780 	 */
4781 	if (!swappiness)
4782 		type = LRU_GEN_FILE;
4783 	else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
4784 		type = LRU_GEN_ANON;
4785 	else if (swappiness == 1)
4786 		type = LRU_GEN_FILE;
4787 	else if (swappiness == 200)
4788 		type = LRU_GEN_ANON;
4789 	else
4790 		type = get_type_to_scan(lruvec, swappiness, &tier);
4791 
4792 	for (i = !swappiness; i < ANON_AND_FILE; i++) {
4793 		if (tier < 0)
4794 			tier = get_tier_idx(lruvec, type);
4795 
4796 		scanned = scan_pages(lruvec, sc, type, tier, list);
4797 		if (scanned)
4798 			break;
4799 
4800 		type = !type;
4801 		tier = -1;
4802 	}
4803 
4804 	*type_scanned = type;
4805 
4806 	return scanned;
4807 }
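
/*
 * Illustrative summary of the shortcuts above, in the order they are checked
 * (no additional behavior is implied):
 *
 *	swappiness == 0              -> LRU_GEN_FILE only
 *	anon min_seq < file min_seq  -> LRU_GEN_ANON (older generation first)
 *	swappiness == 1              -> LRU_GEN_FILE first
 *	swappiness == 200            -> LRU_GEN_ANON first
 *	otherwise                    -> get_type_to_scan() decides
 */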
4808 
4809 static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4810 		       bool *need_swapping)
4811 {
4812 	int type;
4813 	int scanned;
4814 	int reclaimed;
4815 	LIST_HEAD(list);
4816 	LIST_HEAD(clean);
4817 	struct page *page;
4818 	struct page *next;
4819 	enum vm_event_item item;
4820 	struct reclaim_stat stat;
4821 	struct lru_gen_mm_walk *walk;
4822 	bool skip_retry = false;
4823 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4824 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4825 
4826 	spin_lock_irq(&lruvec->lru_lock);
4827 
4828 	scanned = isolate_pages(lruvec, sc, swappiness, &type, &list);
4829 
4830 	scanned += try_to_inc_min_seq(lruvec, swappiness);
4831 
4832 	if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
4833 		scanned = 0;
4834 
4835 	spin_unlock_irq(&lruvec->lru_lock);
4836 
4837 	if (list_empty(&list))
4838 		return scanned;
4839 retry:
4840 	reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
4841 	sc->nr_reclaimed += reclaimed;
4842 
4843 	list_for_each_entry_safe_reverse(page, next, &list, lru) {
4844 		if (!page_evictable(page)) {
4845 			list_del(&page->lru);
4846 			putback_lru_page(page);
4847 			continue;
4848 		}
4849 
4850 		if (PageReclaim(page) &&
4851 		    (PageDirty(page) || PageWriteback(page))) {
4852 			/* restore LRU_REFS_FLAGS cleared by isolate_page() */
4853 			if (PageWorkingset(page))
4854 				SetPageReferenced(page);
4855 			continue;
4856 		}
4857 
4858 		if (skip_retry || PageActive(page) || PageReferenced(page) ||
4859 		    page_mapped(page) || PageLocked(page) ||
4860 		    PageDirty(page) || PageWriteback(page)) {
4861 			/* don't add rejected pages to the oldest generation */
4862 			set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS,
4863 				      BIT(PG_active));
4864 			continue;
4865 		}
4866 
4867 		/* retry pages that may have missed rotate_reclaimable_page() */
4868 		list_move(&page->lru, &clean);
4869 		sc->nr_scanned -= thp_nr_pages(page);
4870 	}
4871 
4872 	spin_lock_irq(&lruvec->lru_lock);
4873 
4874 	move_pages_to_lru(lruvec, &list);
4875 
4876 	walk = current->reclaim_state->mm_walk;
4877 	if (walk && walk->batched)
4878 		reset_batch_size(lruvec, walk);
4879 
4880 	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
4881 	if (!cgroup_reclaim(sc))
4882 		__count_vm_events(item, reclaimed);
4883 	__count_memcg_events(memcg, item, reclaimed);
4884 	__count_vm_events(PGSTEAL_ANON + type, reclaimed);
4885 
4886 	spin_unlock_irq(&lruvec->lru_lock);
4887 
4888 	mem_cgroup_uncharge_list(&list);
4889 	free_unref_page_list(&list);
4890 
4891 	INIT_LIST_HEAD(&list);
4892 	list_splice_init(&clean, &list);
4893 
4894 	if (!list_empty(&list)) {
4895 		skip_retry = true;
4896 		goto retry;
4897 	}
4898 
4899 	if (need_swapping && type == LRU_GEN_ANON)
4900 		*need_swapping = true;
4901 
4902 	return scanned;
4903 }
4904 
4905 /*
4906  * For future optimizations:
4907  * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
4908  *    reclaim.
4909  */
4910 static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
4911 				    bool can_swap, bool *need_aging)
4912 {
4913 	unsigned long nr_to_scan;
4914 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4915 	DEFINE_MAX_SEQ(lruvec);
4916 	DEFINE_MIN_SEQ(lruvec);
4917 
4918 	if (mem_cgroup_below_min(memcg) ||
4919 	    (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
4920 		return 0;
4921 
4922 	*need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan);
4923 	if (!*need_aging)
4924 		return nr_to_scan;
4925 
4926 	/* skip the aging path at the default priority */
4927 	if (sc->priority == DEF_PRIORITY)
4928 		goto done;
4929 
4930 	/* leave the work to lru_gen_age_node() */
4931 	if (current_is_kswapd())
4932 		return 0;
4933 
4934 	if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
4935 		return nr_to_scan;
4936 done:
4937 	return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
4938 }
4939 
4940 static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
4941 			      struct scan_control *sc, bool need_swapping)
4942 {
4943 	int i;
4944 	DEFINE_MAX_SEQ(lruvec);
4945 
4946 	if (!current_is_kswapd()) {
4947 		/* age each memcg at most once to ensure fairness */
4948 		if (max_seq - seq > 1)
4949 			return true;
4950 
4951 		/* over-swapping can increase allocation latency */
4952 		if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
4953 			return true;
4954 
4955 		/* give this thread a chance to exit and free its memory */
4956 		if (fatal_signal_pending(current)) {
4957 			sc->nr_reclaimed += MIN_LRU_BATCH;
4958 			return true;
4959 		}
4960 
4961 		if (cgroup_reclaim(sc))
4962 			return false;
4963 	} else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
4964 		return false;
4965 
4966 	/* keep scanning at low priorities to ensure fairness */
4967 	if (sc->priority > DEF_PRIORITY - 2)
4968 		return false;
4969 
4970 	/*
4971 	 * A minimum amount of work was done under global memory pressure. For
4972 	 * kswapd, it may be overshooting. For direct reclaim, the allocation
4973 	 * may succeed if all suitable zones are somewhat safe. In either case,
4974 	 * it's better to stop now, and restart later if necessary.
4975 	 */
4976 	for (i = 0; i <= sc->reclaim_idx; i++) {
4977 		unsigned long wmark;
4978 		struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
4979 
4980 		if (!managed_zone(zone))
4981 			continue;
4982 
4983 		wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
4984 		if (wmark > zone_page_state(zone, NR_FREE_PAGES))
4985 			return false;
4986 	}
4987 
4988 	sc->nr_reclaimed += MIN_LRU_BATCH;
4989 
4990 	return true;
4991 }
4992 
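/*
 * Main lru_gen reclaim loop for one lruvec: drain the per-CPU LRU caches,
 * plug the block layer, then alternate between get_nr_to_scan() and
 * evict_pages() until the scan target is met, nothing is left to evict, or
 * should_abort_scan() says enough work has been done. Swappiness is forced
 * to a token value of 1 for global reclaim when sc->may_swap is clear but
 * the lruvec would otherwise allow swapping.
 */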
4993 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
4994 {
4995 	struct blk_plug plug;
4996 	bool need_aging = false;
4997 	bool need_swapping = false;
4998 	unsigned long scanned = 0;
4999 	unsigned long reclaimed = sc->nr_reclaimed;
5000 	DEFINE_MAX_SEQ(lruvec);
5001 
5002 	lru_add_drain();
5003 
5004 	blk_start_plug(&plug);
5005 
5006 	set_mm_walk(lruvec_pgdat(lruvec));
5007 
5008 	while (true) {
5009 		int delta;
5010 		int swappiness;
5011 		unsigned long nr_to_scan;
5012 
5013 		if (sc->may_swap)
5014 			swappiness = get_swappiness(lruvec, sc);
5015 		else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc))
5016 			swappiness = 1;
5017 		else
5018 			swappiness = 0;
5019 
5020 		nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging);
5021 		if (!nr_to_scan)
5022 			goto done;
5023 
5024 		delta = evict_pages(lruvec, sc, swappiness, &need_swapping);
5025 		if (!delta)
5026 			goto done;
5027 
5028 		scanned += delta;
5029 		if (scanned >= nr_to_scan)
5030 			break;
5031 
5032 		if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
5033 			break;
5034 
5035 		cond_resched();
5036 	}
5037 
5038 	/* see the comment in lru_gen_age_node() */
5039 	if (sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH && !need_aging)
5040 		sc->memcgs_need_aging = false;
5041 done:
5042 	clear_mm_walk();
5043 
5044 	blk_finish_plug(&plug);
5045 }
5046 
5047 /******************************************************************************
5048  *                          state change
5049  ******************************************************************************/
5050 
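/*
 * Switching lru_gen on or off has to migrate every page between the classic
 * active/inactive lists and the generation lists. fill_evictable() and
 * drain_evictable() below do that in MAX_LRU_BATCH chunks so that the
 * lru_lock can be dropped and rescheduling can happen between batches.
 */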
5051 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
5052 {
5053 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
5054 
5055 	if (lrugen->enabled) {
5056 		enum lru_list lru;
5057 
5058 		for_each_evictable_lru(lru) {
5059 			if (!list_empty(&lruvec->lists[lru]))
5060 				return false;
5061 		}
5062 	} else {
5063 		int gen, type, zone;
5064 
5065 		for_each_gen_type_zone(gen, type, zone) {
5066 			if (!list_empty(&lrugen->lists[gen][type][zone]))
5067 				return false;
5068 		}
5069 	}
5070 
5071 	return true;
5072 }
5073 
5074 static bool fill_evictable(struct lruvec *lruvec)
5075 {
5076 	enum lru_list lru;
5077 	int remaining = MAX_LRU_BATCH;
5078 
5079 	for_each_evictable_lru(lru) {
5080 		int type = is_file_lru(lru);
5081 		bool active = is_active_lru(lru);
5082 		struct list_head *head = &lruvec->lists[lru];
5083 
5084 		while (!list_empty(head)) {
5085 			bool success;
5086 			struct page *page = lru_to_page(head);
5087 
5088 			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
5089 			VM_WARN_ON_ONCE_PAGE(PageActive(page) != active, page);
5090 			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
5091 			VM_WARN_ON_ONCE_PAGE(page_lru_gen(page) != -1, page);
5092 
5093 			del_page_from_lru_list(page, lruvec);
5094 			success = lru_gen_add_page(lruvec, page, false);
5095 			VM_WARN_ON_ONCE(!success);
5096 
5097 			if (!--remaining)
5098 				return false;
5099 		}
5100 	}
5101 
5102 	return true;
5103 }
5104 
5105 static bool drain_evictable(struct lruvec *lruvec)
5106 {
5107 	int gen, type, zone;
5108 	int remaining = MAX_LRU_BATCH;
5109 
5110 	for_each_gen_type_zone(gen, type, zone) {
5111 		struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
5112 
5113 		while (!list_empty(head)) {
5114 			bool success;
5115 			struct page *page = lru_to_page(head);
5116 
5117 			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
5118 			VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
5119 			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
5120 			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
5121 
5122 			success = lru_gen_del_page(lruvec, page, false);
5123 			VM_WARN_ON_ONCE(!success);
5124 			add_page_to_lru_list(page, lruvec);
5125 
5126 			if (!--remaining)
5127 				return false;
5128 		}
5129 	}
5130 
5131 	return true;
5132 }
5133 
5134 static void lru_gen_change_state(bool enabled)
5135 {
5136 	static DEFINE_MUTEX(state_mutex);
5137 
5138 	struct mem_cgroup *memcg;
5139 
5140 	cgroup_lock();
5141 	cpus_read_lock();
5142 	get_online_mems();
5143 	mutex_lock(&state_mutex);
5144 
5145 	if (enabled == lru_gen_enabled())
5146 		goto unlock;
5147 
5148 	if (enabled)
5149 		static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
5150 	else
5151 		static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
5152 
5153 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
5154 	do {
5155 		int nid;
5156 
5157 		for_each_node(nid) {
5158 			struct lruvec *lruvec = get_lruvec(memcg, nid);
5159 
5160 			if (!lruvec)
5161 				continue;
5162 
5163 			spin_lock_irq(&lruvec->lru_lock);
5164 
5165 			VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
5166 			VM_WARN_ON_ONCE(!state_is_valid(lruvec));
5167 
5168 			lruvec->lrugen.enabled = enabled;
5169 
5170 			while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
5171 				spin_unlock_irq(&lruvec->lru_lock);
5172 				cond_resched();
5173 				spin_lock_irq(&lruvec->lru_lock);
5174 			}
5175 
5176 			spin_unlock_irq(&lruvec->lru_lock);
5177 		}
5178 
5179 		cond_resched();
5180 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5181 unlock:
5182 	mutex_unlock(&state_mutex);
5183 	put_online_mems();
5184 	cpus_read_unlock();
5185 	cgroup_unlock();
5186 }
5187 
5188 /******************************************************************************
5189  *                          sysfs interface
5190  ******************************************************************************/
5191 
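/*
 * These attributes appear under /sys/kernel/mm/lru_gen/ (the group is
 * attached to mm_kobj in init_lru_gen() below). As an illustration of the
 * accepted input (the values here are only examples): writing "y" or "n" to
 * the "enabled" file toggles all capabilities, a hex mask such as 0x0003
 * selects individual ones, and "min_ttl_ms" takes a plain number of
 * milliseconds. See the Documentation reference below for the authoritative
 * interface description.
 */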
5192 static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
5193 {
5194 	return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
5195 }
5196 
5197 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5198 static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr,
5199 			     const char *buf, size_t len)
5200 {
5201 	unsigned int msecs;
5202 
5203 	if (kstrtouint(buf, 0, &msecs))
5204 		return -EINVAL;
5205 
5206 	WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
5207 
5208 	return len;
5209 }
5210 
5211 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR(
5212 	min_ttl_ms, 0644, show_min_ttl, store_min_ttl
5213 );
5214 
5215 static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
5216 {
5217 	unsigned int caps = 0;
5218 
5219 	if (get_cap(LRU_GEN_CORE))
5220 		caps |= BIT(LRU_GEN_CORE);
5221 
5222 	if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
5223 		caps |= BIT(LRU_GEN_MM_WALK);
5224 
5225 	if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && get_cap(LRU_GEN_NONLEAF_YOUNG))
5226 		caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
5227 
5228 	return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps);
5229 }
5230 
5231 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5232 static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr,
5233 			     const char *buf, size_t len)
5234 {
5235 	int i;
5236 	unsigned int caps;
5237 
5238 	if (tolower(*buf) == 'n')
5239 		caps = 0;
5240 	else if (tolower(*buf) == 'y')
5241 		caps = -1;
5242 	else if (kstrtouint(buf, 0, &caps))
5243 		return -EINVAL;
5244 
5245 	for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
5246 		bool enabled = caps & BIT(i);
5247 
5248 		if (i == LRU_GEN_CORE)
5249 			lru_gen_change_state(enabled);
5250 		else if (enabled)
5251 			static_branch_enable(&lru_gen_caps[i]);
5252 		else
5253 			static_branch_disable(&lru_gen_caps[i]);
5254 	}
5255 
5256 	return len;
5257 }
5258 
5259 static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
5260 	enabled, 0644, show_enabled, store_enabled
5261 );
5262 
5263 static struct attribute *lru_gen_attrs[] = {
5264 	&lru_gen_min_ttl_attr.attr,
5265 	&lru_gen_enabled_attr.attr,
5266 	NULL
5267 };
5268 
5269 static struct attribute_group lru_gen_attr_group = {
5270 	.name = "lru_gen",
5271 	.attrs = lru_gen_attrs,
5272 };
5273 
5274 /******************************************************************************
5275  *                          debugfs interface
5276  ******************************************************************************/
5277 
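/*
 * The read side walks every (memcg, node) pair and prints one lruvec per
 * iteration; m->private holds a PATH_MAX buffer used to print the cgroup
 * path. The files themselves ("lru_gen" read-write, "lru_gen_full"
 * read-only) are created at the debugfs root in init_lru_gen().
 */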
5278 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
5279 {
5280 	struct mem_cgroup *memcg;
5281 	loff_t nr_to_skip = *pos;
5282 
5283 	m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
5284 	if (!m->private)
5285 		return ERR_PTR(-ENOMEM);
5286 
5287 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
5288 	do {
5289 		int nid;
5290 
5291 		for_each_node_state(nid, N_MEMORY) {
5292 			if (!nr_to_skip--)
5293 				return get_lruvec(memcg, nid);
5294 		}
5295 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5296 
5297 	return NULL;
5298 }
5299 
5300 static void lru_gen_seq_stop(struct seq_file *m, void *v)
5301 {
5302 	if (!IS_ERR_OR_NULL(v))
5303 		mem_cgroup_iter_break(NULL, lruvec_memcg(v));
5304 
5305 	kvfree(m->private);
5306 	m->private = NULL;
5307 }
5308 
5309 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
5310 {
5311 	int nid = lruvec_pgdat(v)->node_id;
5312 	struct mem_cgroup *memcg = lruvec_memcg(v);
5313 
5314 	++*pos;
5315 
5316 	nid = next_memory_node(nid);
5317 	if (nid == MAX_NUMNODES) {
5318 		memcg = mem_cgroup_iter(NULL, memcg, NULL);
5319 		if (!memcg)
5320 			return NULL;
5321 
5322 		nid = first_memory_node;
5323 	}
5324 
5325 	return get_lruvec(memcg, nid);
5326 }
5327 
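/*
 * Extended per-generation output used by "lru_gen_full": for each tier it
 * prints either the running averages of refaulted/total pages ("RT", for the
 * newest generation) or the raw refaulted/evicted/protected counters
 * ("rep"), followed by one line of mm walk statistics.
 */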
5328 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
5329 				  unsigned long max_seq, unsigned long *min_seq,
5330 				  unsigned long seq)
5331 {
5332 	int i;
5333 	int type, tier;
5334 	int hist = lru_hist_from_seq(seq);
5335 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
5336 
5337 	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
5338 		seq_printf(m, "            %10d", tier);
5339 		for (type = 0; type < ANON_AND_FILE; type++) {
5340 			const char *s = "   ";
5341 			unsigned long n[3] = {};
5342 
5343 			if (seq == max_seq) {
5344 				s = "RT ";
5345 				n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
5346 				n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
5347 			} else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
5348 				s = "rep";
5349 				n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
5350 				n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
5351 				if (tier)
5352 					n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
5353 			}
5354 
5355 			for (i = 0; i < 3; i++)
5356 				seq_printf(m, " %10lu%c", n[i], s[i]);
5357 		}
5358 		seq_putc(m, '\n');
5359 	}
5360 
5361 	seq_puts(m, "                      ");
5362 	for (i = 0; i < NR_MM_STATS; i++) {
5363 		const char *s = "      ";
5364 		unsigned long n = 0;
5365 
5366 		if (seq == max_seq && NR_HIST_GENS == 1) {
5367 			s = "LOYNFA";
5368 			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
5369 		} else if (seq != max_seq && NR_HIST_GENS > 1) {
5370 			s = "loynfa";
5371 			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
5372 		}
5373 
5374 		seq_printf(m, " %10lu%c", n, s[i]);
5375 	}
5376 	seq_putc(m, '\n');
5377 }
5378 
5379 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5380 static int lru_gen_seq_show(struct seq_file *m, void *v)
5381 {
5382 	unsigned long seq;
5383 	bool full = !debugfs_real_fops(m->file)->write;
5384 	struct lruvec *lruvec = v;
5385 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
5386 	int nid = lruvec_pgdat(lruvec)->node_id;
5387 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5388 	DEFINE_MAX_SEQ(lruvec);
5389 	DEFINE_MIN_SEQ(lruvec);
5390 
5391 	if (nid == first_memory_node) {
5392 		const char *path = memcg ? m->private : "";
5393 
5394 #ifdef CONFIG_MEMCG
5395 		if (memcg)
5396 			cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
5397 #endif
5398 		seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
5399 	}
5400 
5401 	seq_printf(m, " node %5d\n", nid);
5402 
5403 	if (!full)
5404 		seq = min_seq[LRU_GEN_ANON];
5405 	else if (max_seq >= MAX_NR_GENS)
5406 		seq = max_seq - MAX_NR_GENS + 1;
5407 	else
5408 		seq = 0;
5409 
5410 	for (; seq <= max_seq; seq++) {
5411 		int type, zone;
5412 		int gen = lru_gen_from_seq(seq);
5413 		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
5414 
5415 		seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
5416 
5417 		for (type = 0; type < ANON_AND_FILE; type++) {
5418 			unsigned long size = 0;
5419 			char mark = full && seq < min_seq[type] ? 'x' : ' ';
5420 
5421 			for (zone = 0; zone < MAX_NR_ZONES; zone++)
5422 				size += max_t(long, READ_ONCE(lrugen->nr_pages[gen][type][zone]),
5423 						0);
5424 
5425 			seq_printf(m, " %10lu%c", size, mark);
5426 		}
5427 
5428 		seq_putc(m, '\n');
5429 
5430 		if (full)
5431 			lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
5432 	}
5433 
5434 	return 0;
5435 }
5436 
5437 static const struct seq_operations lru_gen_seq_ops = {
5438 	.start = lru_gen_seq_start,
5439 	.stop = lru_gen_seq_stop,
5440 	.next = lru_gen_seq_next,
5441 	.show = lru_gen_seq_show,
5442 };
5443 
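/*
 * Aging on demand (the '+' command): seq must match the current max_seq, and
 * unless a full scan is forced, the request is rejected with -ERANGE when
 * the maximum number of generations already exists.
 */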
5444 static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5445 		     bool can_swap, bool full_scan)
5446 {
5447 	DEFINE_MAX_SEQ(lruvec);
5448 	DEFINE_MIN_SEQ(lruvec);
5449 
5450 	if (seq < max_seq)
5451 		return 0;
5452 
5453 	if (seq > max_seq)
5454 		return -EINVAL;
5455 
5456 	if (!full_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
5457 		return -ERANGE;
5458 
5459 	try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, full_scan);
5460 
5461 	return 0;
5462 }
5463 
5464 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5465 			int swappiness, unsigned long nr_to_reclaim)
5466 {
5467 	DEFINE_MAX_SEQ(lruvec);
5468 
5469 	if (seq + MIN_NR_GENS > max_seq)
5470 		return -EINVAL;
5471 
5472 	sc->nr_reclaimed = 0;
5473 
5474 	while (!signal_pending(current)) {
5475 		DEFINE_MIN_SEQ(lruvec);
5476 
5477 		if (seq < min_seq[!swappiness])
5478 			return 0;
5479 
5480 		if (sc->nr_reclaimed >= nr_to_reclaim)
5481 			return 0;
5482 
5483 		if (!evict_pages(lruvec, sc, swappiness, NULL))
5484 			return 0;
5485 
5486 		cond_resched();
5487 	}
5488 
5489 	return -EINTR;
5490 }
5491 
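/*
 * Dispatch one debugfs command. As a rough illustration only (the exact
 * syntax is described in multigen_lru.rst and the numbers here are made up),
 * a line such as "+ 3 0 107" asks memcg 3 on node 0 to age past generation
 * 107, while "- 3 0 104 50 1024" evicts from the oldest generations starting
 * at 104, with a swappiness of 50, until 1024 pages have been reclaimed.
 */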
5492 static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
5493 		   struct scan_control *sc, int swappiness, unsigned long opt)
5494 {
5495 	struct lruvec *lruvec;
5496 	int err = -EINVAL;
5497 	struct mem_cgroup *memcg = NULL;
5498 
5499 	if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
5500 		return -EINVAL;
5501 
5502 	if (!mem_cgroup_disabled()) {
5503 		rcu_read_lock();
5504 		memcg = mem_cgroup_from_id(memcg_id);
5505 #ifdef CONFIG_MEMCG
5506 		if (memcg && !css_tryget(&memcg->css))
5507 			memcg = NULL;
5508 #endif
5509 		rcu_read_unlock();
5510 
5511 		if (!memcg)
5512 			return -EINVAL;
5513 	}
5514 
5515 	if (memcg_id != mem_cgroup_id(memcg))
5516 		goto done;
5517 
5518 	lruvec = get_lruvec(memcg, nid);
5519 
5520 	if (swappiness < 0)
5521 		swappiness = get_swappiness(lruvec, sc);
5522 	else if (swappiness > 200)
5523 		goto done;
5524 
5525 	switch (cmd) {
5526 	case '+':
5527 		err = run_aging(lruvec, seq, sc, swappiness, opt);
5528 		break;
5529 	case '-':
5530 		err = run_eviction(lruvec, seq, sc, swappiness, opt);
5531 		break;
5532 	}
5533 done:
5534 	mem_cgroup_put(memcg);
5535 
5536 	return err;
5537 }
5538 
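/*
 * A single write may carry several commands separated by ',', ';' or
 * newlines; each one is parsed with sscanf() and handed to run_cmd() with
 * reclaim disabled via memalloc_noreclaim_save(), so the debugfs path cannot
 * recurse into reclaim.
 */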
5539 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5540 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
5541 				 size_t len, loff_t *pos)
5542 {
5543 	void *buf;
5544 	char *cur, *next;
5545 	unsigned int flags;
5546 	struct blk_plug plug;
5547 	int err = -EINVAL;
5548 	struct scan_control sc = {
5549 		.may_writepage = true,
5550 		.may_unmap = true,
5551 		.may_swap = true,
5552 		.reclaim_idx = MAX_NR_ZONES - 1,
5553 		.gfp_mask = GFP_KERNEL,
5554 	};
5555 
5556 	buf = kvmalloc(len + 1, GFP_KERNEL);
5557 	if (!buf)
5558 		return -ENOMEM;
5559 
5560 	if (copy_from_user(buf, src, len)) {
5561 		kvfree(buf);
5562 		return -EFAULT;
5563 	}
5564 
5565 	set_task_reclaim_state(current, &sc.reclaim_state);
5566 	flags = memalloc_noreclaim_save();
5567 	blk_start_plug(&plug);
5568 	if (!set_mm_walk(NULL)) {
5569 		err = -ENOMEM;
5570 		goto done;
5571 	}
5572 
5573 	next = buf;
5574 	next[len] = '\0';
5575 
5576 	while ((cur = strsep(&next, ",;\n"))) {
5577 		int n;
5578 		int end;
5579 		char cmd;
5580 		unsigned int memcg_id;
5581 		unsigned int nid;
5582 		unsigned long seq;
5583 		unsigned int swappiness = -1;
5584 		unsigned long opt = -1;
5585 
5586 		cur = skip_spaces(cur);
5587 		if (!*cur)
5588 			continue;
5589 
5590 		n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
5591 			   &seq, &end, &swappiness, &end, &opt, &end);
5592 		if (n < 4 || cur[end]) {
5593 			err = -EINVAL;
5594 			break;
5595 		}
5596 
5597 		err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
5598 		if (err)
5599 			break;
5600 	}
5601 done:
5602 	clear_mm_walk();
5603 	blk_finish_plug(&plug);
5604 	memalloc_noreclaim_restore(flags);
5605 	set_task_reclaim_state(current, NULL);
5606 
5607 	kvfree(buf);
5608 
5609 	return err ? : len;
5610 }
5611 
5612 static int lru_gen_seq_open(struct inode *inode, struct file *file)
5613 {
5614 	return seq_open(file, &lru_gen_seq_ops);
5615 }
5616 
5617 static const struct file_operations lru_gen_rw_fops = {
5618 	.open = lru_gen_seq_open,
5619 	.read = seq_read,
5620 	.write = lru_gen_seq_write,
5621 	.llseek = seq_lseek,
5622 	.release = seq_release,
5623 };
5624 
5625 static const struct file_operations lru_gen_ro_fops = {
5626 	.open = lru_gen_seq_open,
5627 	.read = seq_read,
5628 	.llseek = seq_lseek,
5629 	.release = seq_release,
5630 };
5631 
5632 /******************************************************************************
5633  *                          initialization
5634  ******************************************************************************/
5635 
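/*
 * Per-lruvec setup: every generation/type/zone list starts empty, the
 * generation timestamps start at the current jiffies, max_seq is seeded to
 * MIN_NR_GENS + 1, and the enabled flag mirrors the global static key.
 */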
5636 void lru_gen_init_lruvec(struct lruvec *lruvec)
5637 {
5638 	int i;
5639 	int gen, type, zone;
5640 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
5641 
5642 	lrugen->max_seq = MIN_NR_GENS + 1;
5643 	lrugen->enabled = lru_gen_enabled();
5644 
5645 	for (i = 0; i <= MIN_NR_GENS + 1; i++)
5646 		lrugen->timestamps[i] = jiffies;
5647 
5648 	for_each_gen_type_zone(gen, type, zone)
5649 		INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
5650 
5651 	lruvec->mm_state.seq = MIN_NR_GENS;
5652 }
5653 
5654 #ifdef CONFIG_MEMCG
5655 void lru_gen_init_memcg(struct mem_cgroup *memcg)
5656 {
5657 	INIT_LIST_HEAD(&memcg->mm_list.fifo);
5658 	spin_lock_init(&memcg->mm_list.lock);
5659 }
5660 
5661 void lru_gen_exit_memcg(struct mem_cgroup *memcg)
5662 {
5663 	int i;
5664 	int nid;
5665 
5666 	for_each_node(nid) {
5667 		struct lruvec *lruvec = get_lruvec(memcg, nid);
5668 
5669 		VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
5670 					   sizeof(lruvec->lrugen.nr_pages)));
5671 
5672 		for (i = 0; i < NR_BLOOM_FILTERS; i++) {
5673 			bitmap_free(lruvec->mm_state.filters[i]);
5674 			lruvec->mm_state.filters[i] = NULL;
5675 		}
5676 	}
5677 }
5678 #endif
5679 
5680 static int __init init_lru_gen(void)
5681 {
5682 	BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
5683 	BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
5684 
5685 	if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
5686 		pr_err("lru_gen: failed to create sysfs group\n");
5687 
5688 	debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
5689 	debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
5690 
5691 	return 0;
5692 };
5693 late_initcall(init_lru_gen);
5694 
5695 #else /* !CONFIG_LRU_GEN */
5696 
5697 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
5698 {
5699 }
5700 
5701 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5702 {
5703 }
5704 
5705 #endif /* CONFIG_LRU_GEN */
5706 
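/*
 * Classic per-lruvec reclaim. With lru_gen enabled this simply forwards to
 * lru_gen_shrink_lruvec(); otherwise the scan targets come from
 * get_scan_count() and the evictable lists are shrunk in SWAP_CLUSTER_MAX
 * batches, rebalancing the anon/file split once the reclaim target is met.
 */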
5707 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5708 {
5709 	unsigned long nr[NR_LRU_LISTS];
5710 	unsigned long targets[NR_LRU_LISTS];
5711 	unsigned long nr_to_scan;
5712 	enum lru_list lru;
5713 	unsigned long nr_reclaimed = 0;
5714 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
5715 	bool proportional_reclaim;
5716 	struct blk_plug plug;
5717 
5718 	if (lru_gen_enabled()) {
5719 		lru_gen_shrink_lruvec(lruvec, sc);
5720 		return;
5721 	}
5722 
5723 	get_scan_count(lruvec, sc, nr);
5724 
5725 	/* Record the original scan target for proportional adjustments later */
5726 	memcpy(targets, nr, sizeof(nr));
5727 
5728 	/*
5729 	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
5730 	 * event that can occur when there is little memory pressure e.g.
5731 	 * multiple streaming readers/writers. Hence, we do not abort scanning
5732 	 * when the requested number of pages are reclaimed when scanning at
5733 	 * DEF_PRIORITY on the assumption that the fact we are direct
5734 	 * reclaiming implies that kswapd is not keeping up and it is best to
5735 	 * do a batch of work at once. For memcg reclaim one check is made to
5736 	 * abort proportional reclaim if either the file or anon lru has already
5737 	 * dropped to zero at the first pass.
5738 	 */
5739 	proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
5740 				sc->priority == DEF_PRIORITY);
5741 
5742 	blk_start_plug(&plug);
5743 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
5744 					nr[LRU_INACTIVE_FILE]) {
5745 		unsigned long nr_anon, nr_file, percentage;
5746 		unsigned long nr_scanned;
5747 
5748 		for_each_evictable_lru(lru) {
5749 			if (nr[lru]) {
5750 				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
5751 				nr[lru] -= nr_to_scan;
5752 
5753 				nr_reclaimed += shrink_list(lru, nr_to_scan,
5754 							    lruvec, sc);
5755 			}
5756 		}
5757 
5758 		cond_resched();
5759 
5760 		if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
5761 			continue;
5762 
5763 		/*
5764 		 * For kswapd and memcg, reclaim at least the number of pages
5765 		 * requested. Ensure that the anon and file LRUs are scanned
5766 		 * proportionally to what was requested by get_scan_count(). We
5767 		 * stop reclaiming one LRU and reduce the amount of scanning
5768 		 * proportional to the original scan target.
5769 		 */
5770 		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
5771 		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
5772 
5773 		/*
5774 		 * It's just vindictive to attack the larger once the smaller
5775 		 * has gone to zero.  And given the way we stop scanning the
5776 		 * smaller below, this makes sure that we only make one nudge
5777 		 * towards proportionality once we've got nr_to_reclaim.
5778 		 */
5779 		if (!nr_file || !nr_anon)
5780 			break;
5781 
5782 		if (nr_file > nr_anon) {
5783 			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
5784 						targets[LRU_ACTIVE_ANON] + 1;
5785 			lru = LRU_BASE;
5786 			percentage = nr_anon * 100 / scan_target;
5787 		} else {
5788 			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
5789 						targets[LRU_ACTIVE_FILE] + 1;
5790 			lru = LRU_FILE;
5791 			percentage = nr_file * 100 / scan_target;
5792 		}
5793 
5794 		/* Stop scanning the smaller of the LRU */
5795 		nr[lru] = 0;
5796 		nr[lru + LRU_ACTIVE] = 0;
5797 
5798 		/*
5799 		 * Recalculate the other LRU scan count based on its original
5800 		 * scan target and the percentage scanning already complete
5801 		 */
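		/*
		 * A purely illustrative example of the scaling below (the
		 * numbers are hypothetical): if the anon side still has 40%
		 * of its original target left when it is stopped, percentage
		 * is 40 and the file side's remaining scan count becomes 60%
		 * of targets[file], minus the file pages already scanned.
		 */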
5802 		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
5803 		nr_scanned = targets[lru] - nr[lru];
5804 		nr[lru] = targets[lru] * (100 - percentage) / 100;
5805 		nr[lru] -= min(nr[lru], nr_scanned);
5806 
5807 		lru += LRU_ACTIVE;
5808 		nr_scanned = targets[lru] - nr[lru];
5809 		nr[lru] = targets[lru] * (100 - percentage) / 100;
5810 		nr[lru] -= min(nr[lru], nr_scanned);
5811 	}
5812 	blk_finish_plug(&plug);
5813 	sc->nr_reclaimed += nr_reclaimed;
5814 
5815 	/*
5816 	 * Even if we did not try to evict anon pages at all, we want to
5817 	 * rebalance the anon lru active/inactive ratio.
5818 	 */
5819 	if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) &&
5820 	    inactive_is_low(lruvec, LRU_INACTIVE_ANON))
5821 		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
5822 				   sc, LRU_ACTIVE_ANON);
5823 }
5824 
5825 /* Use reclaim/compaction for costly allocs or under memory pressure */
5826 static bool in_reclaim_compaction(struct scan_control *sc)
5827 {
5828 	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
5829 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
5830 			 sc->priority < DEF_PRIORITY - 2))
5831 		return true;
5832 
5833 	return false;
5834 }
5835 
5836 /*
5837  * Reclaim/compaction is used for high-order allocation requests. It reclaims
5838  * order-0 pages before compacting the zone. should_continue_reclaim() returns
5839  * true if more pages should be reclaimed such that when the page allocator
5840  * calls try_to_compact_pages() that it will have enough free pages to succeed.
5841  * It will give up earlier than that if there is difficulty reclaiming pages.
5842  */
5843 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
5844 					unsigned long nr_reclaimed,
5845 					struct scan_control *sc)
5846 {
5847 	unsigned long pages_for_compaction;
5848 	unsigned long inactive_lru_pages;
5849 	int z;
5850 
5851 	/* If not in reclaim/compaction mode, stop */
5852 	if (!in_reclaim_compaction(sc))
5853 		return false;
5854 
5855 	/*
5856 	 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
5857 	 * number of pages that were scanned. This will return to the caller
5858 	 * with the risk that reclaim/compaction and the resulting allocation
5859 	 * attempt fail. In the past we have tried harder for __GFP_RETRY_MAYFAIL
5860 	 * allocations by requiring that the full LRU list had been scanned
5861 	 * first, assuming that a zero delta of sc->nr_scanned meant a full LRU
5862 	 * scan, but that approximation was wrong, and there were corner cases
5863 	 * where a non-zero number of pages was always scanned.
5864 	 */
5865 	if (!nr_reclaimed)
5866 		return false;
5867 
5868 	/* If compaction would go ahead or the allocation would succeed, stop */
5869 	for (z = 0; z <= sc->reclaim_idx; z++) {
5870 		struct zone *zone = &pgdat->node_zones[z];
5871 		if (!managed_zone(zone))
5872 			continue;
5873 
5874 		switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
5875 		case COMPACT_SUCCESS:
5876 		case COMPACT_CONTINUE:
5877 			return false;
5878 		default:
5879 			/* check next zone */
5880 			;
5881 		}
5882 	}
5883 
5884 	/*
5885 	 * If we have not reclaimed enough pages for compaction and the
5886 	 * inactive lists are large enough, continue reclaiming
5887 	 */
5888 	pages_for_compaction = compact_gap(sc->order);
5889 	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
5890 	if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
5891 		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
5892 
5893 	return inactive_lru_pages > pages_for_compaction;
5894 }
5895 
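/*
 * Walk every memcg in the target subtree: cgroups below memory.min are
 * skipped outright, those below memory.low are skipped unless low-protection
 * reclaim has been enabled, and each remaining lruvec gets shrink_lruvec()
 * plus a shrink_slab() pass, with per-group efficiency fed to vmpressure().
 */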
5896 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
5897 {
5898 	struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
5899 	struct mem_cgroup *memcg;
5900 
5901 	memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
5902 	do {
5903 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
5904 		unsigned long reclaimed;
5905 		unsigned long scanned;
5906 		bool skip = false;
5907 
5908 		/*
5909 		 * This loop can become CPU-bound when target memcgs
5910 		 * aren't eligible for reclaim - either because they
5911 		 * don't have any reclaimable pages, or because their
5912 		 * memory is explicitly protected. Avoid soft lockups.
5913 		 */
5914 		cond_resched();
5915 
5916 		trace_android_vh_shrink_node_memcgs(memcg, &skip);
5917 		if (skip)
5918 			continue;
5919 
5920 		mem_cgroup_calculate_protection(target_memcg, memcg);
5921 
5922 		if (mem_cgroup_below_min(memcg)) {
5923 			/*
5924 			 * Hard protection.
5925 			 * If there is no reclaimable memory, OOM.
5926 			 */
5927 			continue;
5928 		} else if (mem_cgroup_below_low(memcg)) {
5929 			/*
5930 			 * Soft protection.
5931 			 * Respect the protection only as long as
5932 			 * there is an unprotected supply
5933 			 * of reclaimable memory from other cgroups.
5934 			 */
5935 			if (!sc->memcg_low_reclaim) {
5936 				sc->memcg_low_skipped = 1;
5937 				continue;
5938 			}
5939 			memcg_memory_event(memcg, MEMCG_LOW);
5940 		}
5941 
5942 		reclaimed = sc->nr_reclaimed;
5943 		scanned = sc->nr_scanned;
5944 
5945 		shrink_lruvec(lruvec, sc);
5946 
5947 		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
5948 			    sc->priority);
5949 
5950 		/* Record the group's reclaim efficiency */
5951 		vmpressure(sc->gfp_mask, memcg, false,
5952 			   sc->nr_scanned - scanned,
5953 			   sc->nr_reclaimed - reclaimed);
5954 
5955 	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
5956 }
5957 
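/*
 * Node-level reclaim: shrink the memcgs, fold in slab pages freed via
 * reclaim_state, then record congestion/dirty/writeback state so kswapd and
 * direct reclaimers throttle appropriately, looping for as long as
 * should_continue_reclaim() still wants order-0 pages for compaction.
 */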
5958 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
5959 {
5960 	struct reclaim_state *reclaim_state = current->reclaim_state;
5961 	unsigned long nr_reclaimed, nr_scanned;
5962 	struct lruvec *target_lruvec;
5963 	bool reclaimable = false;
5964 
5965 	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
5966 
5967 again:
5968 	memset(&sc->nr, 0, sizeof(sc->nr));
5969 
5970 	nr_reclaimed = sc->nr_reclaimed;
5971 	nr_scanned = sc->nr_scanned;
5972 
5973 	prepare_scan_count(pgdat, sc);
5974 
5975 	shrink_node_memcgs(pgdat, sc);
5976 
5977 	if (reclaim_state) {
5978 		sc->nr_reclaimed += reclaim_state->reclaimed_slab;
5979 		reclaim_state->reclaimed_slab = 0;
5980 	}
5981 
5982 	/* Record the subtree's reclaim efficiency */
5983 	vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
5984 		   sc->nr_scanned - nr_scanned,
5985 		   sc->nr_reclaimed - nr_reclaimed);
5986 
5987 	if (sc->nr_reclaimed - nr_reclaimed)
5988 		reclaimable = true;
5989 
5990 	if (current_is_kswapd()) {
5991 		/*
5992 		 * If reclaim is isolating dirty pages under writeback,
5993 		 * it implies that the long-lived page allocation rate
5994 		 * is exceeding the page laundering rate. Either the
5995 		 * global limits are not being effective at throttling
5996 		 * processes due to the page distribution throughout
5997 		 * zones or there is heavy usage of a slow backing
5998 		 * device. The only option is to throttle from reclaim
5999 		 * context which is not ideal as there is no guarantee
6000 		 * the dirtying process is throttled in the same way
6001 		 * balance_dirty_pages() manages.
6002 		 *
6003 		 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
6004 		 * count the number of pages under writeback flagged for
6005 		 * immediate reclaim and stall if any are encountered
6006 		 * in the nr_immediate check below.
6007 		 */
6008 		if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
6009 			set_bit(PGDAT_WRITEBACK, &pgdat->flags);
6010 
6011 		/* Allow kswapd to start writing pages during reclaim. */
6012 		if (sc->nr.unqueued_dirty == sc->nr.file_taken)
6013 			set_bit(PGDAT_DIRTY, &pgdat->flags);
6014 
6015 		/*
6016 		 * If kswapd scans pages marked for immediate
6017 		 * reclaim and under writeback (nr_immediate), it
6018 		 * implies that pages are cycling through the LRU
6019 		 * faster than they are written so also forcibly stall.
6020 		 */
6021 		if (sc->nr.immediate)
6022 			congestion_wait(BLK_RW_ASYNC, HZ/10);
6023 	}
6024 
6025 	/*
6026 	 * Tag a node/memcg as congested if all the dirty pages
6027 	 * scanned were backed by a congested BDI and
6028 	 * wait_iff_congested will stall.
6029 	 *
6030 	 * Legacy memcg will stall in page writeback so avoid forcibly
6031 	 * stalling in wait_iff_congested().
6032 	 */
6033 	if ((current_is_kswapd() ||
6034 	     (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
6035 	    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
6036 		set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
6037 
6038 	/*
6039 	 * Stall direct reclaim for IO completions if the underlying BDIs
6040 	 * and the node are congested. Allow kswapd to continue until it
6041 	 * starts encountering unqueued dirty pages or cycling through
6042 	 * the LRU too quickly.
6043 	 */
6044 	if (!current_is_kswapd() && current_may_throttle() &&
6045 	    !sc->hibernation_mode &&
6046 	    test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
6047 		wait_iff_congested(BLK_RW_ASYNC, HZ/10);
6048 
6049 	if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
6050 				    sc))
6051 		goto again;
6052 
6053 	/*
6054 	 * Kswapd gives up on balancing particular nodes after too
6055 	 * many failures to reclaim anything from them and goes to
6056 	 * sleep. On reclaim progress, reset the failure counter. A
6057 	 * successful direct reclaim run will revive a dormant kswapd.
6058 	 */
6059 	if (reclaimable)
6060 		pgdat->kswapd_failures = 0;
6061 }
6062 
6063 /*
6064  * Returns true if compaction should go ahead for a costly-order request, or
6065  * the allocation would already succeed without compaction. Return false if we
6066  * should reclaim first.
6067  */
6068 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
6069 {
6070 	unsigned long watermark;
6071 	enum compact_result suitable;
6072 
6073 	suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
6074 	if (suitable == COMPACT_SUCCESS)
6075 		/* Allocation should succeed already. Don't reclaim. */
6076 		return true;
6077 	if (suitable == COMPACT_SKIPPED)
6078 		/* Compaction cannot yet proceed. Do reclaim. */
6079 		return false;
6080 
6081 	/*
6082 	 * Compaction is already possible, but it takes time to run and there
6083 	 * are potentially other callers using the pages just freed. So proceed
6084 	 * with reclaim to make a buffer of free pages available to give
6085 	 * compaction a reasonable chance of completing and allocating the page.
6086 	 * Note that we won't actually reclaim the whole buffer in one attempt
6087 	 * as the target watermark in should_continue_reclaim() is lower. But if
6088 	 * we are already above the high+gap watermark, don't reclaim at all.
6089 	 */
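	/*
	 * Rough illustration, assuming compact_gap() is on the order of twice
	 * the allocation size: for an order-9 request this keeps reclaiming
	 * until roughly high watermark + 1024 pages of headroom are free.
	 */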
6090 	watermark = high_wmark_pages(zone) + compact_gap(sc->order);
6091 
6092 	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
6093 }
6094 
6095 /*
6096  * This is the direct reclaim path, for page-allocating processes.  We only
6097  * try to reclaim pages from zones which will satisfy the caller's allocation
6098  * request.
6099  *
6100  * If a zone is deemed to be full of pinned pages then just give it a light
6101  * scan then give up on it.
6102  */
6103 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
6104 {
6105 	struct zoneref *z;
6106 	struct zone *zone;
6107 	unsigned long nr_soft_reclaimed;
6108 	unsigned long nr_soft_scanned;
6109 	gfp_t orig_mask;
6110 	pg_data_t *last_pgdat = NULL;
6111 
6112 	/*
6113 	 * If the number of buffer_heads in the machine exceeds the maximum
6114 	 * allowed level, force direct reclaim to scan the highmem zone as
6115 	 * highmem pages could be pinning lowmem pages storing buffer_heads
6116 	 */
6117 	orig_mask = sc->gfp_mask;
6118 	if (buffer_heads_over_limit) {
6119 		sc->gfp_mask |= __GFP_HIGHMEM;
6120 		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
6121 	}
6122 
6123 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
6124 					sc->reclaim_idx, sc->nodemask) {
6125 		/*
6126 		 * Take care memory controller reclaiming has small influence
6127 		 * to global LRU.
6128 		 */
6129 		if (!cgroup_reclaim(sc)) {
6130 			if (!cpuset_zone_allowed(zone,
6131 						 GFP_KERNEL | __GFP_HARDWALL))
6132 				continue;
6133 
6134 			/*
6135 			 * If we already have plenty of memory free for
6136 			 * compaction in this zone, don't free any more.
6137 			 * Even though compaction is invoked for any
6138 			 * non-zero order, only frequent costly order
6139 			 * reclamation is disruptive enough to become a
6140 			 * noticeable problem, like transparent huge
6141 			 * page allocations.
6142 			 */
6143 			if (IS_ENABLED(CONFIG_COMPACTION) &&
6144 			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
6145 			    compaction_ready(zone, sc)) {
6146 				sc->compaction_ready = true;
6147 				continue;
6148 			}
6149 
6150 			/*
6151 			 * Shrink each node in the zonelist once. If the
6152 			 * zonelist is ordered by zone (not the default) then a
6153 			 * node may be shrunk multiple times but in that case
6154 			 * the user prefers lower zones being preserved.
6155 			 */
6156 			if (zone->zone_pgdat == last_pgdat)
6157 				continue;
6158 
6159 			/*
6160 			 * This steals pages from memory cgroups over softlimit
6161 			 * and returns the number of reclaimed pages and
6162 			 * scanned pages. This works for global memory pressure
6163 			 * and balancing, not for a memcg's limit.
6164 			 */
6165 			nr_soft_scanned = 0;
6166 			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
6167 						sc->order, sc->gfp_mask,
6168 						&nr_soft_scanned);
6169 			sc->nr_reclaimed += nr_soft_reclaimed;
6170 			sc->nr_scanned += nr_soft_scanned;
6171 			/* need some check to avoid more shrink_zone() calls */
6172 		}
6173 
6174 		/* See comment about same check for global reclaim above */
6175 		if (zone->zone_pgdat == last_pgdat)
6176 			continue;
6177 		last_pgdat = zone->zone_pgdat;
6178 		shrink_node(zone->zone_pgdat, sc);
6179 	}
6180 
6181 	/*
6182 	 * Restore to original mask to avoid the impact on the caller if we
6183 	 * promoted it to __GFP_HIGHMEM.
6184 	 */
6185 	sc->gfp_mask = orig_mask;
6186 }
6187 
6188 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
6189 {
6190 	struct lruvec *target_lruvec;
6191 	unsigned long refaults;
6192 
6193 	if (lru_gen_enabled())
6194 		return;
6195 
6196 	target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
6197 	refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
6198 	target_lruvec->refaults[0] = refaults;
6199 	refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
6200 	target_lruvec->refaults[1] = refaults;
6201 }
6202 
6203 /*
6204  * This is the main entry point to direct page reclaim.
6205  *
6206  * If a full scan of the inactive list fails to free enough memory then we
6207  * are "out of memory" and something needs to be killed.
6208  *
6209  * If the caller is !__GFP_FS then the probability of a failure is reasonably
6210  * high - the zone may be full of dirty or under-writeback pages, which this
6211  * caller can't do much about.  We kick the writeback threads and take explicit
6212  * naps in the hope that some of these pages can be written.  But if the
6213  * allocating task holds filesystem locks which prevent writeout this might not
6214  * work, and the allocation attempt will fail.
6215  *
6216  * returns:	0, if no pages reclaimed
6217  * 		else, the number of pages reclaimed
6218  */
6219 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
6220 					  struct scan_control *sc)
6221 {
6222 	int initial_priority = sc->priority;
6223 	pg_data_t *last_pgdat;
6224 	struct zoneref *z;
6225 	struct zone *zone;
6226 retry:
6227 	delayacct_freepages_start();
6228 
6229 	if (!cgroup_reclaim(sc))
6230 		__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
6231 
6232 	do {
6233 		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
6234 				sc->priority);
6235 		sc->nr_scanned = 0;
6236 		shrink_zones(zonelist, sc);
6237 
6238 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
6239 			break;
6240 
6241 		if (sc->compaction_ready)
6242 			break;
6243 
6244 		/*
6245 		 * If we're getting trouble reclaiming, start doing
6246 		 * writepage even in laptop mode.
6247 		 */
6248 		if (sc->priority < DEF_PRIORITY - 2)
6249 			sc->may_writepage = 1;
6250 	} while (--sc->priority >= 0);
6251 
6252 	last_pgdat = NULL;
6253 	for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
6254 					sc->nodemask) {
6255 		if (zone->zone_pgdat == last_pgdat)
6256 			continue;
6257 		last_pgdat = zone->zone_pgdat;
6258 
6259 		snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
6260 
6261 		if (cgroup_reclaim(sc)) {
6262 			struct lruvec *lruvec;
6263 
6264 			lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
6265 						   zone->zone_pgdat);
6266 			clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
6267 		}
6268 	}
6269 
6270 	delayacct_freepages_end();
6271 
6272 	if (sc->nr_reclaimed)
6273 		return sc->nr_reclaimed;
6274 
6275 	/* Aborted reclaim to try compaction? don't OOM, then */
6276 	if (sc->compaction_ready)
6277 		return 1;
6278 
6279 	/*
6280 	 * We make inactive:active ratio decisions based on the node's
6281 	 * composition of memory, but a restrictive reclaim_idx or a
6282 	 * memory.low cgroup setting can exempt large amounts of
6283 	 * memory from reclaim. Neither of which are very common, so
6284 	 * instead of doing costly eligibility calculations of the
6285 	 * entire cgroup subtree up front, we assume the estimates are
6286 	 * good, and retry with forcible deactivation if that fails.
6287 	 */
6288 	if (sc->skipped_deactivate) {
6289 		sc->priority = initial_priority;
6290 		sc->force_deactivate = 1;
6291 		sc->skipped_deactivate = 0;
6292 		goto retry;
6293 	}
6294 
6295 	/* Untapped cgroup reserves?  Don't OOM, retry. */
6296 	if (sc->memcg_low_skipped) {
6297 		sc->priority = initial_priority;
6298 		sc->force_deactivate = 0;
6299 		sc->memcg_low_reclaim = 1;
6300 		sc->memcg_low_skipped = 0;
6301 		goto retry;
6302 	}
6303 
6304 	return 0;
6305 }
6306 
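/*
 * Direct reclaim is allowed when the free pages in ZONE_NORMAL and below add
 * up to more than half of the summed min watermarks (the pfmemalloc
 * reserve); otherwise callers are throttled on pfmemalloc_wait and kswapd is
 * woken to refill the reserve.
 */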
6307 static bool allow_direct_reclaim(pg_data_t *pgdat)
6308 {
6309 	struct zone *zone;
6310 	unsigned long pfmemalloc_reserve = 0;
6311 	unsigned long free_pages = 0;
6312 	int i;
6313 	bool wmark_ok;
6314 
6315 	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6316 		return true;
6317 
6318 	for (i = 0; i <= ZONE_NORMAL; i++) {
6319 		zone = &pgdat->node_zones[i];
6320 		if (!managed_zone(zone))
6321 			continue;
6322 
6323 		if (!zone_reclaimable_pages(zone))
6324 			continue;
6325 
6326 		pfmemalloc_reserve += min_wmark_pages(zone);
6327 		free_pages += zone_page_state(zone, NR_FREE_PAGES);
6328 	}
6329 
6330 	/* If there are no reserves (unexpected config) then do not throttle */
6331 	if (!pfmemalloc_reserve)
6332 		return true;
6333 
6334 	wmark_ok = free_pages > pfmemalloc_reserve / 2;
6335 
6336 	/* kswapd must be awake if processes are being throttled */
6337 	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
6338 		if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
6339 			WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
6340 
6341 		wake_up_interruptible(&pgdat->kswapd_wait);
6342 	}
6343 
6344 	return wmark_ok;
6345 }
6346 
6347 /*
6348  * Throttle direct reclaimers if backing storage is backed by the network
6349  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
6350  * depleted. kswapd will continue to make progress and wake the processes
6351  * when the low watermark is reached.
6352  *
6353  * Returns true if a fatal signal was delivered during throttling. If this
6354  * happens, the page allocator should not consider triggering the OOM killer.
6355  */
6356 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
6357 					nodemask_t *nodemask)
6358 {
6359 	struct zoneref *z;
6360 	struct zone *zone;
6361 	pg_data_t *pgdat = NULL;
6362 
6363 	/*
6364 	 * Kernel threads should not be throttled as they may be indirectly
6365 	 * responsible for cleaning pages necessary for reclaim to make forward
6366 	 * progress. kjournald for example may enter direct reclaim while
6367 	 * committing a transaction where throttling it could force other
6368 	 * processes to block on log_wait_commit().
6369 	 */
6370 	if (current->flags & PF_KTHREAD)
6371 		goto out;
6372 
6373 	/*
6374 	 * If a fatal signal is pending, this process should not throttle.
6375 	 * It should return quickly so it can exit and free its memory
6376 	 */
6377 	if (fatal_signal_pending(current))
6378 		goto out;
6379 
6380 	/*
6381 	 * Check if the pfmemalloc reserves are ok by finding the first node
6382 	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
6383 	 * GFP_KERNEL will be required for allocating network buffers when
6384 	 * swapping over the network so ZONE_HIGHMEM is unusable.
6385 	 *
6386 	 * Throttling is based on the first usable node and throttled processes
6387 	 * wait on a queue until kswapd makes progress and wakes them. There
6388 	 * is an affinity then between processes waking up and where reclaim
6389 	 * progress has been made assuming the process wakes on the same node.
6390 	 * More importantly, processes running on remote nodes will not compete
6391 	 * for remote pfmemalloc reserves and processes on different nodes
6392 	 * should make reasonable progress.
6393 	 */
6394 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
6395 					gfp_zone(gfp_mask), nodemask) {
6396 		if (zone_idx(zone) > ZONE_NORMAL)
6397 			continue;
6398 
6399 		/* Throttle based on the first usable node */
6400 		pgdat = zone->zone_pgdat;
6401 		if (allow_direct_reclaim(pgdat))
6402 			goto out;
6403 		break;
6404 	}
6405 
6406 	/* If no zone was usable by the allocation flags then do not throttle */
6407 	if (!pgdat)
6408 		goto out;
6409 
6410 	/* Account for the throttling */
6411 	count_vm_event(PGSCAN_DIRECT_THROTTLE);
6412 
6413 	/*
6414 	 * If the caller cannot enter the filesystem, it's possible that it
6415 	 * is due to the caller holding an FS lock or performing a journal
6416 	 * transaction in the case of a filesystem like ext[3|4]. In this case,
6417 	 * it is not safe to block on pfmemalloc_wait as kswapd could be
6418 	 * blocked waiting on the same lock. Instead, throttle for up to a
6419 	 * second before continuing.
6420 	 */
6421 	if (!(gfp_mask & __GFP_FS))
6422 		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
6423 			allow_direct_reclaim(pgdat), HZ);
6424 	else
6425 		/* Throttle until kswapd wakes the process */
6426 		wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
6427 			allow_direct_reclaim(pgdat));
6428 
6429 	if (fatal_signal_pending(current))
6430 		return true;
6431 
6432 out:
6433 	return false;
6434 }
6435 
6436 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
6437 				gfp_t gfp_mask, nodemask_t *nodemask)
6438 {
6439 	unsigned long nr_reclaimed;
6440 	struct scan_control sc = {
6441 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
6442 		.gfp_mask = current_gfp_context(gfp_mask),
6443 		.reclaim_idx = gfp_zone(gfp_mask),
6444 		.order = order,
6445 		.nodemask = nodemask,
6446 		.priority = DEF_PRIORITY,
6447 		.may_writepage = !laptop_mode,
6448 		.may_unmap = 1,
6449 		.may_swap = 1,
6450 	};
6451 
6452 	/*
6453 	 * scan_control uses s8 fields for order, priority, and reclaim_idx.
6454 	 * Confirm they are large enough for max values.
6455 	 */
6456 	BUILD_BUG_ON(MAX_ORDER > S8_MAX);
6457 	BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
6458 	BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
6459 
6460 	/*
6461 	 * Do not enter reclaim if fatal signal was delivered while throttled.
6462 	 * 1 is returned so that the page allocator does not OOM kill at this
6463 	 * point.
6464 	 */
6465 	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
6466 		return 1;
6467 
6468 	set_task_reclaim_state(current, &sc.reclaim_state);
6469 	trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
6470 
6471 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6472 
6473 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
6474 	set_task_reclaim_state(current, NULL);
6475 
6476 	return nr_reclaimed;
6477 }
6478 
6479 #ifdef CONFIG_MEMCG
6480 
6481 /* Only used by soft limit reclaim. Do not reuse for anything else. */
6482 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
6483 						gfp_t gfp_mask, bool noswap,
6484 						pg_data_t *pgdat,
6485 						unsigned long *nr_scanned)
6486 {
6487 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6488 	struct scan_control sc = {
6489 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
6490 		.target_mem_cgroup = memcg,
6491 		.may_writepage = !laptop_mode,
6492 		.may_unmap = 1,
6493 		.reclaim_idx = MAX_NR_ZONES - 1,
6494 		.may_swap = !noswap,
6495 	};
6496 
6497 	WARN_ON_ONCE(!current->reclaim_state);
6498 
6499 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
6500 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
6501 
6502 	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
6503 						      sc.gfp_mask);
6504 
6505 	/*
6506 	 * NOTE: Although we can get the priority field, using it
6507 	 * here is not a good idea, since it limits the pages we can scan.
6508 	 * if we don't reclaim here, the shrink_node from balance_pgdat
6509 	 * will pick up pages from other mem cgroup's as well. We hack
6510 	 * the priority and make it zero.
6511 	 */
6512 	shrink_lruvec(lruvec, &sc);
6513 
6514 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
6515 
6516 	*nr_scanned = sc.nr_scanned;
6517 
6518 	return sc.nr_reclaimed;
6519 }
6520 
6521 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
6522 					   unsigned long nr_pages,
6523 					   gfp_t gfp_mask,
6524 					   bool may_swap)
6525 {
6526 	unsigned long nr_reclaimed;
6527 	unsigned int noreclaim_flag;
6528 	struct scan_control sc = {
6529 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
6530 		.gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
6531 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
6532 		.reclaim_idx = MAX_NR_ZONES - 1,
6533 		.target_mem_cgroup = memcg,
6534 		.priority = DEF_PRIORITY,
6535 		.may_writepage = !laptop_mode,
6536 		.may_unmap = 1,
6537 		.may_swap = may_swap,
6538 	};
6539 	/*
6540 	 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
6541 	 * equal pressure on all the nodes. This is based on the assumption that
6542 	 * the reclaim does not bail out early.
6543 	 */
6544 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
6545 
6546 	set_task_reclaim_state(current, &sc.reclaim_state);
6547 	trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
6548 	noreclaim_flag = memalloc_noreclaim_save();
6549 
6550 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6551 
6552 	memalloc_noreclaim_restore(noreclaim_flag);
6553 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
6554 	set_task_reclaim_state(current, NULL);
6555 
6556 	return nr_reclaimed;
6557 }
6558 EXPORT_SYMBOL_GPL(try_to_free_mem_cgroup_pages);
6559 #endif
6560 
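/*
 * Background aging from kswapd: with lru_gen this defers to
 * lru_gen_age_node(); otherwise, if the anon inactive list has become too
 * small relative to the active list, a batch of active anon pages is
 * deactivated for every memcg on the node.
 */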
6561 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
6562 {
6563 	struct mem_cgroup *memcg;
6564 	struct lruvec *lruvec;
6565 
6566 	if (lru_gen_enabled()) {
6567 		lru_gen_age_node(pgdat, sc);
6568 		return;
6569 	}
6570 
6571 	if (!can_age_anon_pages(pgdat, sc))
6572 		return;
6573 
6574 	lruvec = mem_cgroup_lruvec(NULL, pgdat);
6575 	if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
6576 		return;
6577 
6578 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
6579 	do {
6580 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
6581 		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
6582 				   sc, LRU_ACTIVE_ANON);
6583 		memcg = mem_cgroup_iter(NULL, memcg, NULL);
6584 	} while (memcg);
6585 }
6586 
6587 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
6588 {
6589 	int i;
6590 	struct zone *zone;
6591 
6592 	/*
6593 	 * Check for watermark boosts top-down as the higher zones
6594 	 * are more likely to be boosted. Both watermarks and boosts
6595 	 * should not be checked at the same time as reclaim would
6596 	 * start prematurely when there is no boosting and a lower
6597 	 * zone is balanced.
6598 	 */
6599 	for (i = highest_zoneidx; i >= 0; i--) {
6600 		zone = pgdat->node_zones + i;
6601 		if (!managed_zone(zone))
6602 			continue;
6603 
6604 		if (zone->watermark_boost)
6605 			return true;
6606 	}
6607 
6608 	return false;
6609 }
6610 
6611 /*
6612  * Returns true if there is an eligible zone balanced for the request order
6613  * and highest_zoneidx
6614  */
6615 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
6616 {
6617 	int i;
6618 	unsigned long mark = -1;
6619 	struct zone *zone;
6620 
6621 	/*
6622 	 * Check watermarks bottom-up as lower zones are more likely to
6623 	 * meet watermarks.
6624 	 */
6625 	for (i = 0; i <= highest_zoneidx; i++) {
6626 		zone = pgdat->node_zones + i;
6627 
6628 		if (!managed_zone(zone))
6629 			continue;
6630 
6631 		mark = high_wmark_pages(zone);
6632 		if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
6633 			return true;
6634 	}
6635 
6636 	/*
6637 	 * If a node has no populated zone within highest_zoneidx, it does not
6638 	 * need balancing by definition. This can happen if a zone-restricted
6639 	 * allocation tries to wake a remote kswapd.
6640 	 */
6641 	if (mark == -1)
6642 		return true;
6643 
6644 	return false;
6645 }
6646 
6647 /* Clear pgdat state for congested, dirty or under writeback. */
6648 static void clear_pgdat_congested(pg_data_t *pgdat)
6649 {
6650 	struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
6651 
6652 	clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
6653 	clear_bit(PGDAT_DIRTY, &pgdat->flags);
6654 	clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
6655 }
6656 
6657 /*
6658  * Prepare kswapd for sleeping. This verifies that there are no processes
6659  * waiting in throttle_direct_reclaim() and that watermarks have been met.
6660  *
6661  * Returns true if kswapd is ready to sleep
6662  */
6663 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
6664 				int highest_zoneidx)
6665 {
6666 	/*
6667 	 * The throttled processes are normally woken up in balance_pgdat() as
6668 	 * soon as allow_direct_reclaim() is true. But there is a potential
6669 	 * race between when kswapd checks the watermarks and a process gets
6670 	 * throttled. There is also a potential race if processes get
6671 	 * throttled, kswapd wakes, a large process exits thereby balancing the
6672 	 * zones, which causes kswapd to exit balance_pgdat() before reaching
6673 	 * the wake up checks. If kswapd is going to sleep, no process should
6674 	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
6675 	 * the wake up is premature, processes will wake kswapd and get
6676 	 * throttled again. The difference from wake ups in balance_pgdat() is
6677 	 * that here we are under prepare_to_wait().
6678 	 */
6679 	if (waitqueue_active(&pgdat->pfmemalloc_wait))
6680 		wake_up_all(&pgdat->pfmemalloc_wait);
6681 
6682 	/* Hopeless node, leave it to direct reclaim */
6683 	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6684 		return true;
6685 
6686 	if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
6687 		clear_pgdat_congested(pgdat);
6688 		return true;
6689 	}
6690 
6691 	return false;
6692 }
6693 
6694 /*
6695  * kswapd shrinks a node of pages that are at or below the highest usable
6696  * zone that is currently unbalanced.
6697  *
6698  * Returns true if kswapd scanned at least the requested number of pages to
6699  * reclaim or if the lack of progress was due to pages under writeback.
6700  * This is used to determine if the scanning priority needs to be raised.
6701  */
6702 static bool kswapd_shrink_node(pg_data_t *pgdat,
6703 			       struct scan_control *sc)
6704 {
6705 	struct zone *zone;
6706 	int z;
6707 
6708 	/* Reclaim a number of pages proportional to the number of zones */
6709 	sc->nr_to_reclaim = 0;
6710 	for (z = 0; z <= sc->reclaim_idx; z++) {
6711 		zone = pgdat->node_zones + z;
6712 		if (!managed_zone(zone))
6713 			continue;
6714 
6715 		sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
6716 	}
6717 
6718 	/*
6719 	 * Historically care was taken to put equal pressure on all zones but
6720 	 * now pressure is applied based on node LRU order.
6721 	 */
6722 	shrink_node(pgdat, sc);
6723 
6724 	/*
6725 	 * Fragmentation may mean that the system cannot be rebalanced for
6726 	 * high-order allocations. If twice the allocation size has been
6727 	 * reclaimed then recheck watermarks only at order-0 to prevent
6728 	 * excessive reclaim. Assume that a process that requested a high-order
6729 	 * allocation can use direct reclaim/compaction.
6730 	 */
6731 	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
6732 		sc->order = 0;
6733 
6734 	return sc->nr_scanned >= sc->nr_to_reclaim;
6735 }
6736 
6737 /* Page allocator PCP high watermark is lowered if reclaim is active. */
6738 static inline void
6739 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
6740 {
6741 	int i;
6742 	struct zone *zone;
6743 
6744 	for (i = 0; i <= highest_zoneidx; i++) {
6745 		zone = pgdat->node_zones + i;
6746 
6747 		if (!managed_zone(zone))
6748 			continue;
6749 
6750 		if (active)
6751 			set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
6752 		else
6753 			clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
6754 	}
6755 }
6756 
6757 static inline void
6758 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
6759 {
6760 	update_reclaim_active(pgdat, highest_zoneidx, true);
6761 }
6762 
6763 static inline void
6764 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
6765 {
6766 	update_reclaim_active(pgdat, highest_zoneidx, false);
6767 }
6768 
6769 /*
6770  * For kswapd, balance_pgdat() will reclaim pages across a node from zones
6771  * that are eligible for use by the caller until at least one zone is
6772  * balanced.
6773  *
6774  * Returns the order kswapd finished reclaiming at.
6775  *
6776  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
6777  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
6778  * found to have free_pages <= high_wmark_pages(zone), any page in that zone
6779  * or lower is eligible for reclaim until at least one usable zone is
6780  * balanced.
6781  */
6782 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
6783 {
6784 	int i;
6785 	unsigned long nr_soft_reclaimed;
6786 	unsigned long nr_soft_scanned;
6787 	unsigned long pflags;
6788 	unsigned long nr_boost_reclaim;
6789 	unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
6790 	bool boosted;
6791 	struct zone *zone;
6792 	struct scan_control sc = {
6793 		.gfp_mask = GFP_KERNEL,
6794 		.order = order,
6795 		.may_unmap = 1,
6796 	};
6797 
6798 	set_task_reclaim_state(current, &sc.reclaim_state);
6799 	psi_memstall_enter(&pflags);
6800 	__fs_reclaim_acquire(_THIS_IP_);
6801 
6802 	count_vm_event(PAGEOUTRUN);
6803 
6804 	/*
6805 	 * Account for the reclaim boost. Note that the zone boost is left in
6806 	 * place so that parallel allocations that are near the watermark will
6807 	 * stall or direct reclaim until kswapd is finished.
6808 	 */
6809 	nr_boost_reclaim = 0;
6810 	for (i = 0; i <= highest_zoneidx; i++) {
6811 		zone = pgdat->node_zones + i;
6812 		if (!managed_zone(zone))
6813 			continue;
6814 
6815 		nr_boost_reclaim += zone->watermark_boost;
6816 		zone_boosts[i] = zone->watermark_boost;
6817 	}
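	/*
	 * nr_boost_reclaim is non-zero only if at least one zone carried a
	 * watermark boost; remember that so any remaining boost can be
	 * cleared and kcompactd woken once balancing finishes.
	 */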
6818 	boosted = nr_boost_reclaim;
6819 
6820 restart:
6821 	set_reclaim_active(pgdat, highest_zoneidx);
6822 	sc.priority = DEF_PRIORITY;
6823 	do {
6824 		unsigned long nr_reclaimed = sc.nr_reclaimed;
6825 		bool raise_priority = true;
6826 		bool balanced;
6827 		bool ret;
6828 
6829 		sc.reclaim_idx = highest_zoneidx;
6830 
6831 		/*
6832 		 * If the number of buffer_heads exceeds the maximum allowed
6833 		 * then consider reclaiming from all zones. This has a dual
6834 		 * purpose -- on 64-bit systems it is expected that
6835 		 * buffer_heads are stripped during active rotation. On 32-bit
6836 		 * systems, highmem pages can pin lowmem memory and shrinking
6837 		 * buffers can relieve lowmem pressure. Reclaim may still not
6838 		 * go ahead if all eligible zones for the original allocation
6839 		 * request are balanced to avoid excessive reclaim from kswapd.
6840 		 */
6841 		if (buffer_heads_over_limit) {
6842 			for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
6843 				zone = pgdat->node_zones + i;
6844 				if (!managed_zone(zone))
6845 					continue;
6846 
6847 				sc.reclaim_idx = i;
6848 				break;
6849 			}
6850 		}
6851 
6852 		/*
6853 		 * If the pgdat is imbalanced then ignore boosting and preserve
6854 		 * the watermarks for a later time and restart. Note that the
6855 	 * zone watermarks will still be reset at the end of balancing
6856 		 * on the grounds that the normal reclaim should be enough to
6857 		 * re-evaluate if boosting is required when kswapd next wakes.
6858 		 */
6859 		balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
6860 		if (!balanced && nr_boost_reclaim) {
6861 			nr_boost_reclaim = 0;
6862 			goto restart;
6863 		}
6864 
6865 		/*
6866 		 * If boosting is not active then only reclaim if there are no
6867 		 * eligible zones. Note that sc.reclaim_idx is not used as
6868 		 * buffer_heads_over_limit may have adjusted it.
6869 		 */
6870 		if (!nr_boost_reclaim && balanced)
6871 			goto out;
6872 
6873 		/* Limit the priority of boosting to avoid reclaim writeback */
6874 		if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
6875 			raise_priority = false;
6876 
6877 		/*
6878 		 * Do not writeback or swap pages for boosted reclaim. The
6879 		 * intent is to relieve pressure, not to issue sub-optimal IO
6880 		 * from reclaim context. If no pages are reclaimed, the
6881 		 * reclaim will be aborted.
6882 		 */
6883 		sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
6884 		sc.may_swap = !nr_boost_reclaim;
6885 
6886 		/*
6887 		 * Do some background aging, to give pages a chance to be
6888 		 * referenced before reclaiming. All pages are rotated
6889 		 * regardless of classzone as this is about consistent aging.
6890 		 */
6891 		kswapd_age_node(pgdat, &sc);
6892 
6893 		/*
6894 		 * If we're having trouble reclaiming, start doing writepage
6895 		 * even in laptop mode.
6896 		 */
6897 		if (sc.priority < DEF_PRIORITY - 2)
6898 			sc.may_writepage = 1;
6899 
6900 		/* Call soft limit reclaim before calling shrink_node. */
6901 		sc.nr_scanned = 0;
6902 		nr_soft_scanned = 0;
6903 		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
6904 						sc.gfp_mask, &nr_soft_scanned);
6905 		sc.nr_reclaimed += nr_soft_reclaimed;
6906 
6907 		/*
6908 		 * There should be no need to raise the scanning priority if
6909 		 * enough pages are already being scanned that the high
6910 		 * watermark would be met at 100% efficiency.
6911 		 */
6912 		if (kswapd_shrink_node(pgdat, &sc))
6913 			raise_priority = false;
6914 
6915 		/*
6916 		 * If the low watermark is met there is no need for processes
6917 		 * to be throttled on pfmemalloc_wait as they should now be
6918 		 * able to safely make forward progress. Wake them up.
6919 		 */
6920 		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
6921 				allow_direct_reclaim(pgdat))
6922 			wake_up_all(&pgdat->pfmemalloc_wait);
6923 
6924 		/* Check if kswapd should be suspending */
6925 		__fs_reclaim_release(_THIS_IP_);
6926 		ret = try_to_freeze();
6927 		__fs_reclaim_acquire(_THIS_IP_);
6928 		if (ret || kthread_should_stop())
6929 			break;
6930 
6931 		/*
6932 		 * Raise priority if scanning rate is too low or there was no
6933 		 * progress in reclaiming pages
6934 		 */
6935 		nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
6936 		nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
6937 
6938 		/*
6939 		 * If reclaim made no progress for a boost, stop reclaim as
6940 		 * IO cannot be queued and it could be an infinite loop in
6941 		 * extreme circumstances.
6942 		 */
6943 		if (nr_boost_reclaim && !nr_reclaimed)
6944 			break;
6945 
6946 		if (raise_priority || !nr_reclaimed)
6947 			sc.priority--;
6948 	} while (sc.priority >= 1);
6949 
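	/*
	 * Nothing was reclaimed at any priority. Record the failure so that,
	 * after MAX_RECLAIM_RETRIES such passes, the node is treated as
	 * hopeless and left to direct reclaim (see the checks in
	 * prepare_kswapd_sleep() and wakeup_kswapd()).
	 */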
6950 	if (!sc.nr_reclaimed)
6951 		pgdat->kswapd_failures++;
6952 
6953 out:
6954 	clear_reclaim_active(pgdat, highest_zoneidx);
6955 
6956 	/* If reclaim was boosted, account for the reclaim done in this pass */
6957 	if (boosted) {
6958 		unsigned long flags;
6959 
6960 		for (i = 0; i <= highest_zoneidx; i++) {
6961 			if (!zone_boosts[i])
6962 				continue;
6963 
6964 			/* Increments are under the zone lock */
6965 			zone = pgdat->node_zones + i;
6966 			spin_lock_irqsave(&zone->lock, flags);
6967 			zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
6968 			spin_unlock_irqrestore(&zone->lock, flags);
6969 		}
6970 
6971 		/*
6972 		 * As there is now likely space, wake up kcompactd to defragment
6973 		 * pageblocks.
6974 		 */
6975 		wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
6976 	}
6977 
6978 	snapshot_refaults(NULL, pgdat);
6979 	__fs_reclaim_release(_THIS_IP_);
6980 	psi_memstall_leave(&pflags);
6981 	set_task_reclaim_state(current, NULL);
6982 
6983 	/*
6984 	 * Return the order kswapd stopped reclaiming at as
6985 	 * prepare_kswapd_sleep() takes it into account. If another caller
6986 	 * entered the allocator slow path while kswapd was awake, order will
6987 	 * remain at the higher level.
6988 	 */
6989 	return sc.order;
6990 }
6991 
6992 /*
6993  * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
6994  * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is
6995  * not a valid index then either kswapd runs for the first time or kswapd couldn't
6996  * sleep after previous reclaim attempt (node is still unbalanced). In that
6997  * case return the zone index of the previous kswapd reclaim cycle.
6998  */
6999 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
7000 					   enum zone_type prev_highest_zoneidx)
7001 {
7002 	enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7003 
7004 	return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
7005 }
7006 
7007 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
7008 				unsigned int highest_zoneidx)
7009 {
7010 	long remaining = 0;
7011 	DEFINE_WAIT(wait);
7012 
7013 	if (freezing(current) || kthread_should_stop())
7014 		return;
7015 
7016 	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7017 
7018 	/*
7019 	 * Try to sleep for a short interval. Note that kcompactd will only be
7020 	 * woken if it is possible to sleep for a short interval. This is
7021 	 * deliberate on the assumption that if reclaim cannot keep an
7022 	 * eligible zone balanced, it is also unlikely that compaction will
7023 	 * succeed.
7024 	 */
7025 	if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7026 		/*
7027 		 * Compaction records what page blocks it recently failed to
7028 		 * isolate pages from and skips them in the future scanning.
7029 		 * When kswapd is going to sleep, it is reasonable to assume
7030 		 * that pages have been freed and compaction may succeed, so reset the cache.
7031 		 */
7032 		reset_isolation_suitable(pgdat);
7033 
7034 		/*
7035 		 * We have freed the memory, now we should compact it to make
7036 		 * allocation of the requested order possible.
7037 		 */
7038 		wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
7039 
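		/*
		 * Doze for up to HZ/10 jiffies (roughly 100ms). A non-zero
		 * return from schedule_timeout() means the wait was cut short
		 * by a wakeup.
		 */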
7040 		remaining = schedule_timeout(HZ/10);
7041 
7042 		/*
7043 		 * If woken prematurely then reset kswapd_highest_zoneidx and
7044 		 * order. The values will either be from a wakeup request or
7045 		 * the previous request that slept prematurely.
7046 		 */
7047 		if (remaining) {
7048 			WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
7049 					kswapd_highest_zoneidx(pgdat,
7050 							highest_zoneidx));
7051 
7052 			if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
7053 				WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
7054 		}
7055 
7056 		finish_wait(&pgdat->kswapd_wait, &wait);
7057 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7058 	}
7059 
7060 	/*
7061 	 * After a short sleep, check if it was a premature sleep. If not, then
7062 	 * go fully to sleep until explicitly woken up.
7063 	 */
7064 	if (!remaining &&
7065 	    prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7066 		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
7067 
7068 		/*
7069 		 * vmstat counters are not perfectly accurate and the estimated
7070 		 * value for counters such as NR_FREE_PAGES can deviate from the
7071 		 * true value by nr_online_cpus * threshold. To avoid the zone
7072 		 * watermarks being breached while under pressure, we reduce the
7073 		 * per-cpu vmstat threshold while kswapd is awake and restore
7074 		 * them before going back to sleep.
7075 		 */
7076 		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
7077 
7078 		if (!kthread_should_stop())
7079 			schedule();
7080 
7081 		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
7082 	} else {
7083 		if (remaining)
7084 			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
7085 		else
7086 			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
7087 	}
7088 	finish_wait(&pgdat->kswapd_wait, &wait);
7089 }
7090 
7091 /*
7092  * The background pageout daemon, started as a kernel thread
7093  * from the init process.
7094  *
7095  * This basically trickles out pages so that we have _some_
7096  * free memory available even if there is no other activity
7097  * that frees anything up. This is needed for things like routing
7098  * etc, where we otherwise might have all activity going on in
7099  * asynchronous contexts that cannot page things out.
7100  *
7101  * If there are applications that are active memory-allocators
7102  * (most normal use), this basically shouldn't matter.
7103  */
7104 int kswapd(void *p)
7105 {
7106 	unsigned int alloc_order, reclaim_order;
7107 	unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
7108 	pg_data_t *pgdat = (pg_data_t *)p;
7109 	struct task_struct *tsk = current;
7110 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
7111 
7112 	if (!cpumask_empty(cpumask))
7113 		set_cpus_allowed_ptr(tsk, cpumask);
7114 
7115 	/*
7116 	 * Tell the memory management that we're a "memory allocator",
7117 	 * and that if we need more memory we should get access to it
7118 	 * regardless (see "__alloc_pages()"). "kswapd" should
7119 	 * never get caught in the normal page freeing logic.
7120 	 *
7121 	 * (Kswapd normally doesn't need memory anyway, but sometimes
7122 	 * you need a small amount of memory in order to be able to
7123 	 * page out something else, and this flag essentially protects
7124 	 * us from recursively trying to free more memory as we're
7125 	 * trying to free the first piece of memory in the first place).
7126 	 */
7127 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
7128 	set_freezable();
7129 
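	/*
	 * MAX_NR_ZONES is not a valid zone index and serves as the "no wakeup
	 * request pending" sentinel for kswapd_highest_zoneidx.
	 */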
7130 	WRITE_ONCE(pgdat->kswapd_order, 0);
7131 	WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7132 	for ( ; ; ) {
7133 		bool ret;
7134 
7135 		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
7136 		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7137 							highest_zoneidx);
7138 
7139 kswapd_try_sleep:
7140 		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
7141 					highest_zoneidx);
7142 
7143 		/* Read the new order and highest_zoneidx */
7144 		alloc_order = READ_ONCE(pgdat->kswapd_order);
7145 		highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7146 							highest_zoneidx);
7147 		WRITE_ONCE(pgdat->kswapd_order, 0);
7148 		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7149 
7150 		ret = try_to_freeze();
7151 		if (kthread_should_stop())
7152 			break;
7153 
7154 		/*
7155 		 * We can speed up thawing tasks if we don't call balance_pgdat
7156 		 * after returning from the refrigerator
7157 		 */
7158 		if (ret)
7159 			continue;
7160 
7161 		/*
7162 		 * Reclaim begins at the requested order but if a high-order
7163 		 * reclaim fails then kswapd falls back to reclaiming for
7164 		 * order-0. If that happens, kswapd will consider sleeping
7165 		 * for the order it finished reclaiming at (reclaim_order)
7166 		 * but kcompactd is woken to compact for the original
7167 		 * request (alloc_order).
7168 		 */
7169 		trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
7170 						alloc_order);
7171 		reclaim_order = balance_pgdat(pgdat, alloc_order,
7172 						highest_zoneidx);
7173 		trace_android_vh_vmscan_kswapd_done(pgdat->node_id, highest_zoneidx,
7174 						alloc_order, reclaim_order);
7175 		if (reclaim_order < alloc_order)
7176 			goto kswapd_try_sleep;
7177 	}
7178 
7179 	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
7180 
7181 	return 0;
7182 }
7183 EXPORT_SYMBOL_GPL(kswapd);
7184 
7185 /*
7186  * A zone is low on free memory or too fragmented for high-order memory.  If
7187  * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
7188  * pgdat.  It will wake up kcompactd after reclaiming memory.  If kswapd reclaim
7189  * has failed or is not needed, still wake up kcompactd if only compaction is
7190  * needed.
7191  */
7192 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
7193 		   enum zone_type highest_zoneidx)
7194 {
7195 	pg_data_t *pgdat;
7196 	enum zone_type curr_idx;
7197 
7198 	if (!managed_zone(zone))
7199 		return;
7200 
7201 	if (!cpuset_zone_allowed(zone, gfp_flags))
7202 		return;
7203 
7204 	pgdat = zone->zone_pgdat;
7205 	curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7206 
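	/*
	 * Record the highest zone index and the largest order requested since
	 * kswapd last consumed these hints; kswapd reads and resets them when
	 * it wakes.
	 */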
7207 	if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
7208 		WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
7209 
7210 	if (READ_ONCE(pgdat->kswapd_order) < order)
7211 		WRITE_ONCE(pgdat->kswapd_order, order);
7212 
7213 	if (!waitqueue_active(&pgdat->kswapd_wait))
7214 		return;
7215 
7216 	/* Hopeless node, leave it to direct reclaim if possible */
7217 	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
7218 	    (pgdat_balanced(pgdat, order, highest_zoneidx) &&
7219 	     !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
7220 		/*
7221 		 * There may be plenty of free memory available, but it's too
7222 		 * fragmented for high-order allocations.  Wake up kcompactd
7223 		 * and rely on compaction_suitable() to determine if it's
7224 		 * needed.  If it fails, it will defer subsequent attempts to
7225 		 * ratelimit its work.
7226 		 */
7227 		if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
7228 			wakeup_kcompactd(pgdat, order, highest_zoneidx);
7229 		return;
7230 	}
7231 
7232 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
7233 				      gfp_flags);
7234 	wake_up_interruptible(&pgdat->kswapd_wait);
7235 }
7236 
7237 #ifdef CONFIG_HIBERNATION
7238 /*
7239  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
7240  * freed pages.
7241  *
7242  * Rather than trying to age LRUs the aim is to preserve the overall
7243  * LRU order by reclaiming preferentially
7244  * inactive > active > active referenced > active mapped
7245  */
7246 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
7247 {
7248 	struct scan_control sc = {
7249 		.nr_to_reclaim = nr_to_reclaim,
7250 		.gfp_mask = GFP_HIGHUSER_MOVABLE,
7251 		.reclaim_idx = MAX_NR_ZONES - 1,
7252 		.priority = DEF_PRIORITY,
7253 		.may_writepage = 1,
7254 		.may_unmap = 1,
7255 		.may_swap = 1,
7256 		.hibernation_mode = 1,
7257 	};
7258 	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
7259 	unsigned long nr_reclaimed;
7260 	unsigned int noreclaim_flag;
7261 
7262 	fs_reclaim_acquire(sc.gfp_mask);
7263 	noreclaim_flag = memalloc_noreclaim_save();
7264 	set_task_reclaim_state(current, &sc.reclaim_state);
7265 
7266 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
7267 
7268 	set_task_reclaim_state(current, NULL);
7269 	memalloc_noreclaim_restore(noreclaim_flag);
7270 	fs_reclaim_release(sc.gfp_mask);
7271 
7272 	return nr_reclaimed;
7273 }
7274 #endif /* CONFIG_HIBERNATION */
7275 
7276 /*
7277  * This kswapd start function will be called by init and node-hot-add.
7278  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
7279  */
7280 void kswapd_run(int nid)
7281 {
7282 	pg_data_t *pgdat = NODE_DATA(nid);
7283 	bool skip = false;
7284 
7285 	if (pgdat->kswapd)
7286 		return;
7287 
7288 	trace_android_vh_kswapd_per_node(nid, &skip, true);
7289 	if (skip)
7290 		return;
7291 	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
7292 	if (IS_ERR(pgdat->kswapd)) {
7293 		/* failure at boot is fatal */
7294 		BUG_ON(system_state < SYSTEM_RUNNING);
7295 		pr_err("Failed to start kswapd on node %d\n", nid);
7296 		pgdat->kswapd = NULL;
7297 	}
7298 }
7299 
7300 /*
7301  * Called by memory hotplug when all memory in a node is offlined.  Caller must
7302  * hold mem_hotplug_begin/end().
7303  */
7304 void kswapd_stop(int nid)
7305 {
7306 	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
7307 	bool skip = false;
7308 
7309 	trace_android_vh_kswapd_per_node(nid, &skip, false);
7310 	if (skip)
7311 		return;
7312 	if (kswapd) {
7313 		kthread_stop(kswapd);
7314 		NODE_DATA(nid)->kswapd = NULL;
7315 	}
7316 }
7317 
7318 static int __init kswapd_init(void)
7319 {
7320 	int nid;
7321 
7322 	swap_setup();
7323 	for_each_node_state(nid, N_MEMORY)
7324 		kswapd_run(nid);
7325 	return 0;
7326 }
7327 
7328 module_init(kswapd_init)
7329 
7330 #ifdef CONFIG_NUMA
7331 /*
7332  * Node reclaim mode
7333  *
7334  * If non-zero call node_reclaim when the number of free pages falls below
7335  * the watermarks.
7336  */
7337 int node_reclaim_mode __read_mostly;
7338 
7339 /*
7340  * Priority for NODE_RECLAIM. This determines the fraction of pages
7341  * of a node considered for each zone_reclaim. 4 scans 1/16th of
7342  * a zone.
7343  */
7344 #define NODE_RECLAIM_PRIORITY 4
7345 
7346 /*
7347  * Percentage of pages in a zone that must be unmapped for node_reclaim to
7348  * occur.
7349  */
7350 int sysctl_min_unmapped_ratio = 1;
7351 
7352 /*
7353  * If the number of slab pages in a zone grows beyond this percentage then
7354  * slab reclaim needs to occur.
7355  */
7356 int sysctl_min_slab_ratio = 5;
7357 
7358 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
7359 {
7360 	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
7361 	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
7362 		node_page_state(pgdat, NR_ACTIVE_FILE);
7363 
7364 	/*
7365 	 * It's possible for there to be more file mapped pages than
7366 	 * accounted for by the pages on the file LRU lists because
7367 	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
7368 	 */
7369 	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
7370 }
7371 
7372 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
7373 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
7374 {
7375 	unsigned long nr_pagecache_reclaimable;
7376 	unsigned long delta = 0;
7377 
7378 	/*
7379 	 * If RECLAIM_UNMAP is set, then all file pages are considered
7380 	 * potentially reclaimable. Otherwise, we have to worry about
7381 	 * pages like swapcache, and node_unmapped_file_pages() provides
7382 	 * a better estimate.
7383 	 */
7384 	if (node_reclaim_mode & RECLAIM_UNMAP)
7385 		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
7386 	else
7387 		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
7388 
7389 	/* If we can't clean pages, remove dirty pages from consideration */
7390 	if (!(node_reclaim_mode & RECLAIM_WRITE))
7391 		delta += node_page_state(pgdat, NR_FILE_DIRTY);
7392 
7393 	/* Watch for any possible underflows due to delta */
7394 	if (unlikely(delta > nr_pagecache_reclaimable))
7395 		delta = nr_pagecache_reclaimable;
7396 
7397 	return nr_pagecache_reclaimable - delta;
7398 }
7399 
7400 /*
7401  * Try to free up some pages from this node through reclaim.
7402  */
7403 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7404 {
7405 	/* Minimum pages needed in order to stay on node */
7406 	const unsigned long nr_pages = 1 << order;
7407 	struct task_struct *p = current;
7408 	unsigned int noreclaim_flag;
7409 	struct scan_control sc = {
7410 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
7411 		.gfp_mask = current_gfp_context(gfp_mask),
7412 		.order = order,
7413 		.priority = NODE_RECLAIM_PRIORITY,
7414 		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
7415 		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
7416 		.may_swap = 1,
7417 		.reclaim_idx = gfp_zone(gfp_mask),
7418 	};
7419 	unsigned long pflags;
7420 
7421 	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
7422 					   sc.gfp_mask);
7423 
7424 	cond_resched();
7425 	psi_memstall_enter(&pflags);
7426 	fs_reclaim_acquire(sc.gfp_mask);
7427 	/*
7428 	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
7429 	 * and we also need to be able to write out pages for RECLAIM_WRITE
7430 	 * and RECLAIM_UNMAP.
7431 	 */
7432 	noreclaim_flag = memalloc_noreclaim_save();
7433 	p->flags |= PF_SWAPWRITE;
7434 	set_task_reclaim_state(p, &sc.reclaim_state);
7435 
7436 	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
7437 		/*
7438 		 * Free memory by calling shrink node with increasing
7439 		 * priorities until we have enough memory freed.
7440 		 */
7441 		do {
7442 			shrink_node(pgdat, &sc);
7443 		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
7444 	}
7445 
7446 	set_task_reclaim_state(p, NULL);
7447 	current->flags &= ~PF_SWAPWRITE;
7448 	memalloc_noreclaim_restore(noreclaim_flag);
7449 	fs_reclaim_release(sc.gfp_mask);
7450 	psi_memstall_leave(&pflags);
7451 
7452 	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
7453 
7454 	return sc.nr_reclaimed >= nr_pages;
7455 }
7456 
7457 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7458 {
7459 	int ret;
7460 
7461 	/*
7462 	 * Node reclaim reclaims unmapped file backed pages and
7463 	 * slab pages if we are over the defined limits.
7464 	 *
7465 	 * A small portion of unmapped file backed pages is needed for
7466 	 * file I/O; otherwise pages read by file I/O will be immediately
7467 	 * thrown out if the node is overallocated. So we do not reclaim
7468 	 * if less than a specified percentage of the node is used by
7469 	 * unmapped file backed pages.
7470 	 */
7471 	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
7472 	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
7473 	    pgdat->min_slab_pages)
7474 		return NODE_RECLAIM_FULL;
7475 
7476 	/*
7477 	 * Do not scan if the allocation should not be delayed.
7478 	 */
7479 	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
7480 		return NODE_RECLAIM_NOSCAN;
7481 
7482 	/*
7483 	 * Only run node reclaim on the local node or on nodes that do not
7484 	 * have associated processors. This will favor the local processor
7485 	 * over remote processors and spread off node memory allocations
7486 	 * as wide as possible.
7487 	 */
7488 	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
7489 		return NODE_RECLAIM_NOSCAN;
7490 
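	/*
	 * PGDAT_RECLAIM_LOCKED serialises node reclaim: only one task runs
	 * __node_reclaim() on this node at a time, everyone else backs off
	 * with NODE_RECLAIM_NOSCAN.
	 */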
7491 	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
7492 		return NODE_RECLAIM_NOSCAN;
7493 
7494 	ret = __node_reclaim(pgdat, gfp_mask, order);
7495 	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
7496 
7497 	if (!ret)
7498 		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
7499 
7500 	return ret;
7501 }
7502 #endif
7503 
7504 /**
7505  * check_move_unevictable_pages - check pages for evictability and move to
7506  * appropriate zone lru list
7507  * @pvec: pagevec with lru pages to check
7508  *
7509  * Checks pages for evictability, if an evictable page is in the unevictable
7510  * lru list, moves it to the appropriate evictable lru list. This function
7511  * should be only used for lru pages.
7512  */
7513 void check_move_unevictable_pages(struct pagevec *pvec)
7514 {
7515 	struct lruvec *lruvec = NULL;
7516 	int pgscanned = 0;
7517 	int pgrescued = 0;
7518 	int i;
7519 
7520 	for (i = 0; i < pvec->nr; i++) {
7521 		struct page *page = pvec->pages[i];
7522 		int nr_pages;
7523 
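		/*
		 * Tail pages of a compound page carry no LRU state of their
		 * own; the head page is checked and accounts for all of them
		 * via thp_nr_pages().
		 */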
7524 		if (PageTransTail(page))
7525 			continue;
7526 
7527 		nr_pages = thp_nr_pages(page);
7528 		pgscanned += nr_pages;
7529 
7530 		/* block memcg migration during page moving between lru */
7531 		if (!TestClearPageLRU(page))
7532 			continue;
7533 
7534 		lruvec = relock_page_lruvec_irq(page, lruvec);
7535 		if (page_evictable(page) && PageUnevictable(page)) {
7536 			del_page_from_lru_list(page, lruvec);
7537 			ClearPageUnevictable(page);
7538 			add_page_to_lru_list(page, lruvec);
7539 			pgrescued += nr_pages;
7540 		}
7541 		SetPageLRU(page);
7542 	}
7543 
7544 	if (lruvec) {
7545 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
7546 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7547 		unlock_page_lruvec_irq(lruvec);
7548 	} else if (pgscanned) {
7549 		count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7550 	}
7551 }
7552 EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
7553