// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;
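
/*
 * Roughly speaking, page_cluster is the log2 of the swap readahead window:
 * swapin readahead brings in up to 1 << page_cluster pages at a time, so
 * the default of 3 chosen in swap_setup() below means clusters of up to
 * 8 pages (and 4 pages, via page_cluster == 2, on small-memory machines).
 * It is tunable through the vm.page-cluster sysctl.
 */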

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following pagevecs are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled).
 */
struct lru_pvecs {
	local_lock_t lock;
	struct pagevec lru_add;
	struct pagevec lru_deactivate_file;
	struct pagevec lru_deactivate;
	struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
	struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&pgdat->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
	__ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page);
	free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never added to any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru. Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = lru_to_page(pages);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
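
/*
 * Illustrative sequence (not taken from a real caller): instead of dropping
 * references one by one with put_page(), a caller that collected pages on a
 * private list can release them in a single call:
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);	// for each page, while holding a ref
 *	...
 *	put_pages_list(&pages);		// drops every ref, empties the list
 */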

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);
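
/*
 * A minimal usage sketch (illustrative only): pin the kernel pages backing
 * two page-sized buffers, then drop the references again:
 *
 *	struct kvec kiov[2] = {
 *		{ .iov_base = buf0, .iov_len = PAGE_SIZE },
 *		{ .iov_base = buf1, .iov_len = PAGE_SIZE },
 *	};
 *	struct page *pages[2];
 *	int i, nr = get_kernel_pages(kiov, 2, 0, pages);
 *
 *	for (i = 0; i < nr; i++)
 *		put_page(pages[i]);
 */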

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must have space for at least one page.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved) += thp_nr_pages(page);
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_lock_irqsave(&lru_rotate.lock, flags);
		pvec = this_cpu_ptr(&lru_rotate.pvec);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}
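
/*
 * A note on the batching idiom used above and in several functions below
 * (illustrative, summarising existing behaviour): pagevec_add() returns the
 * number of slots still free, so
 *
 *	if (!pagevec_add(pvec, page) || PageCompound(page))
 *		<drain the pagevec>;
 *
 * drains the per-cpu pagevec either once it has just become full or right
 * away for compound pages, which are not worth keeping batched.
 */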

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
	} while ((lruvec = parent_lruvec(lruvec)));
}
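
/*
 * Worked example of the decay above (illustrative numbers): with an LRU of
 * lrusize = 100000 pages, the costs are halved whenever their sum exceeds
 * 25000.  Stale file cost of 20000 therefore shrinks to 10000, then 5000,
 * and so on, as newly recorded anon cost keeps pushing the sum over the
 * threshold, so recent refaults quickly dominate the file/anon ratio used
 * for reclaim balancing.
 */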

void lru_note_cost_page(struct page *page)
{
#ifdef CONFIG_HYPERHOLD_FILE_LRU
	if (page_is_file_lru(page)) {
		lru_note_cost(&(page_pgdat(page)->__lruvec), 1, thp_nr_pages(page));
		return;
	}
#endif
	lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
		      page_is_file_lru(page), thp_nr_pages(page));
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int lru = page_lru_base_type(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void activate_page(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	page = compound_head(page);
	spin_lock_irq(&pgdat->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
	spin_unlock_irq(&pgdat->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec;
	int i;

	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible to others (and thus safe
 * for non-atomic ops), __SetPageReferenced(page) may be substituted for
 * mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);

	if (!PageReferenced(page)) {
		SetPageReferenced(page);
	} else if (PageUnevictable(page)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!PageActive(page)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		workingset_activation(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec;

	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);

	get_page(page);
	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(lru_cache_add);

/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
					   struct vm_area_struct *vma)
{
	bool unevictable;

	VM_BUG_ON_PAGE(PageLRU(page), page);

	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}
	lru_cache_add(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty/under writeback, it can be
 * reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by the flusher threads, which is
 * much more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
				   void *arg)
{
	int lru;
	bool active;
	int nr_pages = thp_nr_pages(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim can race with end_page_writeback(),
		 * which can confuse readahead.  But the race window is
		 * really small and it is a non-critical problem.
		 */
		add_page_to_lru_list(page, lruvec, lru);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it sat in the pagevec;
		 * move the page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec, lru);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int lru = page_lru_base_type(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have the
		 * PG_swapbacked flag cleared, to distinguish them from
		 * normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate.pvec, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(pagevec_count(pvec))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		pagevec_move_tail(pvec);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages, such as one using
	 * mprotect, deactivating unevictable pages to accelerate reclaim
	 * is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

void lru_add_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&lru_pvecs.lock);
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
void lru_add_drain_all(void)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee pagevec counter stores visible by this CPU are visible to
	 * other CPUs before loading the current drain generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken. It pairs with smp_mb() inside the mutex critical section
	 * at (D).
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the new global
	 * drain generation number is stored before loading pagevec counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
	 * added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif /* CONFIG_SMP */

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same pgdat. The lock is held only if pgdat != NULL.
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		page = compound_head(page);
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
						       flags);
				locked_pgdat = NULL;
			}
			/*
			 * ZONE_DEVICE pages that return 'false' from
			 * page_is_devmap_managed() do not require special
			 * processing, and instead, expect a call to
			 * put_page_testzero().
			 */
			if (page_is_devmap_managed(page)) {
				put_devmap_managed_page(page);
				continue;
			}
		}

		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
							       flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Put page_tail on the list at the correct position
		 * so they all end up in order.
		 */
		add_page_to_lru_list_tail(page_tail, lruvec,
					  page_lru(page_tail));
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);
	int nr_pages = thp_nr_pages(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	/*
	 * Page becomes evictable in two ways:
	 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
	 *    a) do PageLRU check with lock [check_move_unevictable_pages]
	 *    b) do PageLRU check before lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
	 * following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 *
	 * if '#1' does not observe setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier will make sure that page_evictable
	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
	 * can be reordered after PageMlocked check and can make '#1' to fail
	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
	 * looking at the same page) and the evictable page will be stranded
	 * in an unevictable LRU.
	 */
	SetPageLRU(page);
	smp_mb__after_atomic();

	if (page_evictable(page)) {
		lru = page_lru(page);
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * Only one subpage of a Transparent Huge Page is returned in one call:
 * allowing truncate_inode_pages_range() to evict the whole THP without
 * cycling through a pagevec of extra references.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (!xa_is_value(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}
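
/*
 * Illustrative sequence (not lifted from a specific caller): look up one
 * batch of entries starting at @index, drop the shadow entries, and then
 * work on real pages only:
 *
 *	pgoff_t indices[PAGEVEC_SIZE];
 *	struct pagevec pvec;
 *	unsigned i;
 *
 *	pagevec_init(&pvec);
 *	if (pagevec_lookup_entries(&pvec, mapping, index,
 *				   PAGEVEC_SIZE, indices)) {
 *		pagevec_remove_exceptionals(&pvec);
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			...use pvec.pages[i]...
 *		pagevec_release(&pvec);
 *	}
 */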

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
 * pages in the mapping starting from index @start and up to index @end
 * (inclusive).  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If this
 * number is smaller than PAGEVEC_SIZE, the end of specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);
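
/*
 * Typical traversal pattern (a sketch, not copied from a particular caller):
 * @start is advanced by the lookup itself, so a range can be walked with a
 * simple loop:
 *
 *	pgoff_t index = first;
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, last)) {
 *		...operate on pvec.pages[0..pagevec_count(&pvec))...
 *		pagevec_release(&pvec);
 *	}
 */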

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					    PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag, unsigned max_pages)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}

#ifdef CONFIG_DEV_PAGEMAP_OPS
void put_devmap_managed_page(struct page *page)
{
	int count;

	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
		return;

	count = page_ref_dec_return(page);

	/*
	 * devmap page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (count == 1)
		free_devmap_managed_page(page);
	else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif